Merge pull request #6380 from ellisonbg/latex-complete...
Thomas Kluyver
r17812:3b47a9b4 merge
@@ -0,0 +1,1297 b''
1 # encoding: utf-8
2
3 # DO NOT EDIT THIS FILE BY HAND.
4
5 # To update this file, run the script /tools/gen_latex_symbols.py using Python 3
6
7 # This file is autogenerated from the file:
8 # https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
9 # This original list is filtered to remove any unicode characters that are not valid
10 # Python identifiers.
11
12 latex_symbols = {
13
14 "\\^a" : "ᵃ",
15 "\\^b" : "ᵇ",
16 "\\^c" : "ᶜ",
17 "\\^d" : "ᵈ",
18 "\\^e" : "ᵉ",
19 "\\^f" : "ᶠ",
20 "\\^g" : "ᵍ",
21 "\\^h" : "ʰ",
22 "\\^i" : "ⁱ",
23 "\\^j" : "ʲ",
24 "\\^k" : "ᵏ",
25 "\\^l" : "ˡ",
26 "\\^m" : "ᵐ",
27 "\\^n" : "ⁿ",
28 "\\^o" : "ᵒ",
29 "\\^p" : "ᵖ",
30 "\\^r" : "ʳ",
31 "\\^s" : "ˢ",
32 "\\^t" : "ᵗ",
33 "\\^u" : "ᵘ",
34 "\\^v" : "ᵛ",
35 "\\^w" : "ʷ",
36 "\\^x" : "ˣ",
37 "\\^y" : "ʸ",
38 "\\^z" : "ᶻ",
39 "\\^A" : "ᴬ",
40 "\\^B" : "ᴮ",
41 "\\^D" : "ᴰ",
42 "\\^E" : "ᴱ",
43 "\\^G" : "ᴳ",
44 "\\^H" : "ᴴ",
45 "\\^I" : "ᴵ",
46 "\\^J" : "ᴶ",
47 "\\^K" : "ᴷ",
48 "\\^L" : "ᴸ",
49 "\\^M" : "ᴹ",
50 "\\^N" : "ᴺ",
51 "\\^O" : "ᴼ",
52 "\\^P" : "ᴾ",
53 "\\^R" : "ᴿ",
54 "\\^T" : "ᵀ",
55 "\\^U" : "ᵁ",
56 "\\^V" : "ⱽ",
57 "\\^W" : "ᵂ",
58 "\\^alpha" : "ᵅ",
59 "\\^beta" : "ᵝ",
60 "\\^gamma" : "ᵞ",
61 "\\^delta" : "ᵟ",
62 "\\^epsilon" : "ᵋ",
63 "\\^theta" : "ᶿ",
64 "\\^iota" : "ᶥ",
65 "\\^phi" : "ᵠ",
66 "\\^chi" : "ᵡ",
67 "\\^Phi" : "ᶲ",
68 "\\_a" : "ₐ",
69 "\\_e" : "ₑ",
70 "\\_h" : "ₕ",
71 "\\_i" : "ᵢ",
72 "\\_j" : "ⱼ",
73 "\\_k" : "ₖ",
74 "\\_l" : "ₗ",
75 "\\_m" : "ₘ",
76 "\\_n" : "ₙ",
77 "\\_o" : "ₒ",
78 "\\_p" : "ₚ",
79 "\\_r" : "ᵣ",
80 "\\_s" : "ₛ",
81 "\\_t" : "ₜ",
82 "\\_u" : "ᵤ",
83 "\\_v" : "ᵥ",
84 "\\_x" : "ₓ",
85 "\\_schwa" : "ₔ",
86 "\\_beta" : "ᵦ",
87 "\\_gamma" : "ᵧ",
88 "\\_rho" : "ᵨ",
89 "\\_phi" : "ᵩ",
90 "\\_chi" : "ᵪ",
91 "\\hbar" : "ħ",
92 "\\sout" : "̶",
93 "\\textordfeminine" : "ª",
94 "\\cdotp" : "·",
95 "\\textordmasculine" : "º",
96 "\\AA" : "Å",
97 "\\AE" : "Æ",
98 "\\DH" : "Ð",
99 "\\O" : "Ø",
100 "\\TH" : "Þ",
101 "\\ss" : "ß",
102 "\\aa" : "å",
103 "\\ae" : "æ",
104 "\\eth" : "ð",
105 "\\o" : "ø",
106 "\\th" : "þ",
107 "\\DJ" : "Đ",
108 "\\dj" : "đ",
109 "\\Elzxh" : "ħ",
110 "\\imath" : "ı",
111 "\\L" : "Ł",
112 "\\l" : "ł",
113 "\\NG" : "Ŋ",
114 "\\ng" : "ŋ",
115 "\\OE" : "Œ",
116 "\\oe" : "œ",
117 "\\texthvlig" : "ƕ",
118 "\\textnrleg" : "ƞ",
119 "\\textdoublepipe" : "ǂ",
120 "\\Elztrna" : "ɐ",
121 "\\Elztrnsa" : "ɒ",
122 "\\Elzopeno" : "ɔ",
123 "\\Elzrtld" : "ɖ",
124 "\\Elzschwa" : "ə",
125 "\\varepsilon" : "ɛ",
126 "\\Elzpgamma" : "ɣ",
127 "\\Elzpbgam" : "ɤ",
128 "\\Elztrnh" : "ɥ",
129 "\\Elzbtdl" : "ɬ",
130 "\\Elzrtll" : "ɭ",
131 "\\Elztrnm" : "ɯ",
132 "\\Elztrnmlr" : "ɰ",
133 "\\Elzltlmr" : "ɱ",
134 "\\Elzltln" : "ɲ",
135 "\\Elzrtln" : "ɳ",
136 "\\Elzclomeg" : "ɷ",
137 "\\textphi" : "ɸ",
138 "\\Elztrnr" : "ɹ",
139 "\\Elztrnrl" : "ɺ",
140 "\\Elzrttrnr" : "ɻ",
141 "\\Elzrl" : "ɼ",
142 "\\Elzrtlr" : "ɽ",
143 "\\Elzfhr" : "ɾ",
144 "\\Elzrtls" : "ʂ",
145 "\\Elzesh" : "ʃ",
146 "\\Elztrnt" : "ʇ",
147 "\\Elzrtlt" : "ʈ",
148 "\\Elzpupsil" : "ʊ",
149 "\\Elzpscrv" : "ʋ",
150 "\\Elzinvv" : "ʌ",
151 "\\Elzinvw" : "ʍ",
152 "\\Elztrny" : "ʎ",
153 "\\Elzrtlz" : "ʐ",
154 "\\Elzyogh" : "ʒ",
155 "\\Elzglst" : "ʔ",
156 "\\Elzreglst" : "ʕ",
157 "\\Elzinglst" : "ʖ",
158 "\\textturnk" : "ʞ",
159 "\\Elzdyogh" : "ʤ",
160 "\\Elztesh" : "ʧ",
161 "\\rasp" : "ʼ",
162 "\\textasciicaron" : "ˇ",
163 "\\Elzverts" : "ˈ",
164 "\\Elzverti" : "ˌ",
165 "\\Elzlmrk" : "ː",
166 "\\Elzhlmrk" : "ˑ",
167 "\\grave" : "̀",
168 "\\acute" : "́",
169 "\\hat" : "̂",
170 "\\tilde" : "̃",
171 "\\bar" : "̄",
172 "\\breve" : "̆",
173 "\\dot" : "̇",
174 "\\ddot" : "̈",
175 "\\ocirc" : "̊",
176 "\\H" : "̋",
177 "\\check" : "̌",
178 "\\Elzpalh" : "̡",
179 "\\Elzrh" : "̢",
180 "\\c" : "̧",
181 "\\k" : "̨",
182 "\\Elzsbbrg" : "̪",
183 "\\Elzxl" : "̵",
184 "\\Elzbar" : "̶",
185 "\\Alpha" : "Α",
186 "\\Beta" : "Β",
187 "\\Gamma" : "Γ",
188 "\\Delta" : "Δ",
189 "\\Epsilon" : "Ε",
190 "\\Zeta" : "Ζ",
191 "\\Eta" : "Η",
192 "\\Theta" : "Θ",
193 "\\Iota" : "Ι",
194 "\\Kappa" : "Κ",
195 "\\Lambda" : "Λ",
196 "\\Xi" : "Ξ",
197 "\\Pi" : "Π",
198 "\\Rho" : "Ρ",
199 "\\Sigma" : "Σ",
200 "\\Tau" : "Τ",
201 "\\Upsilon" : "Υ",
202 "\\Phi" : "Φ",
203 "\\Chi" : "Χ",
204 "\\Psi" : "Ψ",
205 "\\Omega" : "Ω",
206 "\\alpha" : "α",
207 "\\beta" : "β",
208 "\\gamma" : "γ",
209 "\\delta" : "δ",
210 "\\zeta" : "ζ",
211 "\\eta" : "η",
212 "\\theta" : "θ",
213 "\\iota" : "ι",
214 "\\kappa" : "κ",
215 "\\lambda" : "λ",
216 "\\mu" : "μ",
217 "\\nu" : "ν",
218 "\\xi" : "ξ",
219 "\\pi" : "π",
220 "\\rho" : "ρ",
221 "\\varsigma" : "ς",
222 "\\sigma" : "σ",
223 "\\tau" : "τ",
224 "\\upsilon" : "υ",
225 "\\varphi" : "φ",
226 "\\chi" : "χ",
227 "\\psi" : "ψ",
228 "\\omega" : "ω",
229 "\\vartheta" : "ϑ",
230 "\\phi" : "ϕ",
231 "\\varpi" : "ϖ",
232 "\\Stigma" : "Ϛ",
233 "\\Digamma" : "Ϝ",
234 "\\digamma" : "ϝ",
235 "\\Koppa" : "Ϟ",
236 "\\Sampi" : "Ϡ",
237 "\\varkappa" : "ϰ",
238 "\\varrho" : "ϱ",
239 "\\textTheta" : "ϴ",
240 "\\epsilon" : "ϵ",
241 "\\dddot" : "⃛",
242 "\\ddddot" : "⃜",
243 "\\hslash" : "ℏ",
244 "\\Im" : "ℑ",
245 "\\ell" : "ℓ",
246 "\\wp" : "℘",
247 "\\Re" : "ℜ",
248 "\\aleph" : "ℵ",
249 "\\beth" : "ℶ",
250 "\\gimel" : "ℷ",
251 "\\daleth" : "ℸ",
252 "\\BbbPi" : "ℿ",
253 "\\Zbar" : "Ƶ",
254 "\\overbar" : "̅",
255 "\\ovhook" : "̉",
256 "\\candra" : "̐",
257 "\\oturnedcomma" : "̒",
258 "\\ocommatopright" : "̕",
259 "\\droang" : "̚",
260 "\\wideutilde" : "̰",
261 "\\underbar" : "̱",
262 "\\not" : "̸",
263 "\\upMu" : "Μ",
264 "\\upNu" : "Ν",
265 "\\upOmicron" : "Ο",
266 "\\upepsilon" : "ε",
267 "\\upomicron" : "ο",
268 "\\upvarbeta" : "ϐ",
269 "\\upoldKoppa" : "Ϙ",
270 "\\upoldkoppa" : "ϙ",
271 "\\upstigma" : "ϛ",
272 "\\upkoppa" : "ϟ",
273 "\\upsampi" : "ϡ",
274 "\\tieconcat" : "⁀",
275 "\\leftharpoonaccent" : "⃐",
276 "\\rightharpoonaccent" : "⃑",
277 "\\vertoverlay" : "⃒",
278 "\\overleftarrow" : "⃖",
279 "\\vec" : "⃗",
280 "\\overleftrightarrow" : "⃡",
281 "\\annuity" : "⃧",
282 "\\threeunderdot" : "⃨",
283 "\\widebridgeabove" : "⃩",
284 "\\BbbC" : "ℂ",
285 "\\Eulerconst" : "ℇ",
286 "\\mscrg" : "ℊ",
287 "\\mscrH" : "ℋ",
288 "\\mfrakH" : "ℌ",
289 "\\BbbH" : "ℍ",
290 "\\Planckconst" : "ℎ",
291 "\\mscrI" : "ℐ",
292 "\\mscrL" : "ℒ",
293 "\\BbbN" : "ℕ",
294 "\\BbbP" : "ℙ",
295 "\\BbbQ" : "ℚ",
296 "\\mscrR" : "ℛ",
297 "\\BbbR" : "ℝ",
298 "\\BbbZ" : "ℤ",
299 "\\mfrakZ" : "ℨ",
300 "\\Angstrom" : "Å",
301 "\\mscrB" : "ℬ",
302 "\\mfrakC" : "ℭ",
303 "\\mscre" : "ℯ",
304 "\\mscrE" : "ℰ",
305 "\\mscrF" : "ℱ",
306 "\\Finv" : "Ⅎ",
307 "\\mscrM" : "ℳ",
308 "\\mscro" : "ℴ",
309 "\\Bbbgamma" : "ℽ",
310 "\\BbbGamma" : "ℾ",
311 "\\mitBbbD" : "ⅅ",
312 "\\mitBbbd" : "ⅆ",
313 "\\mitBbbe" : "ⅇ",
314 "\\mitBbbi" : "ⅈ",
315 "\\mitBbbj" : "ⅉ",
316 "\\mbfA" : "𝐀",
317 "\\mbfB" : "𝐁",
318 "\\mbfC" : "𝐂",
319 "\\mbfD" : "𝐃",
320 "\\mbfE" : "𝐄",
321 "\\mbfF" : "𝐅",
322 "\\mbfG" : "𝐆",
323 "\\mbfH" : "𝐇",
324 "\\mbfI" : "𝐈",
325 "\\mbfJ" : "𝐉",
326 "\\mbfK" : "𝐊",
327 "\\mbfL" : "𝐋",
328 "\\mbfM" : "𝐌",
329 "\\mbfN" : "𝐍",
330 "\\mbfO" : "𝐎",
331 "\\mbfP" : "𝐏",
332 "\\mbfQ" : "𝐐",
333 "\\mbfR" : "𝐑",
334 "\\mbfS" : "𝐒",
335 "\\mbfT" : "𝐓",
336 "\\mbfU" : "𝐔",
337 "\\mbfV" : "𝐕",
338 "\\mbfW" : "𝐖",
339 "\\mbfX" : "𝐗",
340 "\\mbfY" : "𝐘",
341 "\\mbfZ" : "𝐙",
342 "\\mbfa" : "𝐚",
343 "\\mbfb" : "𝐛",
344 "\\mbfc" : "𝐜",
345 "\\mbfd" : "𝐝",
346 "\\mbfe" : "𝐞",
347 "\\mbff" : "𝐟",
348 "\\mbfg" : "𝐠",
349 "\\mbfh" : "𝐡",
350 "\\mbfi" : "𝐢",
351 "\\mbfj" : "𝐣",
352 "\\mbfk" : "𝐤",
353 "\\mbfl" : "𝐥",
354 "\\mbfm" : "𝐦",
355 "\\mbfn" : "𝐧",
356 "\\mbfo" : "𝐨",
357 "\\mbfp" : "𝐩",
358 "\\mbfq" : "𝐪",
359 "\\mbfr" : "𝐫",
360 "\\mbfs" : "𝐬",
361 "\\mbft" : "𝐭",
362 "\\mbfu" : "𝐮",
363 "\\mbfv" : "𝐯",
364 "\\mbfw" : "𝐰",
365 "\\mbfx" : "𝐱",
366 "\\mbfy" : "𝐲",
367 "\\mbfz" : "𝐳",
368 "\\mitA" : "𝐴",
369 "\\mitB" : "𝐵",
370 "\\mitC" : "𝐶",
371 "\\mitD" : "𝐷",
372 "\\mitE" : "𝐸",
373 "\\mitF" : "𝐹",
374 "\\mitG" : "𝐺",
375 "\\mitH" : "𝐻",
376 "\\mitI" : "𝐼",
377 "\\mitJ" : "𝐽",
378 "\\mitK" : "𝐾",
379 "\\mitL" : "𝐿",
380 "\\mitM" : "𝑀",
381 "\\mitN" : "𝑁",
382 "\\mitO" : "𝑂",
383 "\\mitP" : "𝑃",
384 "\\mitQ" : "𝑄",
385 "\\mitR" : "𝑅",
386 "\\mitS" : "𝑆",
387 "\\mitT" : "𝑇",
388 "\\mitU" : "𝑈",
389 "\\mitV" : "𝑉",
390 "\\mitW" : "𝑊",
391 "\\mitX" : "𝑋",
392 "\\mitY" : "𝑌",
393 "\\mitZ" : "𝑍",
394 "\\mita" : "𝑎",
395 "\\mitb" : "𝑏",
396 "\\mitc" : "𝑐",
397 "\\mitd" : "𝑑",
398 "\\mite" : "𝑒",
399 "\\mitf" : "𝑓",
400 "\\mitg" : "𝑔",
401 "\\miti" : "𝑖",
402 "\\mitj" : "𝑗",
403 "\\mitk" : "𝑘",
404 "\\mitl" : "𝑙",
405 "\\mitm" : "𝑚",
406 "\\mitn" : "𝑛",
407 "\\mito" : "𝑜",
408 "\\mitp" : "𝑝",
409 "\\mitq" : "𝑞",
410 "\\mitr" : "𝑟",
411 "\\mits" : "𝑠",
412 "\\mitt" : "𝑡",
413 "\\mitu" : "𝑢",
414 "\\mitv" : "𝑣",
415 "\\mitw" : "𝑤",
416 "\\mitx" : "𝑥",
417 "\\mity" : "𝑦",
418 "\\mitz" : "𝑧",
419 "\\mbfitA" : "𝑨",
420 "\\mbfitB" : "𝑩",
421 "\\mbfitC" : "𝑪",
422 "\\mbfitD" : "𝑫",
423 "\\mbfitE" : "𝑬",
424 "\\mbfitF" : "𝑭",
425 "\\mbfitG" : "𝑮",
426 "\\mbfitH" : "𝑯",
427 "\\mbfitI" : "𝑰",
428 "\\mbfitJ" : "𝑱",
429 "\\mbfitK" : "𝑲",
430 "\\mbfitL" : "𝑳",
431 "\\mbfitM" : "𝑴",
432 "\\mbfitN" : "𝑵",
433 "\\mbfitO" : "𝑶",
434 "\\mbfitP" : "𝑷",
435 "\\mbfitQ" : "𝑸",
436 "\\mbfitR" : "𝑹",
437 "\\mbfitS" : "𝑺",
438 "\\mbfitT" : "𝑻",
439 "\\mbfitU" : "𝑼",
440 "\\mbfitV" : "𝑽",
441 "\\mbfitW" : "𝑾",
442 "\\mbfitX" : "𝑿",
443 "\\mbfitY" : "𝒀",
444 "\\mbfitZ" : "𝒁",
445 "\\mbfita" : "𝒂",
446 "\\mbfitb" : "𝒃",
447 "\\mbfitc" : "𝒄",
448 "\\mbfitd" : "𝒅",
449 "\\mbfite" : "𝒆",
450 "\\mbfitf" : "𝒇",
451 "\\mbfitg" : "𝒈",
452 "\\mbfith" : "𝒉",
453 "\\mbfiti" : "𝒊",
454 "\\mbfitj" : "𝒋",
455 "\\mbfitk" : "𝒌",
456 "\\mbfitl" : "𝒍",
457 "\\mbfitm" : "𝒎",
458 "\\mbfitn" : "𝒏",
459 "\\mbfito" : "𝒐",
460 "\\mbfitp" : "𝒑",
461 "\\mbfitq" : "𝒒",
462 "\\mbfitr" : "𝒓",
463 "\\mbfits" : "𝒔",
464 "\\mbfitt" : "𝒕",
465 "\\mbfitu" : "𝒖",
466 "\\mbfitv" : "𝒗",
467 "\\mbfitw" : "𝒘",
468 "\\mbfitx" : "𝒙",
469 "\\mbfity" : "𝒚",
470 "\\mbfitz" : "𝒛",
471 "\\mscrA" : "𝒜",
472 "\\mscrC" : "𝒞",
473 "\\mscrD" : "𝒟",
474 "\\mscrG" : "𝒢",
475 "\\mscrJ" : "𝒥",
476 "\\mscrK" : "𝒦",
477 "\\mscrN" : "𝒩",
478 "\\mscrO" : "𝒪",
479 "\\mscrP" : "𝒫",
480 "\\mscrQ" : "𝒬",
481 "\\mscrS" : "𝒮",
482 "\\mscrT" : "𝒯",
483 "\\mscrU" : "𝒰",
484 "\\mscrV" : "𝒱",
485 "\\mscrW" : "𝒲",
486 "\\mscrX" : "𝒳",
487 "\\mscrY" : "𝒴",
488 "\\mscrZ" : "𝒵",
489 "\\mscra" : "𝒶",
490 "\\mscrb" : "𝒷",
491 "\\mscrc" : "𝒸",
492 "\\mscrd" : "𝒹",
493 "\\mscrf" : "𝒻",
494 "\\mscrh" : "𝒽",
495 "\\mscri" : "𝒾",
496 "\\mscrj" : "𝒿",
497 "\\mscrk" : "𝓀",
498 "\\mscrm" : "𝓂",
499 "\\mscrn" : "𝓃",
500 "\\mscrp" : "𝓅",
501 "\\mscrq" : "𝓆",
502 "\\mscrr" : "𝓇",
503 "\\mscrs" : "𝓈",
504 "\\mscrt" : "𝓉",
505 "\\mscru" : "𝓊",
506 "\\mscrv" : "𝓋",
507 "\\mscrw" : "𝓌",
508 "\\mscrx" : "𝓍",
509 "\\mscry" : "𝓎",
510 "\\mscrz" : "𝓏",
511 "\\mbfscrA" : "𝓐",
512 "\\mbfscrB" : "𝓑",
513 "\\mbfscrC" : "𝓒",
514 "\\mbfscrD" : "𝓓",
515 "\\mbfscrE" : "𝓔",
516 "\\mbfscrF" : "𝓕",
517 "\\mbfscrG" : "𝓖",
518 "\\mbfscrH" : "𝓗",
519 "\\mbfscrI" : "𝓘",
520 "\\mbfscrJ" : "𝓙",
521 "\\mbfscrK" : "𝓚",
522 "\\mbfscrL" : "𝓛",
523 "\\mbfscrM" : "𝓜",
524 "\\mbfscrN" : "𝓝",
525 "\\mbfscrO" : "𝓞",
526 "\\mbfscrP" : "𝓟",
527 "\\mbfscrQ" : "𝓠",
528 "\\mbfscrR" : "𝓡",
529 "\\mbfscrS" : "𝓢",
530 "\\mbfscrT" : "𝓣",
531 "\\mbfscrU" : "𝓤",
532 "\\mbfscrV" : "𝓥",
533 "\\mbfscrW" : "𝓦",
534 "\\mbfscrX" : "𝓧",
535 "\\mbfscrY" : "𝓨",
536 "\\mbfscrZ" : "𝓩",
537 "\\mbfscra" : "𝓪",
538 "\\mbfscrb" : "𝓫",
539 "\\mbfscrc" : "𝓬",
540 "\\mbfscrd" : "𝓭",
541 "\\mbfscre" : "𝓮",
542 "\\mbfscrf" : "𝓯",
543 "\\mbfscrg" : "𝓰",
544 "\\mbfscrh" : "𝓱",
545 "\\mbfscri" : "𝓲",
546 "\\mbfscrj" : "𝓳",
547 "\\mbfscrk" : "𝓴",
548 "\\mbfscrl" : "𝓵",
549 "\\mbfscrm" : "𝓶",
550 "\\mbfscrn" : "𝓷",
551 "\\mbfscro" : "𝓸",
552 "\\mbfscrp" : "𝓹",
553 "\\mbfscrq" : "𝓺",
554 "\\mbfscrr" : "𝓻",
555 "\\mbfscrs" : "𝓼",
556 "\\mbfscrt" : "𝓽",
557 "\\mbfscru" : "𝓾",
558 "\\mbfscrv" : "𝓿",
559 "\\mbfscrw" : "𝔀",
560 "\\mbfscrx" : "𝔁",
561 "\\mbfscry" : "𝔂",
562 "\\mbfscrz" : "𝔃",
563 "\\mfrakA" : "𝔄",
564 "\\mfrakB" : "𝔅",
565 "\\mfrakD" : "𝔇",
566 "\\mfrakE" : "𝔈",
567 "\\mfrakF" : "𝔉",
568 "\\mfrakG" : "𝔊",
569 "\\mfrakJ" : "𝔍",
570 "\\mfrakK" : "𝔎",
571 "\\mfrakL" : "𝔏",
572 "\\mfrakM" : "𝔐",
573 "\\mfrakN" : "𝔑",
574 "\\mfrakO" : "𝔒",
575 "\\mfrakP" : "𝔓",
576 "\\mfrakQ" : "𝔔",
577 "\\mfrakS" : "𝔖",
578 "\\mfrakT" : "𝔗",
579 "\\mfrakU" : "𝔘",
580 "\\mfrakV" : "𝔙",
581 "\\mfrakW" : "𝔚",
582 "\\mfrakX" : "𝔛",
583 "\\mfrakY" : "𝔜",
584 "\\mfraka" : "𝔞",
585 "\\mfrakb" : "𝔟",
586 "\\mfrakc" : "𝔠",
587 "\\mfrakd" : "𝔡",
588 "\\mfrake" : "𝔢",
589 "\\mfrakf" : "𝔣",
590 "\\mfrakg" : "𝔤",
591 "\\mfrakh" : "𝔥",
592 "\\mfraki" : "𝔦",
593 "\\mfrakj" : "𝔧",
594 "\\mfrakk" : "𝔨",
595 "\\mfrakl" : "𝔩",
596 "\\mfrakm" : "𝔪",
597 "\\mfrakn" : "𝔫",
598 "\\mfrako" : "𝔬",
599 "\\mfrakp" : "𝔭",
600 "\\mfrakq" : "𝔮",
601 "\\mfrakr" : "𝔯",
602 "\\mfraks" : "𝔰",
603 "\\mfrakt" : "𝔱",
604 "\\mfraku" : "𝔲",
605 "\\mfrakv" : "𝔳",
606 "\\mfrakw" : "𝔴",
607 "\\mfrakx" : "𝔵",
608 "\\mfraky" : "𝔶",
609 "\\mfrakz" : "𝔷",
610 "\\BbbA" : "𝔸",
611 "\\BbbB" : "𝔹",
612 "\\BbbD" : "𝔻",
613 "\\BbbE" : "𝔼",
614 "\\BbbF" : "𝔽",
615 "\\BbbG" : "𝔾",
616 "\\BbbI" : "𝕀",
617 "\\BbbJ" : "𝕁",
618 "\\BbbK" : "𝕂",
619 "\\BbbL" : "𝕃",
620 "\\BbbM" : "𝕄",
621 "\\BbbO" : "𝕆",
622 "\\BbbS" : "𝕊",
623 "\\BbbT" : "𝕋",
624 "\\BbbU" : "𝕌",
625 "\\BbbV" : "𝕍",
626 "\\BbbW" : "𝕎",
627 "\\BbbX" : "𝕏",
628 "\\BbbY" : "𝕐",
629 "\\Bbba" : "𝕒",
630 "\\Bbbb" : "𝕓",
631 "\\Bbbc" : "𝕔",
632 "\\Bbbd" : "𝕕",
633 "\\Bbbe" : "𝕖",
634 "\\Bbbf" : "𝕗",
635 "\\Bbbg" : "𝕘",
636 "\\Bbbh" : "𝕙",
637 "\\Bbbi" : "𝕚",
638 "\\Bbbj" : "𝕛",
639 "\\Bbbk" : "𝕜",
640 "\\Bbbl" : "𝕝",
641 "\\Bbbm" : "𝕞",
642 "\\Bbbn" : "𝕟",
643 "\\Bbbo" : "𝕠",
644 "\\Bbbp" : "𝕡",
645 "\\Bbbq" : "𝕢",
646 "\\Bbbr" : "𝕣",
647 "\\Bbbs" : "𝕤",
648 "\\Bbbt" : "𝕥",
649 "\\Bbbu" : "𝕦",
650 "\\Bbbv" : "𝕧",
651 "\\Bbbw" : "𝕨",
652 "\\Bbbx" : "𝕩",
653 "\\Bbby" : "𝕪",
654 "\\Bbbz" : "𝕫",
655 "\\mbffrakA" : "𝕬",
656 "\\mbffrakB" : "𝕭",
657 "\\mbffrakC" : "𝕮",
658 "\\mbffrakD" : "𝕯",
659 "\\mbffrakE" : "𝕰",
660 "\\mbffrakF" : "𝕱",
661 "\\mbffrakG" : "𝕲",
662 "\\mbffrakH" : "𝕳",
663 "\\mbffrakI" : "𝕴",
664 "\\mbffrakJ" : "𝕵",
665 "\\mbffrakK" : "𝕶",
666 "\\mbffrakL" : "𝕷",
667 "\\mbffrakM" : "𝕸",
668 "\\mbffrakN" : "𝕹",
669 "\\mbffrakO" : "𝕺",
670 "\\mbffrakP" : "𝕻",
671 "\\mbffrakQ" : "𝕼",
672 "\\mbffrakR" : "𝕽",
673 "\\mbffrakS" : "𝕾",
674 "\\mbffrakT" : "𝕿",
675 "\\mbffrakU" : "𝖀",
676 "\\mbffrakV" : "𝖁",
677 "\\mbffrakW" : "𝖂",
678 "\\mbffrakX" : "𝖃",
679 "\\mbffrakY" : "𝖄",
680 "\\mbffrakZ" : "𝖅",
681 "\\mbffraka" : "𝖆",
682 "\\mbffrakb" : "𝖇",
683 "\\mbffrakc" : "𝖈",
684 "\\mbffrakd" : "𝖉",
685 "\\mbffrake" : "𝖊",
686 "\\mbffrakf" : "𝖋",
687 "\\mbffrakg" : "𝖌",
688 "\\mbffrakh" : "𝖍",
689 "\\mbffraki" : "𝖎",
690 "\\mbffrakj" : "𝖏",
691 "\\mbffrakk" : "𝖐",
692 "\\mbffrakl" : "𝖑",
693 "\\mbffrakm" : "𝖒",
694 "\\mbffrakn" : "𝖓",
695 "\\mbffrako" : "𝖔",
696 "\\mbffrakp" : "𝖕",
697 "\\mbffrakq" : "𝖖",
698 "\\mbffrakr" : "𝖗",
699 "\\mbffraks" : "𝖘",
700 "\\mbffrakt" : "𝖙",
701 "\\mbffraku" : "𝖚",
702 "\\mbffrakv" : "𝖛",
703 "\\mbffrakw" : "𝖜",
704 "\\mbffrakx" : "𝖝",
705 "\\mbffraky" : "𝖞",
706 "\\mbffrakz" : "𝖟",
707 "\\msansA" : "𝖠",
708 "\\msansB" : "𝖡",
709 "\\msansC" : "𝖢",
710 "\\msansD" : "𝖣",
711 "\\msansE" : "𝖤",
712 "\\msansF" : "𝖥",
713 "\\msansG" : "𝖦",
714 "\\msansH" : "𝖧",
715 "\\msansI" : "𝖨",
716 "\\msansJ" : "𝖩",
717 "\\msansK" : "𝖪",
718 "\\msansL" : "𝖫",
719 "\\msansM" : "𝖬",
720 "\\msansN" : "𝖭",
721 "\\msansO" : "𝖮",
722 "\\msansP" : "𝖯",
723 "\\msansQ" : "𝖰",
724 "\\msansR" : "𝖱",
725 "\\msansS" : "𝖲",
726 "\\msansT" : "𝖳",
727 "\\msansU" : "𝖴",
728 "\\msansV" : "𝖵",
729 "\\msansW" : "𝖶",
730 "\\msansX" : "𝖷",
731 "\\msansY" : "𝖸",
732 "\\msansZ" : "𝖹",
733 "\\msansa" : "𝖺",
734 "\\msansb" : "𝖻",
735 "\\msansc" : "𝖼",
736 "\\msansd" : "𝖽",
737 "\\msanse" : "𝖾",
738 "\\msansf" : "𝖿",
739 "\\msansg" : "𝗀",
740 "\\msansh" : "𝗁",
741 "\\msansi" : "𝗂",
742 "\\msansj" : "𝗃",
743 "\\msansk" : "𝗄",
744 "\\msansl" : "𝗅",
745 "\\msansm" : "𝗆",
746 "\\msansn" : "𝗇",
747 "\\msanso" : "𝗈",
748 "\\msansp" : "𝗉",
749 "\\msansq" : "𝗊",
750 "\\msansr" : "𝗋",
751 "\\msanss" : "𝗌",
752 "\\msanst" : "𝗍",
753 "\\msansu" : "𝗎",
754 "\\msansv" : "𝗏",
755 "\\msansw" : "𝗐",
756 "\\msansx" : "𝗑",
757 "\\msansy" : "𝗒",
758 "\\msansz" : "𝗓",
759 "\\mbfsansA" : "𝗔",
760 "\\mbfsansB" : "𝗕",
761 "\\mbfsansC" : "𝗖",
762 "\\mbfsansD" : "𝗗",
763 "\\mbfsansE" : "𝗘",
764 "\\mbfsansF" : "𝗙",
765 "\\mbfsansG" : "𝗚",
766 "\\mbfsansH" : "𝗛",
767 "\\mbfsansI" : "𝗜",
768 "\\mbfsansJ" : "𝗝",
769 "\\mbfsansK" : "𝗞",
770 "\\mbfsansL" : "𝗟",
771 "\\mbfsansM" : "𝗠",
772 "\\mbfsansN" : "𝗡",
773 "\\mbfsansO" : "𝗢",
774 "\\mbfsansP" : "𝗣",
775 "\\mbfsansQ" : "𝗤",
776 "\\mbfsansR" : "𝗥",
777 "\\mbfsansS" : "𝗦",
778 "\\mbfsansT" : "𝗧",
779 "\\mbfsansU" : "𝗨",
780 "\\mbfsansV" : "𝗩",
781 "\\mbfsansW" : "𝗪",
782 "\\mbfsansX" : "𝗫",
783 "\\mbfsansY" : "𝗬",
784 "\\mbfsansZ" : "𝗭",
785 "\\mbfsansa" : "𝗮",
786 "\\mbfsansb" : "𝗯",
787 "\\mbfsansc" : "𝗰",
788 "\\mbfsansd" : "𝗱",
789 "\\mbfsanse" : "𝗲",
790 "\\mbfsansf" : "𝗳",
791 "\\mbfsansg" : "𝗴",
792 "\\mbfsansh" : "𝗵",
793 "\\mbfsansi" : "𝗶",
794 "\\mbfsansj" : "𝗷",
795 "\\mbfsansk" : "𝗸",
796 "\\mbfsansl" : "𝗹",
797 "\\mbfsansm" : "𝗺",
798 "\\mbfsansn" : "𝗻",
799 "\\mbfsanso" : "𝗼",
800 "\\mbfsansp" : "𝗽",
801 "\\mbfsansq" : "𝗾",
802 "\\mbfsansr" : "𝗿",
803 "\\mbfsanss" : "𝘀",
804 "\\mbfsanst" : "𝘁",
805 "\\mbfsansu" : "𝘂",
806 "\\mbfsansv" : "𝘃",
807 "\\mbfsansw" : "𝘄",
808 "\\mbfsansx" : "𝘅",
809 "\\mbfsansy" : "𝘆",
810 "\\mbfsansz" : "𝘇",
811 "\\mitsansA" : "𝘈",
812 "\\mitsansB" : "𝘉",
813 "\\mitsansC" : "𝘊",
814 "\\mitsansD" : "𝘋",
815 "\\mitsansE" : "𝘌",
816 "\\mitsansF" : "𝘍",
817 "\\mitsansG" : "𝘎",
818 "\\mitsansH" : "𝘏",
819 "\\mitsansI" : "𝘐",
820 "\\mitsansJ" : "𝘑",
821 "\\mitsansK" : "𝘒",
822 "\\mitsansL" : "𝘓",
823 "\\mitsansM" : "𝘔",
824 "\\mitsansN" : "𝘕",
825 "\\mitsansO" : "𝘖",
826 "\\mitsansP" : "𝘗",
827 "\\mitsansQ" : "𝘘",
828 "\\mitsansR" : "𝘙",
829 "\\mitsansS" : "𝘚",
830 "\\mitsansT" : "𝘛",
831 "\\mitsansU" : "𝘜",
832 "\\mitsansV" : "𝘝",
833 "\\mitsansW" : "𝘞",
834 "\\mitsansX" : "𝘟",
835 "\\mitsansY" : "𝘠",
836 "\\mitsansZ" : "𝘡",
837 "\\mitsansa" : "𝘢",
838 "\\mitsansb" : "𝘣",
839 "\\mitsansc" : "𝘤",
840 "\\mitsansd" : "𝘥",
841 "\\mitsanse" : "𝘦",
842 "\\mitsansf" : "𝘧",
843 "\\mitsansg" : "𝘨",
844 "\\mitsansh" : "𝘩",
845 "\\mitsansi" : "𝘪",
846 "\\mitsansj" : "𝘫",
847 "\\mitsansk" : "𝘬",
848 "\\mitsansl" : "𝘭",
849 "\\mitsansm" : "𝘮",
850 "\\mitsansn" : "𝘯",
851 "\\mitsanso" : "𝘰",
852 "\\mitsansp" : "𝘱",
853 "\\mitsansq" : "𝘲",
854 "\\mitsansr" : "𝘳",
855 "\\mitsanss" : "𝘴",
856 "\\mitsanst" : "𝘵",
857 "\\mitsansu" : "𝘶",
858 "\\mitsansv" : "𝘷",
859 "\\mitsansw" : "𝘸",
860 "\\mitsansx" : "𝘹",
861 "\\mitsansy" : "𝘺",
862 "\\mitsansz" : "𝘻",
863 "\\mbfitsansA" : "𝘼",
864 "\\mbfitsansB" : "𝘽",
865 "\\mbfitsansC" : "𝘾",
866 "\\mbfitsansD" : "𝘿",
867 "\\mbfitsansE" : "𝙀",
868 "\\mbfitsansF" : "𝙁",
869 "\\mbfitsansG" : "𝙂",
870 "\\mbfitsansH" : "𝙃",
871 "\\mbfitsansI" : "𝙄",
872 "\\mbfitsansJ" : "𝙅",
873 "\\mbfitsansK" : "𝙆",
874 "\\mbfitsansL" : "𝙇",
875 "\\mbfitsansM" : "𝙈",
876 "\\mbfitsansN" : "𝙉",
877 "\\mbfitsansO" : "𝙊",
878 "\\mbfitsansP" : "𝙋",
879 "\\mbfitsansQ" : "𝙌",
880 "\\mbfitsansR" : "𝙍",
881 "\\mbfitsansS" : "𝙎",
882 "\\mbfitsansT" : "𝙏",
883 "\\mbfitsansU" : "𝙐",
884 "\\mbfitsansV" : "𝙑",
885 "\\mbfitsansW" : "𝙒",
886 "\\mbfitsansX" : "𝙓",
887 "\\mbfitsansY" : "𝙔",
888 "\\mbfitsansZ" : "𝙕",
889 "\\mbfitsansa" : "𝙖",
890 "\\mbfitsansb" : "𝙗",
891 "\\mbfitsansc" : "𝙘",
892 "\\mbfitsansd" : "𝙙",
893 "\\mbfitsanse" : "𝙚",
894 "\\mbfitsansf" : "𝙛",
895 "\\mbfitsansg" : "𝙜",
896 "\\mbfitsansh" : "𝙝",
897 "\\mbfitsansi" : "𝙞",
898 "\\mbfitsansj" : "𝙟",
899 "\\mbfitsansk" : "𝙠",
900 "\\mbfitsansl" : "𝙡",
901 "\\mbfitsansm" : "𝙢",
902 "\\mbfitsansn" : "𝙣",
903 "\\mbfitsanso" : "𝙤",
904 "\\mbfitsansp" : "𝙥",
905 "\\mbfitsansq" : "𝙦",
906 "\\mbfitsansr" : "𝙧",
907 "\\mbfitsanss" : "𝙨",
908 "\\mbfitsanst" : "𝙩",
909 "\\mbfitsansu" : "𝙪",
910 "\\mbfitsansv" : "𝙫",
911 "\\mbfitsansw" : "𝙬",
912 "\\mbfitsansx" : "𝙭",
913 "\\mbfitsansy" : "𝙮",
914 "\\mbfitsansz" : "𝙯",
915 "\\mttA" : "𝙰",
916 "\\mttB" : "𝙱",
917 "\\mttC" : "𝙲",
918 "\\mttD" : "𝙳",
919 "\\mttE" : "𝙴",
920 "\\mttF" : "𝙵",
921 "\\mttG" : "𝙶",
922 "\\mttH" : "𝙷",
923 "\\mttI" : "𝙸",
924 "\\mttJ" : "𝙹",
925 "\\mttK" : "𝙺",
926 "\\mttL" : "𝙻",
927 "\\mttM" : "𝙼",
928 "\\mttN" : "𝙽",
929 "\\mttO" : "𝙾",
930 "\\mttP" : "𝙿",
931 "\\mttQ" : "𝚀",
932 "\\mttR" : "𝚁",
933 "\\mttS" : "𝚂",
934 "\\mttT" : "𝚃",
935 "\\mttU" : "𝚄",
936 "\\mttV" : "𝚅",
937 "\\mttW" : "𝚆",
938 "\\mttX" : "𝚇",
939 "\\mttY" : "𝚈",
940 "\\mttZ" : "𝚉",
941 "\\mtta" : "𝚊",
942 "\\mttb" : "𝚋",
943 "\\mttc" : "𝚌",
944 "\\mttd" : "𝚍",
945 "\\mtte" : "𝚎",
946 "\\mttf" : "𝚏",
947 "\\mttg" : "𝚐",
948 "\\mtth" : "𝚑",
949 "\\mtti" : "𝚒",
950 "\\mttj" : "𝚓",
951 "\\mttk" : "𝚔",
952 "\\mttl" : "𝚕",
953 "\\mttm" : "𝚖",
954 "\\mttn" : "𝚗",
955 "\\mtto" : "𝚘",
956 "\\mttp" : "𝚙",
957 "\\mttq" : "𝚚",
958 "\\mttr" : "𝚛",
959 "\\mtts" : "𝚜",
960 "\\mttt" : "𝚝",
961 "\\mttu" : "𝚞",
962 "\\mttv" : "𝚟",
963 "\\mttw" : "𝚠",
964 "\\mttx" : "𝚡",
965 "\\mtty" : "𝚢",
966 "\\mttz" : "𝚣",
967 "\\mbfAlpha" : "𝚨",
968 "\\mbfBeta" : "𝚩",
969 "\\mbfGamma" : "𝚪",
970 "\\mbfDelta" : "𝚫",
971 "\\mbfEpsilon" : "𝚬",
972 "\\mbfZeta" : "𝚭",
973 "\\mbfEta" : "𝚮",
974 "\\mbfTheta" : "𝚯",
975 "\\mbfIota" : "𝚰",
976 "\\mbfKappa" : "𝚱",
977 "\\mbfLambda" : "𝚲",
978 "\\mbfMu" : "𝚳",
979 "\\mbfNu" : "𝚴",
980 "\\mbfXi" : "𝚵",
981 "\\mbfOmicron" : "𝚶",
982 "\\mbfPi" : "𝚷",
983 "\\mbfRho" : "𝚸",
984 "\\mbfvarTheta" : "𝚹",
985 "\\mbfSigma" : "𝚺",
986 "\\mbfTau" : "𝚻",
987 "\\mbfUpsilon" : "𝚼",
988 "\\mbfPhi" : "𝚽",
989 "\\mbfChi" : "𝚾",
990 "\\mbfPsi" : "𝚿",
991 "\\mbfOmega" : "𝛀",
992 "\\mbfalpha" : "𝛂",
993 "\\mbfbeta" : "𝛃",
994 "\\mbfgamma" : "𝛄",
995 "\\mbfdelta" : "𝛅",
996 "\\mbfepsilon" : "𝛆",
997 "\\mbfzeta" : "𝛇",
998 "\\mbfeta" : "𝛈",
999 "\\mbftheta" : "𝛉",
1000 "\\mbfiota" : "𝛊",
1001 "\\mbfkappa" : "𝛋",
1002 "\\mbflambda" : "𝛌",
1003 "\\mbfmu" : "𝛍",
1004 "\\mbfnu" : "𝛎",
1005 "\\mbfxi" : "𝛏",
1006 "\\mbfomicron" : "𝛐",
1007 "\\mbfpi" : "𝛑",
1008 "\\mbfrho" : "𝛒",
1009 "\\mbfvarsigma" : "𝛓",
1010 "\\mbfsigma" : "𝛔",
1011 "\\mbftau" : "𝛕",
1012 "\\mbfupsilon" : "𝛖",
1013 "\\mbfvarphi" : "𝛗",
1014 "\\mbfchi" : "𝛘",
1015 "\\mbfpsi" : "𝛙",
1016 "\\mbfomega" : "𝛚",
1017 "\\mbfvarepsilon" : "𝛜",
1018 "\\mbfvartheta" : "𝛝",
1019 "\\mbfvarkappa" : "𝛞",
1020 "\\mbfphi" : "𝛟",
1021 "\\mbfvarrho" : "𝛠",
1022 "\\mbfvarpi" : "𝛡",
1023 "\\mitAlpha" : "𝛢",
1024 "\\mitBeta" : "𝛣",
1025 "\\mitGamma" : "𝛤",
1026 "\\mitDelta" : "𝛥",
1027 "\\mitEpsilon" : "𝛦",
1028 "\\mitZeta" : "𝛧",
1029 "\\mitEta" : "𝛨",
1030 "\\mitTheta" : "𝛩",
1031 "\\mitIota" : "𝛪",
1032 "\\mitKappa" : "𝛫",
1033 "\\mitLambda" : "𝛬",
1034 "\\mitMu" : "𝛭",
1035 "\\mitNu" : "𝛮",
1036 "\\mitXi" : "𝛯",
1037 "\\mitOmicron" : "𝛰",
1038 "\\mitPi" : "𝛱",
1039 "\\mitRho" : "𝛲",
1040 "\\mitvarTheta" : "𝛳",
1041 "\\mitSigma" : "𝛴",
1042 "\\mitTau" : "𝛵",
1043 "\\mitUpsilon" : "𝛶",
1044 "\\mitPhi" : "𝛷",
1045 "\\mitChi" : "𝛸",
1046 "\\mitPsi" : "𝛹",
1047 "\\mitOmega" : "𝛺",
1048 "\\mitalpha" : "𝛼",
1049 "\\mitbeta" : "𝛽",
1050 "\\mitgamma" : "𝛾",
1051 "\\mitdelta" : "𝛿",
1052 "\\mitepsilon" : "𝜀",
1053 "\\mitzeta" : "𝜁",
1054 "\\miteta" : "𝜂",
1055 "\\mittheta" : "𝜃",
1056 "\\mitiota" : "𝜄",
1057 "\\mitkappa" : "𝜅",
1058 "\\mitlambda" : "𝜆",
1059 "\\mitmu" : "𝜇",
1060 "\\mitnu" : "𝜈",
1061 "\\mitxi" : "𝜉",
1062 "\\mitomicron" : "𝜊",
1063 "\\mitpi" : "𝜋",
1064 "\\mitrho" : "𝜌",
1065 "\\mitvarsigma" : "𝜍",
1066 "\\mitsigma" : "𝜎",
1067 "\\mittau" : "𝜏",
1068 "\\mitupsilon" : "𝜐",
1069 "\\mitphi" : "𝜑",
1070 "\\mitchi" : "𝜒",
1071 "\\mitpsi" : "𝜓",
1072 "\\mitomega" : "𝜔",
1073 "\\mitvarepsilon" : "𝜖",
1074 "\\mitvartheta" : "𝜗",
1075 "\\mitvarkappa" : "𝜘",
1076 "\\mitvarphi" : "𝜙",
1077 "\\mitvarrho" : "𝜚",
1078 "\\mitvarpi" : "𝜛",
1079 "\\mbfitAlpha" : "𝜜",
1080 "\\mbfitBeta" : "𝜝",
1081 "\\mbfitGamma" : "𝜞",
1082 "\\mbfitDelta" : "𝜟",
1083 "\\mbfitEpsilon" : "𝜠",
1084 "\\mbfitZeta" : "𝜡",
1085 "\\mbfitEta" : "𝜢",
1086 "\\mbfitTheta" : "𝜣",
1087 "\\mbfitIota" : "𝜤",
1088 "\\mbfitKappa" : "𝜥",
1089 "\\mbfitLambda" : "𝜦",
1090 "\\mbfitMu" : "𝜧",
1091 "\\mbfitNu" : "𝜨",
1092 "\\mbfitXi" : "𝜩",
1093 "\\mbfitOmicron" : "𝜪",
1094 "\\mbfitPi" : "𝜫",
1095 "\\mbfitRho" : "𝜬",
1096 "\\mbfitvarTheta" : "𝜭",
1097 "\\mbfitSigma" : "𝜮",
1098 "\\mbfitTau" : "𝜯",
1099 "\\mbfitUpsilon" : "𝜰",
1100 "\\mbfitPhi" : "𝜱",
1101 "\\mbfitChi" : "𝜲",
1102 "\\mbfitPsi" : "𝜳",
1103 "\\mbfitOmega" : "𝜴",
1104 "\\mbfitalpha" : "𝜶",
1105 "\\mbfitbeta" : "𝜷",
1106 "\\mbfitgamma" : "𝜸",
1107 "\\mbfitdelta" : "𝜹",
1108 "\\mbfitepsilon" : "𝜺",
1109 "\\mbfitzeta" : "𝜻",
1110 "\\mbfiteta" : "𝜼",
1111 "\\mbfittheta" : "𝜽",
1112 "\\mbfitiota" : "𝜾",
1113 "\\mbfitkappa" : "𝜿",
1114 "\\mbfitlambda" : "𝝀",
1115 "\\mbfitmu" : "𝝁",
1116 "\\mbfitnu" : "𝝂",
1117 "\\mbfitxi" : "𝝃",
1118 "\\mbfitomicron" : "𝝄",
1119 "\\mbfitpi" : "𝝅",
1120 "\\mbfitrho" : "𝝆",
1121 "\\mbfitvarsigma" : "𝝇",
1122 "\\mbfitsigma" : "𝝈",
1123 "\\mbfittau" : "𝝉",
1124 "\\mbfitupsilon" : "𝝊",
1125 "\\mbfitphi" : "𝝋",
1126 "\\mbfitchi" : "𝝌",
1127 "\\mbfitpsi" : "𝝍",
1128 "\\mbfitomega" : "𝝎",
1129 "\\mbfitvarepsilon" : "𝝐",
1130 "\\mbfitvartheta" : "𝝑",
1131 "\\mbfitvarkappa" : "𝝒",
1132 "\\mbfitvarphi" : "𝝓",
1133 "\\mbfitvarrho" : "𝝔",
1134 "\\mbfitvarpi" : "𝝕",
1135 "\\mbfsansAlpha" : "𝝖",
1136 "\\mbfsansBeta" : "𝝗",
1137 "\\mbfsansGamma" : "𝝘",
1138 "\\mbfsansDelta" : "𝝙",
1139 "\\mbfsansEpsilon" : "𝝚",
1140 "\\mbfsansZeta" : "𝝛",
1141 "\\mbfsansEta" : "𝝜",
1142 "\\mbfsansTheta" : "𝝝",
1143 "\\mbfsansIota" : "𝝞",
1144 "\\mbfsansKappa" : "𝝟",
1145 "\\mbfsansLambda" : "𝝠",
1146 "\\mbfsansMu" : "𝝡",
1147 "\\mbfsansNu" : "𝝢",
1148 "\\mbfsansXi" : "𝝣",
1149 "\\mbfsansOmicron" : "𝝤",
1150 "\\mbfsansPi" : "𝝥",
1151 "\\mbfsansRho" : "𝝦",
1152 "\\mbfsansvarTheta" : "𝝧",
1153 "\\mbfsansSigma" : "𝝨",
1154 "\\mbfsansTau" : "𝝩",
1155 "\\mbfsansUpsilon" : "𝝪",
1156 "\\mbfsansPhi" : "𝝫",
1157 "\\mbfsansChi" : "𝝬",
1158 "\\mbfsansPsi" : "𝝭",
1159 "\\mbfsansOmega" : "𝝮",
1160 "\\mbfsansalpha" : "𝝰",
1161 "\\mbfsansbeta" : "𝝱",
1162 "\\mbfsansgamma" : "𝝲",
1163 "\\mbfsansdelta" : "𝝳",
1164 "\\mbfsansepsilon" : "𝝴",
1165 "\\mbfsanszeta" : "𝝵",
1166 "\\mbfsanseta" : "𝝶",
1167 "\\mbfsanstheta" : "𝝷",
1168 "\\mbfsansiota" : "𝝸",
1169 "\\mbfsanskappa" : "𝝹",
1170 "\\mbfsanslambda" : "𝝺",
1171 "\\mbfsansmu" : "𝝻",
1172 "\\mbfsansnu" : "𝝼",
1173 "\\mbfsansxi" : "𝝽",
1174 "\\mbfsansomicron" : "𝝾",
1175 "\\mbfsanspi" : "𝝿",
1176 "\\mbfsansrho" : "𝞀",
1177 "\\mbfsansvarsigma" : "𝞁",
1178 "\\mbfsanssigma" : "𝞂",
1179 "\\mbfsanstau" : "𝞃",
1180 "\\mbfsansupsilon" : "𝞄",
1181 "\\mbfsansphi" : "𝞅",
1182 "\\mbfsanschi" : "𝞆",
1183 "\\mbfsanspsi" : "𝞇",
1184 "\\mbfsansomega" : "𝞈",
1185 "\\mbfsansvarepsilon" : "𝞊",
1186 "\\mbfsansvartheta" : "𝞋",
1187 "\\mbfsansvarkappa" : "𝞌",
1188 "\\mbfsansvarphi" : "𝞍",
1189 "\\mbfsansvarrho" : "𝞎",
1190 "\\mbfsansvarpi" : "𝞏",
1191 "\\mbfitsansAlpha" : "𝞐",
1192 "\\mbfitsansBeta" : "𝞑",
1193 "\\mbfitsansGamma" : "𝞒",
1194 "\\mbfitsansDelta" : "𝞓",
1195 "\\mbfitsansEpsilon" : "𝞔",
1196 "\\mbfitsansZeta" : "𝞕",
1197 "\\mbfitsansEta" : "𝞖",
1198 "\\mbfitsansTheta" : "𝞗",
1199 "\\mbfitsansIota" : "𝞘",
1200 "\\mbfitsansKappa" : "𝞙",
1201 "\\mbfitsansLambda" : "𝞚",
1202 "\\mbfitsansMu" : "𝞛",
1203 "\\mbfitsansNu" : "𝞜",
1204 "\\mbfitsansXi" : "𝞝",
1205 "\\mbfitsansOmicron" : "𝞞",
1206 "\\mbfitsansPi" : "𝞟",
1207 "\\mbfitsansRho" : "𝞠",
1208 "\\mbfitsansvarTheta" : "𝞡",
1209 "\\mbfitsansSigma" : "𝞢",
1210 "\\mbfitsansTau" : "𝞣",
1211 "\\mbfitsansUpsilon" : "𝞤",
1212 "\\mbfitsansPhi" : "𝞥",
1213 "\\mbfitsansChi" : "𝞦",
1214 "\\mbfitsansPsi" : "𝞧",
1215 "\\mbfitsansOmega" : "𝞨",
1216 "\\mbfitsansalpha" : "𝞪",
1217 "\\mbfitsansbeta" : "𝞫",
1218 "\\mbfitsansgamma" : "𝞬",
1219 "\\mbfitsansdelta" : "𝞭",
1220 "\\mbfitsansepsilon" : "𝞮",
1221 "\\mbfitsanszeta" : "𝞯",
1222 "\\mbfitsanseta" : "𝞰",
1223 "\\mbfitsanstheta" : "𝞱",
1224 "\\mbfitsansiota" : "𝞲",
1225 "\\mbfitsanskappa" : "𝞳",
1226 "\\mbfitsanslambda" : "𝞴",
1227 "\\mbfitsansmu" : "𝞵",
1228 "\\mbfitsansnu" : "𝞶",
1229 "\\mbfitsansxi" : "𝞷",
1230 "\\mbfitsansomicron" : "𝞸",
1231 "\\mbfitsanspi" : "𝞹",
1232 "\\mbfitsansrho" : "𝞺",
1233 "\\mbfitsansvarsigma" : "𝞻",
1234 "\\mbfitsanssigma" : "𝞼",
1235 "\\mbfitsanstau" : "𝞽",
1236 "\\mbfitsansupsilon" : "𝞾",
1237 "\\mbfitsansphi" : "𝞿",
1238 "\\mbfitsanschi" : "𝟀",
1239 "\\mbfitsanspsi" : "𝟁",
1240 "\\mbfitsansomega" : "𝟂",
1241 "\\mbfitsansvarepsilon" : "𝟄",
1242 "\\mbfitsansvartheta" : "𝟅",
1243 "\\mbfitsansvarkappa" : "𝟆",
1244 "\\mbfitsansvarphi" : "𝟇",
1245 "\\mbfitsansvarrho" : "𝟈",
1246 "\\mbfitsansvarpi" : "𝟉",
1247 "\\mbfzero" : "𝟎",
1248 "\\mbfone" : "𝟏",
1249 "\\mbftwo" : "𝟐",
1250 "\\mbfthree" : "𝟑",
1251 "\\mbffour" : "𝟒",
1252 "\\mbffive" : "𝟓",
1253 "\\mbfsix" : "𝟔",
1254 "\\mbfseven" : "𝟕",
1255 "\\mbfeight" : "𝟖",
1256 "\\mbfnine" : "𝟗",
1257 "\\Bbbzero" : "𝟘",
1258 "\\Bbbone" : "𝟙",
1259 "\\Bbbtwo" : "𝟚",
1260 "\\Bbbthree" : "𝟛",
1261 "\\Bbbfour" : "𝟜",
1262 "\\Bbbfive" : "𝟝",
1263 "\\Bbbsix" : "𝟞",
1264 "\\Bbbseven" : "𝟟",
1265 "\\Bbbeight" : "𝟠",
1266 "\\Bbbnine" : "𝟡",
1267 "\\msanszero" : "𝟢",
1268 "\\msansone" : "𝟣",
1269 "\\msanstwo" : "𝟤",
1270 "\\msansthree" : "𝟥",
1271 "\\msansfour" : "𝟦",
1272 "\\msansfive" : "𝟧",
1273 "\\msanssix" : "𝟨",
1274 "\\msansseven" : "𝟩",
1275 "\\msanseight" : "𝟪",
1276 "\\msansnine" : "𝟫",
1277 "\\mbfsanszero" : "𝟬",
1278 "\\mbfsansone" : "𝟭",
1279 "\\mbfsanstwo" : "𝟮",
1280 "\\mbfsansthree" : "𝟯",
1281 "\\mbfsansfour" : "𝟰",
1282 "\\mbfsansfive" : "𝟱",
1283 "\\mbfsanssix" : "𝟲",
1284 "\\mbfsansseven" : "𝟳",
1285 "\\mbfsanseight" : "𝟴",
1286 "\\mbfsansnine" : "𝟵",
1287 "\\mttzero" : "𝟶",
1288 "\\mttone" : "𝟷",
1289 "\\mtttwo" : "𝟸",
1290 "\\mttthree" : "𝟹",
1291 "\\mttfour" : "𝟺",
1292 "\\mttfive" : "𝟻",
1293 "\\mttsix" : "𝟼",
1294 "\\mttseven" : "𝟽",
1295 "\\mtteight" : "𝟾",
1296 "\\mttnine" : "𝟿",
1297 }
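
As a quick illustration (not part of the diff), the generated table can be used directly once this branch of IPython is installed:

    from IPython.core.latex_symbols import latex_symbols

    print(latex_symbols["\\alpha"])   # α
    print(latex_symbols["\\mbfA"])    # 𝐀 (mathematical bold capital A)
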
@@ -0,0 +1,84 b''
1 # coding: utf-8
2
3 # This script autogenerates `IPython.core.latex_symbols.py`, which contains a
4 # single dict, named `latex_symbols`. The keys in this dict are latex symbols,
5 # such as `\\alpha` and the values in the dict are the unicode equivalents for
6 # those. Most importantly, only unicode symbols that are valid identifiers in
7 # Python 3 are included.
8
9 #
10 # The original mapping of latex symbols to unicode comes from the `latex_symbols.jl` file from Julia.
11
12 from __future__ import print_function
13 import os, sys
14
15 if not sys.version_info[0] == 3:
16 print("This script must be run with Python 3, exiting...")
17 sys.exit(1)
18
19 # Import the Julia LaTeX symbols
20 print('Importing latex_symbols.jl from Julia...')
21 import requests
22 url = 'https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl'
23 r = requests.get(url)
24
25
26 # Build a list of key, value pairs
27 print('Building a list of (latex, unicode) key-value pairs...')
28 lines = r.text.splitlines()[60:]
29 lines = [line for line in lines if '=>' in line]
30 lines = [line.replace('=>',':') for line in lines]
31
32 def line_to_tuple(line):
33 """Convert a single line of the .jl file to a 2-tuple of strings like ("\\alpha", "α")"""
34 kv = line.split(',')[0].split(':')
35 # kv = tuple(line.strip(', ').split(':'))
36 k, v = kv[0].strip(' "'), kv[1].strip(' "')
37 # if not test_ident(v):
38 # print(line)
39 return k, v
40
41 assert line_to_tuple(' "\\sqrt" : "\u221A",') == ('\\sqrt', '\u221A')
42 lines = [line_to_tuple(line) for line in lines]
43
44
45 # Filter out non-valid identifiers
46 print('Filtering out characters that are not valid Python 3 identifiers')
47
48 def test_ident(i):
49 """Is the unicode string valid in a Python 3 identifer."""
50 # Some characters are not valid at the start of a name, but we still want to
51 # include them. So prefix with 'a', which is valid at the start.
52 return ('a' + i).isidentifier()
53
54 assert test_ident("α")
55 assert not test_ident('‴')
56
57 valid_idents = [line for line in lines if test_ident(line[1])]
58
59
60 # Write the `latex_symbols.py` module in the cwd
61
62 s = """# encoding: utf-8
63
64 # DO NOT EDIT THIS FILE BY HAND.
65
66 # To update this file, run the script /tools/gen_latex_symbols.py using Python 3
67
68 # This file is autogenerated from the file:
69 # https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
70 # This original list is filtered to remove any unicode characters that are not valid
71 # Python identifiers.
72
73 latex_symbols = {\n
74 """
75 for line in valid_idents:
76 s += ' "%s" : "%s",\n' % (line[0], line[1])
77 s += "}\n"
78
79 fn = os.path.join('..','IPython','core','latex_symbols.py')
80 print("Writing the file: %s" % fn)
81 with open(fn, 'w', encoding='utf-8') as f:
82 f.write(s)
83
84
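
A short illustration (not part of the script) of why test_ident prefixes 'a' before calling isidentifier(): combining accents such as the value mapped from "\\hat" are valid inside a Python identifier but not at its start.

    # U+0302 COMBINING CIRCUMFLEX ACCENT is the character mapped from "\\hat"
    assert not "\u0302".isidentifier()          # invalid as the leading character
    assert ("a" + "\u0302").isidentifier()      # valid after a leading letter
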
@@ -1,1142 +1,1171 b''
1 # encoding: utf-8
1 2 """Word completion for IPython.
2 3
3 4 This module is a fork of the rlcompleter module in the Python standard
4 5 library. The original enhancements made to rlcompleter have been sent
5 6 upstream and were accepted as of Python 2.3, but we need a lot more
6 7 functionality specific to IPython, so this module will continue to live as an
7 8 IPython-specific utility.
8 9
9 10 Original rlcompleter documentation:
10 11
11 12 This requires the latest extension to the readline module (the completer
12 13 completes keywords, built-ins and globals in __main__; when completing
13 14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
14 15 completes its attributes.
15 16
16 17 It's very cool to do "import string", type "string.", hit the
17 18 completion key (twice), and see the list of names defined by the
18 19 string module!
19 20
20 21 Tip: to use the tab key as the completion key, call
21 22
22 23 readline.parse_and_bind("tab: complete")
23 24
24 25 Notes:
25 26
26 27 - Exceptions raised by the completer function are *ignored* (and
27 28 generally cause the completion to fail). This is a feature -- since
28 29 readline sets the tty device in raw (or cbreak) mode, printing a
29 30 traceback wouldn't work well without some complicated hoopla to save,
30 31 reset and restore the tty state.
31 32
32 33 - The evaluation of the NAME.NAME... form may cause arbitrary
33 34 application defined code to be executed if an object with a
34 35 ``__getattr__`` hook is found. Since it is the responsibility of the
35 36 application (or the user) to enable this feature, I consider this an
36 37 acceptable risk. More complicated expressions (e.g. function calls or
37 38 indexing operations) are *not* evaluated.
38 39
39 40 - GNU readline is also used by the built-in functions input() and
40 41 raw_input(), and thus these also benefit/suffer from the completer
41 42 features. Clearly an interactive application can benefit by
42 43 specifying its own completer function and using raw_input() for all
43 44 its input.
44 45
45 46 - When the original stdin is not a tty device, GNU readline is never
46 47 used, and this module (and the readline module) are silently inactive.
47 48 """
48 49
49 50 # Copyright (c) IPython Development Team.
50 51 # Distributed under the terms of the Modified BSD License.
51 52 #
52 53 # Some of this code originated from rlcompleter in the Python standard library
53 54 # Copyright (C) 2001 Python Software Foundation, www.python.org
54 55
55 56 import __main__
56 57 import glob
57 58 import inspect
58 59 import itertools
59 60 import keyword
60 61 import os
61 62 import re
62 63 import sys
63 64
64 65 from IPython.config.configurable import Configurable
65 66 from IPython.core.error import TryNext
66 67 from IPython.core.inputsplitter import ESC_MAGIC
68 from IPython.core.latex_symbols import latex_symbols
67 69 from IPython.utils import generics
68 70 from IPython.utils import io
69 71 from IPython.utils.decorators import undoc
70 72 from IPython.utils.dir2 import dir2
71 73 from IPython.utils.process import arg_split
72 from IPython.utils.py3compat import builtin_mod, string_types
74 from IPython.utils.py3compat import builtin_mod, string_types, PY3
73 75 from IPython.utils.traitlets import CBool, Enum
74 76
75 77 #-----------------------------------------------------------------------------
76 78 # Globals
77 79 #-----------------------------------------------------------------------------
78 80
79 81 # Public API
80 82 __all__ = ['Completer','IPCompleter']
81 83
82 84 if sys.platform == 'win32':
83 85 PROTECTABLES = ' '
84 86 else:
85 87 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
86 88
87 89
88 90 #-----------------------------------------------------------------------------
89 91 # Main functions and classes
90 92 #-----------------------------------------------------------------------------
91 93
92 94 def has_open_quotes(s):
93 95 """Return whether a string has open quotes.
94 96
95 97 This simply counts whether the number of quote characters of either type in
96 98 the string is odd.
97 99
98 100 Returns
99 101 -------
100 102 If there is an open quote, the quote character is returned. Else, return
101 103 False.
102 104 """
103 105 # We check " first, then ', so complex cases with nested quotes will get
104 106 # the " to take precedence.
105 107 if s.count('"') % 2:
106 108 return '"'
107 109 elif s.count("'") % 2:
108 110 return "'"
109 111 else:
110 112 return False
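
A quick sanity check of the behaviour described in the docstring (illustrative, not part of the diff):

    from IPython.core.completer import has_open_quotes

    assert has_open_quotes("print('foo") == "'"     # one unmatched single quote
    assert has_open_quotes('print("foo') == '"'     # one unmatched double quote
    assert not has_open_quotes("print('foo')")      # all quotes balanced
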
111 113
112 114
113 115 def protect_filename(s):
114 116 """Escape a string to protect certain characters."""
115 117
116 118 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
117 119 for ch in s])
118 120
119 121 def expand_user(path):
120 122 """Expand '~'-style usernames in strings.
121 123
122 124 This is similar to :func:`os.path.expanduser`, but it computes and returns
123 125 extra information that will be useful if the input was being used in
124 126 computing completions, and you wish to return the completions with the
125 127 original '~' instead of its expanded value.
126 128
127 129 Parameters
128 130 ----------
129 131 path : str
130 132 String to be expanded. If no ~ is present, the output is the same as the
131 133 input.
132 134
133 135 Returns
134 136 -------
135 137 newpath : str
136 138 Result of ~ expansion in the input path.
137 139 tilde_expand : bool
138 140 Whether any expansion was performed or not.
139 141 tilde_val : str
140 142 The value that ~ was replaced with.
141 143 """
142 144 # Default values
143 145 tilde_expand = False
144 146 tilde_val = ''
145 147 newpath = path
146 148
147 149 if path.startswith('~'):
148 150 tilde_expand = True
149 151 rest = len(path)-1
150 152 newpath = os.path.expanduser(path)
151 153 if rest:
152 154 tilde_val = newpath[:-rest]
153 155 else:
154 156 tilde_val = newpath
155 157
156 158 return newpath, tilde_expand, tilde_val
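
For example (illustrative; the expanded values depend on the user's home directory):

    from IPython.core.completer import expand_user

    newpath, did_expand, tilde_val = expand_user('~/notebooks')
    # newpath    -> e.g. '/home/me/notebooks'
    # did_expand -> True
    # tilde_val  -> e.g. '/home/me'
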
157 159
158 160
159 161 def compress_user(path, tilde_expand, tilde_val):
160 162 """Does the opposite of expand_user, with its outputs.
161 163 """
162 164 if tilde_expand:
163 165 return path.replace(tilde_val, '~')
164 166 else:
165 167 return path
166 168
167 169
168 170
169 171 def penalize_magics_key(word):
170 172 """key for sorting that penalizes magic commands in the ordering
171 173
172 174 Normal words are left alone.
173 175
174 176 Magic commands have the initial % moved to the end, e.g.
175 177 %matplotlib is transformed as follows:
176 178
177 179 %matplotlib -> matplotlib%
178 180
179 181 [The choice of the final % is arbitrary.]
180 182
181 183 Since "matplotlib" < "matplotlib%" as strings,
182 184 "timeit" will appear before the magic "%timeit" in the ordering
183 185
184 186 For consistency, move "%%" to the end, so cell magics appear *after*
185 187 line magics with the same name.
186 188
187 189 A check is performed that there are no other "%" in the string;
188 190 if there are, then the string is not a magic command and is left unchanged.
189 191
190 192 """
191 193
192 194 # Move any % signs from start to end of the key
193 195 # provided there are no others elsewhere in the string
194 196
195 197 if word[:2] == "%%":
196 198 if not "%" in word[2:]:
197 199 return word[2:] + "%%"
198 200
199 201 if word[:1] == "%":
200 202 if not "%" in word[1:]:
201 203 return word[1:] + "%"
202 204
203 205 return word
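
Used as a sort key, this places plain names ahead of the corresponding magics (illustrative snippet):

    from IPython.core.completer import penalize_magics_key

    names = ['%%timeit', '%timeit', 'timeit']
    print(sorted(names, key=penalize_magics_key))
    # -> ['timeit', '%timeit', '%%timeit']
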
204 206
205 207
206 208 @undoc
207 209 class Bunch(object): pass
208 210
209 211
210 212 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
211 213 GREEDY_DELIMS = ' =\r\n'
212 214
213 215
214 216 class CompletionSplitter(object):
215 217 """An object to split an input line in a manner similar to readline.
216 218
217 219 By having our own implementation, we can expose readline-like completion in
218 220 a uniform manner to all frontends. This object only needs to be given the
219 221 line of text to be split and the cursor position on said line, and it
220 222 returns the 'word' to be completed on at the cursor after splitting the
221 223 entire line.
222 224
223 225 What characters are used as splitting delimiters can be controlled by
224 226 setting the `delims` attribute (this is a property that internally
225 227 automatically builds the necessary regular expression)"""
226 228
227 229 # Private interface
228 230
229 231 # A string of delimiter characters. The default value makes sense for
230 232 # IPython's most typical usage patterns.
231 233 _delims = DELIMS
232 234
233 235 # The expression (a normal string) to be compiled into a regular expression
234 236 # for actual splitting. We store it as an attribute mostly for ease of
235 237 # debugging, since this type of code can be so tricky to debug.
236 238 _delim_expr = None
237 239
238 240 # The regular expression that does the actual splitting
239 241 _delim_re = None
240 242
241 243 def __init__(self, delims=None):
242 244 delims = CompletionSplitter._delims if delims is None else delims
243 245 self.delims = delims
244 246
245 247 @property
246 248 def delims(self):
247 249 """Return the string of delimiter characters."""
248 250 return self._delims
249 251
250 252 @delims.setter
251 253 def delims(self, delims):
252 254 """Set the delimiters for line splitting."""
253 255 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
254 256 self._delim_re = re.compile(expr)
255 257 self._delims = delims
256 258 self._delim_expr = expr
257 259
258 260 def split_line(self, line, cursor_pos=None):
259 261 """Split a line of text with a cursor at the given position.
260 262 """
261 263 l = line if cursor_pos is None else line[:cursor_pos]
262 264 return self._delim_re.split(l)[-1]
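
For example, with the default delimiters '=' and spaces split the line while '.' does not (illustrative snippet):

    from IPython.core.completer import CompletionSplitter

    sp = CompletionSplitter()
    print(sp.split_line('a = np.arr'))      # -> 'np.arr'
    print(sp.split_line('a = np.arr', 5))   # -> 'n' (only text left of the cursor is considered)
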
263 265
264 266
265 267 class Completer(Configurable):
266 268
267 269 greedy = CBool(False, config=True,
268 270 help="""Activate greedy completion
269 271
270 272 This will enable completion on elements of lists, results of function calls, etc.,
271 273 but can be unsafe because the code is actually evaluated on TAB.
272 274 """
273 275 )
274 276
275 277
276 278 def __init__(self, namespace=None, global_namespace=None, **kwargs):
277 279 """Create a new completer for the command line.
278 280
279 281 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
280 282
281 283 If unspecified, the default namespace where completions are performed
282 284 is __main__ (technically, __main__.__dict__). Namespaces should be
283 285 given as dictionaries.
284 286
285 287 An optional second namespace can be given. This allows the completer
286 288 to handle cases where both the local and global scopes need to be
287 289 distinguished.
288 290
289 291 Completer instances should be used as the completion mechanism of
290 292 readline via the set_completer() call:
291 293
292 294 readline.set_completer(Completer(my_namespace).complete)
293 295 """
294 296
295 297 # Don't bind to namespace quite yet, but flag whether the user wants a
296 298 # specific namespace or to use __main__.__dict__. This will allow us
297 299 # to bind to __main__.__dict__ at completion time, not now.
298 300 if namespace is None:
299 301 self.use_main_ns = 1
300 302 else:
301 303 self.use_main_ns = 0
302 304 self.namespace = namespace
303 305
304 306 # The global namespace, if given, can be bound directly
305 307 if global_namespace is None:
306 308 self.global_namespace = {}
307 309 else:
308 310 self.global_namespace = global_namespace
309 311
310 312 super(Completer, self).__init__(**kwargs)
311 313
312 314 def complete(self, text, state):
313 315 """Return the next possible completion for 'text'.
314 316
315 317 This is called successively with state == 0, 1, 2, ... until it
316 318 returns None. The completion should begin with 'text'.
317 319
318 320 """
319 321 if self.use_main_ns:
320 322 self.namespace = __main__.__dict__
321 323
322 324 if state == 0:
323 325 if "." in text:
324 326 self.matches = self.attr_matches(text)
325 327 else:
326 328 self.matches = self.global_matches(text)
327 329 try:
328 330 return self.matches[state]
329 331 except IndexError:
330 332 return None
331 333
332 334 def global_matches(self, text):
333 335 """Compute matches when text is a simple name.
334 336
335 337 Return a list of all keywords, built-in functions and names currently
336 338 defined in self.namespace or self.global_namespace that match.
337 339
338 340 """
339 341 #print 'Completer->global_matches, txt=%r' % text # dbg
340 342 matches = []
341 343 match_append = matches.append
342 344 n = len(text)
343 345 for lst in [keyword.kwlist,
344 346 builtin_mod.__dict__.keys(),
345 347 self.namespace.keys(),
346 348 self.global_namespace.keys()]:
347 349 for word in lst:
348 350 if word[:n] == text and word != "__builtins__":
349 351 match_append(word)
350 352 return matches
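
A small illustration with a made-up namespace (hypothetical names, not from the diff):

    from IPython.core.completer import Completer

    c = Completer(namespace={'spam': 1, 'spanner': 2, 'eggs': 3})
    print(c.global_matches('spa'))   # -> ['spam', 'spanner'] (order may vary)
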
351 353
352 354 def attr_matches(self, text):
353 355 """Compute matches when text contains a dot.
354 356
355 357 Assuming the text is of the form NAME.NAME....[NAME], and is
356 358 evaluatable in self.namespace or self.global_namespace, it will be
357 359 evaluated and its attributes (as revealed by dir()) are used as
358 360 possible completions. (For class instances, class members are
359 361 also considered.)
360 362
361 363 WARNING: this can still invoke arbitrary C code, if an object
362 364 with a __getattr__ hook is evaluated.
363 365
364 366 """
365 367
366 368 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
367 369 # Another option, seems to work great. Catches things like ''.<tab>
368 370 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
369 371
370 372 if m:
371 373 expr, attr = m.group(1, 3)
372 374 elif self.greedy:
373 375 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
374 376 if not m2:
375 377 return []
376 378 expr, attr = m2.group(1,2)
377 379 else:
378 380 return []
379 381
380 382 try:
381 383 obj = eval(expr, self.namespace)
382 384 except:
383 385 try:
384 386 obj = eval(expr, self.global_namespace)
385 387 except:
386 388 return []
387 389
388 390 if self.limit_to__all__ and hasattr(obj, '__all__'):
389 391 words = get__all__entries(obj)
390 392 else:
391 393 words = dir2(obj)
392 394
393 395 try:
394 396 words = generics.complete_object(obj, words)
395 397 except TryNext:
396 398 pass
397 399 except Exception:
398 400 # Silence errors from completion function
399 401 #raise # dbg
400 402 pass
401 403 # Build match list to return
402 404 n = len(attr)
403 405 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
404 406 return res
405 407
406 408
407 409 def get__all__entries(obj):
408 410 """returns the strings in the __all__ attribute"""
409 411 try:
410 412 words = getattr(obj, '__all__')
411 413 except:
412 414 return []
413 415
414 416 return [w for w in words if isinstance(w, string_types)]
415 417
416 418
417 419 def match_dict_keys(keys, prefix):
418 420 """Used by dict_key_matches, matching the prefix to a list of keys"""
419 421 if not prefix:
420 422 return None, 0, [repr(k) for k in keys
421 423 if isinstance(k, (string_types, bytes))]
422 424 quote_match = re.search('["\']', prefix)
423 425 quote = quote_match.group()
424 426 try:
425 427 prefix_str = eval(prefix + quote, {})
426 428 except Exception:
427 429 return None, 0, []
428 430
429 431 token_match = re.search(r'\w*$', prefix, re.UNICODE)
430 432 token_start = token_match.start()
431 433 token_prefix = token_match.group()
432 434
433 435 # TODO: support bytes in Py3k
434 436 matched = []
435 437 for key in keys:
436 438 try:
437 439 if not key.startswith(prefix_str):
438 440 continue
439 441 except (AttributeError, TypeError, UnicodeError):
440 442 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
441 443 continue
442 444
443 445 # reformat remainder of key to begin with prefix
444 446 rem = key[len(prefix_str):]
445 447 # force repr wrapped in '
446 448 rem_repr = repr(rem + '"')
447 449 if rem_repr.startswith('u') and prefix[0] not in 'uU':
448 450 # Found key is unicode, but prefix is Py2 string.
449 451 # Therefore attempt to interpret key as string.
450 452 try:
451 453 rem_repr = repr(rem.encode('ascii') + '"')
452 454 except UnicodeEncodeError:
453 455 continue
454 456
455 457 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
456 458 if quote == '"':
457 459 # The entered prefix is quoted with ",
458 460 # but the match is quoted with '.
459 461 # A contained " hence needs escaping for comparison:
460 462 rem_repr = rem_repr.replace('"', '\\"')
461 463
462 464 # then reinsert prefix from start of token
463 465 matched.append('%s%s' % (token_prefix, rem_repr))
464 466 return quote, token_start, matched
465 467
466 468
467 469 def _safe_isinstance(obj, module, class_name):
468 470 """Checks if obj is an instance of module.class_name if loaded
469 471 """
470 472 return (module in sys.modules and
471 473 isinstance(obj, getattr(__import__(module), class_name)))
472 474
473 475
474 476
475 477 class IPCompleter(Completer):
476 478 """Extension of the completer class with IPython-specific features"""
477 479
478 480 def _greedy_changed(self, name, old, new):
479 481 """update the splitter and readline delims when greedy is changed"""
480 482 if new:
481 483 self.splitter.delims = GREEDY_DELIMS
482 484 else:
483 485 self.splitter.delims = DELIMS
484 486
485 487 if self.readline:
486 488 self.readline.set_completer_delims(self.splitter.delims)
487 489
488 490 merge_completions = CBool(True, config=True,
489 491 help="""Whether to merge completion results into a single list
490 492
491 493 If False, only the completion results from the first non-empty
492 494 completer will be returned.
493 495 """
494 496 )
495 497 omit__names = Enum((0,1,2), default_value=2, config=True,
496 498 help="""Instruct the completer to omit private method names
497 499
498 500 Specifically, when completing on ``object.<tab>``.
499 501
500 502 When 2 [default]: all names that start with '_' will be excluded.
501 503
502 504 When 1: all 'magic' names (``__foo__``) will be excluded.
503 505
504 506 When 0: nothing will be excluded.
505 507 """
506 508 )
507 509 limit_to__all__ = CBool(default_value=False, config=True,
508 510 help="""Instruct the completer to use __all__ for the completion
509 511
510 512 Specifically, when completing on ``object.<tab>``.
511 513
512 514 When True: only those names in obj.__all__ will be included.
513 515
514 516 When False [default]: the __all__ attribute is ignored
515 517 """
516 518 )
517 519
518 520 def __init__(self, shell=None, namespace=None, global_namespace=None,
519 521 use_readline=True, config=None, **kwargs):
520 522 """IPCompleter() -> completer
521 523
522 524 Return a completer object suitable for use by the readline library
523 525 via readline.set_completer().
524 526
525 527 Inputs:
526 528
527 529 - shell: a pointer to the ipython shell itself. This is needed
528 530 because this completer knows about magic functions, and those can
529 531 only be accessed via the ipython instance.
530 532
531 533 - namespace: an optional dict where completions are performed.
532 534
533 535 - global_namespace: secondary optional dict for completions, to
534 536 handle cases (such as IPython embedded inside functions) where
535 537 both Python scopes are visible.
536 538
537 539 use_readline : bool, optional
538 540 If true, use the readline library. This completer can still function
539 541 without readline, though in that case callers must provide some extra
540 542 information on each call about the current line."""
541 543
542 544 self.magic_escape = ESC_MAGIC
543 545 self.splitter = CompletionSplitter()
544 546
545 547 # Readline configuration, only used by the rlcompleter method.
546 548 if use_readline:
547 549 # We store the right version of readline so that later code can use it
548 550 import IPython.utils.rlineimpl as readline
549 551 self.readline = readline
550 552 else:
551 553 self.readline = None
552 554
553 555 # _greedy_changed() depends on splitter and readline being defined:
554 556 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
555 557 config=config, **kwargs)
556 558
557 559 # List where completion matches will be stored
558 560 self.matches = []
559 561 self.shell = shell
560 562 # Regexp to split filenames with spaces in them
561 563 self.space_name_re = re.compile(r'([^\\] )')
562 564 # Hold a local ref. to glob.glob for speed
563 565 self.glob = glob.glob
564 566
565 567 # Determine if we are running on 'dumb' terminals, like (X)Emacs
566 568 # buffers, to avoid completion problems.
567 569 term = os.environ.get('TERM','xterm')
568 570 self.dumb_terminal = term in ['dumb','emacs']
569 571
570 572 # Special handling of backslashes needed in win32 platforms
571 573 if sys.platform == "win32":
572 574 self.clean_glob = self._clean_glob_win32
573 575 else:
574 576 self.clean_glob = self._clean_glob
575 577
576 578 #regexp to parse docstring for function signature
577 579 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
578 580 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
579 581 #use this if positional argument name is also needed
580 582 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
581 583
582 584 # All active matcher routines for completion
583 585 self.matchers = [self.python_matches,
584 586 self.file_matches,
585 587 self.magic_matches,
586 588 self.python_func_kw_matches,
587 589 self.dict_key_matches,
588 590 ]
589 591
590 592 def all_completions(self, text):
591 593 """
592 594 Wrapper around the complete method for the benefit of emacs
593 595 and pydb.
594 596 """
595 597 return self.complete(text)[1]
596 598
597 599 def _clean_glob(self,text):
598 600 return self.glob("%s*" % text)
599 601
600 602 def _clean_glob_win32(self,text):
601 603 return [f.replace("\\","/")
602 604 for f in self.glob("%s*" % text)]
603 605
604 606 def file_matches(self, text):
605 607 """Match filenames, expanding ~USER type strings.
606 608
607 609 Most of the seemingly convoluted logic in this completer is an
608 610 attempt to handle filenames with spaces in them. And yet it's not
609 611 quite perfect, because Python's readline doesn't expose all of the
610 612 GNU readline details needed for this to be done correctly.
611 613
612 614 For a filename with a space in it, the printed completions will be
613 615 only the parts after what's already been typed (instead of the
614 616 full completions, as is normally done). I don't think with the
615 617 current (as of Python 2.3) Python readline it's possible to do
616 618 better."""
617 619
618 620 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
619 621
620 622 # chars that require escaping with backslash - i.e. chars
621 623 # that readline treats incorrectly as delimiters, but we
622 624 # don't want to treat as delimiters in filename matching
623 625 # when escaped with backslash
624 626 if text.startswith('!'):
625 627 text = text[1:]
626 628 text_prefix = '!'
627 629 else:
628 630 text_prefix = ''
629 631
630 632 text_until_cursor = self.text_until_cursor
631 633 # track strings with open quotes
632 634 open_quotes = has_open_quotes(text_until_cursor)
633 635
634 636 if '(' in text_until_cursor or '[' in text_until_cursor:
635 637 lsplit = text
636 638 else:
637 639 try:
638 640 # arg_split ~ shlex.split, but with unicode bugs fixed by us
639 641 lsplit = arg_split(text_until_cursor)[-1]
640 642 except ValueError:
641 643 # typically an unmatched ", or backslash without escaped char.
642 644 if open_quotes:
643 645 lsplit = text_until_cursor.split(open_quotes)[-1]
644 646 else:
645 647 return []
646 648 except IndexError:
647 649 # tab pressed on empty line
648 650 lsplit = ""
649 651
650 652 if not open_quotes and lsplit != protect_filename(lsplit):
651 653 # if protectables are found, do matching on the whole escaped name
652 654 has_protectables = True
653 655 text0,text = text,lsplit
654 656 else:
655 657 has_protectables = False
656 658 text = os.path.expanduser(text)
657 659
658 660 if text == "":
659 661 return [text_prefix + protect_filename(f) for f in self.glob("*")]
660 662
661 663 # Compute the matches from the filesystem
662 664 m0 = self.clean_glob(text.replace('\\',''))
663 665
664 666 if has_protectables:
665 667 # If we had protectables, we need to revert our changes to the
666 668 # beginning of filename so that we don't double-write the part
667 669 # of the filename we have so far
668 670 len_lsplit = len(lsplit)
669 671 matches = [text_prefix + text0 +
670 672 protect_filename(f[len_lsplit:]) for f in m0]
671 673 else:
672 674 if open_quotes:
673 675 # if we have a string with an open quote, we don't need to
674 676 # protect the names at all (and we _shouldn't_, as it
675 677 # would cause bugs when the filesystem call is made).
676 678 matches = m0
677 679 else:
678 680 matches = [text_prefix +
679 681 protect_filename(f) for f in m0]
680 682
681 683 #io.rprint('mm', matches) # dbg
682 684
683 685 # Mark directories in input list by appending '/' to their names.
684 686 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
685 687 return matches
686 688
687 689 def magic_matches(self, text):
688 690 """Match magics"""
689 691 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
690 692 # Get all shell magics now rather than statically, so magics loaded at
691 693 # runtime show up too.
692 694 lsm = self.shell.magics_manager.lsmagic()
693 695 line_magics = lsm['line']
694 696 cell_magics = lsm['cell']
695 697 pre = self.magic_escape
696 698 pre2 = pre+pre
697 699
698 700 # Completion logic:
699 701 # - user gives %%: only do cell magics
700 702 # - user gives %: do both line and cell magics
701 703 # - no prefix: do both
702 704 # In other words, line magics are skipped if the user gives %% explicitly
703 705 bare_text = text.lstrip(pre)
704 706 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
705 707 if not text.startswith(pre2):
706 708 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
707 709 return comp
708 710
709 711 def python_matches(self,text):
710 712 """Match attributes or global python names"""
711 713
712 714 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
713 715 if "." in text:
714 716 try:
715 717 matches = self.attr_matches(text)
716 718 if text.endswith('.') and self.omit__names:
717 719 if self.omit__names == 1:
718 720 # true if txt is _not_ a __ name, false otherwise:
719 721 no__name = (lambda txt:
720 722 re.match(r'.*\.__.*?__',txt) is None)
721 723 else:
722 724 # true if txt is _not_ a _ name, false otherwise:
723 725 no__name = (lambda txt:
724 726 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
725 727 matches = filter(no__name, matches)
726 728 except NameError:
727 729 # catches <undefined attributes>.<tab>
728 730 matches = []
729 731 else:
730 732 matches = self.global_matches(text)
731 733
732 734 return matches
733 735
734 736 def _default_arguments_from_docstring(self, doc):
735 737 """Parse the first line of docstring for call signature.
736 738
737 739 Docstring should be of the form 'min(iterable[, key=func])\n'.
738 740 It can also parse cython docstring of the form
739 741 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
740 742 """
741 743 if doc is None:
742 744 return []
743 745
744 746 # care only about the first line
745 747 line = doc.lstrip().splitlines()[0]
746 748
747 749 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
748 750 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
749 751 sig = self.docstring_sig_re.search(line)
750 752 if sig is None:
751 753 return []
752 754 # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
753 755 sig = sig.groups()[0].split(',')
754 756 ret = []
755 757 for s in sig:
756 758 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
757 759 ret += self.docstring_kwd_re.findall(s)
758 760 return ret
759 761
760 762 def _default_arguments(self, obj):
761 763 """Return the list of default arguments of obj if it is callable,
762 764 or empty list otherwise."""
763 765 call_obj = obj
764 766 ret = []
765 767 if inspect.isbuiltin(obj):
766 768 pass
767 769 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
768 770 if inspect.isclass(obj):
769 771 # for cython embedsignature=True the constructor docstring
770 772 # belongs to the object itself, not to __init__
771 773 ret += self._default_arguments_from_docstring(
772 774 getattr(obj, '__doc__', ''))
773 775 # for classes, check for __init__,__new__
774 776 call_obj = (getattr(obj, '__init__', None) or
775 777 getattr(obj, '__new__', None))
776 778 # for all others, check if they are __call__able
777 779 elif hasattr(obj, '__call__'):
778 780 call_obj = obj.__call__
779 781
780 782 ret += self._default_arguments_from_docstring(
781 783 getattr(call_obj, '__doc__', ''))
782 784
783 785 try:
784 786 args,_,_1,defaults = inspect.getargspec(call_obj)
785 787 if defaults:
786 788 ret+=args[-len(defaults):]
787 789 except TypeError:
788 790 pass
789 791
790 792 return list(set(ret))
791 793
792 794 def python_func_kw_matches(self,text):
793 795 """Match named parameters (kwargs) of the last open function"""
794 796
795 797 if "." in text: # a parameter cannot be dotted
796 798 return []
797 799 try: regexp = self.__funcParamsRegex
798 800 except AttributeError:
799 801 regexp = self.__funcParamsRegex = re.compile(r'''
800 802 '.*?(?<!\\)' | # single quoted strings or
801 803 ".*?(?<!\\)" | # double quoted strings or
802 804 \w+ | # identifier
803 805 \S # other characters
804 806 ''', re.VERBOSE | re.DOTALL)
805 807 # 1. find the nearest identifier that comes before an unclosed
806 808 # parenthesis before the cursor
807 809 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
808 810 tokens = regexp.findall(self.text_until_cursor)
809 811 tokens.reverse()
810 812 iterTokens = iter(tokens); openPar = 0
811 813
812 814 for token in iterTokens:
813 815 if token == ')':
814 816 openPar -= 1
815 817 elif token == '(':
816 818 openPar += 1
817 819 if openPar > 0:
818 820 # found the last unclosed parenthesis
819 821 break
820 822 else:
821 823 return []
822 824 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
823 825 ids = []
824 826 isId = re.compile(r'\w+$').match
825 827
826 828 while True:
827 829 try:
828 830 ids.append(next(iterTokens))
829 831 if not isId(ids[-1]):
830 832 ids.pop(); break
831 833 if not next(iterTokens) == '.':
832 834 break
833 835 except StopIteration:
834 836 break
835 837 # lookup the candidate callable matches either using global_matches
836 838 # or attr_matches for dotted names
837 839 if len(ids) == 1:
838 840 callableMatches = self.global_matches(ids[0])
839 841 else:
840 842 callableMatches = self.attr_matches('.'.join(ids[::-1]))
841 843 argMatches = []
842 844 for callableMatch in callableMatches:
843 845 try:
844 846 namedArgs = self._default_arguments(eval(callableMatch,
845 847 self.namespace))
846 848 except:
847 849 continue
848 850
849 851 for namedArg in namedArgs:
850 852 if namedArg.startswith(text):
851 853 argMatches.append("%s=" %namedArg)
852 854 return argMatches
853 855
854 856 def dict_key_matches(self, text):
855 857 "Match string keys in a dictionary, after e.g. 'foo[' "
856 858 def get_keys(obj):
857 859 # Only allow completion for known in-memory dict-like types
858 860 if isinstance(obj, dict) or\
859 861 _safe_isinstance(obj, 'pandas', 'DataFrame'):
860 862 try:
861 863 return list(obj.keys())
862 864 except Exception:
863 865 return []
864 866 elif _safe_isinstance(obj, 'numpy', 'ndarray'):
865 867 return obj.dtype.names or []
866 868 return []
867 869
868 870 try:
869 871 regexps = self.__dict_key_regexps
870 872 except AttributeError:
871 873 dict_key_re_fmt = r'''(?x)
872 874 ( # match dict-referring expression wrt greedy setting
873 875 %s
874 876 )
875 877 \[ # open bracket
876 878 \s* # and optional whitespace
877 879 ([uUbB]? # string prefix (r not handled)
878 880 (?: # unclosed string
879 881 '(?:[^']|(?<!\\)\\')*
880 882 |
881 883 "(?:[^"]|(?<!\\)\\")*
882 884 )
883 885 )?
884 886 $
885 887 '''
886 888 regexps = self.__dict_key_regexps = {
887 889 False: re.compile(dict_key_re_fmt % '''
888 890 # identifiers separated by .
889 891 (?!\d)\w+
890 892 (?:\.(?!\d)\w+)*
891 893 '''),
892 894 True: re.compile(dict_key_re_fmt % '''
893 895 .+
894 896 ''')
895 897 }
896 898
897 899 match = regexps[self.greedy].search(self.text_until_cursor)
898 900 if match is None:
899 901 return []
900 902
901 903 expr, prefix = match.groups()
902 904 try:
903 905 obj = eval(expr, self.namespace)
904 906 except Exception:
905 907 try:
906 908 obj = eval(expr, self.global_namespace)
907 909 except Exception:
908 910 return []
909 911
910 912 keys = get_keys(obj)
911 913 if not keys:
912 914 return keys
913 915 closing_quote, token_offset, matches = match_dict_keys(keys, prefix)
914 916 if not matches:
915 917 return matches
916 918
917 919 # get the cursor position of
918 920 # - the text being completed
919 921 # - the start of the key text
920 922 # - the start of the completion
921 923 text_start = len(self.text_until_cursor) - len(text)
922 924 if prefix:
923 925 key_start = match.start(2)
924 926 completion_start = key_start + token_offset
925 927 else:
926 928 key_start = completion_start = match.end()
927 929
928 930 # grab the leading prefix, to make sure all completions start with `text`
929 931 if text_start > key_start:
930 932 leading = ''
931 933 else:
932 934 leading = text[text_start:completion_start]
933 935
934 936 # the index of the `[` character
935 937 bracket_idx = match.end(1)
936 938
937 939 # append closing quote and bracket as appropriate
938 940 # this is *not* appropriate if the opening quote or bracket is outside
939 941 # the text given to this method
940 942 suf = ''
941 943 continuation = self.line_buffer[len(self.text_until_cursor):]
942 944 if key_start > text_start and closing_quote:
943 945 # quotes were opened inside text, maybe close them
944 946 if continuation.startswith(closing_quote):
945 947 continuation = continuation[len(closing_quote):]
946 948 else:
947 949 suf += closing_quote
948 950 if bracket_idx > text_start:
949 951 # brackets were opened inside text, maybe close them
950 952 if not continuation.startswith(']'):
951 953 suf += ']'
952 954
953 955 return [leading + k + suf for k in matches]
954 956
957 def latex_matches(self, text):
958 u"""Match Latex syntax for unicode characters.
959
960 This does both \\alp -> \\alpha and \\alpha -> α
961
962 Used on Python 3 only.
963 """
964 slashpos = text.rfind('\\')
965 if slashpos > -1:
966 s = text[slashpos:]
967 if s in latex_symbols:
968 # Try to complete a full latex symbol to unicode
969 # \\alpha -> α
970 return s, [latex_symbols[s]]
971 else:
972 # If a user has partially typed a latex symbol, give them
973 # a full list of options \al -> [\aleph, \alpha]
974 matches = [k for k in latex_symbols if k.startswith(s)]
975 return s, matches
976 return u'', []
977
955 978 def dispatch_custom_completer(self, text):
956 979 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
957 980 line = self.line_buffer
958 981 if not line.strip():
959 982 return None
960 983
961 984 # Create a little structure to pass all the relevant information about
962 985 # the current completion to any custom completer.
963 986 event = Bunch()
964 987 event.line = line
965 988 event.symbol = text
966 989 cmd = line.split(None,1)[0]
967 990 event.command = cmd
968 991 event.text_until_cursor = self.text_until_cursor
969 992
970 993 #print "\ncustom:{%s]\n" % event # dbg
971 994
972 995 # for foo etc, try also to find completer for %foo
973 996 if not cmd.startswith(self.magic_escape):
974 997 try_magic = self.custom_completers.s_matches(
975 998 self.magic_escape + cmd)
976 999 else:
977 1000 try_magic = []
978 1001
979 1002 for c in itertools.chain(self.custom_completers.s_matches(cmd),
980 1003 try_magic,
981 1004 self.custom_completers.flat_matches(self.text_until_cursor)):
982 1005 #print "try",c # dbg
983 1006 try:
984 1007 res = c(event)
985 1008 if res:
986 1009 # first, try case sensitive match
987 1010 withcase = [r for r in res if r.startswith(text)]
988 1011 if withcase:
989 1012 return withcase
990 1013 # if none, then case insensitive ones are ok too
991 1014 text_low = text.lower()
992 1015 return [r for r in res if r.lower().startswith(text_low)]
993 1016 except TryNext:
994 1017 pass
995 1018
996 1019 return None
997 1020
998 1021 def complete(self, text=None, line_buffer=None, cursor_pos=None):
999 1022 """Find completions for the given text and line context.
1000 1023
1001 1024 Note that both the text and the line_buffer are optional, but at least
1002 1025 one of them must be given.
1003 1026
1004 1027 Parameters
1005 1028 ----------
1006 1029 text : string, optional
1007 1030 Text to perform the completion on. If not given, the line buffer
1008 1031 is split using the instance's CompletionSplitter object.
1009 1032
1010 1033 line_buffer : string, optional
1011 1034 If not given, the completer attempts to obtain the current line
1012 1035 buffer via readline. This keyword allows clients which are
1013 1036 requesting for text completions in non-readline contexts to inform
1014 1037 the completer of the entire text.
1015 1038
1016 1039 cursor_pos : int, optional
1017 1040 Index of the cursor in the full line buffer. Should be provided by
1018 1041 remote frontends where the kernel has no access to frontend state.
1019 1042
1020 1043 Returns
1021 1044 -------
1022 1045 text : str
1023 1046 Text that was actually used in the completion.
1024 1047
1025 1048 matches : list
1026 1049 A list of completion matches.
1027 1050 """
1028 #io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1051 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1029 1052
1030 1053 # if the cursor position isn't given, the only sane assumption we can
1031 1054 # make is that it's at the end of the line (the common case)
1032 1055 if cursor_pos is None:
1033 1056 cursor_pos = len(line_buffer) if text is None else len(text)
1034 1057
1058 if PY3:
1059 latex_text = text if not line_buffer else line_buffer[:cursor_pos]
1060 latex_text, latex_matches = self.latex_matches(latex_text)
1061 if latex_matches:
1062 return latex_text, latex_matches
1063
1035 1064 # if text is either None or an empty string, rely on the line buffer
1036 1065 if not text:
1037 1066 text = self.splitter.split_line(line_buffer, cursor_pos)
1038 1067
1039 1068 # If no line buffer is given, assume the input text is all there was
1040 1069 if line_buffer is None:
1041 1070 line_buffer = text
1042 1071
1043 1072 self.line_buffer = line_buffer
1044 1073 self.text_until_cursor = self.line_buffer[:cursor_pos]
1045 #io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1074 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1046 1075
1047 1076 # Start with a clean slate of completions
1048 1077 self.matches[:] = []
1049 1078 custom_res = self.dispatch_custom_completer(text)
1050 1079 if custom_res is not None:
1051 1080 # did custom completers produce something?
1052 1081 self.matches = custom_res
1053 1082 else:
1054 1083 # Extend the list of completions with the results of each
1055 1084 # matcher, so we return results to the user from all
1056 1085 # namespaces.
1057 1086 if self.merge_completions:
1058 1087 self.matches = []
1059 1088 for matcher in self.matchers:
1060 1089 try:
1061 1090 self.matches.extend(matcher(text))
1062 1091 except:
1063 1092 # Show the ugly traceback if the matcher causes an
1064 1093 # exception, but do NOT crash the kernel!
1065 1094 sys.excepthook(*sys.exc_info())
1066 1095 else:
1067 1096 for matcher in self.matchers:
1068 1097 self.matches = matcher(text)
1069 1098 if self.matches:
1070 1099 break
1071 1100 # FIXME: we should extend our api to return a dict with completions for
1072 1101 # different types of objects. The rlcomplete() method could then
1073 1102 # simply collapse the dict into a list for readline, but we'd have
1074 1103 # richer completion semantics in other environments.
1075 1104
1076 1105 # use penalize_magics_key to put magics after variables with same name
1077 1106 self.matches = sorted(set(self.matches), key=penalize_magics_key)
1078 1107
1079 1108 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1080 1109 return text, self.matches
1081 1110
1082 1111 def rlcomplete(self, text, state):
1083 1112 """Return the state-th possible completion for 'text'.
1084 1113
1085 1114 This is called successively with state == 0, 1, 2, ... until it
1086 1115 returns None. The completion should begin with 'text'.
1087 1116
1088 1117 Parameters
1089 1118 ----------
1090 1119 text : string
1091 1120 Text to perform the completion on.
1092 1121
1093 1122 state : int
1094 1123 Counter used by readline.
1095 1124 """
1096 1125 if state==0:
1097 1126
1098 1127 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1099 1128 cursor_pos = self.readline.get_endidx()
1100 1129
1101 1130 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1102 1131 # (text, line_buffer, cursor_pos) ) # dbg
1103 1132
1104 1133 # if there is only a tab on a line with only whitespace, instead of
1105 1134 # the mostly useless 'do you want to see all million completions'
1106 1135 # message, just do the right thing and give the user his tab!
1107 1136 # Incidentally, this enables pasting of tabbed text from an editor
1108 1137 # (as long as autoindent is off).
1109 1138
1110 1139 # It should be noted that at least pyreadline still shows file
1111 1140 # completions - is there a way around it?
1112 1141
1113 1142 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1114 1143 # we don't interfere with their own tab-completion mechanism.
1115 1144 if not (self.dumb_terminal or line_buffer.strip()):
1116 1145 self.readline.insert_text('\t')
1117 1146 sys.stdout.flush()
1118 1147 return None
1119 1148
1120 1149 # Note: debugging exceptions that may occur in completion is very
1121 1150 # tricky, because readline unconditionally silences them. So if
1122 1151 # during development you suspect a bug in the completion code, turn
1123 1152 # this flag on temporarily by uncommenting the second form (don't
1124 1153 # flip the value in the first line, as the '# dbg' marker can be
1125 1154 # automatically detected and is used elsewhere).
1126 1155 DEBUG = False
1127 1156 #DEBUG = True # dbg
1128 1157 if DEBUG:
1129 1158 try:
1130 1159 self.complete(text, line_buffer, cursor_pos)
1131 1160 except:
1132 1161 import traceback; traceback.print_exc()
1133 1162 else:
1134 1163 # The normal production version is here
1135 1164
1136 1165 # This method computes the self.matches array
1137 1166 self.complete(text, line_buffer, cursor_pos)
1138 1167
1139 1168 try:
1140 1169 return self.matches[state]
1141 1170 except IndexError:
1142 1171 return None
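The latex_matches routine added above is consulted first by complete() on Python 3: when the text being completed contains a backslash escape, the completer either substitutes the corresponding unicode character outright or lists the symbol names that share the typed prefix. A minimal usage sketch, assuming an IPython shell running with this change applied (it mirrors the behaviour exercised by the new test further down):

    ip = get_ipython()

    # A full escape completes straight to its unicode character.
    text, matches = ip.complete(u'\\alpha')
    # text == u'\\alpha', matches == [u'α']

    # A partial escape lists every symbol name sharing the prefix.
    text, matches = ip.complete(u'\\al')
    # matches include u'\\aleph' and u'\\alpha'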
@@ -1,352 +1,353 b''
1 # encoding: utf-8
1 2 """Implementations for various useful completers.
2 3
3 4 These are all loaded by default by IPython.
4 5 """
5 6 #-----------------------------------------------------------------------------
6 7 # Copyright (C) 2010-2011 The IPython Development Team.
7 8 #
8 9 # Distributed under the terms of the BSD License.
9 10 #
10 11 # The full license is in the file COPYING.txt, distributed with this software.
11 12 #-----------------------------------------------------------------------------
12 13
13 14 #-----------------------------------------------------------------------------
14 15 # Imports
15 16 #-----------------------------------------------------------------------------
16 17 from __future__ import print_function
17 18
18 19 # Stdlib imports
19 20 import glob
20 21 import inspect
21 22 import os
22 23 import re
23 24 import sys
24 25
25 26 try:
26 27 # Python >= 3.3
27 28 from importlib.machinery import all_suffixes
28 29 _suffixes = all_suffixes()
29 30 except ImportError:
30 31 from imp import get_suffixes
31 32 _suffixes = [ s[0] for s in get_suffixes() ]
32 33
33 34 # Third-party imports
34 35 from time import time
35 36 from zipimport import zipimporter
36 37
37 38 # Our own imports
38 39 from IPython.core.completer import expand_user, compress_user
39 40 from IPython.core.error import TryNext
40 41 from IPython.utils._process_common import arg_split
41 42 from IPython.utils.py3compat import string_types
42 43
43 44 # FIXME: this should be pulled in with the right call via the component system
44 45 from IPython import get_ipython
45 46
46 47 #-----------------------------------------------------------------------------
47 48 # Globals and constants
48 49 #-----------------------------------------------------------------------------
49 50
50 51 # Time in seconds after which the rootmodules will be stored permanently in the
51 52 # ipython ip.db database (kept in the user's .ipython dir).
52 53 TIMEOUT_STORAGE = 2
53 54
54 55 # Time in seconds after which we give up
55 56 TIMEOUT_GIVEUP = 20
56 57
57 58 # Regular expression for the python import statement
58 59 import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
59 60 r'(?P<package>[/\\]__init__)?'
60 61 r'(?P<suffix>%s)$' %
61 62 r'|'.join(re.escape(s) for s in _suffixes))
62 63
63 64 # RE for the ipython %run command (python + ipython scripts)
64 65 magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
65 66
66 67 #-----------------------------------------------------------------------------
67 68 # Local utilities
68 69 #-----------------------------------------------------------------------------
69 70
70 71 def module_list(path):
71 72 """
72 73 Return the list containing the names of the modules available in the given
73 74 folder.
74 75 """
75 76 # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
76 77 if path == '':
77 78 path = '.'
78 79
79 80 # A few local constants to be used in loops below
80 81 pjoin = os.path.join
81 82
82 83 if os.path.isdir(path):
83 84 # Build a list of all files in the directory and all files
84 85 # in its subdirectories. For performance reasons, do not
85 86 # recurse more than one level into subdirectories.
86 87 files = []
87 88 for root, dirs, nondirs in os.walk(path, followlinks=True):
88 89 subdir = root[len(path)+1:]
89 90 if subdir:
90 91 files.extend(pjoin(subdir, f) for f in nondirs)
91 92 dirs[:] = [] # Do not recurse into additional subdirectories.
92 93 else:
93 94 files.extend(nondirs)
94 95
95 96 else:
96 97 try:
97 98 files = list(zipimporter(path)._files.keys())
98 99 except:
99 100 files = []
100 101
101 102 # Build a list of modules which match the import_re regex.
102 103 modules = []
103 104 for f in files:
104 105 m = import_re.match(f)
105 106 if m:
106 107 modules.append(m.group('name'))
107 108 return list(set(modules))
108 109
109 110
110 111 def get_root_modules():
111 112 """
112 113 Returns a list containing the names of all the modules available in the
113 114 folders of the pythonpath.
114 115
115 116 ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
116 117 """
117 118 ip = get_ipython()
118 119 rootmodules_cache = ip.db.get('rootmodules_cache', {})
119 120 rootmodules = list(sys.builtin_module_names)
120 121 start_time = time()
121 122 store = False
122 123 for path in sys.path:
123 124 try:
124 125 modules = rootmodules_cache[path]
125 126 except KeyError:
126 127 modules = module_list(path)
127 128 try:
128 129 modules.remove('__init__')
129 130 except ValueError:
130 131 pass
131 132 if path not in ('', '.'): # cwd modules should not be cached
132 133 rootmodules_cache[path] = modules
133 134 if time() - start_time > TIMEOUT_STORAGE and not store:
134 135 store = True
135 136 print("\nCaching the list of root modules, please wait!")
136 137 print("(This will only be done once - type '%rehashx' to "
137 138 "reset cache!)\n")
138 139 sys.stdout.flush()
139 140 if time() - start_time > TIMEOUT_GIVEUP:
140 141 print("This is taking too long, we give up.\n")
141 142 return []
142 143 rootmodules.extend(modules)
143 144 if store:
144 145 ip.db['rootmodules_cache'] = rootmodules_cache
145 146 rootmodules = list(set(rootmodules))
146 147 return rootmodules
147 148
148 149
149 150 def is_importable(module, attr, only_modules):
150 151 if only_modules:
151 152 return inspect.ismodule(getattr(module, attr))
152 153 else:
153 154 return not(attr[:2] == '__' and attr[-2:] == '__')
154 155
155 156
156 157 def try_import(mod, only_modules=False):
157 158 try:
158 159 m = __import__(mod)
159 160 except:
160 161 return []
161 162 mods = mod.split('.')
162 163 for module in mods[1:]:
163 164 m = getattr(m, module)
164 165
165 166 m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
166 167
167 168 completions = []
168 169 if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
169 170 completions.extend( [attr for attr in dir(m) if
170 171 is_importable(m, attr, only_modules)])
171 172
172 173 completions.extend(getattr(m, '__all__', []))
173 174 if m_is_init:
174 175 completions.extend(module_list(os.path.dirname(m.__file__)))
175 176 completions = set(completions)
176 177 if '__init__' in completions:
177 178 completions.remove('__init__')
178 179 return list(completions)
179 180
180 181
181 182 #-----------------------------------------------------------------------------
182 183 # Completion-related functions.
183 184 #-----------------------------------------------------------------------------
184 185
185 186 def quick_completer(cmd, completions):
186 187 """ Easily create a trivial completer for a command.
187 188
188 189 Takes either a list of completions, or all completions in a string (which will
189 190 be split on whitespace).
190 191
191 192 Example::
192 193
193 194 [d:\ipython]|1> import ipy_completers
194 195 [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
195 196 [d:\ipython]|3> foo b<TAB>
196 197 bar baz
197 198 [d:\ipython]|3> foo ba
198 199 """
199 200
200 201 if isinstance(completions, string_types):
201 202 completions = completions.split()
202 203
203 204 def do_complete(self, event):
204 205 return completions
205 206
206 207 get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
207 208
208 209 def module_completion(line):
209 210 """
210 211 Returns a list containing the completion possibilities for an import line.
211 212
212 213 The line looks like this:
213 214 'import xml.d'
214 215 'from xml.dom import'
215 216 """
216 217
217 218 words = line.split(' ')
218 219 nwords = len(words)
219 220
220 221 # from whatever <tab> -> 'import '
221 222 if nwords == 3 and words[0] == 'from':
222 223 return ['import ']
223 224
224 225 # 'from xy<tab>' or 'import xy<tab>'
225 226 if nwords < 3 and (words[0] in ['import','from']) :
226 227 if nwords == 1:
227 228 return get_root_modules()
228 229 mod = words[1].split('.')
229 230 if len(mod) < 2:
230 231 return get_root_modules()
231 232 completion_list = try_import('.'.join(mod[:-1]), True)
232 233 return ['.'.join(mod[:-1] + [el]) for el in completion_list]
233 234
234 235 # 'from xyz import abc<tab>'
235 236 if nwords >= 3 and words[0] == 'from':
236 237 mod = words[1]
237 238 return try_import(mod)
238 239
239 240 #-----------------------------------------------------------------------------
240 241 # Completers
241 242 #-----------------------------------------------------------------------------
242 243 # These all have the func(self, event) signature to be used as custom
243 244 # completers
244 245
245 246 def module_completer(self,event):
246 247 """Give completions after user has typed 'import ...' or 'from ...'"""
247 248
248 249 # This works in all versions of python. While 2.5 has
249 250 # pkgutil.walk_packages(), that particular routine is fairly dangerous,
250 251 # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
251 252 # of possibly problematic side effects.
252 253 # This searches the folders in sys.path for available modules.
253 254
254 255 return module_completion(event.line)
255 256
256 257 # FIXME: there's a lot of logic common to the run, cd and builtin file
257 258 # completers, that is currently reimplemented in each.
258 259
259 260 def magic_run_completer(self, event):
260 261 """Complete files that end in .py or .ipy or .ipynb for the %run command.
261 262 """
262 263 comps = arg_split(event.line, strict=False)
263 264 # relpath should be the current token that we need to complete.
264 265 if (len(comps) > 1) and (not event.line.endswith(' ')):
265 266 relpath = comps[-1].strip("'\"")
266 267 else:
267 268 relpath = ''
268 269
269 270 #print("\nev=", event) # dbg
270 271 #print("rp=", relpath) # dbg
271 272 #print('comps=', comps) # dbg
272 273
273 274 lglob = glob.glob
274 275 isdir = os.path.isdir
275 276 relpath, tilde_expand, tilde_val = expand_user(relpath)
276 277
277 278 # Find if the user has already typed the first filename, after which we
278 279 # should complete on all files, since after the first one other files may
279 280 # be arguments to the input script.
280 281
281 282 if any(magic_run_re.match(c) for c in comps):
282 283 matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
283 284 for f in lglob(relpath+'*')]
284 285 else:
285 286 dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
286 287 pys = [f.replace('\\','/')
287 288 for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
288 289 lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
289 290
290 291 matches = dirs + pys
291 292
292 293 #print('run comp:', dirs+pys) # dbg
293 294 return [compress_user(p, tilde_expand, tilde_val) for p in matches]
294 295
295 296
296 297 def cd_completer(self, event):
297 298 """Completer function for cd, which only returns directories."""
298 299 ip = get_ipython()
299 300 relpath = event.symbol
300 301
301 302 #print(event) # dbg
302 303 if event.line.endswith('-b') or ' -b ' in event.line:
303 304 # return only bookmark completions
304 305 bkms = self.db.get('bookmarks', None)
305 306 if bkms:
306 307 return bkms.keys()
307 308 else:
308 309 return []
309 310
310 311 if event.symbol == '-':
311 312 width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
312 313 # jump in directory history by number
313 314 fmt = '-%0' + width_dh +'d [%s]'
314 315 ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
315 316 if len(ents) > 1:
316 317 return ents
317 318 return []
318 319
319 320 if event.symbol.startswith('--'):
320 321 return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
321 322
322 323 # Expand ~ in path and normalize directory separators.
323 324 relpath, tilde_expand, tilde_val = expand_user(relpath)
324 325 relpath = relpath.replace('\\','/')
325 326
326 327 found = []
327 328 for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
328 329 if os.path.isdir(f)]:
329 330 if ' ' in d:
330 331 # we don't want to deal with any of that, complex code
331 332 # for this is elsewhere
332 333 raise TryNext
333 334
334 335 found.append(d)
335 336
336 337 if not found:
337 338 if os.path.isdir(relpath):
338 339 return [compress_user(relpath, tilde_expand, tilde_val)]
339 340
340 341 # if no completions so far, try bookmarks
341 342 bks = self.db.get('bookmarks',{})
342 343 bkmatches = [s for s in bks if s.startswith(event.symbol)]
343 344 if bkmatches:
344 345 return bkmatches
345 346
346 347 raise TryNext
347 348
348 349 return [compress_user(p, tilde_expand, tilde_val) for p in found]
349 350
350 351 def reset_completer(self, event):
351 352 "A completer for %reset magic"
352 353 return '-f -s in out array dhist'.split()
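All of the completers in this module use the func(self, event) signature and are attached through the 'complete_command' hook, which is exactly what quick_completer does above. A minimal sketch of wiring one up by hand, for a hypothetical command name 'mycmd':

    from IPython import get_ipython

    def mycmd_completer(self, event):
        # event.symbol is the token being completed; event.line is the whole line
        return ['alpha', 'beta', 'gamma']

    get_ipython().set_hook('complete_command', mycmd_completer, str_key='mycmd')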
@@ -1,680 +1,701 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import sys
9 9 import unittest
10 10
11 11 from contextlib import contextmanager
12 12
13 13 import nose.tools as nt
14 14
15 15 from IPython.config.loader import Config
16 16 from IPython.core import completer
17 17 from IPython.external.decorators import knownfailureif
18 18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 19 from IPython.utils.generics import complete_object
20 20 from IPython.utils import py3compat
21 21 from IPython.utils.py3compat import string_types, unicode_type
22 22 from IPython.testing import decorators as dec
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Test functions
26 26 #-----------------------------------------------------------------------------
27 27
28 28 @contextmanager
29 29 def greedy_completion():
30 30 ip = get_ipython()
31 31 greedy_original = ip.Completer.greedy
32 32 try:
33 33 ip.Completer.greedy = True
34 34 yield
35 35 finally:
36 36 ip.Completer.greedy = greedy_original
37 37
38 38 def test_protect_filename():
39 39 pairs = [ ('abc','abc'),
40 40 (' abc',r'\ abc'),
41 41 ('a bc',r'a\ bc'),
42 42 ('a bc',r'a\ \ bc'),
43 43 (' bc',r'\ \ bc'),
44 44 ]
45 45 # On posix, we also protect parens and other special characters
46 46 if sys.platform != 'win32':
47 47 pairs.extend( [('a(bc',r'a\(bc'),
48 48 ('a)bc',r'a\)bc'),
49 49 ('a( )bc',r'a\(\ \)bc'),
50 50 ('a[1]bc', r'a\[1\]bc'),
51 51 ('a{1}bc', r'a\{1\}bc'),
52 52 ('a#bc', r'a\#bc'),
53 53 ('a?bc', r'a\?bc'),
54 54 ('a=bc', r'a\=bc'),
55 55 ('a\\bc', r'a\\bc'),
56 56 ('a|bc', r'a\|bc'),
57 57 ('a;bc', r'a\;bc'),
58 58 ('a:bc', r'a\:bc'),
59 59 ("a'bc", r"a\'bc"),
60 60 ('a*bc', r'a\*bc'),
61 61 ('a"bc', r'a\"bc'),
62 62 ('a^bc', r'a\^bc'),
63 63 ('a&bc', r'a\&bc'),
64 64 ] )
65 65 # run the actual tests
66 66 for s1, s2 in pairs:
67 67 s1p = completer.protect_filename(s1)
68 68 nt.assert_equal(s1p, s2)
69 69
70 70
71 71 def check_line_split(splitter, test_specs):
72 72 for part1, part2, split in test_specs:
73 73 cursor_pos = len(part1)
74 74 line = part1+part2
75 75 out = splitter.split_line(line, cursor_pos)
76 76 nt.assert_equal(out, split)
77 77
78 78
79 79 def test_line_split():
80 80 """Basic line splitter test with default specs."""
81 81 sp = completer.CompletionSplitter()
82 82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 84 # was at the end of part1. So an empty part2 represents someone hitting
85 85 # tab at the end of the line, the most common case.
86 86 t = [('run some/scrip', '', 'some/scrip'),
87 87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 88 ('echo $HOM', '', 'HOM'),
89 89 ('print sys.pa', '', 'sys.pa'),
90 90 ('print(sys.pa', '', 'sys.pa'),
91 91 ("execfile('scripts/er", '', 'scripts/er'),
92 92 ('a[x.', '', 'x.'),
93 93 ('a[x.', 'y', 'x.'),
94 94 ('cd "some_file/', '', 'some_file/'),
95 95 ]
96 96 check_line_split(sp, t)
97 97 # Ensure splitting works OK with unicode by re-running the tests with
98 98 # all inputs turned into unicode
99 99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100 100
101 101
102 102 def test_custom_completion_error():
103 103 """Test that errors from custom attribute completers are silenced."""
104 104 ip = get_ipython()
105 105 class A(object): pass
106 106 ip.user_ns['a'] = A()
107 107
108 108 @complete_object.when_type(A)
109 109 def complete_A(a, existing_completions):
110 110 raise TypeError("this should be silenced")
111 111
112 112 ip.complete("a.")
113 113
114 114
115 115 def test_unicode_completions():
116 116 ip = get_ipython()
117 117 # Some strings that trigger different types of completion. Check them both
118 118 # in str and unicode forms
119 119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 120 for t in s + list(map(unicode_type, s)):
121 121 # We don't need to check exact completion values (they may change
122 122 # depending on the state of the namespace, but at least no exceptions
123 123 # should be thrown and the return value should be a pair of text, list
124 124 # values.
125 125 text, matches = ip.complete(t)
126 126 nt.assert_true(isinstance(text, string_types))
127 127 nt.assert_true(isinstance(matches, list))
128 128
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 def test_latex_completions():
131 from IPython.core.latex_symbols import latex_symbols
132 import random
133 ip = get_ipython()
134 # Test some random unicode symbols
135 keys = random.sample(latex_symbols.keys(), 10)
136 for k in keys:
137 text, matches = ip.complete(k)
138 nt.assert_equal(len(matches),1)
139 nt.assert_equal(text, k)
140 nt.assert_equal(matches[0], latex_symbols[k])
141 # Test a more complex line
142 text, matches = ip.complete(u'print(\\alpha')
143 nt.assert_equals(text, u'\\alpha')
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
145 # Test multiple matching latex symbols
146 text, matches = ip.complete(u'\\al')
147 nt.assert_in('\\alpha', matches)
148 nt.assert_in('\\aleph', matches)
149
129 150
130 151 class CompletionSplitterTestCase(unittest.TestCase):
131 152 def setUp(self):
132 153 self.sp = completer.CompletionSplitter()
133 154
134 155 def test_delim_setting(self):
135 156 self.sp.delims = ' '
136 157 nt.assert_equal(self.sp.delims, ' ')
137 158 nt.assert_equal(self.sp._delim_expr, '[\ ]')
138 159
139 160 def test_spaces(self):
140 161 """Test with only spaces as split chars."""
141 162 self.sp.delims = ' '
142 163 t = [('foo', '', 'foo'),
143 164 ('run foo', '', 'foo'),
144 165 ('run foo', 'bar', 'foo'),
145 166 ]
146 167 check_line_split(self.sp, t)
147 168
148 169
149 170 def test_has_open_quotes1():
150 171 for s in ["'", "'''", "'hi' '"]:
151 172 nt.assert_equal(completer.has_open_quotes(s), "'")
152 173
153 174
154 175 def test_has_open_quotes2():
155 176 for s in ['"', '"""', '"hi" "']:
156 177 nt.assert_equal(completer.has_open_quotes(s), '"')
157 178
158 179
159 180 def test_has_open_quotes3():
160 181 for s in ["''", "''' '''", "'hi' 'ipython'"]:
161 182 nt.assert_false(completer.has_open_quotes(s))
162 183
163 184
164 185 def test_has_open_quotes4():
165 186 for s in ['""', '""" """', '"hi" "ipython"']:
166 187 nt.assert_false(completer.has_open_quotes(s))
167 188
168 189
169 190 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
170 191 def test_abspath_file_completions():
171 192 ip = get_ipython()
172 193 with TemporaryDirectory() as tmpdir:
173 194 prefix = os.path.join(tmpdir, 'foo')
174 195 suffixes = ['1', '2']
175 196 names = [prefix+s for s in suffixes]
176 197 for n in names:
177 198 open(n, 'w').close()
178 199
179 200 # Check simple completion
180 201 c = ip.complete(prefix)[1]
181 202 nt.assert_equal(c, names)
182 203
183 204 # Now check with a function call
184 205 cmd = 'a = f("%s' % prefix
185 206 c = ip.complete(prefix, cmd)[1]
186 207 comp = [prefix+s for s in suffixes]
187 208 nt.assert_equal(c, comp)
188 209
189 210
190 211 def test_local_file_completions():
191 212 ip = get_ipython()
192 213 with TemporaryWorkingDirectory():
193 214 prefix = './foo'
194 215 suffixes = ['1', '2']
195 216 names = [prefix+s for s in suffixes]
196 217 for n in names:
197 218 open(n, 'w').close()
198 219
199 220 # Check simple completion
200 221 c = ip.complete(prefix)[1]
201 222 nt.assert_equal(c, names)
202 223
203 224 # Now check with a function call
204 225 cmd = 'a = f("%s' % prefix
205 226 c = ip.complete(prefix, cmd)[1]
206 227 comp = [prefix+s for s in suffixes]
207 228 nt.assert_equal(c, comp)
208 229
209 230
210 231 def test_greedy_completions():
211 232 ip = get_ipython()
212 233 ip.ex('a=list(range(5))')
213 234 _,c = ip.complete('.',line='a[0].')
214 235 nt.assert_false('a[0].real' in c,
215 236 "Shouldn't have completed on a[0]: %s"%c)
216 237 with greedy_completion():
217 238 _,c = ip.complete('.',line='a[0].')
218 239 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
219 240
220 241
221 242 def test_omit__names():
222 243 # also happens to test IPCompleter as a configurable
223 244 ip = get_ipython()
224 245 ip._hidden_attr = 1
225 246 ip._x = {}
226 247 c = ip.Completer
227 248 ip.ex('ip=get_ipython()')
228 249 cfg = Config()
229 250 cfg.IPCompleter.omit__names = 0
230 251 c.update_config(cfg)
231 252 s,matches = c.complete('ip.')
232 253 nt.assert_in('ip.__str__', matches)
233 254 nt.assert_in('ip._hidden_attr', matches)
234 255 cfg.IPCompleter.omit__names = 1
235 256 c.update_config(cfg)
236 257 s,matches = c.complete('ip.')
237 258 nt.assert_not_in('ip.__str__', matches)
238 259 nt.assert_in('ip._hidden_attr', matches)
239 260 cfg.IPCompleter.omit__names = 2
240 261 c.update_config(cfg)
241 262 s,matches = c.complete('ip.')
242 263 nt.assert_not_in('ip.__str__', matches)
243 264 nt.assert_not_in('ip._hidden_attr', matches)
244 265 s,matches = c.complete('ip._x.')
245 266 nt.assert_in('ip._x.keys', matches)
246 267 del ip._hidden_attr
247 268
248 269
249 270 def test_limit_to__all__False_ok():
250 271 ip = get_ipython()
251 272 c = ip.Completer
252 273 ip.ex('class D: x=24')
253 274 ip.ex('d=D()')
254 275 cfg = Config()
255 276 cfg.IPCompleter.limit_to__all__ = False
256 277 c.update_config(cfg)
257 278 s, matches = c.complete('d.')
258 279 nt.assert_in('d.x', matches)
259 280
260 281
261 282 def test_limit_to__all__True_ok():
262 283 ip = get_ipython()
263 284 c = ip.Completer
264 285 ip.ex('class D: x=24')
265 286 ip.ex('d=D()')
266 287 ip.ex("d.__all__=['z']")
267 288 cfg = Config()
268 289 cfg.IPCompleter.limit_to__all__ = True
269 290 c.update_config(cfg)
270 291 s, matches = c.complete('d.')
271 292 nt.assert_in('d.z', matches)
272 293 nt.assert_not_in('d.x', matches)
273 294
274 295
275 296 def test_get__all__entries_ok():
276 297 class A(object):
277 298 __all__ = ['x', 1]
278 299 words = completer.get__all__entries(A())
279 300 nt.assert_equal(words, ['x'])
280 301
281 302
282 303 def test_get__all__entries_no__all__ok():
283 304 class A(object):
284 305 pass
285 306 words = completer.get__all__entries(A())
286 307 nt.assert_equal(words, [])
287 308
288 309
289 310 def test_func_kw_completions():
290 311 ip = get_ipython()
291 312 c = ip.Completer
292 313 ip.ex('def myfunc(a=1,b=2): return a+b')
293 314 s, matches = c.complete(None, 'myfunc(1,b')
294 315 nt.assert_in('b=', matches)
295 316 # Simulate completing with cursor right after b (pos==10):
296 317 s, matches = c.complete(None, 'myfunc(1,b)', 10)
297 318 nt.assert_in('b=', matches)
298 319 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
299 320 nt.assert_in('b=', matches)
300 321 #builtin function
301 322 s, matches = c.complete(None, 'min(k, k')
302 323 nt.assert_in('key=', matches)
303 324
304 325
305 326 def test_default_arguments_from_docstring():
306 327 doc = min.__doc__
307 328 ip = get_ipython()
308 329 c = ip.Completer
309 330 kwd = c._default_arguments_from_docstring(
310 331 'min(iterable[, key=func]) -> value')
311 332 nt.assert_equal(kwd, ['key'])
312 333 #with cython type etc
313 334 kwd = c._default_arguments_from_docstring(
314 335 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
315 336 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
316 337 #white spaces
317 338 kwd = c._default_arguments_from_docstring(
318 339 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
319 340 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
320 341
321 342 def test_line_magics():
322 343 ip = get_ipython()
323 344 c = ip.Completer
324 345 s, matches = c.complete(None, 'lsmag')
325 346 nt.assert_in('%lsmagic', matches)
326 347 s, matches = c.complete(None, '%lsmag')
327 348 nt.assert_in('%lsmagic', matches)
328 349
329 350
330 351 def test_cell_magics():
331 352 from IPython.core.magic import register_cell_magic
332 353
333 354 @register_cell_magic
334 355 def _foo_cellm(line, cell):
335 356 pass
336 357
337 358 ip = get_ipython()
338 359 c = ip.Completer
339 360
340 361 s, matches = c.complete(None, '_foo_ce')
341 362 nt.assert_in('%%_foo_cellm', matches)
342 363 s, matches = c.complete(None, '%%_foo_ce')
343 364 nt.assert_in('%%_foo_cellm', matches)
344 365
345 366
346 367 def test_line_cell_magics():
347 368 from IPython.core.magic import register_line_cell_magic
348 369
349 370 @register_line_cell_magic
350 371 def _bar_cellm(line, cell):
351 372 pass
352 373
353 374 ip = get_ipython()
354 375 c = ip.Completer
355 376
356 377 # The policy here is trickier, see comments in completion code. The
357 378 # returned values depend on whether the user passes %% or not explicitly,
358 379 # and this will show a difference if the same name is both a line and cell
359 380 # magic.
360 381 s, matches = c.complete(None, '_bar_ce')
361 382 nt.assert_in('%_bar_cellm', matches)
362 383 nt.assert_in('%%_bar_cellm', matches)
363 384 s, matches = c.complete(None, '%_bar_ce')
364 385 nt.assert_in('%_bar_cellm', matches)
365 386 nt.assert_in('%%_bar_cellm', matches)
366 387 s, matches = c.complete(None, '%%_bar_ce')
367 388 nt.assert_not_in('%_bar_cellm', matches)
368 389 nt.assert_in('%%_bar_cellm', matches)
369 390
370 391
371 392 def test_magic_completion_order():
372 393
373 394 ip = get_ipython()
374 395 c = ip.Completer
375 396
376 397 # Test ordering of magics and non-magics with the same name
377 398 # We want the non-magic first
378 399
379 400 # Before importing matplotlib, there should only be one option:
380 401
381 402 text, matches = c.complete('mat')
382 403 nt.assert_equal(matches, ["%matplotlib"])
383 404
384 405
385 406 ip.run_cell("matplotlib = 1") # introduce name into namespace
386 407
387 408 # After the import, there should be two options, ordered like this:
388 409 text, matches = c.complete('mat')
389 410 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
390 411
391 412
392 413 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
393 414
394 415 # Order of user variable and line and cell magics with same name:
395 416 text, matches = c.complete('timeit')
396 417 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
397 418
398 419
399 420 def test_dict_key_completion_string():
400 421 """Test dictionary key completion for string keys"""
401 422 ip = get_ipython()
402 423 complete = ip.Completer.complete
403 424
404 425 ip.user_ns['d'] = {'abc': None}
405 426
406 427 # check completion at different stages
407 428 _, matches = complete(line_buffer="d[")
408 429 nt.assert_in("'abc'", matches)
409 430 nt.assert_not_in("'abc']", matches)
410 431
411 432 _, matches = complete(line_buffer="d['")
412 433 nt.assert_in("abc", matches)
413 434 nt.assert_not_in("abc']", matches)
414 435
415 436 _, matches = complete(line_buffer="d['a")
416 437 nt.assert_in("abc", matches)
417 438 nt.assert_not_in("abc']", matches)
418 439
419 440 # check use of different quoting
420 441 _, matches = complete(line_buffer="d[\"")
421 442 nt.assert_in("abc", matches)
422 443 nt.assert_not_in('abc\"]', matches)
423 444
424 445 _, matches = complete(line_buffer="d[\"a")
425 446 nt.assert_in("abc", matches)
426 447 nt.assert_not_in('abc\"]', matches)
427 448
428 449 # check sensitivity to following context
429 450 _, matches = complete(line_buffer="d[]", cursor_pos=2)
430 451 nt.assert_in("'abc'", matches)
431 452
432 453 _, matches = complete(line_buffer="d['']", cursor_pos=3)
433 454 nt.assert_in("abc", matches)
434 455 nt.assert_not_in("abc'", matches)
435 456 nt.assert_not_in("abc']", matches)
436 457
437 458 # check multiple solutions are correctly returned and that noise is not
438 459 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
439 460 5: None}
440 461
441 462 _, matches = complete(line_buffer="d['a")
442 463 nt.assert_in("abc", matches)
443 464 nt.assert_in("abd", matches)
444 465 nt.assert_not_in("bad", matches)
445 466 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
446 467
447 468 # check escaping and whitespace
448 469 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
449 470 _, matches = complete(line_buffer="d['a")
450 471 nt.assert_in("a\\nb", matches)
451 472 nt.assert_in("a\\'b", matches)
452 473 nt.assert_in("a\"b", matches)
453 474 nt.assert_in("a word", matches)
454 475 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
455 476
456 477 # - can complete on non-initial word of the string
457 478 _, matches = complete(line_buffer="d['a w")
458 479 nt.assert_in("word", matches)
459 480
460 481 # - understands quote escaping
461 482 _, matches = complete(line_buffer="d['a\\'")
462 483 nt.assert_in("b", matches)
463 484
464 485 # - default quoting should work like repr
465 486 _, matches = complete(line_buffer="d[")
466 487 nt.assert_in("\"a'b\"", matches)
467 488
468 489 # - when opening quote with ", possible to match with unescaped apostrophe
469 490 _, matches = complete(line_buffer="d[\"a'")
470 491 nt.assert_in("b", matches)
471 492
472 493
473 494 def test_dict_key_completion_contexts():
474 495 """Test expression contexts in which dict key completion occurs"""
475 496 ip = get_ipython()
476 497 complete = ip.Completer.complete
477 498 d = {'abc': None}
478 499 ip.user_ns['d'] = d
479 500
480 501 class C:
481 502 data = d
482 503 ip.user_ns['C'] = C
483 504 ip.user_ns['get'] = lambda: d
484 505
485 506 def assert_no_completion(**kwargs):
486 507 _, matches = complete(**kwargs)
487 508 nt.assert_not_in('abc', matches)
488 509 nt.assert_not_in('abc\'', matches)
489 510 nt.assert_not_in('abc\']', matches)
490 511 nt.assert_not_in('\'abc\'', matches)
491 512 nt.assert_not_in('\'abc\']', matches)
492 513
493 514 def assert_completion(**kwargs):
494 515 _, matches = complete(**kwargs)
495 516 nt.assert_in("'abc'", matches)
496 517 nt.assert_not_in("'abc']", matches)
497 518
498 519 # no completion after string closed, even if reopened
499 520 assert_no_completion(line_buffer="d['a'")
500 521 assert_no_completion(line_buffer="d[\"a\"")
501 522 assert_no_completion(line_buffer="d['a' + ")
502 523 assert_no_completion(line_buffer="d['a' + '")
503 524
504 525 # completion in non-trivial expressions
505 526 assert_completion(line_buffer="+ d[")
506 527 assert_completion(line_buffer="(d[")
507 528 assert_completion(line_buffer="C.data[")
508 529
509 530 # greedy flag
510 531 def assert_completion(**kwargs):
511 532 _, matches = complete(**kwargs)
512 533 nt.assert_in("get()['abc']", matches)
513 534
514 535 assert_no_completion(line_buffer="get()[")
515 536 with greedy_completion():
516 537 assert_completion(line_buffer="get()[")
517 538 assert_completion(line_buffer="get()['")
518 539 assert_completion(line_buffer="get()['a")
519 540 assert_completion(line_buffer="get()['ab")
520 541 assert_completion(line_buffer="get()['abc")
521 542
522 543
523 544
524 545 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
525 546 def test_dict_key_completion_bytes():
526 547 """Test handling of bytes in dict key completion"""
527 548 ip = get_ipython()
528 549 complete = ip.Completer.complete
529 550
530 551 ip.user_ns['d'] = {'abc': None, b'abd': None}
531 552
532 553 _, matches = complete(line_buffer="d[")
533 554 nt.assert_in("'abc'", matches)
534 555 nt.assert_in("b'abd'", matches)
535 556
536 557 if False: # not currently implemented
537 558 _, matches = complete(line_buffer="d[b")
538 559 nt.assert_in("b'abd'", matches)
539 560 nt.assert_not_in("b'abc'", matches)
540 561
541 562 _, matches = complete(line_buffer="d[b'")
542 563 nt.assert_in("abd", matches)
543 564 nt.assert_not_in("abc", matches)
544 565
545 566 _, matches = complete(line_buffer="d[B'")
546 567 nt.assert_in("abd", matches)
547 568 nt.assert_not_in("abc", matches)
548 569
549 570 _, matches = complete(line_buffer="d['")
550 571 nt.assert_in("abc", matches)
551 572 nt.assert_not_in("abd", matches)
552 573
553 574
554 575 @dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
555 576 def test_dict_key_completion_unicode_py2():
556 577 """Test handling of unicode in dict key completion"""
557 578 ip = get_ipython()
558 579 complete = ip.Completer.complete
559 580
560 581 ip.user_ns['d'] = {u'abc': None,
561 582 u'a\u05d0b': None}
562 583
563 584 _, matches = complete(line_buffer="d[")
564 585 nt.assert_in("u'abc'", matches)
565 586 nt.assert_in("u'a\\u05d0b'", matches)
566 587
567 588 _, matches = complete(line_buffer="d['a")
568 589 nt.assert_in("abc", matches)
569 590 nt.assert_not_in("a\\u05d0b", matches)
570 591
571 592 _, matches = complete(line_buffer="d[u'a")
572 593 nt.assert_in("abc", matches)
573 594 nt.assert_in("a\\u05d0b", matches)
574 595
575 596 _, matches = complete(line_buffer="d[U'a")
576 597 nt.assert_in("abc", matches)
577 598 nt.assert_in("a\\u05d0b", matches)
578 599
579 600 # query using escape
580 601 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
581 602 nt.assert_in("u05d0b", matches) # tokenized after \\
582 603
583 604 # query using character
584 605 _, matches = complete(line_buffer=u"d[u'a\u05d0")
585 606 nt.assert_in(u"a\u05d0b", matches)
586 607
587 608 with greedy_completion():
588 609 _, matches = complete(line_buffer="d[")
589 610 nt.assert_in("d[u'abc']", matches)
590 611 nt.assert_in("d[u'a\\u05d0b']", matches)
591 612
592 613 _, matches = complete(line_buffer="d['a")
593 614 nt.assert_in("d['abc']", matches)
594 615 nt.assert_not_in("d[u'a\\u05d0b']", matches)
595 616
596 617 _, matches = complete(line_buffer="d[u'a")
597 618 nt.assert_in("d[u'abc']", matches)
598 619 nt.assert_in("d[u'a\\u05d0b']", matches)
599 620
600 621 _, matches = complete(line_buffer="d[U'a")
601 622 nt.assert_in("d[U'abc']", matches)
602 623 nt.assert_in("d[U'a\\u05d0b']", matches)
603 624
604 625 # query using escape
605 626 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
606 627 nt.assert_in("d[u'a\\u05d0b']", matches) # tokenized after \\
607 628
608 629 # query using character
609 630 _, matches = complete(line_buffer=u"d[u'a\u05d0")
610 631 nt.assert_in(u"d[u'a\u05d0b']", matches)
611 632
612 633
613 634 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
614 635 def test_dict_key_completion_unicode_py3():
615 636 """Test handling of unicode in dict key completion"""
616 637 ip = get_ipython()
617 638 complete = ip.Completer.complete
618 639
619 640 ip.user_ns['d'] = {u'a\u05d0': None}
620 641
621 642 # query using escape
622 643 _, matches = complete(line_buffer="d['a\\u05d0")
623 644 nt.assert_in("u05d0", matches) # tokenized after \\
624 645
625 646 # query using character
626 647 _, matches = complete(line_buffer="d['a\u05d0")
627 648 nt.assert_in(u"a\u05d0", matches)
628 649
629 650 with greedy_completion():
630 651 # query using escape
631 652 _, matches = complete(line_buffer="d['a\\u05d0")
632 653 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
633 654
634 655 # query using character
635 656 _, matches = complete(line_buffer="d['a\u05d0")
636 657 nt.assert_in(u"d['a\u05d0']", matches)
637 658
638 659
639 660
640 661 @dec.skip_without('numpy')
641 662 def test_struct_array_key_completion():
642 663 """Test dict key completion applies to numpy struct arrays"""
643 664 import numpy
644 665 ip = get_ipython()
645 666 complete = ip.Completer.complete
646 667 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
647 668 _, matches = complete(line_buffer="d['")
648 669 nt.assert_in("hello", matches)
649 670 nt.assert_in("world", matches)
650 671
651 672
652 673 @dec.skip_without('pandas')
653 674 def test_dataframe_key_completion():
654 675 """Test dict key completion applies to pandas DataFrames"""
655 676 import pandas
656 677 ip = get_ipython()
657 678 complete = ip.Completer.complete
658 679 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
659 680 _, matches = complete(line_buffer="d['")
660 681 nt.assert_in("hello", matches)
661 682 nt.assert_in("world", matches)
662 683
663 684
664 685 def test_dict_key_completion_invalids():
665 686 """Smoke test cases dict key completion can't handle"""
666 687 ip = get_ipython()
667 688 complete = ip.Completer.complete
668 689
669 690 ip.user_ns['no_getitem'] = None
670 691 ip.user_ns['no_keys'] = []
671 692 ip.user_ns['cant_call_keys'] = dict
672 693 ip.user_ns['empty'] = {}
673 694 ip.user_ns['d'] = {'abc': 5}
674 695
675 696 _, matches = complete(line_buffer="no_getitem['")
676 697 _, matches = complete(line_buffer="no_keys['")
677 698 _, matches = complete(line_buffer="cant_call_keys['")
678 699 _, matches = complete(line_buffer="empty['")
679 700 _, matches = complete(line_buffer="name_error['")
680 701 _, matches = complete(line_buffer="d['\\") # incomplete escape
@@ -1,22 +1,27 b''
1 1 // IPython mode is just a slightly altered Python Mode with `?` being an extra
2 2 // single operator. Here we define the `ipython` mode inside the require `python`
3 3 // callback so that python mode is auto-loaded; this is probably not the best
4 4 // thing to do, but it is at least the simplest approach for now.
5 5
6 6 CodeMirror.requireMode('python',function(){
7 7 "use strict";
8 8
9 9 CodeMirror.defineMode("ipython", function(conf, parserConf) {
10 10 var pythonConf = {};
11 11 for (var prop in parserConf) {
12 12 if (parserConf.hasOwnProperty(prop)) {
13 13 pythonConf[prop] = parserConf[prop];
14 14 }
15 15 }
16 16 pythonConf.name = 'python';
17 17 pythonConf.singleOperators = new RegExp("^[\\+\\-\\*/%&|\\^~<>!\\?]");
18 if (pythonConf.version === 3) {
19 pythonConf.identifiers = new RegExp("^[_A-Za-z\u00A1-\uFFFF][_A-Za-z0-9\u00A1-\uFFFF]*");
20 } else if (pythonConf.version === 2) {
21 pythonConf.identifiers = new RegExp("^[_A-Za-z][_A-Za-z0-9]*");
22 }
18 23 return CodeMirror.getMode(conf, pythonConf);
19 24 }, 'python');
20 25
21 26 CodeMirror.defineMIME("text/x-ipython", "ipython");
22 27 })
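The widened identifier pattern matters because the characters produced by the latex completion are legal Python 3 identifiers, which the editor mode should now colour as ordinary names. A quick illustration (Python 3 only; the variable names are just examples):

    α = 0.05      # typed as \alpha<TAB> in the notebook
    β = 2         # typed as \beta<TAB>
    print(α * β)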