Add request number and logging (do not use `print`) (#14846)...
Michał Krassowski
r29533:0c9949c3 merge
@@ -0,0 +1,89 @@
import asyncio

try:
    from jupyter_ai_magics import BaseProvider
    from langchain_community.llms import FakeListLLM
except ImportError:

    class BaseProvider:
        pass

    class FakeListLLM:
        pass


FIBONACCI = """\
def fib(n):
    if n < 2: return n
    return fib(n - 1) + fib(n - 2)
"""


class FibonacciCompletionProvider(BaseProvider, FakeListLLM):  # type: ignore[misc, valid-type]

    id = "my_provider"
    name = "My Provider"
    model_id_key = "model"
    models = ["model_a"]

    def __init__(self, **kwargs):
        kwargs["responses"] = ["This fake response will not be used for completion"]
        kwargs["model_id"] = "model_a"
        super().__init__(**kwargs)

    async def generate_inline_completions(self, request):
        raise ValueError("IPython only supports streaming models.")

    async def stream_inline_completions(self, request):
        from jupyter_ai.completions.models import (
            InlineCompletionList,
            InlineCompletionReply,
        )

        assert request.number > 0
        token = f"t{request.number}s0"
        last_line = request.prefix.rstrip("\n").splitlines()[-1]

        if not FIBONACCI.startswith(last_line):
            return

        yield InlineCompletionReply(
            list=InlineCompletionList(
                items=[
                    {"insertText": "", "isIncomplete": True, "token": token},
                ]
            ),
            reply_to=request.number,
        )

        async for reply in self._stream(
            FIBONACCI[len(last_line) :],
            request.number,
            token,
        ):
            yield reply

    async def _stream(self, sentence, request_number, token, start_with=""):
        from jupyter_ai.completions.models import InlineCompletionStreamChunk

        suggestion = start_with

        for fragment in sentence.split(" "):
            await asyncio.sleep(0.05)
            if suggestion:
                suggestion += " "
            suggestion += fragment
            yield InlineCompletionStreamChunk(
                type="stream",
                response={"insertText": suggestion, "token": token},
                reply_to=request_number,
                done=False,
            )

        # finally, send a message confirming that we are done
        yield InlineCompletionStreamChunk(
            type="stream",
            response={"insertText": suggestion, "token": token},
            reply_to=request_number,
            done=True,
        )
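The streaming protocol in this fake provider is worth spelling out: it first yields an InlineCompletionReply whose single item is empty but marked isIncomplete, then _stream yields chunks carrying the cumulative text, with done=True only on the final chunk. A minimal standalone sketch of that accumulation logic, with plain dicts standing in for the jupyter_ai chunk models (stream_words is an illustrative helper, not part of this diff):

import asyncio


async def stream_words(sentence, token):
    # Mirror _stream above: emit the cumulative suggestion word by word;
    # only the final chunk is flagged done=True.
    suggestion = ""
    for fragment in sentence.split(" "):
        await asyncio.sleep(0.05)
        suggestion = f"{suggestion} {fragment}" if suggestion else fragment
        yield {"insertText": suggestion, "token": token, "done": False}
    yield {"insertText": suggestion, "token": token, "done": True}


async def demo():
    async for chunk in stream_words("fib(n - 1) + fib(n - 2)", "t1s0"):
        print(chunk["done"], repr(chunk["insertText"]))


asyncio.run(demo())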
@@ -518,7 +518,6 @@ class TerminalInteractiveShell(InteractiveShell):
         name = self.llm_prefix_from_history

         if name == "no_prefix":
-            print("set tofun1", self.llm_prefix_from_history)

             def no_prefix(history_manager):
                 return ""
@@ -181,6 +181,7 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
         self.skip_lines = 0
         self._connected_apps = []
         self._llm_provider = None
+        self._request_number = 0

     def reset_history_position(self, _: Buffer) -> None:
         self.skip_lines = 0
@@ -346,7 +347,7 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
            try:
                await self._trigger_llm_core(buffer)
            except Exception as e:
-                get_ipython().log.error("error")
+                get_ipython().log.error("error %s", e)
                raise

        # here we need a cancellable task so we can't just await the error caught
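The replacement message uses the stdlib logging convention of lazy %s formatting: the exception is passed as an argument, and interpolation happens only if the record is actually emitted. A small self-contained illustration (plain logging only; the logger name is arbitrary):

import logging

logging.basicConfig(level=logging.ERROR)
log = logging.getLogger("demo")

try:
    raise ValueError("boom")
except Exception as e:
    # Pass the exception as a lazy argument rather than interpolating it
    # with an f-string; formatting is deferred until the record is emitted.
    log.error("error %s", e)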
@@ -361,9 +362,8 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
         provider to stream its response back to us, iteratively setting it as
         the suggestion on the current buffer.

-        Unlike with JupyterAi, as we do not have multiple cells, the cell number
-        is always set to `0`; note that we _could_ set it to a new number each
-        time and ignore the reply from past numbers.
+        Unlike with JupyterAi, as we do not have multiple cells, the cell id
+        is always set to `None`.

         We set the prefix to the current cell content, but could also insert the
         rest of the history, or even just the non-fail history.
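Concretely, the request this docstring describes carries roughly the following fields; this is a plain-dict stand-in for jupyter_ai's InlineCompletionRequest with illustrative values, matching the construction shown in the next hunk:

request_payload = {
    "number": 1,              # monotonically increasing, see next hunk
    "prefix": "def fib",      # history prefix + current buffer content
    "suffix": "",             # nothing after the cursor in the terminal
    "mime": "text/x-python",
    "cell_id": None,          # no notebook cells in the terminal
}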
@@ -385,10 +385,12 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):

         hm = buffer.history.shell.history_manager
         prefix = self._llm_prefixer(hm)
-        print(prefix)
+        get_ipython().log.debug("prefix: %s", prefix)

+        self._request_number += 1
+        request_number = self._request_number
         request = jai_models.InlineCompletionRequest(
-            number=0,
+            number=request_number,
             prefix=prefix + buffer.document.text,
             suffix="",
             mime="text/x-python",
@@ -401,6 +403,9 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
         async for reply_and_chunks in self._llm_provider.stream_inline_completions(
             request
         ):
+            if self._request_number != request_number:
+                # If a new suggestion was requested, skip processing this one.
+                return
             if isinstance(reply_and_chunks, jai_models.InlineCompletionReply):
                 if len(reply_and_chunks.list.items) > 1:
                     raise ValueError(
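Together with the _request_number counter initialised in the earlier hunk, this implements a last-writer-wins guard: each new request bumps the shared counter, and any still-running stream whose captured number no longer matches bails out instead of overwriting a newer suggestion. A self-contained sketch of the pattern (class and names are illustrative, not from the diff):

import asyncio


class LatestOnly:
    """Only the newest request may update `value`; superseded ones stop."""

    def __init__(self):
        self._request_number = 0
        self.value = None

    async def request(self, text, delay):
        self._request_number += 1
        request_number = self._request_number
        await asyncio.sleep(delay)
        if self._request_number != request_number:
            return  # a newer request started in the meantime; drop this one
        self.value = text


async def demo():
    obj = LatestOnly()
    # The slow first request is superseded by the quicker second one.
    await asyncio.gather(obj.request("old", 0.05), obj.request("new", 0.01))
    print(obj.value)  # -> new


asyncio.run(demo())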
@@ -77,6 +77,7 @@ test = [
 test_extra = [
     "ipython[test]",
     "curio",
+    "jupyter_ai",
     "matplotlib!=3.2.0",
     "nbformat",
     "numpy>=1.23",
@@ -7,12 +7,14 @@ from IPython.terminal.shortcuts.auto_suggest import (
     accept_word,
     accept_and_keep_cursor,
     discard,
+    llm_autosuggestion,
     NavigableAutoSuggestFromHistory,
     swap_autosuggestion_up,
     swap_autosuggestion_down,
 )
 from IPython.terminal.shortcuts.auto_match import skip_over
 from IPython.terminal.shortcuts import create_ipython_shortcuts, reset_search_buffer
+from IPython.testing import decorators as dec

 from prompt_toolkit.history import InMemoryHistory
 from prompt_toolkit.buffer import Buffer
@@ -34,6 +36,26 @@ def make_event(text, cursor, suggestion):
     return event


+try:
+    from .fake_llm import FIBONACCI
+except ImportError:
+    FIBONACCI = None
+
+
+@dec.skip_without("jupyter_ai")
+@pytest.mark.asyncio
+async def test_llm_autosuggestion():
+    provider = NavigableAutoSuggestFromHistory()
+    ip = get_ipython()
+    ip.auto_suggest = provider
+    ip.llm_provider_class = "tests.fake_llm.FibonacciCompletionProvider"
+    text = "def fib"
+    event = make_event(text, len(text), "")
+    event.current_buffer.history.shell.history_manager.get_range = Mock(return_value=[])
+    await llm_autosuggestion(event)
+    assert event.current_buffer.suggestion.text == FIBONACCI[len(text) :]
+
+
 @pytest.mark.parametrize(
     "text, suggestion, expected",
     [
@@ -219,6 +241,7 @@ def test_other_providers():
     assert swap_autosuggestion_down(event) is None


+@pytest.mark.asyncio
 async def test_navigable_provider():
     provider = NavigableAutoSuggestFromHistory()
     history = InMemoryHistory(history_strings=["very_a", "very", "very_b", "very_c"])
@@ -271,6 +294,7 @@ async def test_navigable_provider():
     assert get_suggestion().text == "_a"


+@pytest.mark.asyncio
 async def test_navigable_provider_multiline_entries():
     provider = NavigableAutoSuggestFromHistory()
     history = InMemoryHistory(history_strings=["very_a\nvery_b", "very_c"])