import argparse
import os
import sys
+ import asyncio
from datetime import datetime, timezone
from pathlib import Path
-
- import openai
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.history import FileHistory
from rich.syntax import Syntax
from rich.text import Text

+ from pydantic_ai import Agent, result as pa_result
+

+ # Prettify code fences with Rich
class SimpleCodeBlock(CodeBlock):
    def __rich_console__(
        self, console: Console, options: ConsoleOptions
@@ -39,61 +41,53 @@ def app() -> int:
    parser = argparse.ArgumentParser(
        prog="aicli",
        description="""\
- OpenAI powered AI CLI (thank you samuelcolvin)
+ Pydantic AI powered CLI

Special prompts:
* `show-markdown` - show the markdown output from the previous response
* `multiline` - toggle multiline mode
""",
    )
-     parser.add_argument(
-         "prompt", nargs="?", help="AI Prompt, if omitted fall into interactive mode"
-     )
-
-     parser.add_argument(
-         "--no-stream",
-         action="store_true",
-         help="Whether to stream responses from OpenAI",
-     )
-
+     parser.add_argument("prompt", nargs="?", help="AI Prompt, else interactive mode")
+     parser.add_argument("--no-stream", action="store_true", help="Disable streaming")
    parser.add_argument("--version", action="store_true", help="Show version and exit")

    args = parser.parse_args()

    console = Console()
-     console.print("OpenAI powered AI CLI", style="green bold", highlight=False)
+     console.print("Pydantic AI CLI", style="green bold", highlight=False)
    if args.version:
        return 0

-     try:
-         openai_api_key = os.environ["OPENAI_API_KEY"]
-     except KeyError:
+     # Check for an API key (e.g. OPENAI_API_KEY)
+     if "OPENAI_API_KEY" not in os.environ:
        console.print(
            "You must set the OPENAI_API_KEY environment variable", style="red"
        )
        return 1

-     client = openai.OpenAI(api_key=openai_api_key)
-
-     now_utc = datetime.now(timezone.utc)
-     t = now_utc.astimezone().tzinfo.tzname(now_utc)  # type: ignore
-     setup = f"""\
- Help the user by responding to their request, the output should
- be concise and always written in markdown. The current date and time
- is {datetime.now()} {t}. The user is running {sys.platform}."""
+     # Create your agent; we set a global system prompt
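+     # The "openai:gpt-4o" model string selects pydantic-ai's OpenAI model, which
+     # reads OPENAI_API_KEY from the environment (checked above)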
+     agent = Agent(
+         "openai:gpt-4o",
+         system_prompt="Be a helpful assistant and respond in concise markdown.",
+     )

+     # We'll accumulate the conversation in here (both user and assistant messages)
+     conversation = None
    stream = not args.no_stream
-     messages = [{"role": "system", "content": setup}]

+     # If the user supplied a single prompt, just run once
    if args.prompt:
-         messages.append({"role": "user", "content": args.prompt})
        try:
-             ask_openai(client, messages, stream, console)
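+             # run_and_display is async, so drive it with a short-lived event loop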
+             asyncio.run(
+                 run_and_display(agent, args.prompt, conversation, stream, console)
+             )
        except KeyboardInterrupt:
            pass
        return 0

-     history = Path().home() / ".openai-prompt-history.txt"
+     # Otherwise, interactive mode with prompt_toolkit
+     history = Path.home() / ".openai-prompt-history.txt"
    session = PromptSession(history=FileHistory(str(history)))
    multiline = False

@@ -105,70 +99,87 @@ def app() -> int:
        except (KeyboardInterrupt, EOFError):
            return 0

-         if not text.strip():
+         cmd = text.lower().strip()
+         if not cmd:
            continue

-         ident_prompt = text.lower().strip(" ").replace(" ", "-")
-         if ident_prompt == "show-markdown":
-             last_content = messages[-1]["content"]
-             console.print("[dim]Last markdown output of last question:[/dim]\n")
-             console.print(
-                 Syntax(last_content, lexer="markdown", background_color="default")
-             )
+         if cmd == "show-markdown":
+             # Show last assistant message
+             if not conversation:
+                 console.print("No messages yet.", style="dim")
+                 continue
+             # The last run result's assistant message is the last item
+             # (the user might have broken the loop, so we search from end)
+             assistant_msg = None
+             for m in reversed(conversation):
+                 if m.kind == "response":
+                     # Collect text parts from the response
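+                     # (non-text parts, e.g. tool calls, are skipped via part_kind)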
+                     text_part = "".join(
+                         p.content for p in m.parts if p.part_kind == "text"
+                     )
+                     assistant_msg = text_part
+                     break
+             if assistant_msg:
+                 console.print("[dim]Last assistant markdown output:[/dim]\n")
+                 console.print(
+                     Syntax(assistant_msg, lexer="markdown", background_color="default")
+                 )
+             else:
+                 console.print("No assistant response found.", style="dim")
            continue
-         elif ident_prompt == "multiline":
+
+         elif cmd == "multiline":
            multiline = not multiline
            if multiline:
                console.print(
                    "Enabling multiline mode. "
-                     "[dim]Press [Meta+Enter] or [Esc] followed by [Enter] to accept input.[/dim]"
+                     "[dim]Press [Meta+Enter] or [Esc] then [Enter] to submit.[/dim]"
                )
            else:
                console.print("Disabling multiline mode.")
            continue

-         messages.append({"role": "user", "content": text})
-
+         # Normal user prompt
        try:
-             content = ask_openai(client, messages, stream, console)
+             conversation = asyncio.run(
+                 run_and_display(agent, text, conversation, stream, console)
+             )
        except KeyboardInterrupt:
            return 0
-         messages.append({"role": "assistant", "content": content})
-
-
- def ask_openai(
-     client: openai.OpenAI,
-     messages: list[dict[str, str]],
-     stream: bool,
-     console: Console,
- ) -> str:
-     with Status("[dim]Working on it…[/dim]", console=console):
-         response = client.chat.completions.create(
-             model="gpt-4", messages=messages, stream=stream
-         )

+     return 0
+
+
+ async def run_and_display(
+     agent: Agent, user_text: str, conversation, stream: bool, console: Console
+ ):
+     """
+     Runs the agent (stream or not) with user_text, returning the updated conversation.
+     If conversation is None, run from scratch (includes system prompt).
+     Otherwise pass conversation as message_history to continue it.
+     """
    console.print("\nResponse:", style="green")
-     if stream:
-         content = ""
-         interrupted = False
-         with Live("", refresh_per_second=15, console=console) as live:
-             try:
-                 for chunk in response:
-                     if chunk.choices[0].finish_reason is not None:
-                         break
-                     chunk_text = chunk.choices[0].delta.content
-                     content += chunk_text
-                     live.update(Markdown(content))
-             except KeyboardInterrupt:
-                 interrupted = True
-
-         if interrupted:
-             console.print("[dim]Interrupted[/dim]")
-     else:
-         content = response.choices[0].message.content
-         console.print(Markdown(content))
-
-     return content
+
+     with Live(
+         "[dim]Working on it…[/dim]",
+         console=console,
+         refresh_per_second=15,
+         vertical_overflow="visible",
+     ) as live:
+         if stream:
+             async with agent.run_stream(user_text, message_history=conversation) as run:
+                 try:
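+                     # stream_text() yields the accumulated text so far (not deltas),
+                     # so re-rendering the full Markdown on each chunk is correct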
+                     async for chunk in run.stream_text():
+                         live.update(Markdown(chunk))
+                 except Exception as e:
+                     console.print(f"Error: {e}", style="red")
+                 new_conversation = run.all_messages()
+         else:
+             run_result = await agent.run(user_text, message_history=conversation)
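+             # run_result.data holds the final text output of the run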
+             live.update(Markdown(run_result.data))
+             new_conversation = run_result.all_messages()
+
+     return new_conversation


if __name__ == "__main__":