Some checks failed
CI / build (push) Failing after 12s
GitOrigin-RevId: 6370f6ea785709295b6abcf9c60717cacf3ac432
148 lines
No EOL
5.7 KiB
Python
148 lines
No EOL
5.7 KiB
Python
import typer
|
|
from typing import List, Optional
|
|
from pathlib import Path
|
|
import os
|
|
import litellm
|
|
from dotenv import load_dotenv
|
|
|
|
# Typer application object; commands are registered via @app.command().
app = typer.Typer()

# --- Configuration ---
# Paths resolve relative to the directory the CLI is invoked from, so the
# tool must be run from the project root for agent prompts to be found.
PROJECT_ROOT = Path(os.getcwd())
# Microagent system prompts live here as <agent_name>.md files.
MICROAGENTS_DIR = PROJECT_ROOT / "ai" / "process" / "microagents"
|
|
|
|
def invoke_agent_with_litellm(
    agent_name: str,
    user_prompt: str,
    context_files: list[str] | None = None,
    model_name: str = "openai/gpt-3.5-turbo",
    api_base: str | None = None,
    api_key: str | None = None,
    system_prompt_path: str | None = None,
) -> str:
    """
    Invokes a microagent using the litellm library with OpenAI-compatible providers.

    Args:
        agent_name: Agent name; used to locate the system prompt at
            MICROAGENTS_DIR/<agent_name>.md when system_prompt_path is None.
        user_prompt: The user's request text.
        context_files: Optional paths whose contents are prepended to the
            prompt. Relative paths resolve against PROJECT_ROOT.
        model_name: litellm model identifier (e.g. "openai/gpt-3.5-turbo").
        api_base: Provider base URL; falls back to OPENAI_API_BASE.
        api_key: Provider API key; falls back to OPENAI_API_KEY.
        system_prompt_path: Explicit system-prompt file, overriding the
            agent-name lookup.

    Returns:
        The model's response text. Errors raised by the completion call are
        returned as a printable error string rather than re-raised.

    Raises:
        ValueError: If no API key or base URL can be resolved.
        FileNotFoundError: If the system prompt or a context file is missing.
        IOError: If a context file cannot be read.
    """
    # FIX: the default was a mutable list literal ([]), which Python shares
    # across calls; use the None-sentinel idiom instead.
    if context_files is None:
        context_files = []

    # 1. Load API Key and Base URL (explicit args win over environment/.env).
    load_dotenv(dotenv_path=PROJECT_ROOT / '.env')

    api_key = api_key or os.getenv('OPENAI_API_KEY')
    if not api_key:
        raise ValueError("OPENAI_API_KEY not found in environment or .env file.")

    api_base = api_base or os.getenv('OPENAI_API_BASE')
    if not api_base:
        raise ValueError("OPENAI_API_BASE not found in environment or .env file.")

    # 2. Load System Prompt (explicit path wins over agent-name lookup).
    if system_prompt_path:
        system_prompt_file = Path(system_prompt_path)
    else:
        system_prompt_file = MICROAGENTS_DIR / f"{agent_name.lower()}.md"

    if not system_prompt_file.exists():
        raise FileNotFoundError(f"System prompt not found for agent '{agent_name}' at {system_prompt_file}")
    system_prompt = system_prompt_file.read_text()

    # 3. Construct Full User Prompt: context files first, then the prompt.
    # Build via a parts list + join instead of repeated string +=.
    prompt_parts: list[str] = []
    for file_path in context_files:
        try:
            p = Path(file_path)
            if not p.is_absolute():
                p = PROJECT_ROOT / p
            prompt_parts.append(f"--- CONTEXT FILE: {p.name} ---\n")
            try:
                prompt_parts.append(p.read_text() + "\n\n")
            except UnicodeDecodeError:
                # Binary files are noted inline but do not abort the call.
                prompt_parts.append("[Binary file - content not displayed]\n\n")
        except FileNotFoundError:
            raise FileNotFoundError(f"Context file not found: {file_path}")
        except Exception as e:
            raise IOError(f"Error reading context file {file_path}: {e}")

    prompt_parts.append("--- USER PROMPT ---\n")
    prompt_parts.append(user_prompt)
    full_user_prompt = "".join(prompt_parts)

    # 4. Construct Messages for litellm (OpenAI chat format).
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": full_user_prompt},
    ]

    # 5. Invoke Model using litellm. LLM-call failures are reported as a
    # returned string so CLI callers always get something printable.
    # Use an explicit sentinel instead of the fragile `'response' in locals()`.
    response = None
    try:
        response = litellm.completion(
            model=model_name,
            messages=messages,
            api_key=api_key,
            api_base=api_base
        )

        # Extract the response content.
        if hasattr(response, 'choices') and len(response.choices) > 0:
            return response.choices[0].message.content
        return f"Unexpected response format: {response}"
    except Exception as e:
        return f"Error generating response: {e}\n\nFull response object:\n{response if response is not None else 'No response generated'}"
|
|
|
|
|
|
@app.command()
def invoke(
    agent_name: str = typer.Argument(..., help="The name of the agent to invoke (e.g., 'librarian')."),
    user_prompt: Optional[str] = typer.Argument(None, help="The user's prompt for the agent. Required if --prompt-file is not used."),
    prompt_file: Optional[Path] = typer.Option(None, "--prompt-file", "-p", help="Path to a file containing the user's prompt."),
    context_file: Optional[List[Path]] = typer.Option(None, "--context-file", "-c", help="Path to a context file to prepend to the prompt. Can be specified multiple times."),
    # TODO: acmcarther@ - Disabled to test summarization performance.
    #model: str = typer.Option("openai/qwen3-coder-30b-a3b-instruct-mlx", help="The name of the model to use (e.g., 'openai/gpt-4', 'openai/claude-3-sonnet')."),
    model: str = typer.Option("openai/gpt-oss-120b", help="The name of the model to use (e.g., 'openai/gpt-4', 'openai/claude-3-sonnet')."),
    # NOTE(review): these default to a hard-coded LAN endpoint/key (LM Studio);
    # the env-var fallback in the help text only applies if callers pass None.
    api_base: Optional[str] = typer.Option("http://192.168.0.235:1234/v1", "--api-base", help="The API base URL for the OpenAI-compatible provider. Defaults to OPENAI_API_BASE env var."),
    api_key: Optional[str] = typer.Option("lm-studio", "--api-key", help="The API key for the provider. Defaults to OPENAI_API_KEY env var."),
):
    """
    Invokes a specialized, single-purpose 'microagent' using litellm with OpenAI-compatible providers.
    """
    # Guard: at least one prompt source is required.
    if not user_prompt and not prompt_file:
        print("Error: Either a user prompt or a prompt file must be provided.")
        raise typer.Exit(code=1)

    # Resolve the prompt text; the file takes precedence over the argument.
    if prompt_file:
        try:
            prompt_text = prompt_file.read_text()
        except FileNotFoundError:
            print(f"Error: Prompt file not found at {prompt_file}")
            raise typer.Exit(code=1)
        except Exception as e:
            print(f"Error reading prompt file: {e}")
            raise typer.Exit(code=1)
    else:
        # FIX: the guard above guarantees user_prompt is set here, so the
        # original unreachable `elif user_prompt / else: return` was collapsed.
        prompt_text = user_prompt

    context_paths = [str(p) for p in context_file] if context_file else []

    # Known failure modes get a clean message; anything else is still caught
    # so the CLI exits with code 1 instead of a traceback.
    try:
        response = invoke_agent_with_litellm(
            agent_name=agent_name,
            user_prompt=prompt_text,
            context_files=context_paths,
            model_name=model,
            api_base=api_base,
            api_key=api_key,
        )
        print(response)
    except (ValueError, FileNotFoundError, IOError) as e:
        print(f"Error: {e}")
        raise typer.Exit(code=1)
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        raise typer.Exit(code=1)
|
|
|
|
# Script entry point: hand control to the Typer CLI when run directly.
if __name__ == "__main__":
    app()