# LLM Providers

Radkit supports multiple LLM providers through a unified `BaseLlm` trait. This allows you to switch between models from different providers with minimal code changes.

All providers follow a similar pattern:

  1. Initialization: Create an LLM client, usually from an environment variable containing the API key.
  2. Configuration: Optionally, chain builder methods to configure parameters like `max_tokens` or `temperature`.
  3. Execution: Call `generate_content` (or the simpler `generate` for text-only prompts) with a `Thread`. The per-provider snippets below elide the surrounding async function; a runnable skeleton follows this list.
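
The snippets on this page use `?` and `.await` at the top level for brevity; in a real program they belong inside an async function that returns a `Result`. Here is a minimal skeleton, assuming the `tokio` runtime and the `anyhow` error crate (neither is mandated by Radkit; any async runtime, and any error type Radkit's errors convert into, will do):

```rust
use radkit::models::providers::AnthropicLlm;
use radkit::models::{BaseLlm, Thread};

// `tokio` and `anyhow` are this sketch's choices, not Radkit requirements.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // 1. Initialization: reads ANTHROPIC_API_KEY from the environment.
    // 2. Configuration: the builder methods appear to consume and return the client.
    let llm = AnthropicLlm::from_env("claude-3-sonnet-20240229")?
        .with_max_tokens(1024)
        .with_temperature(0.7);

    // 3. Execution: send a Thread and print the first text block of the reply.
    let thread = Thread::from_user("Say hello in one sentence.");
    let response = llm.generate_content(thread, None).await?;
    println!("{}", response.content().first_text().unwrap_or_default());
    Ok(())
}
```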
## Anthropic

```rust
use radkit::models::providers::AnthropicLlm;
use radkit::models::{BaseLlm, Thread};

// From environment variable (ANTHROPIC_API_KEY)
let llm = AnthropicLlm::from_env("claude-3-sonnet-20240229")?;

// With configuration
let llm = AnthropicLlm::from_env("claude-3-opus-20240229")?
    .with_max_tokens(4096)
    .with_temperature(0.7);

// Generate content
let thread = Thread::from_user("Explain quantum computing");
let response = llm.generate_content(thread, None).await?;
println!("Response: {}", response.content().first_text().unwrap());
```
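
`from_env` presumably fails when `ANTHROPIC_API_KEY` is unset, so the error is worth surfacing with context before any request is made. A minimal sketch, assuming Radkit's error type implements `Display` (the message text is illustrative):

```rust
use radkit::models::providers::AnthropicLlm;

// Fail fast with a readable message if ANTHROPIC_API_KEY is missing or empty.
let llm = AnthropicLlm::from_env("claude-3-sonnet-20240229")
    .map_err(|e| anyhow::anyhow!("could not create Anthropic client: {e}"))?;
```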
## OpenAI

```rust
use radkit::models::providers::OpenAILlm;
use radkit::models::BaseLlm;

// From environment variable (OPENAI_API_KEY)
let llm = OpenAILlm::from_env("gpt-4o")?;

// With configuration
let llm = OpenAILlm::from_env("gpt-4o-mini")?
    .with_max_tokens(2000)
    .with_temperature(0.5);

// Text-only generation with the simpler `generate`
let response = llm.generate("What is machine learning?", None).await?;
```
## Gemini

```rust
use radkit::models::providers::GeminiLlm;
use radkit::models::BaseLlm;

// From environment variable (GEMINI_API_KEY)
let llm = GeminiLlm::from_env("gemini-1.5-flash-latest")?;
let response = llm.generate("Explain neural networks", None).await?;
```
## Grok (xAI)

```rust
use radkit::models::providers::GrokLlm;
use radkit::models::BaseLlm;

// From environment variable (XAI_API_KEY)
let llm = GrokLlm::from_env("grok-1.5-flash")?;
let response = llm.generate("What is the meaning of life?", None).await?;
```
## DeepSeek

```rust
use radkit::models::providers::DeepSeekLlm;
use radkit::models::BaseLlm;

// From environment variable (DEEPSEEK_API_KEY)
let llm = DeepSeekLlm::from_env("deepseek-chat")?;
let response = llm.generate("Code review best practices", None).await?;
```
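
Because every provider implements `BaseLlm`, request logic can be written once against the trait and handed any client. A sketch under the same assumptions as the skeleton above (`ask` is an illustrative helper, not part of Radkit, and it assumes the trait methods take `&self`):

```rust
use radkit::models::providers::{AnthropicLlm, OpenAILlm};
use radkit::models::{BaseLlm, Thread};

/// Provider-agnostic call: only the construction site names a concrete type.
async fn ask(llm: &impl BaseLlm, prompt: &str) -> anyhow::Result<String> {
    let thread = Thread::from_user(prompt);
    let response = llm.generate_content(thread, None).await?;
    Ok(response.content().first_text().unwrap_or_default().to_string())
}

let claude = AnthropicLlm::from_env("claude-3-opus-20240229")?;
let gpt = OpenAILlm::from_env("gpt-4o")?;

// Switching providers changes only the line that constructs the client.
println!("{}", ask(&claude, "One-line summary of Rust.").await?);
println!("{}", ask(&gpt, "One-line summary of Rust.").await?);
```

If the `BaseLlm` methods take `self` by value in your Radkit version, pass the client by value instead.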