chore: initial clean commit

This commit is contained in:
2026-02-26 13:56:21 -05:00
commit 1755075657
53 changed files with 18068 additions and 0 deletions

51
src/utils/tokens.rs Normal file
View File

@@ -0,0 +1,51 @@
use tiktoken_rs::get_bpe_from_model;
use crate::models::UnifiedRequest;
/// Count tokens for a given model and text
/// Tokenize `text` with the BPE encoder registered for `model` and return the token count.
///
/// If no encoder is known for `model`, falls back to `cl100k_base` (the
/// GPT-4 / o1-family encoding) so callers always receive a usable estimate.
/// Special tokens are included in the count.
pub fn count_tokens(model: &str, text: &str) -> u32 {
    // Unknown model name -> safe default encoding rather than an error.
    let bpe = match get_bpe_from_model(model) {
        Ok(encoder) => encoder,
        Err(_) => tiktoken_rs::cl100k_base().expect("Failed to get cl100k_base encoding"),
    };
    // Token counts comfortably fit in u32 for any realistic input size.
    bpe.encode_with_special_tokens(text).len() as u32
}
/// Estimate tokens for a unified request
/// Approximate the prompt token footprint of a `UnifiedRequest` for `model`.
///
/// Follows the OpenAI chat-format heuristic: each message carries a small
/// fixed overhead, text parts are tokenized exactly via [`count_tokens`],
/// and image parts are charged a flat conservative cost.
pub fn estimate_request_tokens(model: &str, request: &UnifiedRequest) -> u32 {
    // Base tokens per message for OpenAI (approximate).
    const TOKENS_PER_MESSAGE: u32 = 3;
    // Reserved for per-name overhead once UnifiedMessage carries names.
    const _TOKENS_PER_NAME: u32 = 1;

    let message_tokens: u32 = request
        .messages
        .iter()
        .map(|msg| {
            let content_tokens: u32 = msg
                .content
                .iter()
                .map(|part| match part {
                    crate::models::ContentPart::Text { text } => count_tokens(model, text),
                    // Vision models usually have a fixed cost or calculation based on size.
                    // For now, use a conservative flat estimate of 1000 tokens.
                    crate::models::ContentPart::Image { .. } => 1000,
                })
                .sum();
            // Name tokens would be added here if UnifiedMessage had names.
            TOKENS_PER_MESSAGE + content_tokens
        })
        .sum();

    // Add 3 tokens for the assistant reply header.
    message_tokens + 3
}
/// Estimate tokens for completion text
/// Estimate the token count of a generated completion `text` under `model`.
///
/// Thin wrapper over [`count_tokens`]; note the parameter order here is
/// `(text, model)`, the reverse of the underlying function.
pub fn estimate_completion_tokens(text: &str, model: &str) -> u32 {
    count_tokens(model, text)
}