Hoya26/backend/src/rag/embeddings.py

import ollama

# Remote Ollama instance used to generate embeddings.
client = ollama.Client(host="https://ollama.sirblob.co")

DEFAULT_MODEL = "nomic-embed-text:latest"


def get_embedding(text, model=DEFAULT_MODEL):
    """Return the embedding vector for a single piece of text."""
    try:
        response = client.embeddings(model=model, prompt=text)
        return response["embedding"]
    except Exception as e:
        print(f"Error getting embedding from Ollama: {e}")
        raise


def get_embeddings_batch(texts, model=DEFAULT_MODEL, batch_size=50):
    """Embed a list of texts in batches and return all vectors in input order."""
    all_embeddings = []
    for i in range(0, len(texts), batch_size):
        batch = texts[i:i + batch_size]
        try:
            # client.embed accepts a list of inputs and returns one vector per item.
            response = client.embed(model=model, input=batch)
            if "embeddings" in response:
                all_embeddings.extend(response["embeddings"])
            else:
                raise ValueError("Unexpected response format from client.embed")
        except Exception as e:
            print(f"Error embedding batch {i}-{i + batch_size}: {e}")
            raise
    return all_embeddings
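
# Example usage: a minimal sketch, not part of the original module. It assumes
# the Ollama host configured above is reachable and that the
# nomic-embed-text:latest model has already been pulled on that server.
if __name__ == "__main__":
    # Embed a single string and report the vector dimensionality.
    vector = get_embedding("Example query for the retrieval pipeline.")
    print(f"Single embedding dimension: {len(vector)}")

    # Embed several documents at once; results come back in input order.
    docs = [
        "RAG retrieves relevant chunks before generation.",
        "Embeddings map text to dense vectors for similarity search.",
    ]
    vectors = get_embeddings_batch(docs, batch_size=2)
    print(f"Embedded {len(vectors)} documents")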