# Mirror of https://github.com/SirBlobby/Hoya26.git
# Synced 2026-02-03 19:24:34 -05:00
# 33 lines, 1.0 KiB, Python
import ollama
|
|
import os
|
|
|
|
# Shared Ollama client pointed at the hosted inference server; reused by
# both embedding helpers below.
client = ollama.Client(host="https://ollama.sirblob.co")

# Embedding model used when callers don't specify one explicitly.
DEFAULT_MODEL = "nomic-embed-text:latest"
|
|
|
|
def get_embedding(text, model=DEFAULT_MODEL):
    """Return the embedding vector for a single piece of text.

    Args:
        text: The input string to embed.
        model: Ollama model name to use (defaults to ``DEFAULT_MODEL``).

    Returns:
        The embedding as a list of floats, taken from the ``"embedding"``
        key of the Ollama response.

    Raises:
        Exception: Any error from the underlying client call is logged
            and re-raised unchanged.
    """
    try:
        response = client.embeddings(model=model, prompt=text)
        return response["embedding"]
    except Exception as e:
        print(f"Error getting embedding from Ollama: {e}")
        # Bare `raise` preserves the original traceback; `raise e` would
        # re-anchor the traceback at this line and hide the true origin.
        raise
|
|
|
|
def get_embeddings_batch(texts, model=DEFAULT_MODEL, batch_size=50):
    """Embed many texts, calling the Ollama batch endpoint in chunks.

    Args:
        texts: Sequence of strings to embed. An empty sequence yields
            an empty result without any network call.
        model: Ollama model name to use (defaults to ``DEFAULT_MODEL``).
        batch_size: Maximum number of texts sent per request.

    Returns:
        A list of embedding vectors, one per input text, in input order.

    Raises:
        ValueError: If a response lacks the ``"embeddings"`` key.
        Exception: Any client error is logged with the failing batch
            range and re-raised unchanged.
    """
    all_embeddings = []

    for i in range(0, len(texts), batch_size):
        batch = texts[i:i + batch_size]
        try:
            response = client.embed(model=model, input=batch)

            # Guard clause: fail loudly on an unexpected response shape
            # instead of silently returning a short result list.
            if "embeddings" not in response:
                raise ValueError("Unexpected response format from client.embed")

            all_embeddings.extend(response["embeddings"])
        except Exception as e:
            print(f"Error embedding batch {i}-{i+batch_size}: {e}")
            # Bare `raise` keeps the original traceback; `raise e` would
            # re-anchor it here and obscure the real failure site.
            raise

    return all_embeddings
|