diff --git a/package.json b/package.json
index 9640d98..004a242 100644
--- a/package.json
+++ b/package.json
@@ -11,6 +11,9 @@
   },
   "dependencies": {
     "@clerk/nextjs": "^6.10.2",
+    "@huggingface/inference": "^3.1.2",
+    "@langchain/community": "^0.3.27",
+    "@langchain/core": "^0.3.36",
     "@radix-ui/react-collapsible": "^1.1.2",
     "@radix-ui/react-dialog": "^1.1.5",
     "@radix-ui/react-dropdown-menu": "^2.1.5",
@@ -30,6 +33,7 @@
     "formidable": "^3.5.2",
     "hoyahax-2025": "file:",
     "https-localhost": "^4.7.1",
+    "langchain": "^0.3.13",
     "lucide-react": "^0.474.0",
     "mongoose": "^8.9.5",
     "multer": "1.4.5-lts.1",
diff --git a/src/app/api/chat/route.js b/src/app/api/chat/route.js
index fd19ce9..dd6c531 100644
--- a/src/app/api/chat/route.js
+++ b/src/app/api/chat/route.js
@@ -1,15 +1,18 @@
-import { InferenceAPI } from '@huggingface/inference';
-
-const hfAPI = new InferenceAPI({ apiKey: process.env.HUGGING_FACE_API_KEY });
+import { HuggingFaceInference } from "@langchain/community/llms/hf";
 
 export default async function handler(req, res) {
   if (req.method === 'POST') {
     try {
       const { query } = req.body;
-      const response = await hfAPI.query('m42-health/Llama3-Med42-8B', { inputs: { text: query } });
+      const model = new HuggingFaceInference({
+        model: 'm42-health/Llama3-Med42-8B',
+        apiKey: process.env.HUGGING_FACE_API_KEY,
+      });
 
-      res.status(200).json({ answer: response.data[0].generated_text });
+      const response = await model.invoke(query);
+
+      res.status(200).json({ answer: response });
     } catch (error) {
       console.error(error);
       res.status(500).json({ error: 'Error generating response' });
     }
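
Note on the route handler: the file lives at src/app/api/chat/route.js, which is an App Router route handler, but the diff keeps the Pages Router signature (export default async function handler(req, res)). App Router route handlers instead export named HTTP methods (GET, POST, ...) and work with Web Request/Response objects, so the handler above would not be invoked as written. Below is a minimal sketch of the same logic in App Router style; the model id and HUGGING_FACE_API_KEY env var come from the diff, everything else is illustrative. (Keeping @huggingface/inference in package.json still makes sense, since the LangChain wrapper loads it as a peer dependency.)

// src/app/api/chat/route.js (App Router style, sketch)
import { NextResponse } from "next/server";
import { HuggingFaceInference } from "@langchain/community/llms/hf";

export async function POST(request) {
  try {
    // Parse the JSON body sent by the client
    const { query } = await request.json();

    const model = new HuggingFaceInference({
      model: "m42-health/Llama3-Med42-8B",
      apiKey: process.env.HUGGING_FACE_API_KEY,
    });

    // invoke() returns the generated text as a plain string
    const answer = await model.invoke(query);

    return NextResponse.json({ answer });
  } catch (error) {
    console.error(error);
    return NextResponse.json(
      { error: "Error generating response" },
      { status: 500 }
    );
  }
}

One design note either way: the diff constructs the model client on every request; hoisting the new HuggingFaceInference(...) call to module scope would avoid that without changing behavior.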
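
For completeness, a quick caller sketch for the endpoint (the /api/chat path and the { query } / { answer } shapes follow the code above; the sample question is made up):

// Client-side usage example (sketch)
const res = await fetch("/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ query: "What are common symptoms of anemia?" }),
});
const { answer } = await res.json();
console.log(answer);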