diff --git a/src/app/(panels)/suite/patient/chat/page.tsx b/src/app/(panels)/suite/patient/chat/page.tsx
new file mode 100644
index 0000000..f361192
--- /dev/null
+++ b/src/app/(panels)/suite/patient/chat/page.tsx
@@ -0,0 +1,10 @@
+"use client"
+
+export default function Chat() {
+  return (
+    <div>
+
+    </div>
+  )
+}
+
diff --git a/src/app/(web)/transcribe/layout.tsx b/src/app/(web)/transcribe/layout.tsx
deleted file mode 100644
index a14e64f..0000000
--- a/src/app/(web)/transcribe/layout.tsx
+++ /dev/null
@@ -1,16 +0,0 @@
-export const metadata = {
-  title: 'Next.js',
-  description: 'Generated by Next.js',
-}
-
-export default function RootLayout({
-  children,
-}: {
-  children: React.ReactNode
-}) {
-  return (
-    <html lang="en">
-      <body>{children}</body>
-    </html>
-  )
-}
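Two notes on this pair of changes. The new chat page is an empty client-component stub, so there is nothing to review there yet. Deleting `src/app/(web)/transcribe/layout.tsx` is the more substantive change: it was a scaffolded root-style layout that rendered `<html>` and `<body>` inside a nested route segment, and in the App Router only the root layout may do that. If the transcribe segment ever needs its own layout again, a minimal sketch of the safe shape is below (the `<section>` wrapper is illustrative, not part of this PR):

```tsx
// Hypothetical nested layout for the (web)/transcribe segment.
// Nested layouts wrap `children` in a plain element; only the root
// layout renders <html> and <body>.
export default function TranscribeLayout({
  children,
}: {
  children: React.ReactNode;
}) {
  return <section>{children}</section>;
}
```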
diff --git a/src/app/(web)/transcribe/page.tsx b/src/app/(web)/transcribe/page.tsx
index 76fe1e9..74017c5 100644
--- a/src/app/(web)/transcribe/page.tsx
+++ b/src/app/(web)/transcribe/page.tsx
@@ -1,56 +1,122 @@
 "use client";
 
-import React, { useState } from "react";
+import React, { useState, useRef } from "react";
 import axios from "axios";
 
 const AudioTranscriber: React.FC = () => {
-  const [file, setFile] = useState<File | null>(null);
-  const [transcription, setTranscription] = useState<string | null>(null);
-  const [loading, setLoading] = useState(false);
+  const [file, setFile] = useState<File | null>(null);
+  const [transcription, setTranscription] = useState<string | null>(null);
+  const [loading, setLoading] = useState(false);
+  const [recording, setRecording] = useState(false);
+  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
+  const audioChunksRef = useRef<Blob[]>([]);
 
-  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
-    if (event.target.files && event.target.files.length > 0) {
-      setFile(event.target.files[0]);
-    }
-  };
+  // Handle file selection
+  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+    if (event.target.files && event.target.files.length > 0) {
+      setFile(event.target.files[0]);
+    }
+  };
 
-  const handleTranscription = async () => {
-    if (!file) return alert("Please select an audio file to transcribe!");
+  // Handle file transcription
+  const handleTranscription = async (audioFile: File) => {
+    if (!audioFile) return alert("No audio file to transcribe!");
 
-    const formData = new FormData();
-    formData.append("file", file);
+    const formData = new FormData();
+    formData.append("file", audioFile);
 
-    setLoading(true);
-    try {
-      const response = await axios.post("http://localhost:8000/transcribe", formData, {
-        headers: {
-          "Content-Type": "multipart/form-data",
-        },
-      });
-      setTranscription(response.data.transcription);
-    } catch (error) {
-      console.error("Error transcribing audio:", error);
-      alert("Failed to transcribe audio. Please try again.");
-    } finally {
-      setLoading(false);
-    }
-  };
+    setLoading(true);
+    try {
+      const response = await axios.post("http://localhost:8000/transcribe", formData, {
+        headers: {
+          "Content-Type": "multipart/form-data",
+        },
+      });
+      setTranscription(response.data.transcription);
+    } catch (error) {
+      console.error("Error transcribing audio:", error);
+      alert("Failed to transcribe audio. Please try again.");
+    } finally {
+      setLoading(false);
+    }
+  };
 
-  return (
-    <div>
-      <h1>Audio Transcription</h1>
-      <input type="file" accept="audio/*" onChange={handleFileChange} />
-      <button onClick={handleTranscription}>Transcribe</button>
-      {transcription && (
-        <div>
-          <h2>Transcription:</h2>
-          <p>{transcription}</p>
-        </div>
-      )}
-    </div>
-  );
+  // Start recording audio
+  const startRecording = async () => {
+    try {
+      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+      mediaRecorderRef.current = new MediaRecorder(stream);
+
+      audioChunksRef.current = []; // Reset audio chunks
+      mediaRecorderRef.current.ondataavailable = (event) => {
+        if (event.data.size > 0) {
+          audioChunksRef.current.push(event.data);
+        }
+      };
+
+      mediaRecorderRef.current.onstop = async () => {
+        // MediaRecorder encodes WebM/Ogg, not MP3, so label the blob accordingly
+        const audioBlob = new Blob(audioChunksRef.current, { type: "audio/webm" });
+        const audioFile = new File([audioBlob], "recording.webm", { type: "audio/webm" });
+        setFile(audioFile); // Save the recorded file
+
+        // Transcribe the recorded audio
+        await handleTranscription(audioFile);
+      };
+
+      mediaRecorderRef.current.start();
+      setRecording(true);
+    } catch (error) {
+      console.error("Error starting recording:", error);
+      alert("Failed to start recording. Please check microphone permissions.");
+    }
+  };
+
+  // Stop recording audio
+  const stopRecording = () => {
+    if (mediaRecorderRef.current) {
+      mediaRecorderRef.current.stop();
+      setRecording(false);
+    }
+  };
+
+  return (
+    <div>
+      <h1>Audio Transcription</h1>
+
+      <div>
+        <h2>Upload or Record Audio</h2>
+        {/* File Upload */}
+        <input type="file" accept="audio/*" onChange={handleFileChange} />
+        <button onClick={() => file && handleTranscription(file)}>Transcribe File</button>
+      </div>
+
+      {/* Recording Controls */}
+      <div>
+        <h2>Record Audio</h2>
+        {!recording ? (
+          <button onClick={startRecording}>Start Recording</button>
+        ) : (
+          <button onClick={stopRecording}>Stop Recording</button>
+        )}
+      </div>
+
+      {/* Transcription Result */}
+      <div>
+        <h2>Transcription:</h2>
+        {loading ? (
+          <p>Processing transcription...</p>
+        ) : transcription ? (
+          <p>{transcription}</p>
+        ) : (
+          <p>No transcription available yet.</p>
+        )}
+      </div>
+    </div>
+  );
 };
 
 export default AudioTranscriber;
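One caveat on the recording path: `MediaRecorder` does not encode MP3 in mainstream browsers; Chrome typically emits WebM/Opus and Firefox Ogg, which is why the recorded blob is labeled `audio/webm` above. A more defensive version would probe for a container the browser can actually encode instead of assuming one. A minimal sketch, using the standard `MediaRecorder.isTypeSupported` check (the `pickMimeType` and `createRecorder` helpers are illustrative, not part of this PR):

```tsx
// Probe the browser for a container it can actually encode.
function pickMimeType(): string {
  const candidates = ["audio/webm;codecs=opus", "audio/webm", "audio/ogg"];
  return candidates.find((t) => MediaRecorder.isTypeSupported(t)) ?? "";
}

async function createRecorder(): Promise<MediaRecorder> {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const mimeType = pickMimeType();
  // Fall back to the browser's default container if nothing matched.
  return mimeType ? new MediaRecorder(stream, { mimeType }) : new MediaRecorder(stream);
}
```

Since the FastAPI side runs Whisper, which decodes uploads through ffmpeg, either WebM or Ogg should transcribe fine; the extension and MIME type mostly matter for debugging.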
diff --git a/src/app/api/transcribe/__pycache__/app.cpython-39.pyc b/src/app/api/transcribe/__pycache__/app.cpython-39.pyc
index 423ad4b..9002252 100644
Binary files a/src/app/api/transcribe/__pycache__/app.cpython-39.pyc and b/src/app/api/transcribe/__pycache__/app.cpython-39.pyc differ
diff --git a/src/app/api/transcribe/app.py b/src/app/api/transcribe/app.py
index 12f6408..d039ad1 100644
--- a/src/app/api/transcribe/app.py
+++ b/src/app/api/transcribe/app.py
@@ -24,9 +24,8 @@ async def transcribe_audio(file: UploadFile = File(...)):
 
     try:
         # Transcribe the audio
-        result = model.transcribe("inputs/test.mp3")
+        result = model.transcribe(temp_path)
         transcription = result["text"]
-        print(transcription)
     finally:
         # Clean up temporary file
         os.remove(temp_path)
diff --git a/src/app/api/transcribe/route.ts b/src/app/api/transcribe/route.ts
deleted file mode 100644
index ce4efac..0000000
--- a/src/app/api/transcribe/route.ts
+++ /dev/null
@@ -1,55 +0,0 @@
-import { NextRequest, NextResponse } from "next/server";
-import multer from "multer";
-import fs from "fs/promises";
-import whisper from "openai-whisper";
-
-const upload = multer({ dest: "uploads/" });
-
-export const config = {
-  api: {
-    bodyParser: false, // Disable Next.js's body parsing for file uploads
-  },
-};
-
-// Whisper model (initialize once for efficiency)
-const model = whisper.load_model("base");
-
-// Utility to transcribe audio
-async function transcribeAudio(filePath: string): Promise<string> {
-  const transcription = await model.transcribe(filePath);
-  return transcription.text;
-}
-
-async function parseMultipartForm(req: NextRequest): Promise<any> {
-  return new Promise((resolve, reject) => {
-    const multerMiddleware = upload.single("audio");
-    multerMiddleware(req as any, {} as any, (error: any) => {
-      if (error) return reject(error);
-      resolve(req.file);
-    });
-  });
-}
-
-export async function POST(req: NextRequest) {
-  try {
-    // Parse the incoming multipart form data
-    const file = await parseMultipartForm(req);
-
-    if (!file) {
-      return NextResponse.json({ error: "No audio file provided" }, { status: 400 });
-    }
-
-    const filePath = file.path;
-
-    // Transcribe the audio
-    const transcription = await transcribeAudio(filePath);
-
-    // Clean up the uploaded file
-    await fs.unlink(filePath);
-
-    return NextResponse.json({ transcription });
-  } catch (error) {
-    console.error("Error during transcription:", error);
-    return NextResponse.json({ error: "Transcription failed" }, { status: 500 });
-  }
-}
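Removing `route.ts` is the right call: it imported `openai-whisper` as if it were an npm module, but that is a Python package, so the handler could never have run in Node. Transcription now lives entirely in the FastAPI service, and the client posts to `http://localhost:8000/transcribe` directly. If the hard-coded origin ever becomes a problem (CORS, deployment), one option is a thin Next.js proxy route. A minimal sketch, assuming the service keeps returning `{ transcription }` as `app.py` does; the `WHISPER_API_URL` environment variable is an assumption, not part of this PR:

```ts
import { NextRequest, NextResponse } from "next/server";

// Hypothetical proxy so the browser posts to /api/transcribe and never
// needs to know where the FastAPI service lives.
export async function POST(req: NextRequest) {
  // Web-standard multipart parsing; no multer needed in route handlers.
  const formData = await req.formData();

  const upstream = await fetch(
    process.env.WHISPER_API_URL ?? "http://localhost:8000/transcribe",
    { method: "POST", body: formData } // fetch re-encodes the multipart body
  );

  if (!upstream.ok) {
    return NextResponse.json({ error: "Transcription failed" }, { status: 502 });
  }
  return NextResponse.json(await upstream.json()); // { transcription: string }
}
```

This would also make the `axios.post` target in `page.tsx` a relative `/api/transcribe`, removing the localhost assumption from the client bundle.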