Merge remote-tracking branch 'refs/remotes/origin/main'
src/app/(panels)/suite/patient/chat/page.tsx (new file, +10)
@@ -0,0 +1,10 @@
+"use client"
+
+export default function Chat() {
+  return (
+    <div className="container mx-auto">
+
+    </div>
+  )
+}
+
@@ -1,16 +0,0 @@
-export const metadata = {
-  title: 'Next.js',
-  description: 'Generated by Next.js',
-}
-
-export default function RootLayout({
-  children,
-}: {
-  children: React.ReactNode
-}) {
-  return (
-    <html lang="en">
-      <body>{children}</body>
-    </html>
-  )
-}
@@ -1,56 +1,122 @@
 "use client";

-import React, { useState } from "react";
+import React, { useState, useRef } from "react";
 import axios from "axios";

 const AudioTranscriber: React.FC = () => {
-  const [file, setFile] = useState<File | null>(null);
-  const [transcription, setTranscription] = useState<string | null>(null);
-  const [loading, setLoading] = useState(false);
+  const [file, setFile] = useState<File | null>(null);
+  const [transcription, setTranscription] = useState<string | null>(null);
+  const [loading, setLoading] = useState(false);
+  const [recording, setRecording] = useState(false);
+  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
+  const audioChunksRef = useRef<Blob[]>([]);

-  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
-    if (event.target.files && event.target.files.length > 0) {
-      setFile(event.target.files[0]);
-    }
-  };
+  // Handle file selection
+  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+    if (event.target.files && event.target.files.length > 0) {
+      setFile(event.target.files[0]);
+    }
+  };

-  const handleTranscription = async () => {
-    if (!file) return alert("Please select an audio file to transcribe!");
+  // Handle file transcription
+  const handleTranscription = async (audioFile: File) => {
+    if (!audioFile) return alert("No audio file to transcribe!");

-    const formData = new FormData();
-    formData.append("file", file);
+    const formData = new FormData();
+    formData.append("file", audioFile);

-    setLoading(true);
-    try {
-      const response = await axios.post("http://localhost:8000/transcribe", formData, {
-        headers: {
-          "Content-Type": "multipart/form-data",
-        },
-      });
-      setTranscription(response.data.transcription);
-    } catch (error) {
-      console.error("Error transcribing audio:", error);
-      alert("Failed to transcribe audio. Please try again.");
-    } finally {
-      setLoading(false);
-    }
-  };
+    setLoading(true);
+    try {
+      const response = await axios.post("http://localhost:8000/transcribe", formData, {
+        headers: {
+          "Content-Type": "multipart/form-data",
+        },
+      });
+      setTranscription(response.data.transcription);
+    } catch (error) {
+      console.error("Error transcribing audio:", error);
+      alert("Failed to transcribe audio. Please try again.");
+    } finally {
+      setLoading(false);
+    }
+  };

-  return (
-    <div>
-      <h1>Audio Transcription</h1>
-      <input type="file" accept="audio/*" onChange={handleFileChange} />
-      <button onClick={handleTranscription} disabled={loading}>
-        {loading ? "Transcribing..." : "Transcribe"}
-      </button>
-      {transcription && (
-        <div>
-          <h2>Transcription:</h2>
-          <p>{transcription}</p>
-        </div>
-      )}
-    </div>
-  );
+  // Start recording audio
+  const startRecording = async () => {
+    try {
+      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+      mediaRecorderRef.current = new MediaRecorder(stream);
+
+      audioChunksRef.current = []; // Reset audio chunks
+      mediaRecorderRef.current.ondataavailable = (event) => {
+        if (event.data.size > 0) {
+          audioChunksRef.current.push(event.data);
+        }
+      };
+
+      mediaRecorderRef.current.onstop = async () => {
+        const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" });
+        const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" });
+        setFile(audioFile); // Save the recorded file
+
+        // Transcribe the recorded audio
+        setTranscription("Transcribing the recorded audio...");
+        await handleTranscription(audioFile);
+      };
+
+      mediaRecorderRef.current.start();
+      setRecording(true);
+    } catch (error) {
+      console.error("Error starting recording:", error);
+      alert("Failed to start recording. Please check microphone permissions.");
+    }
+  };
+
+  // Stop recording audio
+  const stopRecording = () => {
+    if (mediaRecorderRef.current) {
+      mediaRecorderRef.current.stop();
+      setRecording(false);
+    }
+  };
+
+  return (
+    <div>
+      <h1>Audio Transcription</h1>
+      <div>
+        <h2>Upload or Record Audio</h2>
+        {/* File Upload */}
+        <input type="file" accept="audio/*" onChange={handleFileChange} />
+        <button onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
+          {loading ? "Transcribing..." : "Transcribe"}
+        </button>
+      </div>
+
+      {/* Recording Controls */}
+      <div>
+        <h2>Record Audio</h2>
+        {!recording ? (
+          <button onClick={startRecording}>Start Recording</button>
+        ) : (
+          <button onClick={stopRecording} disabled={!recording}>
+            Stop Recording
+          </button>
+        )}
+      </div>
+
+      {/* Transcription Result */}
+      <div>
+        <h2>Transcription:</h2>
+        {loading ? (
+          <p>Processing transcription...</p>
+        ) : transcription ? (
+          <p>{transcription}</p>
+        ) : (
+          <p>No transcription available yet.</p>
+        )}
+      </div>
+    </div>
+  );
 };

 export default AudioTranscriber;
Binary file not shown.
@@ -24,9 +24,8 @@ async def transcribe_audio(file: UploadFile = File(...)):
-
     try:
         # Transcribe the audio
-        result = model.transcribe("inputs/test.mp3")
+        result = model.transcribe(temp_path)
         transcription = result["text"]
         print(transcription)
     finally:
         # Clean up temporary file
         os.remove(temp_path)
@@ -1,55 +0,0 @@
-import { NextRequest, NextResponse } from "next/server";
-import multer from "multer";
-import fs from "fs/promises";
-import whisper from "openai-whisper";
-
-const upload = multer({ dest: "uploads/" });
-
-export const config = {
-  api: {
-    bodyParser: false, // Disable Next.js's body parsing for file uploads
-  },
-};
-
-// Whisper model (initialize once for efficiency)
-const model = whisper.load_model("base");
-
-// Utility to transcribe audio
-async function transcribeAudio(filePath: string): Promise<string> {
-  const transcription = await model.transcribe(filePath);
-  return transcription.text;
-}
-
-async function parseMultipartForm(req: NextRequest): Promise<File> {
-  return new Promise((resolve, reject) => {
-    const multerMiddleware = upload.single("audio");
-    multerMiddleware(req as any, {} as any, (error: any) => {
-      if (error) return reject(error);
-      resolve(req.file);
-    });
-  });
-}
-
-export async function POST(req: NextRequest) {
-  try {
-    // Parse the incoming multipart form data
-    const file = await parseMultipartForm(req);
-
-    if (!file) {
-      return NextResponse.json({ error: "No audio file provided" }, { status: 400 });
-    }
-
-    const filePath = file.path;
-
-    // Transcribe the audio
-    const transcription = await transcribeAudio(filePath);
-
-    // Clean up the uploaded file
-    await fs.unlink(filePath);
-
-    return NextResponse.json({ transcription });
-  } catch (error) {
-    console.error("Error during transcription:", error);
-    return NextResponse.json({ error: "Transcription failed" }, { status: 500 });
-  }
-}