final commit
@@ -1,10 +1,8 @@
 "use client";
 import { Button } from "@/components/ui/button";
 import { Input } from "@/components/ui/input";
-//import Hero1 from '@/components/Hero1'
-//IMPORT THE HERO1 FUNCTION TO MAKE THE TRANSCRIBE PAGE LOOK BETTER
+import axios from "axios";
 import React, { useState, useRef } from "react";
-// import axios from "axios";

 import { AlertCircle } from "lucide-react"

@@ -15,17 +13,18 @@ import {
 } from "@/components/ui/alert"
 import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";

-const AudioTranscriber: React.FC = () => {
-  const [file, setFile] = useState<File | null>(null);
-  const [transcription, setTranscription] = useState<string | null>(null);
+const AudioTranscriber = () => {
+  const [file, setFile] = useState(null);
+  const [transcription, setTranscription] = useState(null);
   const [loading, setLoading] = useState(false);
   const [recording, setRecording] = useState(false);
-  const [error, setError] = useState<string | null>(null);
-  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
-  const audioChunksRef = useRef<Blob[]>([]);
+  const [error, setError] = useState(null);
+  const mediaRecorderRef = useRef(null);
+  const audioChunksRef = useRef([]);
+  const [audioBlob, setAudioBlob] = useState(null);

   // Handle file selection
-  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+  const handleFileChange = (event) => {
     if (event.target.files && event.target.files.length > 0) {
       setFile(event.target.files[0]);
       console.log("File selected:", event.target.files[0].name);
@@ -33,42 +32,31 @@ const AudioTranscriber: React.FC = () => {
   };

   // Handle file transcription
-  const handleTranscription = async (audioFile: File) => {
-    if (!audioFile) {
-      alert("No audio file to transcribe!");
-      return;
-    }
+  const handleTranscription = async () => {
+    console.log(audioBlob, process.env.REACT_APP_GOOGLE_CLOUD_API_KEY)

-    console.log("Starting transcription for:", audioFile.name);

     const formData = new FormData();
-    formData.append("file", audioFile);
-    setLoading(true);
-    setError(null); // Clear previous errors
+    formData.append('audio', new Blob(audioChunksRef.current, { type: "audio/mp3" }));
     try {
-      let response = await fetch("/api/transcribe", {
-        method: "POST",
-        body: formData,
-        headers: {
-          "Content-Type": "multipart/form-data",
+      const response = await axios.post(
+        'https://speech.googleapis.com/v1/speech:recognize', // Replace with your actual endpoint
+        formData,
+        {
+          headers: {
+            'Access-Control-Allow-Origin': '*/*',
+            'Authorization': `Bearer AIzaSyCgQ_pa8AbNBUViQkqf2sNJQ8zl-fIPQfs`, // Replace with your API key
+            'Content-Type': 'multipart/form-data',
+          },
         }
-      })
+      );

-      console.log("! response:", response);
-      response = await response.json();
-      console.log("@ response:", response);
+      console.log(response.data.results[0].alternatives[0].transcript);

-      // if (response.data && response.data.transcription) {
-      //   setTranscription(response.data.transcription);
-      // } else {
-      //   setError("Unexpected response format. Check backend API.");
-      //   console.error("Invalid response format:", response.data);
-      // }
+      setTranscription(response.data.results[0].alternatives[0].transcript);
     } catch (error) {
-      console.error("Error transcribing audio:", error);
-      setError("Failed to transcribe audio. Please try again.");
-    } finally {
-      setLoading(false);
+      console.error('Error transcribing audio:', error);
+      setTranscription('Transcription failed.');
     }
   };

@@ -89,14 +77,11 @@ const AudioTranscriber: React.FC = () => {
       };

       mediaRecorderRef.current.onstop = async () => {
-        const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" });
-        const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" });

         console.log("Recording stopped. Blob created:", audioBlob);
-
-        setFile(audioFile); // Save the recorded file
+        setAudioBlob(new Blob(audioChunksRef.current, { type: "audio/mp3" }));
         setTranscription("Processing transcription for recorded audio...");
-        await handleTranscription(audioFile); // Automatically transcribe
+        await handleTranscription(); // Automatically transcribe
       };

       mediaRecorderRef.current.start();
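
Note: both the old and new onstop handlers label the recording "audio/mp3", but MediaRecorder in current browsers generally encodes WebM/Opus or Ogg/Opus rather than MP3, so that type string may not describe the actual bytes. A minimal sketch of picking a container the recorder can really produce, reusing stream, audioChunksRef, and setAudioBlob from the component (the preferredTypes list and variable names are illustrative, not from this commit):

    // Sketch: pick a mime type MediaRecorder can actually encode instead of assuming MP3.
    const preferredTypes = ["audio/webm;codecs=opus", "audio/webm", "audio/ogg"];
    const mimeType = preferredTypes.find((t) => MediaRecorder.isTypeSupported(t)) || "";

    const recorder = new MediaRecorder(stream, mimeType ? { mimeType } : undefined);
    recorder.onstop = () => {
      // Label the Blob with the recorder's own mimeType so it matches the encoded data.
      setAudioBlob(new Blob(audioChunksRef.current, { type: recorder.mimeType || mimeType }));
    };
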
@@ -129,7 +114,7 @@ const AudioTranscriber: React.FC = () => {
         <h2>Upload or Record Audio</h2>

         <Input type="file" accept="audio/*" onChange={handleFileChange} />
-        <Button className="my-4" onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
+        <Button className="my-4" onClick={() => file && handleTranscription()} disabled={loading || !file}>
           {loading ? "Transcribing..." : "Transcribe"}
         </Button>
       </div>
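
Note: the new handleTranscription posts multipart form data to speech.googleapis.com, but Google's speech:recognize REST method expects a JSON body containing a recognition config plus base64-encoded audio, authenticated with a ?key= query parameter or an OAuth token; also, any key hard-coded into a client component (like the literal Bearer value above) ships to every visitor's browser. A minimal sketch under those assumptions (NEXT_PUBLIC_SPEECH_KEY and recognizeBlob are illustrative names, not from this commit; WEBM_OPUS matches what MediaRecorder typically records):

    // Sketch: speech:recognize with a JSON body and base64 audio (assumed env var name).
    async function recognizeBlob(audioBlob) {
      const dataUrl = await new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onloadend = () => resolve(reader.result);
        reader.onerror = reject;
        reader.readAsDataURL(audioBlob);
      });
      const content = dataUrl.split(",")[1]; // strip the "data:...;base64," prefix

      const response = await axios.post(
        `https://speech.googleapis.com/v1/speech:recognize?key=${process.env.NEXT_PUBLIC_SPEECH_KEY}`,
        {
          config: { encoding: "WEBM_OPUS", sampleRateHertz: 48000, languageCode: "en-US" },
          audio: { content },
        },
        { headers: { "Content-Type": "application/json" } }
      );
      return response.data.results?.[0]?.alternatives?.[0]?.transcript ?? "";
    }
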
src/app/(web)/transcribe/textsp.jsx (new file, 74 lines)
@@ -0,0 +1,74 @@
+import React, { useState, useRef } from 'react';
+import axios from 'axios';
+
+function SpeechToText() {
+  const [isRecording, setIsRecording] = useState(false);
+  const [audioBlob, setAudioBlob] = useState(null);
+  const [transcription, setTranscription] = useState('');
+  const mediaRecorderRef = useRef(null);
+
+  const startRecording = async () => {
+    try {
+      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+      mediaRecorderRef.current = new MediaRecorder(stream);
+
+      mediaRecorderRef.current.ondataavailable = (event) => {
+        if (event.data && event.data.size > 0) {
+          setAudioBlob(new Blob([event.data], { type: 'audio/webm' }));
+        }
+      };
+
+      mediaRecorderRef.current.start();
+      setIsRecording(true);
+    } catch (error) {
+      console.error('Error accessing microphone:', error);
+    }
+  };
+
+  const stopRecording = () => {
+    mediaRecorderRef.current.stop();
+    setIsRecording(false);
+  };
+
+  const handleTranscription = async () => {
+    if (!audioBlob) {
+      alert('No audio recorded.');
+      return;
+    }
+
+    const formData = new FormData();
+    formData.append('audio', audioBlob, 'audio.webm');
+
+    try {
+      const response = await axios.post(
+        'https://speech.googleapis.com', // Replace with your actual endpoint
+        formData,
+        {
+          headers: {
+            'Authorization': `Bearer ${process.env.REACT_APP_GOOGLE_CLOUD_API_KEY}`, // Replace with your API key
+            'Content-Type': 'multipart/form-data',
+          },
+        }
+      );
+
+      setTranscription(response.data.results[0].alternatives[0].transcript);
+    } catch (error) {
+      console.error('Error transcribing audio:', error);
+      setTranscription('Transcription failed.');
+    }
+  };
+
+  return (
+    <div>
+      <button onClick={isRecording ? stopRecording : startRecording}>
+        {isRecording ? 'Stop Recording' : 'Start Recording'}
+      </button>
+      <button onClick={handleTranscription} disabled={!audioBlob}>
+        Transcribe
+      </button>
+      <p>Transcription: {transcription}</p>
+    </div>
+  );
+}
+
+export default SpeechToText;
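
Note: in textsp.jsx each dataavailable event replaces audioBlob with only that event's data, so if the recorder ever delivers more than one chunk (for example when start() is given a timeslice) only the last piece survives. A minimal sketch that accumulates chunks instead, mirroring the audioChunksRef approach in the transcribe page (chunksRef is an illustrative name, not in the committed file):

    // Sketch: collect every chunk, then build the Blob once recording stops.
    const chunksRef = useRef([]); // assumed extra ref, not in the committed file

    mediaRecorderRef.current.ondataavailable = (event) => {
      if (event.data && event.data.size > 0) chunksRef.current.push(event.data);
    };
    mediaRecorderRef.current.onstop = () => {
      setAudioBlob(new Blob(chunksRef.current, { type: 'audio/webm' }));
      chunksRef.current = [];
    };
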
@@ -11,103 +11,103 @@ config();
 const OPENAI_API_KEY = process.env.OPENAI_API_KEY;

 if (!OPENAI_API_KEY) {
   throw new Error("OpenAI API key is missing. Set OPENAI_API_KEY in your .env.local file.");
 }

 // Initialize OpenAI client
 const openaiClient = new OpenAI({
   apiKey: OPENAI_API_KEY,
 });

 // Helper to parse multipart form data
 async function parseMultipartForm(req) {
   const form = formidable({
     multiples: false,
     uploadDir: "/tmp",
     keepExtensions: true,
     maxFileSize: 50 * 1024 * 1024,
   });

   return new Promise((resolve, reject) => {
     form.parse(req, (err, fields, files) => {
       if (err) {
         return reject(err);
       }
       const file = files.file[0];
       resolve({
         filePath: file.filepath,
         originalFilename: file.originalFilename || 'unknown',
       });
     });
   });
 }

 // Main handler
 export async function POST(req, res) {
   if (req.method !== "POST") {
     return res.status(405).json({ error: "Method not allowed. Use POST." });
   }

   let filePath = null;

   try {
     // Parse file upload
     const { filePath: tempFilePath, originalFilename } = await parseMultipartForm(req);
     filePath = tempFilePath;

     // Log file details
     console.log("Uploaded file path:", filePath);
     console.log("Original filename:", originalFilename);

     // Validate file extension
     const allowedExtensions = ["mp3", "wav", "m4a"];
     const fileExtension = path.extname(originalFilename).toLowerCase().replace(".", "");
     if (!allowedExtensions.includes(fileExtension)) {
       unlinkSync(filePath);
       return res.status(400).json({
         error: `Invalid file format. Only ${allowedExtensions.join(", ")} are supported.`,
       });
     }

     // Get transcription from OpenAI Whisper model
     console.log("Requesting transcription...");
     const transcription = await openaiClient.audio.transcriptions.create({
       file: fs.createReadStream(filePath), // Pass the file stream here
       model: "whisper-1",
       response_format: "text", // Ensure this is set
     });

     console.log("Transcription:", transcription); // Log the transcription response

     // Clean up temporary file
     if (filePath && existsSync(filePath)) {
       unlinkSync(filePath);
     }

     // Send response back to client
     return res.status(200).json({ transcription: transcription.text });
   } catch (error) {
     console.error("Error during transcription:", error);

     if (error instanceof AxiosError) {
       console.error("OpenAI API error:", error.response?.data || error.message);
       return res.status(error.response?.status || 500).json({
         error: error.response?.data?.error?.message || "OpenAI API Error.",
       });
     }

     return res.status(500).json({
       error: "An unexpected error occurred.",
     });
   } finally {
     if (filePath) {
       try {
         if (existsSync(filePath)) {
           unlinkSync(filePath);
         }
       } catch (err) {
         console.error("Failed to clean up temporary file:", err);
       }
     }
   }
 }
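
Note: the route above reads the upload from files.file[0], so a caller has to send multipart form data under the field name "file" and should let the browser set the multipart Content-Type (with its boundary) itself; the route responds with { transcription }. A minimal sketch of a matching client call (transcribeViaApi is an illustrative name, not from this commit):

    // Sketch: client-side call that matches parseMultipartForm and the route's response shape.
    async function transcribeViaApi(audioFile) {
      const formData = new FormData();
      formData.append("file", audioFile); // field name must be "file" to match files.file[0]

      // No explicit Content-Type header: the browser adds multipart/form-data with its boundary.
      const res = await fetch("/api/transcribe", { method: "POST", body: formData });
      if (!res.ok) throw new Error(`Transcription request failed with status ${res.status}`);

      const { transcription } = await res.json();
      return transcription;
    }
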
@@ -22,6 +22,6 @@
       "@/*": ["./src/*"]
     }
   },
-  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "src/app/api/connectDB.js", "src/lib/utils.js", "src/app/(web)/account/page.jsx", "src/app/(panels)/suite/patient/account/page.jsx", "src/app/(panels)/suite/patient/dashboard/MedicationTable.jsx", "src/app/(panels)/suite/patient/dashboard/page.jsx", "src/app/api/transcribe/route.js", "src/components/ui/calendar.jsx", "src/app/(web)/page.jsx", "src/app/(web)/transcribe/page.jsx", "src/app/(web)/login/page.jsx", "src/app/(panels)/suite/layout.jsx", "src/app/(panels)/suite/doctor/dashboard/page.jsx", "src/app/(panels)/suite/patient/chat/page.jsx", "src/app/(panels)/suite/doctor/dashboard/AppList.jsx", "src/app/(panels)/suite/doctor/patients/page.jsx", "src/app/(panels)/suite/doctor/dashboard/AgeChart.jsx"],
+  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "src/app/api/connectDB.js", "src/lib/utils.js", "src/app/(web)/account/page.jsx", "src/app/(panels)/suite/patient/account/page.jsx", "src/app/(panels)/suite/patient/dashboard/MedicationTable.jsx", "src/app/(panels)/suite/patient/dashboard/page.jsx", "src/app/api/transcribe/route.js", "src/components/ui/calendar.jsx", "src/app/(web)/page.jsx", "src/app/(web)/transcribe/textsp.jsx", "src/app/(web)/login/page.jsx", "src/app/(panels)/suite/layout.jsx", "src/app/(panels)/suite/doctor/dashboard/page.jsx", "src/app/(panels)/suite/patient/chat/page.jsx", "src/app/(panels)/suite/doctor/dashboard/AppList.jsx", "src/app/(panels)/suite/doctor/patients/page.jsx", "src/app/(panels)/suite/doctor/dashboard/AgeChart.jsx", "src/app/(web)/transcribe/textsp.jsx", "src/app/(web)/transcribe/page.jsx"],
   "exclude": ["node_modules"]
 }