UI Update

Sir Blob
2025-01-26 05:50:08 -05:00
parent 6fbd80c3fb
commit f705390c3f


@@ -1,156 +1,174 @@
"use client";
+ import { Button } from "@/components/ui/button";
+ import { Input } from "@/components/ui/input";
//import Hero1 from '@/components/Hero1'
//IMPORT THE HERO1 FUNCTION TO MAKE THE TRANSCRIBE PAGE LOOK BETTER
import React, { useState, useRef } from "react";
// import axios from "axios";
+ import { AlertCircle } from "lucide-react"
+ import {
+   Alert,
+   AlertDescription,
+   AlertTitle,
+ } from "@/components/ui/alert"
+ import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
const AudioTranscriber: React.FC = () => {
  const [file, setFile] = useState<File | null>(null);
  const [transcription, setTranscription] = useState<string | null>(null);
  const [loading, setLoading] = useState(false);
  const [recording, setRecording] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const audioChunksRef = useRef<Blob[]>([]);

  // Handle file selection
  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
    if (event.target.files && event.target.files.length > 0) {
      setFile(event.target.files[0]);
      console.log("File selected:", event.target.files[0].name);
    }
  };

  // Handle file transcription
  const handleTranscription = async (audioFile: File) => {
    if (!audioFile) {
      alert("No audio file to transcribe!");
      return;
    }
    console.log("Starting transcription for:", audioFile.name);
    const formData = new FormData();
    formData.append("file", audioFile);
-   console.log(audioFile);
    setLoading(true);
    setError(null); // Clear previous errors
    try {
      let response = await fetch("/api/transcribe", {
        method: "POST",
        body: formData,
        headers: {
          "Content-Type": "multipart/form-data",
        }
      })
+     console.log("! response:", response);
      response = await response.json();
-     console.log("Transcription response:", response);
+     console.log("@ response:", response);
      // if (response.data && response.data.transcription) {
      //   setTranscription(response.data.transcription);
      // } else {
      //   setError("Unexpected response format. Check backend API.");
      //   console.error("Invalid response format:", response.data);
      // }
    } catch (error) {
      console.error("Error transcribing audio:", error);
      setError("Failed to transcribe audio. Please try again.");
    } finally {
      setLoading(false);
    }
  };
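A note on the fetch call just above: when the request body is a FormData, the browser has to generate the multipart boundary itself, so hand-setting "Content-Type": "multipart/form-data" usually produces a request the server cannot parse, and with the response-handling block commented out the parsed JSON never reaches setTranscription. The sketch below is one way to wire that up; the /api/transcribe route comes from the diff, but the transcription field name and the transcribe helper are assumptions, not something this commit confirms.

// Minimal sketch, not part of the commit: POST the FormData without a manual
// Content-Type header and surface the parsed result.
const transcribe = async (audioFile: File): Promise<string> => {
  const formData = new FormData();
  formData.append("file", audioFile);
  const res = await fetch("/api/transcribe", {
    method: "POST",
    body: formData, // the browser sets multipart/form-data with its own boundary
  });
  if (!res.ok) {
    throw new Error(`Transcription request failed with status ${res.status}`);
  }
  // Assumed response shape: { transcription: string } -- adjust to the real API.
  const data: { transcription?: string } = await res.json();
  return data.transcription ?? "";
};

(The axios-based variant further down likely gets away with the explicit header because axios in the browser generally discards a hand-set multipart Content-Type for FormData bodies and lets the browser supply the boundary; fetch does not.)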
  // Start recording audio
  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      console.log("Microphone access granted.");
      mediaRecorderRef.current = new MediaRecorder(stream);
      audioChunksRef.current = []; // Reset audio chunks
      mediaRecorderRef.current.ondataavailable = (event) => {
        if (event.data.size > 0) {
          console.log("Audio chunk received:", event.data);
          audioChunksRef.current.push(event.data);
        }
      };
      mediaRecorderRef.current.onstop = async () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" });
        const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" });
        console.log("Recording stopped. Blob created:", audioBlob);
        setFile(audioFile); // Save the recorded file
        setTranscription("Processing transcription for recorded audio...");
        await handleTranscription(audioFile); // Automatically transcribe
      };
      mediaRecorderRef.current.start();
      console.log("Recording started.");
      setRecording(true);
    } catch (error) {
      console.error("Error starting recording:", error);
      setError("Failed to start recording. Please check microphone permissions.");
    }
  };

  // Stop recording audio
  const stopRecording = () => {
    if (mediaRecorderRef.current) {
      console.log("Stopping recording...");
      mediaRecorderRef.current.stop();
      setRecording(false);
    }
  };
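One caveat that applies to both versions of startRecording in this commit: no mainstream browser's MediaRecorder encodes MP3; it typically produces WebM/Opus (Chromium, Firefox) or MP4/AAC (Safari), so wrapping the chunks in a Blob typed "audio/mp3" only relabels the data without transcoding it. Below is a hedged sketch of picking a container the browser actually supports; the candidate list, helper names, and file naming are illustrative, and the backend is assumed to accept whatever container results.

// Sketch only: choose a recordable MIME type instead of pretending the output is MP3.
const pickRecordingMimeType = (): string => {
  const candidates = ["audio/webm;codecs=opus", "audio/webm", "audio/mp4", "audio/ogg"];
  return candidates.find((type) => MediaRecorder.isTypeSupported(type)) ?? "";
};

const startRecordingSketch = async (onFile: (file: File) => void) => {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const mimeType = pickRecordingMimeType();
  const recorder = mimeType ? new MediaRecorder(stream, { mimeType }) : new MediaRecorder(stream);
  const chunks: Blob[] = [];
  recorder.ondataavailable = (event) => {
    if (event.data.size > 0) chunks.push(event.data);
  };
  recorder.onstop = () => {
    // Name the file after the container that was really used (e.g. recording.webm).
    const type = recorder.mimeType || "audio/webm";
    const extension = type.includes("mp4") ? "m4a" : type.split("/")[1].split(";")[0];
    onFile(new File([new Blob(chunks, { type })], `recording.${extension}`, { type }));
  };
  recorder.start();
  return recorder; // the caller invokes recorder.stop() to finish
};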
  return (
-   <div>
-     <h1>
-       <center>
-         Audio Transcription
-       </center>
-     </h1>
-     <div>
-       <h2>Upload or Record Audio</h2>
-       <input type="file" accept="audio/*" onChange={handleFileChange} />
-       <button onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
-         {loading ? "Transcribing..." : "Transcribe"}
-       </button>
-     </div>
-     <div>
-       <h2>Record Audio</h2>
-       {!recording ? (
-         <button onClick={startRecording}>Start Recording</button>
-       ) : (
-         <button onClick={stopRecording} disabled={!recording}>
-           Stop Recording
-         </button>
-       )}
-     </div>
-     <div>
-       <h2>Transcription:</h2>
-       {loading ? (
-         <p>Processing transcription...</p>
-       ) : transcription ? (
-         <p>{transcription}</p>
-       ) : (
-         <p>No transcription available yet.</p>
-       )}
-     </div>
-     {error && (
-       <div style={{ color: "red" }}>
-         <strong>Error:</strong> {error}
-       </div>
-     )}
-   </div>
+   <section className="mx-auto my-auto bg-white dark:bg-neutral-950 h-screen w-1/2">
+     <h1 className="text-4xl font-bold text-center mb-8">
+       <center>
+         Audio Transcription
+       </center>
+     </h1>
+     <br />
+     <div>
+       <h2>Upload or Record Audio</h2>
+       <Input type="file" accept="audio/*" onChange={handleFileChange} />
+       <Button className="my-4" onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
+         {loading ? "Transcribing..." : "Transcribe"}
+       </Button>
+     </div>
+     <div>
+       <h2>Record Audio</h2>
+       {!recording ? (
+         <Button onClick={startRecording}>Start Recording</Button>
+       ) : (
+         <Button onClick={stopRecording} disabled={!recording}>Stop Recording</Button>
+       )}
+     </div>
+     <Card className="mt-4 bg-neutral-200 dark:bg-neutral-800">
+       <CardHeader>
+         <CardTitle>Transcription Result</CardTitle>
+       </CardHeader>
+       <CardContent>
+         <CardContent>
+           {loading
+             ? "Processing transcription..."
+             : transcription
+               ? "Your transcription is ready"
+               : "No transcription available yet"}
+         </CardContent>
+       </CardContent>
+     </Card>
+     {error && (
+       <Alert variant="destructive">
+         <AlertCircle className="h-4 w-4" />
+         <AlertTitle>Error</AlertTitle>
+         <AlertDescription>
+           <strong>Error:</strong> {error}
+         </AlertDescription>
+       </Alert>
+     )}
+   </section>
  );
};

export default AudioTranscriber;
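In the new markup the result Card nests one CardContent directly inside another and only ever shows status copy, so the transcription string itself never reaches the screen (the fuller variant in the next hunk uses a Textarea for this). If displaying the text in this card is the intent, a minimal sketch with a single CardContent could look like this:

// Sketch of a replacement for the result Card: one CardContent that renders
// the transcription once it exists, falling back to the status line above.
<Card className="mt-4 bg-neutral-200 dark:bg-neutral-800">
  <CardHeader>
    <CardTitle>Transcription Result</CardTitle>
  </CardHeader>
  <CardContent>
    {loading
      ? "Processing transcription..."
      : transcription ?? "No transcription available yet"}
  </CardContent>
</Card>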
@@ -182,179 +200,179 @@ const AudioTranscriber: React.FC = () => {
  const audioChunksRef = useRef<Blob[]>([])

  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
    if (event.target.files && event.target.files.length > 0) {
      setFile(event.target.files[0])
      console.log("File selected:", event.target.files[0].name)
    }
  }

  const handleTranscription = async (audioFile: File) => {
    if (!audioFile) {
      setError("No audio file to transcribe!")
      return
    }
    console.log("Starting transcription for:", audioFile.name)
    const formData = new FormData()
    formData.append("file", audioFile)
    setLoading(true)
    setError(null)
    try {
      const response = await axios.post("http://localhost:8000/transcribe", formData, {
        headers: {
          "Content-Type": "multipart/form-data",
        },
      })
      console.log("Transcription response:", response.data)
      if (response.data && response.data.transcription) {
        setTranscription(response.data.transcription)
      } else {
        setError("Unexpected response format. Check backend API.")
        console.error("Invalid response format:", response.data)
      }
    } catch (error) {
      console.error("Error transcribing audio:", error)
      setError("Failed to transcribe audio. Please try again.")
    } finally {
      setLoading(false)
    }
  }

  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      console.log("Microphone access granted.")
      mediaRecorderRef.current = new MediaRecorder(stream)
      audioChunksRef.current = []
      mediaRecorderRef.current.ondataavailable = (event) => {
        if (event.data.size > 0) {
          console.log("Audio chunk received:", event.data)
          audioChunksRef.current.push(event.data)
        }
      }
      mediaRecorderRef.current.onstop = async () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" })
        const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" })
        console.log("Recording stopped. Blob created:", audioBlob)
        setFile(audioFile)
        setTranscription("Processing transcription for recorded audio...")
        await handleTranscription(audioFile)
      }
      mediaRecorderRef.current.start()
      console.log("Recording started.")
      setRecording(true)
    } catch (error) {
      console.error("Error starting recording:", error)
      setError("Failed to start recording. Please check microphone permissions.")
    }
  }

  const stopRecording = () => {
    if (mediaRecorderRef.current) {
      console.log("Stopping recording...")
      mediaRecorderRef.current.stop()
      setRecording(false)
    }
  }

  return (
    <div className="container mx-auto px-4 py-8">
      <h1 className="text-4xl font-bold text-center mb-8">Audio Transcriber</h1>
      <Card className="mb-8">
        <CardHeader>
          <CardTitle>Transcribe Audio</CardTitle>
          <CardDescription>Upload an audio file or record directly to transcribe</CardDescription>
        </CardHeader>
        <CardContent>
          <Tabs defaultValue="upload" className="w-full">
            <TabsList className="grid w-full grid-cols-2">
              <TabsTrigger value="upload">Upload Audio</TabsTrigger>
              <TabsTrigger value="record">Record Audio</TabsTrigger>
            </TabsList>
            <TabsContent value="upload">
              <div className="space-y-4">
                <Label htmlFor="audio-file">Select an audio file</Label>
                <Input id="audio-file" type="file" accept="audio/*" onChange={handleFileChange} />
                <Button
                  onClick={() => file && handleTranscription(file)}
                  disabled={loading || !file}
                  className="w-full"
                >
                  {loading ? "Transcribing..." : "Transcribe"}
                  <Upload className="ml-2 h-4 w-4" />
                </Button>
              </div>
            </TabsContent>
            <TabsContent value="record">
              <div className="space-y-4">
                <Label>Record audio from your microphone</Label>
                {!recording ? (
                  <Button onClick={startRecording} className="w-full">
                    Start Recording
                    <Mic className="ml-2 h-4 w-4" />
                  </Button>
                ) : (
                  <Button onClick={stopRecording} variant="destructive" className="w-full">
                    Stop Recording
                    <StopCircle className="ml-2 h-4 w-4" />
                  </Button>
                )}
              </div>
            </TabsContent>
          </Tabs>
        </CardContent>
      </Card>
      <Card>
        <CardHeader>
          <CardTitle>Transcription Result</CardTitle>
          <CardDescription>
            {loading
              ? "Processing transcription..."
              : transcription
                ? "Your transcription is ready"
                : "No transcription available yet"}
          </CardDescription>
        </CardHeader>
        <CardContent>
          <Textarea
            value={transcription || ""}
            readOnly
            placeholder="Transcription will appear here"
            className="min-h-[200px]"
          />
        </CardContent>
        {file && (
          <CardFooter className="flex justify-between items-center">
            <div className="flex items-center">
              <FileAudio className="mr-2 h-4 w-4" />
              <span className="text-sm text-muted-foreground">{file.name}</span>
            </div>
            <Button variant="outline" onClick={() => setFile(null)}>
              Clear
            </Button>
          </CardFooter>
        )}
      </Card>
      {error && (
        <Alert variant="destructive" className="mt-4">
          <AlertTitle>Error</AlertTitle>
          <AlertDescription>{error}</AlertDescription>
        </Alert>
      )}
    </div>
  )
}
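For completeness, the default export drops straight into a Next.js App Router route. A minimal usage sketch follows; the file path and import alias are illustrative, not taken from this repository.

// Hypothetical app/transcribe/page.tsx -- path and "@/components/AudioTranscriber"
// import are assumptions; the component itself is the default export shown above.
import AudioTranscriber from "@/components/AudioTranscriber";

export default function TranscribePage() {
  return <AudioTranscriber />;
}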