transcript

This commit is contained in:
suraj.shenoy.b@gmail.com
2025-01-25 18:40:30 -06:00
parent 9f8fbccdfe
commit fe8e1faed8
6 changed files with 177 additions and 93 deletions

View File

@@ -16,59 +16,48 @@ const AudioTranscriber: React.FC = () => {
// Capture the user's file selection from the <input type="file"> element.
const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
  const selected = event.target.files;
  if (selected && selected.length > 0) {
    const chosen = selected[0];
    setFile(chosen);
    console.log("File selected:", chosen.name);
  }
};
// Send the selected audio file to the backend for transcription
// Upload `audioFile` to the backend transcription endpoint and store the
// resulting text (or an error message) in component state.
// Fix: the merged diff left duplicated statements in this function
// (setError(null) twice, two conflicting setError messages in the else
// branch, and two console.error calls in the catch); this keeps exactly
// one of each, using the newer wording.
const handleTranscription = async (audioFile: File) => {
  if (!audioFile) {
    alert("No audio file to transcribe!");
    return;
  }
  console.log("Starting transcription for:", audioFile.name);

  const formData = new FormData();
  formData.append("file", audioFile);

  setLoading(true);
  setError(null); // clear any error left over from a previous attempt
  try {
    const response = await axios.post("http://localhost:8000/transcribe", formData, {
      headers: {
        "Content-Type": "multipart/form-data",
      },
    });
    console.log("Transcription response:", response.data);
    if (response.data && response.data.transcription) {
      setTranscription(response.data.transcription);
    } else {
      // Backend answered but without the expected `transcription` field.
      console.error("Invalid response format:", response.data);
      setError("No transcription available.");
    }
  } catch (error) {
    console.error("Error during transcription:", error);
    setError("Failed to transcribe audio. Please try again.");
  } finally {
    // Always drop the loading flag, whether the request succeeded or not.
    setLoading(false);
  }
};
// Start recording audio
const startRecording = async () => {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
console.log("Microphone access granted.");
mediaRecorderRef.current = new MediaRecorder(stream);
audioChunksRef.current = []; // Reset audio chunks
audioChunksRef.current = [];
mediaRecorderRef.current.ondataavailable = (event) => {
if (event.data.size > 0) {
console.log("Audio chunk received:", event.data);
audioChunksRef.current.push(event.data);
}
};
@@ -77,15 +66,11 @@ const AudioTranscriber: React.FC = () => {
const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" });
const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" });
console.log("Recording stopped. Blob created:", audioBlob);
setFile(audioFile); // Save the recorded file
setTranscription("Processing transcription for recorded audio...");
await handleTranscription(audioFile); // Automatically transcribe
};
mediaRecorderRef.current.start();
console.log("Recording started.");
setRecording(true);
} catch (error) {
console.error("Error starting recording:", error);
@@ -96,7 +81,6 @@ const AudioTranscriber: React.FC = () => {
// Stop recording audio
const stopRecording = () => {
if (mediaRecorderRef.current) {
console.log("Stopping recording...");
mediaRecorderRef.current.stop();
setRecording(false);
}
@@ -104,17 +88,19 @@ const AudioTranscriber: React.FC = () => {
return (
<div>
<h1>Audio Transcription</h1>
<h1>Audio Transcription Tool</h1>
<div>
<h2>Upload or Record Audio</h2>
{/* File Upload */}
<input type="file" accept="audio/*" onChange={handleFileChange} />
<button onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
<button
onClick={() => file && handleTranscription(file)}
disabled={loading || !file}
>
{loading ? "Transcribing..." : "Transcribe"}
</button>
</div>
{/* Recording Controls */}
<div>
<h2>Record Audio</h2>
{!recording ? (
@@ -126,19 +112,13 @@ const AudioTranscriber: React.FC = () => {
)}
</div>
{/* Transcription Result */}
<div>
<h2>Transcription:</h2>
{loading ? (
<p>Processing transcription...</p>
) : transcription ? (
{transcription && (
<div>
<h2>Transcription:</h2>
<p>{transcription}</p>
) : (
<p>No transcription available yet.</p>
)}
</div>
</div>
)}
{/* Error Message */}
{error && (
<div style={{ color: "red" }}>
<strong>Error:</strong> {error}