UI Update
@@ -1,156 +1,174 @@
"use client";

+import { Button } from "@/components/ui/button";
+import { Input } from "@/components/ui/input";

// import Hero1 from '@/components/Hero1'
// IMPORT THE HERO1 FUNCTION TO MAKE THE TRANSCRIBE PAGE LOOK BETTER
import React, { useState, useRef } from "react";
// import axios from "axios";

+import { AlertCircle } from "lucide-react"
+import {
+  Alert,
+  AlertDescription,
+  AlertTitle,
+} from "@/components/ui/alert"
+import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
const AudioTranscriber: React.FC = () => {
  const [file, setFile] = useState<File | null>(null);
  const [transcription, setTranscription] = useState<string | null>(null);
  const [loading, setLoading] = useState(false);
  const [recording, setRecording] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const audioChunksRef = useRef<Blob[]>([]);
  // Handle file selection
  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
    if (event.target.files && event.target.files.length > 0) {
      setFile(event.target.files[0]);
      console.log("File selected:", event.target.files[0].name);
    }
  };
  // Handle file transcription
  const handleTranscription = async (audioFile: File) => {
    if (!audioFile) {
      alert("No audio file to transcribe!");
      return;
    }

    console.log("Starting transcription for:", audioFile.name);

    const formData = new FormData();
    formData.append("file", audioFile);
-   console.log(audioFile);
    setLoading(true);
    setError(null); // Clear previous errors
    try {
      let response = await fetch("/api/transcribe", {
        method: "POST",
        body: formData,
        headers: {
          "Content-Type": "multipart/form-data",
        }
      })

-     response = await response.json();
-     console.log("Transcription response:", response);
+     console.log("! response:", response);
+     response = await response.json();
+     console.log("@ response:", response);

      // if (response.data && response.data.transcription) {
      //   setTranscription(response.data.transcription);
      // } else {
      //   setError("Unexpected response format. Check backend API.");
      //   console.error("Invalid response format:", response.data);
      // }
    } catch (error) {
      console.error("Error transcribing audio:", error);
      setError("Failed to transcribe audio. Please try again.");
    } finally {
      setLoading(false);
    }
  };
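
Two things stand out in this handler: the manual "Content-Type": "multipart/form-data" header (when a FormData body is passed to fetch, the browser sets that header itself, including the required boundary, and overriding it without a boundary usually breaks the upload), and the fact that the parsed body overwrites the response variable but is never handed to setTranscription because the handling block is commented out. A minimal sketch of the upload call with those points addressed, assuming the /api/transcribe route returns JSON shaped like { transcription: string } (that shape is an assumption, not confirmed by this commit):

```tsx
// Hypothetical response shape; adjust to whatever /api/transcribe actually returns.
interface TranscribeResponse {
  transcription?: string;
}

async function uploadForTranscription(audioFile: File): Promise<string> {
  const formData = new FormData();
  formData.append("file", audioFile);

  // No explicit Content-Type: the browser adds multipart/form-data with the boundary itself.
  const response = await fetch("/api/transcribe", {
    method: "POST",
    body: formData,
  });
  if (!response.ok) {
    throw new Error(`Transcription request failed with status ${response.status}`);
  }

  // Parse the body once and keep the Response and the parsed data separate.
  const data: TranscribeResponse = await response.json();
  if (!data.transcription) {
    throw new Error("Unexpected response format. Check backend API.");
  }
  return data.transcription;
}
```

The component could then call setTranscription(await uploadForTranscription(file)) inside the existing try/catch.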

  // Start recording audio
  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      console.log("Microphone access granted.");

      mediaRecorderRef.current = new MediaRecorder(stream);
      audioChunksRef.current = []; // Reset audio chunks

      mediaRecorderRef.current.ondataavailable = (event) => {
        if (event.data.size > 0) {
          console.log("Audio chunk received:", event.data);
          audioChunksRef.current.push(event.data);
        }
      };

      mediaRecorderRef.current.onstop = async () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" });
        const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" });

        console.log("Recording stopped. Blob created:", audioBlob);

        setFile(audioFile); // Save the recorded file
        setTranscription("Processing transcription for recorded audio...");
        await handleTranscription(audioFile); // Automatically transcribe
      };

      mediaRecorderRef.current.start();
      console.log("Recording started.");
      setRecording(true);
    } catch (error) {
      console.error("Error starting recording:", error);
      setError("Failed to start recording. Please check microphone permissions.");
    }
  };
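
One caveat in the recording path: mainstream browsers do not record MP3 with MediaRecorder, so typing the blob as audio/mp3 and naming it recording.mp3 only relabels whatever container was actually produced (typically WebM or Ogg Opus). A small sketch that asks the browser for a container it can actually encode, assuming the backend accepts those formats:

```tsx
// Candidate containers are an assumption; extend the list to whatever the backend accepts.
const CANDIDATE_TYPES = ["audio/webm;codecs=opus", "audio/webm", "audio/ogg;codecs=opus"];

function pickRecordingMimeType(): string | undefined {
  // MediaRecorder.isTypeSupported reports what the current browser can encode.
  return CANDIDATE_TYPES.find((type) => MediaRecorder.isTypeSupported(type));
}

async function createRecorder(): Promise<MediaRecorder> {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const mimeType = pickRecordingMimeType();
  // Fall back to the browser default when none of the candidates is supported.
  return new MediaRecorder(stream, mimeType ? { mimeType } : undefined);
}
```

The recorder's actual mimeType could then be reused when building the Blob and File, for example new Blob(chunks, { type: recorder.mimeType }).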

  // Stop recording audio
  const stopRecording = () => {
    if (mediaRecorderRef.current) {
      console.log("Stopping recording...");
      mediaRecorderRef.current.stop();
      setRecording(false);
    }
  };

  return (
-   <div>
-     <h1>
+   <section className="mx-auto my-auto bg-white dark:bg-neutral-950 h-screen w-1/2">
+     <h1 className="text-4xl font-bold text-center mb-8">
        <center>
          Audio Transcription
        </center>
      </h1>
+     <br />
      <div>
        <h2>Upload or Record Audio</h2>
-       <input type="file" accept="audio/*" onChange={handleFileChange} />
-       <button onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
+       <Input type="file" accept="audio/*" onChange={handleFileChange} />
+       <Button className="my-4" onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
          {loading ? "Transcribing..." : "Transcribe"}
-       </button>
+       </Button>
      </div>

      <div>
        <h2>Record Audio</h2>
        {!recording ? (
-         <button onClick={startRecording}>Start Recording</button>
+         <Button onClick={startRecording}>Start Recording</Button>
        ) : (
-         <button onClick={stopRecording} disabled={!recording}>
-           Stop Recording
-         </button>
+         <Button onClick={stopRecording} disabled={!recording}>Stop Recording</Button>
        )}
      </div>

-     <div>
-       <h2>Transcription:</h2>
-       {loading ? (
-         <p>Processing transcription...</p>
-       ) : transcription ? (
-         <p>{transcription}</p>
-       ) : (
-         <p>No transcription available yet.</p>
-       )}
-     </div>
+     <Card className="mt-4 bg-neutral-200 dark:bg-neutral-800">
+       <CardHeader>
+         <CardTitle>Transcription Result</CardTitle>
+       </CardHeader>
+       <CardContent>
+         <CardContent>
+           {loading
+             ? "Processing transcription..."
+             : transcription
+               ? "Your transcription is ready"
+               : "No transcription available yet"}
+         </CardContent>
+       </CardContent>
+     </Card>

      {error && (
-       <div style={{ color: "red" }}>
-         <strong>Error:</strong> {error}
-       </div>
+       <Alert variant="destructive">
+         <AlertCircle className="h-4 w-4" />
+         <AlertTitle>Error</AlertTitle>
+         <AlertDescription>
+           <strong>Error:</strong> {error}
+         </AlertDescription>
+       </Alert>
      )}
-   </div>
+   </section>
  );
};

export default AudioTranscriber;
@@ -182,179 +200,179 @@ const AudioTranscriber: React.FC = () => {
  const audioChunksRef = useRef<Blob[]>([])

  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
    if (event.target.files && event.target.files.length > 0) {
      setFile(event.target.files[0])
      console.log("File selected:", event.target.files[0].name)
    }
  }

  const handleTranscription = async (audioFile: File) => {
    if (!audioFile) {
      setError("No audio file to transcribe!")
      return
    }

    console.log("Starting transcription for:", audioFile.name)

    const formData = new FormData()
    formData.append("file", audioFile)

    setLoading(true)
    setError(null)
    try {
      const response = await axios.post("http://localhost:8000/transcribe", formData, {
        headers: {
          "Content-Type": "multipart/form-data",
        },
      })
console.log("Transcription response:", response.data)
|
||||
console.log("Transcription response:", response.data)
|
||||
|
||||
if (response.data && response.data.transcription) {
|
||||
setTranscription(response.data.transcription)
|
||||
} else {
|
||||
setError("Unexpected response format. Check backend API.")
|
||||
console.error("Invalid response format:", response.data)
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Error transcribing audio:", error)
|
||||
setError("Failed to transcribe audio. Please try again.")
|
||||
} finally {
|
||||
setLoading(false)
|
||||
}
|
||||
if (response.data && response.data.transcription) {
|
||||
setTranscription(response.data.transcription)
|
||||
} else {
|
||||
setError("Unexpected response format. Check backend API.")
|
||||
console.error("Invalid response format:", response.data)
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Error transcribing audio:", error)
|
||||
setError("Failed to transcribe audio. Please try again.")
|
||||
} finally {
|
||||
setLoading(false)
|
||||
}
|
||||
}
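
This variant posts to http://localhost:8000/transcribe through axios, but the only axios import visible in this diff (near the top of the first hunk) is commented out, so it would need to be restored for this hunk to compile. As with fetch, the multipart header can usually be left to the library, which fills in the boundary itself. A minimal sketch under those assumptions, with a hypothetical response shape:

```tsx
import axios from "axios";

// Hypothetical response shape; adjust to the actual backend contract.
interface TranscribeResponse {
  transcription?: string;
}

async function transcribeWithAxios(audioFile: File): Promise<string> {
  const formData = new FormData();
  formData.append("file", audioFile);

  // Passing FormData without an explicit Content-Type lets the boundary be set automatically.
  const { data } = await axios.post<TranscribeResponse>(
    "http://localhost:8000/transcribe",
    formData,
  );

  if (!data.transcription) {
    throw new Error("Unexpected response format. Check backend API.");
  }
  return data.transcription;
}
```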

  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      console.log("Microphone access granted.")

      mediaRecorderRef.current = new MediaRecorder(stream)
      audioChunksRef.current = []

      mediaRecorderRef.current.ondataavailable = (event) => {
        if (event.data.size > 0) {
          console.log("Audio chunk received:", event.data)
          audioChunksRef.current.push(event.data)
        }
      }

      mediaRecorderRef.current.onstop = async () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" })
        const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" })

        console.log("Recording stopped. Blob created:", audioBlob)

        setFile(audioFile)
        setTranscription("Processing transcription for recorded audio...")
        await handleTranscription(audioFile)
      }

      mediaRecorderRef.current.start()
      console.log("Recording started.")
      setRecording(true)
    } catch (error) {
      console.error("Error starting recording:", error)
      setError("Failed to start recording. Please check microphone permissions.")
    }
  }

  const stopRecording = () => {
    if (mediaRecorderRef.current) {
      console.log("Stopping recording...")
      mediaRecorderRef.current.stop()
      setRecording(false)
    }
  }
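
stopRecording stops the recorder but never releases the underlying MediaStream, so the browser's recording indicator stays on until the page is closed. A small sketch of stopping the tracks as well; keeping the stream somewhere reachable (for example in another ref next to mediaRecorderRef) is assumed and not shown:

```tsx
// Stop the recorder and release the microphone so the browser drops its recording indicator.
function stopRecordingAndReleaseMic(
  recorder: MediaRecorder | null,
  stream: MediaStream | null,
): void {
  if (recorder && recorder.state !== "inactive") {
    recorder.stop();
  }
  // Stopping every track ends microphone capture entirely.
  stream?.getTracks().forEach((track) => track.stop());
}
```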

  return (
    <div className="container mx-auto px-4 py-8">
      <h1 className="text-4xl font-bold text-center mb-8">Audio Transcriber</h1>

      <Card className="mb-8">
        <CardHeader>
          <CardTitle>Transcribe Audio</CardTitle>
          <CardDescription>Upload an audio file or record directly to transcribe</CardDescription>
        </CardHeader>
        <CardContent>
          <Tabs defaultValue="upload" className="w-full">
            <TabsList className="grid w-full grid-cols-2">
              <TabsTrigger value="upload">Upload Audio</TabsTrigger>
              <TabsTrigger value="record">Record Audio</TabsTrigger>
            </TabsList>
            <TabsContent value="upload">
              <div className="space-y-4">
                <Label htmlFor="audio-file">Select an audio file</Label>
                <Input id="audio-file" type="file" accept="audio/*" onChange={handleFileChange} />
                <Button
                  onClick={() => file && handleTranscription(file)}
                  disabled={loading || !file}
                  className="w-full"
                >
                  {loading ? "Transcribing..." : "Transcribe"}
                  <Upload className="ml-2 h-4 w-4" />
                </Button>
              </div>
            </TabsContent>
            <TabsContent value="record">
              <div className="space-y-4">
                <Label>Record audio from your microphone</Label>
                {!recording ? (
                  <Button onClick={startRecording} className="w-full">
                    Start Recording
                    <Mic className="ml-2 h-4 w-4" />
                  </Button>
                ) : (
                  <Button onClick={stopRecording} variant="destructive" className="w-full">
                    Stop Recording
                    <StopCircle className="ml-2 h-4 w-4" />
                  </Button>
                )}
              </div>
            </TabsContent>
          </Tabs>
        </CardContent>
      </Card>

      <Card>
        <CardHeader>
          <CardTitle>Transcription Result</CardTitle>
          <CardDescription>
            {loading
              ? "Processing transcription..."
              : transcription
                ? "Your transcription is ready"
                : "No transcription available yet"}
          </CardDescription>
        </CardHeader>
        <CardContent>
          <Textarea
            value={transcription || ""}
            readOnly
            placeholder="Transcription will appear here"
            className="min-h-[200px]"
          />
        </CardContent>
        {file && (
          <CardFooter className="flex justify-between items-center">
            <div className="flex items-center">
              <FileAudio className="mr-2 h-4 w-4" />
              <span className="text-sm text-muted-foreground">{file.name}</span>
            </div>
            <Button variant="outline" onClick={() => setFile(null)}>
              Clear
            </Button>
          </CardFooter>
        )}
      </Card>

      {error && (
        <Alert variant="destructive" className="mt-4">
          <AlertTitle>Error</AlertTitle>
          <AlertDescription>{error}</AlertDescription>
        </Alert>
      )}
    </div>
  )
}
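
The JSX in this hunk references CardDescription, CardFooter, Tabs, Label, Textarea and several lucide-react icons that do not appear in the import block shown in the first hunk. Under the usual shadcn/ui and lucide-react paths (an assumption about this project's setup), the missing imports would look roughly like this:

```tsx
// Assumed import paths following shadcn/ui conventions; verify against this project's components/ui folder.
import { CardDescription, CardFooter } from "@/components/ui/card";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import { Label } from "@/components/ui/label";
import { Textarea } from "@/components/ui/textarea";
import { FileAudio, Mic, StopCircle, Upload } from "lucide-react";
```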