diff --git a/README.md b/README.md
index e215bc4..ae40525 100644
--- a/README.md
+++ b/README.md
@@ -1,36 +1,191 @@
-This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app).
+
+[![Contributors][contributors-shield]][contributors-url]
+[![Stargazers][stars-shield]][stars-url]
+[![Issues][issues-shield]][issues-url]
+[![Apache License][license-shield]][license-url]
+
+# Dashboard
+
-
diff --git a/src/app/(panels)/suite/doctor/account/PatientForm.tsx b/src/app/(panels)/suite/doctor/patient/PatientForm.tsx
similarity index 100%
rename from src/app/(panels)/suite/doctor/account/PatientForm.tsx
rename to src/app/(panels)/suite/doctor/patient/PatientForm.tsx
diff --git a/src/app/(panels)/suite/doctor/patient/page.jsx b/src/app/(panels)/suite/doctor/patient/page.jsx
new file mode 100644
index 0000000..f3f2144
--- /dev/null
+++ b/src/app/(panels)/suite/doctor/patient/page.jsx
@@ -0,0 +1,76 @@
+
+"use client";
+
+import { useEffect, useState } from "react";
+import { useRouter } from "next/navigation";
+import axios from "axios";
+import { useUser } from "@clerk/nextjs";
+import { Card, CardContent } from "@/components/ui/card";
+import { Button } from "@/components/ui/button";
+import { ChevronDown } from "lucide-react";
+import { Collapsible, CollapsibleContent, CollapsibleTrigger } from "@/components/ui/collapsible";
+import PersonForm from "@/components/PersonForm";
+
+
+export default function Dashboard() {
+
+ const router = useRouter();
+ const { user } = useUser();
+ const [userData, setUserData] = useState(null);
+ const [patients, setPatients] = useState([]);
+
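+  // Load the signed-in user's record (including their role) once Clerk resolves the user.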
+ useEffect(() => {
+ if (user) {
+ axios.get(`/api/user?userId=${user.id}`).then(response => {
+ setUserData(response.data);
+ });
+ }
+ }, [user]);
+
+  // Redirect anyone who is not a caregiver to the patient dashboard once the role is known.
+  useEffect(() => {
+    if (userData && userData.role !== "caregiver") {
+      router.push("/suite/patient/dashboard");
+    }
+  }, [userData, router]);
+
+  return (
+    <div className="p-6">
+      <h1 className="text-2xl font-bold">Patients</h1>
+
+      {userData?.role === "caregiver" && (
+        <div className="mt-4 space-y-4">
+          {patients.map((patient, index) => (
+            <Card key={index}>
+              <Collapsible>
+                <CardContent className="p-4">
+                  <div className="flex items-center justify-between">
+                    <div>
+                      <p className="font-medium">{patient.name}</p>
+                      <p className="text-sm text-gray-500">{patient.role}</p>
+                    </div>
+                    <CollapsibleTrigger asChild>
+                      <Button variant="ghost" size="sm">
+                        Toggle
+                        <ChevronDown className="ml-2 h-4 w-4" />
+                      </Button>
+                    </CollapsibleTrigger>
+                  </div>
+                  <CollapsibleContent>
+                    <PersonForm />
+                  </CollapsibleContent>
+                </CardContent>
+              </Collapsible>
+            </Card>
+          ))}
+        </div>
+      )}
+    </div>
+  );
+}
\ No newline at end of file
diff --git a/src/app/(web)/transcribe/page.tsx b/src/app/(web)/transcribe/page.tsx
index 7358505..d460e86 100644
--- a/src/app/(web)/transcribe/page.tsx
+++ b/src/app/(web)/transcribe/page.tsx
@@ -1,156 +1,174 @@
"use client";
+import { Button } from "@/components/ui/button";
+import { Input } from "@/components/ui/input";
//import Hero1 from '@/components/Hero1'
//IMPORT THE HERO1 FUNCTION TO MAKE THE TRANSCRIBE PAGE LOOK BETTER
import React, { useState, useRef } from "react";
// import axios from "axios";
+import { AlertCircle } from "lucide-react"
+
+import {
+ Alert,
+ AlertDescription,
+ AlertTitle,
+} from "@/components/ui/alert"
+import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
+
const AudioTranscriber: React.FC = () => {
-  const [file, setFile] = useState<File | null>(null);
-  const [transcription, setTranscription] = useState<string | null>(null);
-  const [loading, setLoading] = useState(false);
-  const [recording, setRecording] = useState(false);
-  const [error, setError] = useState<string | null>(null);
-  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
-  const audioChunksRef = useRef<Blob[]>([]);
+  const [file, setFile] = useState<File | null>(null);
+  const [transcription, setTranscription] = useState<string | null>(null);
+  const [loading, setLoading] = useState(false);
+  const [recording, setRecording] = useState(false);
+  const [error, setError] = useState<string | null>(null);
+  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
+  const audioChunksRef = useRef<Blob[]>([]);
-  // Handle file selection
-  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
-    if (event.target.files && event.target.files.length > 0) {
-      setFile(event.target.files[0]);
-      console.log("File selected:", event.target.files[0].name);
-    }
-  };
+  // Handle file selection
+  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+    if (event.target.files && event.target.files.length > 0) {
+      setFile(event.target.files[0]);
+      console.log("File selected:", event.target.files[0].name);
+    }
+  };
- // Handle file transcription
- const handleTranscription = async (audioFile: File) => {
- if (!audioFile) {
- alert("No audio file to transcribe!");
- return;
- }
+ // Handle file transcription
+ const handleTranscription = async (audioFile: File) => {
+ if (!audioFile) {
+ alert("No audio file to transcribe!");
+ return;
+ }
- console.log("Starting transcription for:", audioFile.name);
+ console.log("Starting transcription for:", audioFile.name);
- const formData = new FormData();
- formData.append("file", audioFile);
- console.log(audioFile);
- setLoading(true);
- setError(null); // Clear previous errors
- try {
- let response = await fetch("/api/transcribe", {
- method: "POST",
- body: formData,
- headers: {
- "Content-Type": "multipart/form-data",
- }
- })
+ const formData = new FormData();
+ formData.append("file", audioFile);
+ setLoading(true);
+ setError(null); // Clear previous errors
+ try {
+      // Do not set Content-Type manually: the browser adds the multipart boundary for a FormData body.
+      const response = await fetch("/api/transcribe", {
+        method: "POST",
+        body: formData,
+      });
- response = await response.json();
- console.log("Transcription response:", response);
+      const data = await response.json();
+      console.log("Transcription response:", data);
- // if (response.data && response.data.transcription) {
- // setTranscription(response.data.transcription);
- // } else {
- // setError("Unexpected response format. Check backend API.");
- // console.error("Invalid response format:", response.data);
- // }
- } catch (error) {
- console.error("Error transcribing audio:", error);
- setError("Failed to transcribe audio. Please try again.");
- } finally {
- setLoading(false);
- }
- };
+      if (data && data.transcription) {
+        setTranscription(data.transcription);
+      } else {
+        setError("Unexpected response format. Check backend API.");
+        console.error("Invalid response format:", data);
+      }
+ } catch (error) {
+ console.error("Error transcribing audio:", error);
+ setError("Failed to transcribe audio. Please try again.");
+ } finally {
+ setLoading(false);
+ }
+ };
- // Start recording audio
- const startRecording = async () => {
- try {
- const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
- console.log("Microphone access granted.");
+ // Start recording audio
+ const startRecording = async () => {
+ try {
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+ console.log("Microphone access granted.");
- mediaRecorderRef.current = new MediaRecorder(stream);
- audioChunksRef.current = []; // Reset audio chunks
+ mediaRecorderRef.current = new MediaRecorder(stream);
+ audioChunksRef.current = []; // Reset audio chunks
- mediaRecorderRef.current.ondataavailable = (event) => {
- if (event.data.size > 0) {
- console.log("Audio chunk received:", event.data);
- audioChunksRef.current.push(event.data);
- }
- };
+ mediaRecorderRef.current.ondataavailable = (event) => {
+ if (event.data.size > 0) {
+ console.log("Audio chunk received:", event.data);
+ audioChunksRef.current.push(event.data);
+ }
+ };
- mediaRecorderRef.current.onstop = async () => {
- const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" });
- const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" });
+ mediaRecorderRef.current.onstop = async () => {
+ const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" });
+ const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" });
- console.log("Recording stopped. Blob created:", audioBlob);
+ console.log("Recording stopped. Blob created:", audioBlob);
- setFile(audioFile); // Save the recorded file
- setTranscription("Processing transcription for recorded audio...");
- await handleTranscription(audioFile); // Automatically transcribe
- };
+ setFile(audioFile); // Save the recorded file
+ setTranscription("Processing transcription for recorded audio...");
+ await handleTranscription(audioFile); // Automatically transcribe
+ };
- mediaRecorderRef.current.start();
- console.log("Recording started.");
- setRecording(true);
- } catch (error) {
- console.error("Error starting recording:", error);
- setError("Failed to start recording. Please check microphone permissions.");
- }
- };
+ mediaRecorderRef.current.start();
+ console.log("Recording started.");
+ setRecording(true);
+ } catch (error) {
+ console.error("Error starting recording:", error);
+ setError("Failed to start recording. Please check microphone permissions.");
+ }
+ };
- // Stop recording audio
- const stopRecording = () => {
- if (mediaRecorderRef.current) {
- console.log("Stopping recording...");
- mediaRecorderRef.current.stop();
- setRecording(false);
- }
- };
+ // Stop recording audio
+ const stopRecording = () => {
+ if (mediaRecorderRef.current) {
+ console.log("Stopping recording...");
+ mediaRecorderRef.current.stop();
+ setRecording(false);
+ }
+ };
-  return (
-    <div>
-      <h1>Audio Transcription</h1>
-
-      <h2>Upload or Record Audio</h2>
-      <input type="file" accept="audio/*" onChange={handleFileChange} />
-      <button onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
-        {loading ? "Transcribing..." : "Transcribe"}
-      </button>
-
+  return (
+    <div className="mx-auto max-w-2xl p-6 space-y-6">
+      <h1 className="text-2xl font-bold">Audio Transcription</h1>
+
+      <Card>
+        <CardHeader>
+          <CardTitle>Upload or Record Audio</CardTitle>
+        </CardHeader>
+        <CardContent className="space-y-4">
-      <h2>Record Audio</h2>
-      {!recording ? (
-        <button onClick={startRecording}>Start Recording</button>
-      ) : (
-        <button onClick={stopRecording}>Stop Recording</button>
-      )}
-
+          <Input type="file" accept="audio/*" onChange={handleFileChange} />
+          <Button onClick={() => file && handleTranscription(file)} disabled={loading || !file}>
+            {loading ? "Transcribing..." : "Transcribe"}
+          </Button>
+
-      <h2>Transcription:</h2>
-      {loading ? (
-        <p>Processing transcription...</p>
-      ) : transcription ? (
-        <p>{transcription}</p>
-      ) : (
-        <p>No transcription available yet.</p>
-      )}
-
+          <h3 className="font-medium">Record Audio</h3>
+          {!recording ? (
+            <Button onClick={startRecording}>Start Recording</Button>
+          ) : (
+            <Button variant="destructive" onClick={stopRecording}>Stop Recording</Button>
+          )}
+        </CardContent>
+      </Card>
+
-      {error && (
-        <p>Error: {error}</p>
-      )}
-    </div>
-  );
+      <Card>
+        <CardHeader>
+          <CardTitle>Transcription Result</CardTitle>
+        </CardHeader>
+        <CardContent>
+          <p>
+            {loading
+              ? "Processing transcription..."
+              : transcription
+                ? "Your transcription is ready"
+                : "No transcription available yet"}
+          </p>
+          {transcription && <p className="mt-2 whitespace-pre-wrap">{transcription}</p>}
+        </CardContent>
+      </Card>
+
+      {error && (
+        <Alert variant="destructive">
+          <AlertCircle className="h-4 w-4" />
+          <AlertTitle>Error</AlertTitle>
+          <AlertDescription>Error: {error}</AlertDescription>
+        </Alert>
+      )}
+    </div>
+  );
};
export default AudioTranscriber;
@@ -182,179 +200,179 @@ const AudioTranscriber: React.FC = () => {
   const audioChunksRef = useRef<Blob[]>([])
   const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
- if (event.target.files && event.target.files.length > 0) {
- setFile(event.target.files[0])
- console.log("File selected:", event.target.files[0].name)
- }
+ if (event.target.files && event.target.files.length > 0) {
+ setFile(event.target.files[0])
+ console.log("File selected:", event.target.files[0].name)
+ }
}
const handleTranscription = async (audioFile: File) => {
- if (!audioFile) {
- setError("No audio file to transcribe!")
- return
- }
+ if (!audioFile) {
+ setError("No audio file to transcribe!")
+ return
+ }
- console.log("Starting transcription for:", audioFile.name)
+ console.log("Starting transcription for:", audioFile.name)
- const formData = new FormData()
- formData.append("file", audioFile)
+ const formData = new FormData()
+ formData.append("file", audioFile)
- setLoading(true)
- setError(null)
- try {
- const response = await axios.post("http://localhost:8000/transcribe", formData, {
- headers: {
- "Content-Type": "multipart/form-data",
- },
- })
+ setLoading(true)
+ setError(null)
+ try {
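+      // Send the uploaded or recorded audio to the transcription backend on localhost:8000.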
+ const response = await axios.post("http://localhost:8000/transcribe", formData, {
+ headers: {
+ "Content-Type": "multipart/form-data",
+ },
+ })
- console.log("Transcription response:", response.data)
+ console.log("Transcription response:", response.data)
- if (response.data && response.data.transcription) {
- setTranscription(response.data.transcription)
- } else {
- setError("Unexpected response format. Check backend API.")
- console.error("Invalid response format:", response.data)
- }
- } catch (error) {
- console.error("Error transcribing audio:", error)
- setError("Failed to transcribe audio. Please try again.")
- } finally {
- setLoading(false)
- }
+ if (response.data && response.data.transcription) {
+ setTranscription(response.data.transcription)
+ } else {
+ setError("Unexpected response format. Check backend API.")
+ console.error("Invalid response format:", response.data)
+ }
+ } catch (error) {
+ console.error("Error transcribing audio:", error)
+ setError("Failed to transcribe audio. Please try again.")
+ } finally {
+ setLoading(false)
+ }
}
const startRecording = async () => {
- try {
- const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
- console.log("Microphone access granted.")
+ try {
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+ console.log("Microphone access granted.")
- mediaRecorderRef.current = new MediaRecorder(stream)
- audioChunksRef.current = []
+ mediaRecorderRef.current = new MediaRecorder(stream)
+ audioChunksRef.current = []
- mediaRecorderRef.current.ondataavailable = (event) => {
- if (event.data.size > 0) {
- console.log("Audio chunk received:", event.data)
- audioChunksRef.current.push(event.data)
- }
- }
+ mediaRecorderRef.current.ondataavailable = (event) => {
+ if (event.data.size > 0) {
+ console.log("Audio chunk received:", event.data)
+ audioChunksRef.current.push(event.data)
+ }
+ }
- mediaRecorderRef.current.onstop = async () => {
- const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" })
- const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" })
+ mediaRecorderRef.current.onstop = async () => {
+ const audioBlob = new Blob(audioChunksRef.current, { type: "audio/mp3" })
+ const audioFile = new File([audioBlob], "recording.mp3", { type: "audio/mp3" })
- console.log("Recording stopped. Blob created:", audioBlob)
+ console.log("Recording stopped. Blob created:", audioBlob)
- setFile(audioFile)
- setTranscription("Processing transcription for recorded audio...")
- await handleTranscription(audioFile)
- }
+ setFile(audioFile)
+ setTranscription("Processing transcription for recorded audio...")
+ await handleTranscription(audioFile)
+ }
- mediaRecorderRef.current.start()
- console.log("Recording started.")
- setRecording(true)
- } catch (error) {
- console.error("Error starting recording:", error)
- setError("Failed to start recording. Please check microphone permissions.")
- }
+ mediaRecorderRef.current.start()
+ console.log("Recording started.")
+ setRecording(true)
+ } catch (error) {
+ console.error("Error starting recording:", error)
+ setError("Failed to start recording. Please check microphone permissions.")
+ }
}
const stopRecording = () => {
- if (mediaRecorderRef.current) {
- console.log("Stopping recording...")
- mediaRecorderRef.current.stop()
- setRecording(false)
- }
+ if (mediaRecorderRef.current) {
+ console.log("Stopping recording...")
+ mediaRecorderRef.current.stop()
+ setRecording(false)
+ }
}
return (
-    <div>
-      <h1>Audio Transcriber</h1>
-
+    <div>
+      <h1>Audio Transcriber</h1>
+
-      <Card>
-        <CardHeader>
-          <CardTitle>Transcribe Audio</CardTitle>
-          <p className="text-sm text-muted-foreground">Upload an audio file or record directly to transcribe</p>
-        </CardHeader>
-        <CardContent>
-          <Tabs defaultValue="upload">
-            <TabsList className="grid w-full grid-cols-2">
-              <TabsTrigger value="upload">Upload Audio</TabsTrigger>
-              <TabsTrigger value="record">Record Audio</TabsTrigger>
-            </TabsList>
-            <TabsContent value="upload">
-              <label>Select an audio file</label>
-              <Input type="file" accept="audio/*" onChange={handleFileChange} />
-              <Button
-                onClick={() => file && handleTranscription(file)}
-                disabled={loading || !file}
-                className="w-full"
-              >
-                {loading ? "Transcribing..." : "Transcribe"}
-              </Button>
-            </TabsContent>
-            <TabsContent value="record">
-              <p>Record audio from your microphone</p>
-              {!recording ? (
-                <Button onClick={startRecording} className="w-full">
-                  Start Recording
-                </Button>
-              ) : (
-                <Button onClick={stopRecording} className="w-full">
-                  Stop Recording
-                </Button>
-              )}
-            </TabsContent>
-          </Tabs>
-        </CardContent>
-      </Card>
+      <Card>
+        <CardHeader>
+          <CardTitle>Transcribe Audio</CardTitle>
+          <p className="text-sm text-muted-foreground">Upload an audio file or record directly to transcribe</p>
+        </CardHeader>
+        <CardContent>
+          {/* Tabs, TabsList, TabsTrigger, TabsContent are assumed to be imported from "@/components/ui/tabs" */}
+          <Tabs defaultValue="upload">
+            <TabsList className="grid w-full grid-cols-2">
+              <TabsTrigger value="upload">Upload Audio</TabsTrigger>
+              <TabsTrigger value="record">Record Audio</TabsTrigger>
+            </TabsList>
+            <TabsContent value="upload">
+              <label>Select an audio file</label>
+              <Input type="file" accept="audio/*" onChange={handleFileChange} />
+              <Button
+                onClick={() => file && handleTranscription(file)}
+                disabled={loading || !file}
+                className="w-full"
+              >
+                {loading ? "Transcribing..." : "Transcribe"}
+              </Button>
+            </TabsContent>
+            <TabsContent value="record">
+              <p>Record audio from your microphone</p>
+              {!recording ? (
+                <Button onClick={startRecording} className="w-full">
+                  Start Recording
+                </Button>
+              ) : (
+                <Button onClick={stopRecording} className="w-full">
+                  Stop Recording
+                </Button>
+              )}
+            </TabsContent>
+          </Tabs>
+        </CardContent>
+      </Card>
-      <Card>
-        <CardHeader>
-          <CardTitle>Transcription Result</CardTitle>
-          <p className="text-sm text-muted-foreground">
-            {loading
-              ? "Processing transcription..."
-              : transcription
-                ? "Your transcription is ready"
-                : "No transcription available yet"}
-          </p>
-        </CardHeader>
-        <CardContent>
-          {transcription && <p className="whitespace-pre-wrap">{transcription}</p>}
-        </CardContent>
-      </Card>
-
-      {file && (
-        <div className="flex items-center justify-between rounded-md border p-3">
-          <span className="text-sm">{file.name}</span>
-          <Button onClick={() => setFile(null)}>
-            Clear
-          </Button>
-        </div>
-      )}
+      <Card>
+        <CardHeader>
+          <CardTitle>Transcription Result</CardTitle>
+          <p className="text-sm text-muted-foreground">
+            {loading
+              ? "Processing transcription..."
+              : transcription
+                ? "Your transcription is ready"
+                : "No transcription available yet"}
+          </p>
+        </CardHeader>
+        <CardContent>
+          {transcription && <p className="whitespace-pre-wrap">{transcription}</p>}
+        </CardContent>
+      </Card>
+
+      {file && (
+        <div className="flex items-center justify-between rounded-md border p-3">
+          <span className="text-sm">{file.name}</span>
+          <Button onClick={() => setFile(null)}>
+            Clear
+          </Button>
+        </div>
+      )}
-      {error && (
-        <Alert variant="destructive">
-          <AlertTitle>Error</AlertTitle>
-          <AlertDescription>{error}</AlertDescription>
-        </Alert>
-      )}
-    </div>
+      {error && (
+        <Alert variant="destructive">
+          <AlertTitle>Error</AlertTitle>
+          <AlertDescription>{error}</AlertDescription>
+        </Alert>
+      )}
+    </div>
)
}
diff --git a/tsconfig.json b/tsconfig.json
index 34c66d8..161b9f9 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -22,6 +22,6 @@
"@/*": ["./src/*"]
}
},
- "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "src/app/api/connectDB.js", "src/lib/utils.js", "src/app/(web)/account/page.jsx", "src/app/(panels)/suite/patient/account/page.jsx", "src/app/(panels)/suite/patient/dashboard/MedicationTable.jsx", "src/app/(panels)/suite/patient/dashboard/page.jsx", "src/app/api/transcribe/route.js", "src/components/ui/calendar.jsx", "src/app/(web)/page.jsx", "src/app/(web)/transcribe/page.jsx", "src/app/(web)/login/page.jsx", "src/app/(panels)/suite/layout.jsx", "src/app/(panels)/suite/doctor/dashboard/page.jsx", "src/app/(panels)/suite/patient/chat/page.jsx", "src/app/api/chat/route.js"],
+ "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "src/app/api/connectDB.js", "src/lib/utils.js", "src/app/(web)/account/page.jsx", "src/app/(panels)/suite/patient/account/page.jsx", "src/app/(panels)/suite/patient/dashboard/MedicationTable.jsx", "src/app/(panels)/suite/patient/dashboard/page.jsx", "src/app/api/transcribe/route.js", "src/components/ui/calendar.jsx", "src/app/(web)/page.jsx", "src/app/(web)/transcribe/page.jsx", "src/app/(web)/login/page.jsx", "src/app/(panels)/suite/layout.jsx", "src/app/(panels)/suite/doctor/dashboard/page.jsx", "src/app/(panels)/suite/patient/chat/page.jsx", "src/app/(panels)/suite/doctor/dashboard/AppList.jsx", "src/app/(panels)/suite/doctor/patient/page.jsx"],
"exclude": ["node_modules"]
}