Merge branch 'main' of https://github.com/GamerBoss101/HooHacks-12

.gitignore (vendored): 1 line changed
@@ -134,3 +134,4 @@ dist
 .yarn/build-state.yml
 .yarn/install-state.gz
 .pnp.*
+Backend/test.py

Backend/index.html: 1003 lines changed
File diff suppressed because it is too large
@@ -1,297 +1,699 @@
import os
import base64
import json
import time
import math
import gc
import logging
import numpy as np
import torch
import torchaudio

from io import BytesIO
from typing import List, Dict, Any, Optional
from flask import Flask, request, send_from_directory, Response
from flask_cors import CORS
from flask_socketio import SocketIO, emit, disconnect
from generator import load_csm_1b, Segment

from collections import deque
from threading import Lock
# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("sesame-server")

# Determine best compute device
if torch.backends.mps.is_available():
    device = "mps"
elif torch.cuda.is_available():
    try:
        # Test CUDA functionality
        torch.rand(10, device="cuda")
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        torch.backends.cudnn.benchmark = True
        device = "cuda"
        logger.info("CUDA is fully functional")
    except Exception as e:
        logger.warning(f"CUDA available but not working correctly: {e}")
        device = "cpu"
else:
    device = "cpu"
    logger.info("Using CPU")
# Constants and Configuration
SILENCE_THRESHOLD = 0.01
SILENCE_DURATION_SEC = 0.75
MAX_BUFFER_SIZE = 30    # Maximum chunks to buffer before processing
CHUNK_SIZE_MS = 500     # Size of audio chunks when streaming responses

# Define the base directory and static files directory
base_dir = os.path.dirname(os.path.abspath(__file__))
static_dir = os.path.join(base_dir, "static")
os.makedirs(static_dir, exist_ok=True)
# Define a simple energy-based speech detector
class SpeechDetector:
    def __init__(self):
        self.min_speech_energy = 0.01
        self.speech_window = 0.2  # seconds

    def detect_speech(self, audio_tensor, sample_rate):
        # Calculate frame size based on window size
        frame_size = int(sample_rate * self.speech_window)

        # If audio is shorter than frame size, use the entire audio
        if audio_tensor.shape[0] < frame_size:
            frames = [audio_tensor]
        else:
            # Split audio into frames
            frames = [audio_tensor[i:i+frame_size] for i in range(0, len(audio_tensor), frame_size)]

        # Calculate energy per frame
        energies = [torch.mean(frame**2).item() for frame in frames]

        # Determine if there's speech based on energy threshold
        has_speech = any(e > self.min_speech_energy for e in energies)

        return has_speech


speech_detector = SpeechDetector()
logger.info("Initialized simple speech detector")
# Model Loading Functions
def load_speech_models():
    """Load speech generation model"""
    # Load speech generation model (Sesame CSM)
    try:
        logger.info(f"Loading Sesame CSM model on {device}...")
        generator = load_csm_1b(device=device)
        logger.info("Sesame CSM model loaded successfully")
    except Exception as e:
        logger.error(f"Error loading Sesame CSM on {device}: {e}")
        if device == "cuda":
            try:
                logger.info("Trying to load Sesame CSM on CPU instead...")
                generator = load_csm_1b(device="cpu")
                logger.info("Sesame CSM model loaded on CPU successfully")
            except Exception as cpu_error:
                logger.critical(f"Failed to load speech synthesis model: {cpu_error}")
                raise RuntimeError("Failed to load speech synthesis model")
        else:
            raise RuntimeError("Failed to load speech synthesis model on any device")

    return generator


# Load speech model
generator = load_speech_models()

# Set up Flask and Socket.IO
app = Flask(__name__)
CORS(app)
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='eventlet')

# Socket connection management
thread_lock = Lock()
active_clients = {}  # Map client_id to client context
# Audio Utility Functions
def decode_audio_data(audio_data: str) -> torch.Tensor:
    """Decode base64 audio data to a torch tensor with improved error handling"""
    try:
        # Skip empty audio data
        if not audio_data or len(audio_data) < 100:
            logger.warning("Empty or too short audio data received")
            return torch.zeros(generator.sample_rate // 2)  # 0.5 seconds of silence

        # Extract the actual base64 content
        if ',' in audio_data:
            audio_data = audio_data.split(',')[1]

        # Decode base64 audio data
        try:
            binary_data = base64.b64decode(audio_data)
            logger.debug(f"Decoded base64 data: {len(binary_data)} bytes")

            # Check if we have enough data for a valid WAV
            if len(binary_data) < 44:  # WAV header is 44 bytes
                logger.warning("Data too small to be a valid WAV file")
                return torch.zeros(generator.sample_rate // 2)
        except Exception as e:
            logger.error(f"Base64 decoding error: {e}")
            return torch.zeros(generator.sample_rate // 2)

        # Multiple approaches to handle audio data
        audio_tensor = None
        sample_rate = None

        # Approach 1: Direct loading with torchaudio
        try:
            with BytesIO(binary_data) as temp_file:
                temp_file.seek(0)
                audio_tensor, sample_rate = torchaudio.load(temp_file, format="wav")
                logger.debug(f"Loaded audio: shape={audio_tensor.shape}, rate={sample_rate}Hz")

                # Validate tensor
                if audio_tensor.numel() == 0 or torch.isnan(audio_tensor).any():
                    raise ValueError("Invalid audio tensor")
        except Exception as e:
            logger.warning(f"Direct loading failed: {e}")

            # Approach 2: Using wave module and numpy
            try:
                temp_path = os.path.join(base_dir, f"temp_{time.time()}.wav")
                with open(temp_path, 'wb') as f:
                    f.write(binary_data)

                import wave
                with wave.open(temp_path, 'rb') as wf:
                    n_channels = wf.getnchannels()
                    sample_width = wf.getsampwidth()
                    sample_rate = wf.getframerate()
                    n_frames = wf.getnframes()
                    frames = wf.readframes(n_frames)

                # Convert to numpy array
                if sample_width == 2:  # 16-bit audio
                    data = np.frombuffer(frames, dtype=np.int16).astype(np.float32) / 32768.0
                elif sample_width == 1:  # 8-bit audio
                    data = np.frombuffer(frames, dtype=np.uint8).astype(np.float32) / 128.0 - 1.0
                else:
                    raise ValueError(f"Unsupported sample width: {sample_width}")

                # Convert to mono if needed
                if n_channels > 1:
                    data = data.reshape(-1, n_channels)
                    data = data.mean(axis=1)

                # Convert to torch tensor
                audio_tensor = torch.from_numpy(data)
                logger.info(f"Loaded audio using wave: shape={audio_tensor.shape}")

                # Clean up temp file
                if os.path.exists(temp_path):
                    os.remove(temp_path)

            except Exception as e2:
                logger.error(f"All audio loading methods failed: {e2}")
                return torch.zeros(generator.sample_rate // 2)

        # Format corrections
        if audio_tensor is None:
            return torch.zeros(generator.sample_rate // 2)

        # Ensure audio is mono
        if len(audio_tensor.shape) > 1 and audio_tensor.shape[0] > 1:
            audio_tensor = torch.mean(audio_tensor, dim=0)

        # Ensure 1D tensor
        audio_tensor = audio_tensor.squeeze()

        # Resample if needed
        if sample_rate != generator.sample_rate:
            try:
                logger.debug(f"Resampling from {sample_rate}Hz to {generator.sample_rate}Hz")
                resampler = torchaudio.transforms.Resample(
                    orig_freq=sample_rate,
                    new_freq=generator.sample_rate
                )
                audio_tensor = resampler(audio_tensor)
            except Exception as e:
                logger.warning(f"Resampling error: {e}")

        # Normalize audio to avoid issues
        if torch.abs(audio_tensor).max() > 0:
            audio_tensor = audio_tensor / torch.abs(audio_tensor).max()

        return audio_tensor
    except Exception as e:
        logger.error(f"Unhandled error in decode_audio_data: {e}")
        return torch.zeros(generator.sample_rate // 2)  # 0.5 seconds of silence
def encode_audio_data(audio_tensor: torch.Tensor) -> str:
    """Encode torch tensor audio to base64 string"""
    try:
        buf = BytesIO()
        torchaudio.save(buf, audio_tensor.unsqueeze(0).cpu(), generator.sample_rate, format="wav")
        buf.seek(0)
        audio_base64 = base64.b64encode(buf.read()).decode('utf-8')
        return f"data:audio/wav;base64,{audio_base64}"
    except Exception as e:
        logger.error(f"Error encoding audio: {e}")
        # Return a minimal silent audio file
        silence = torch.zeros(generator.sample_rate // 2).unsqueeze(0)
        buf = BytesIO()
        torchaudio.save(buf, silence, generator.sample_rate, format="wav")
        buf.seek(0)
        return f"data:audio/wav;base64,{base64.b64encode(buf.read()).decode('utf-8')}"
def process_speech(audio_tensor: torch.Tensor, client_id: str) -> str:
    """Process speech and return a simple response"""
    # In this simplified version, we'll just check if there's sound
    # and provide basic responses instead of doing actual speech recognition
    if speech_detector and speech_detector.detect_speech(audio_tensor, generator.sample_rate):
        # Generate a response based on audio energy
        energy = torch.mean(torch.abs(audio_tensor)).item()

        if energy > 0.1:  # Louder speech
            return "I heard you speaking clearly. How can I help you today?"
        elif energy > 0.05:  # Moderate speech
            return "I heard you say something. Could you please repeat that?"
        else:  # Soft speech
            return "I detected some speech, but it was quite soft. Could you speak up a bit?"
    else:
        return "I didn't detect any speech. Could you please try again?"
def generate_response(text: str, conversation_history: List[Segment]) -> str:
    """Generate a contextual response based on the transcribed text"""
    # Simple response logic - can be replaced with a more sophisticated LLM
    responses = {
        "hello": "Hello there! How can I help you today?",
        "hi": "Hi there! What can I do for you?",
        "how are you": "I'm doing well, thanks for asking! How about you?",
        "what is your name": "I'm Sesame, your voice assistant. How can I help you?",
        "who are you": "I'm Sesame, an AI voice assistant. I'm here to chat with you!",
        "bye": "Goodbye! It was nice chatting with you.",
        "thank you": "You're welcome! Is there anything else I can help with?",
        "weather": "I don't have real-time weather data, but I hope it's nice where you are!",
        "help": "I can chat with you using natural voice. Just speak normally and I'll respond.",
        "what can you do": "I can have a conversation with you, answer questions, and provide assistance with various topics.",
    }

    text_lower = text.lower()

    # Check for matching keywords
    for key, response in responses.items():
        if key in text_lower:
            return response

    # Default responses based on text length
    if not text:
        return "I didn't catch that. Could you please repeat?"
    elif len(text) < 10:
        return "Thanks for your message. Could you elaborate a bit more?"
    else:
        return "I heard you speaking. That's interesting! Can you tell me more about that?"
# Flask Routes
@app.route('/')
def index():
    return send_from_directory(base_dir, 'index.html')


@app.route('/favicon.ico')
def favicon():
    if os.path.exists(os.path.join(static_dir, 'favicon.ico')):
        return send_from_directory(static_dir, 'favicon.ico')
    return Response(status=204)


@app.route('/voice-chat.js')
def voice_chat_js():
    return send_from_directory(base_dir, 'voice-chat.js')


@app.route('/static/<path:path>')
def serve_static(path):
    return send_from_directory(static_dir, path)
# Socket.IO Event Handlers
@socketio.on('connect')
def handle_connect():
    client_id = request.sid
    logger.info(f"Client connected: {client_id}")

    # Initialize client context
    active_clients[client_id] = {
        'context_segments': [],
        'streaming_buffer': [],
        'is_streaming': False,
        'is_silence': False,
        'last_active_time': time.time(),
        'energy_window': deque(maxlen=10)
    }

    emit('status', {'type': 'connected', 'message': 'Connected to server'})


@socketio.on('disconnect')
def handle_disconnect():
    client_id = request.sid
    if client_id in active_clients:
        del active_clients[client_id]
    logger.info(f"Client disconnected: {client_id}")
@socketio.on('generate')
def handle_generate(data):
    client_id = request.sid
    if client_id not in active_clients:
        emit('error', {'message': 'Client not registered'})
        return

    try:
        text = data.get('text', '')
        speaker_id = data.get('speaker', 0)

        logger.info(f"Generating audio for: '{text}' with speaker {speaker_id}")

        # Generate audio response
        audio_tensor = generator.generate(
            text=text,
            speaker=speaker_id,
            context=active_clients[client_id]['context_segments'],
            max_audio_length_ms=10_000,
        )

        # Add to conversation context
        active_clients[client_id]['context_segments'].append(
            Segment(text=text, speaker=speaker_id, audio=audio_tensor)
        )

        # Convert audio to base64 and send back to client
        audio_base64 = encode_audio_data(audio_tensor)
        emit('audio_response', {
            'type': 'audio_response',
            'audio': audio_base64,
            'text': text
        })

    except Exception as e:
        logger.error(f"Error generating audio: {e}")
        emit('error', {
            'type': 'error',
            'message': f"Error generating audio: {str(e)}"
        })
@socketio.on('add_to_context')
def handle_add_to_context(data):
    client_id = request.sid
    if client_id not in active_clients:
        emit('error', {'message': 'Client not registered'})
        return

    try:
        text = data.get('text', '')
        speaker_id = data.get('speaker', 0)
        audio_data = data.get('audio', '')

        # Convert received audio to tensor
        audio_tensor = decode_audio_data(audio_data)

        # Add to conversation context
        active_clients[client_id]['context_segments'].append(
            Segment(text=text, speaker=speaker_id, audio=audio_tensor)
        )

        emit('context_updated', {
            'type': 'context_updated',
            'message': 'Audio added to context'
        })

    except Exception as e:
        logger.error(f"Error adding to context: {e}")
        emit('error', {
            'type': 'error',
            'message': f"Error processing audio: {str(e)}"
        })
@socketio.on('clear_context')
def handle_clear_context():
    client_id = request.sid
    if client_id in active_clients:
        active_clients[client_id]['context_segments'] = []

    emit('context_updated', {
        'type': 'context_updated',
        'message': 'Context cleared'
    })
@socketio.on('stream_audio')
def handle_stream_audio(data):
    client_id = request.sid
    if client_id not in active_clients:
        emit('error', {'message': 'Client not registered'})
        return

    client = active_clients[client_id]

    try:
        speaker_id = data.get('speaker', 0)
        audio_data = data.get('audio', '')

        # Skip if no audio data (might be just a connection test)
        if not audio_data:
            logger.debug("Empty audio data received, ignoring")
            return

        # Convert received audio to tensor
        audio_chunk = decode_audio_data(audio_data)

        # Start streaming mode if not already started
        if not client['is_streaming']:
            client['is_streaming'] = True
            client['streaming_buffer'] = []
            client['energy_window'].clear()
            client['is_silence'] = False
            client['last_active_time'] = time.time()
            logger.info(f"[{client_id[:8]}] Streaming started with speaker ID: {speaker_id}")
            emit('streaming_status', {
                'type': 'streaming_status',
                'status': 'started'
            })

        # Calculate audio energy for silence detection
        chunk_energy = torch.mean(torch.abs(audio_chunk)).item()
        client['energy_window'].append(chunk_energy)
        avg_energy = sum(client['energy_window']) / len(client['energy_window'])

        # Check if audio is silent
        current_silence = avg_energy < SILENCE_THRESHOLD

        # Track silence transition
        if not client['is_silence'] and current_silence:
            # Transition to silence
            client['is_silence'] = True
            client['last_active_time'] = time.time()
        elif client['is_silence'] and not current_silence:
            # User started talking again
            client['is_silence'] = False

        # Add chunk to buffer regardless of silence state
        client['streaming_buffer'].append(audio_chunk)

        # Check if silence has persisted long enough to consider "stopped talking"
        silence_elapsed = time.time() - client['last_active_time']

        if client['is_silence'] and silence_elapsed >= SILENCE_DURATION_SEC and len(client['streaming_buffer']) > 0:
            # User has stopped talking - process the collected audio
            logger.info(f"[{client_id[:8]}] Processing audio after {silence_elapsed:.2f}s of silence")
            process_complete_utterance(client_id, client, speaker_id)

        # If buffer gets too large without silence, process it anyway
        elif len(client['streaming_buffer']) >= MAX_BUFFER_SIZE:
            logger.info(f"[{client_id[:8]}] Processing long audio segment without silence")
            process_complete_utterance(client_id, client, speaker_id, is_incomplete=True)

            # Keep half of the buffer for context (sliding window approach)
            half_point = len(client['streaming_buffer']) // 2
            client['streaming_buffer'] = client['streaming_buffer'][half_point:]

    except Exception as e:
        import traceback
        traceback.print_exc()
        logger.error(f"Error processing streaming audio: {e}")
        emit('error', {
            'type': 'error',
            'message': f"Error processing streaming audio: {str(e)}"
        })
def process_complete_utterance(client_id, client, speaker_id, is_incomplete=False):
    """Process a complete utterance (after silence or buffer limit)"""
    try:
        # Combine audio chunks
        full_audio = torch.cat(client['streaming_buffer'], dim=0)

        # Process audio to generate a response (no speech recognition)
        generated_text = process_speech(full_audio, client_id)

        # Add suffix for incomplete utterances
        if is_incomplete:
            generated_text += " (processing continued speech...)"

        # Log the generated text
        logger.info(f"[{client_id[:8]}] Generated text: '{generated_text}'")

        # Handle the result
        if generated_text:
            # Add user message to context
            user_segment = Segment(text=generated_text, speaker=speaker_id, audio=full_audio)
            client['context_segments'].append(user_segment)

            # Send the text to client
            emit('transcription', {
                'type': 'transcription',
                'text': generated_text
            }, room=client_id)

            # Only generate a response if this is a complete utterance
            if not is_incomplete:
                # Generate a contextual response
                response_text = generate_response(generated_text, client['context_segments'])
                logger.info(f"[{client_id[:8]}] Generating response: '{response_text}'")

                # Let the client know we're processing
                emit('processing_status', {
                    'type': 'processing_status',
                    'status': 'generating_audio',
                    'message': 'Generating audio response...'
                }, room=client_id)

                # Generate audio for the response
                try:
                    # Use a different speaker than the user
                    ai_speaker_id = 1 if speaker_id == 0 else 0

                    # Generate the full response
                    audio_tensor = generator.generate(
                        text=response_text,
                        speaker=ai_speaker_id,
                        context=client['context_segments'],
                        max_audio_length_ms=10_000,
                    )

                    # Add response to context
                    ai_segment = Segment(
                        text=response_text,
                        speaker=ai_speaker_id,
                        audio=audio_tensor
                    )
                    client['context_segments'].append(ai_segment)

                    # Convert audio to base64 and send back to client
                    audio_base64 = encode_audio_data(audio_tensor)
                    emit('audio_response', {
                        'type': 'audio_response',
                        'text': response_text,
                        'audio': audio_base64
                    }, room=client_id)

                    logger.info(f"[{client_id[:8]}] Audio response sent")

                except Exception as e:
                    logger.error(f"Error generating audio response: {e}")
                    emit('error', {
                        'type': 'error',
                        'message': "Sorry, there was an error generating the audio response."
                    }, room=client_id)
        else:
            # If processing failed, send a notification
            emit('error', {
                'type': 'error',
                'message': "Sorry, I couldn't understand what you said. Could you try again?"
            }, room=client_id)

        # Only clear buffer for complete utterances
        if not is_incomplete:
            # Reset state
            client['streaming_buffer'] = []
            client['energy_window'].clear()
            client['is_silence'] = False
            client['last_active_time'] = time.time()

    except Exception as e:
        logger.error(f"Error processing utterance: {e}")
        emit('error', {
            'type': 'error',
            'message': f"Error processing audio: {str(e)}"
        }, room=client_id)
@socketio.on('stop_streaming')
def handle_stop_streaming(data):
    client_id = request.sid
    if client_id not in active_clients:
        return

    client = active_clients[client_id]
    client['is_streaming'] = False

    if client['streaming_buffer'] and len(client['streaming_buffer']) > 5:
        # Process any remaining audio in the buffer
        logger.info(f"[{client_id[:8]}] Processing final audio buffer on stop")
        process_complete_utterance(client_id, client, data.get("speaker", 0))

    client['streaming_buffer'] = []
    emit('streaming_status', {
        'type': 'streaming_status',
        'status': 'stopped'
    })
def stream_audio_to_client(client_id, audio_tensor, text, speaker_id, chunk_size_ms=CHUNK_SIZE_MS):
    """Stream audio to client in chunks to simulate real-time generation"""
    try:
        if client_id not in active_clients:
            logger.warning(f"Client {client_id} not found for streaming")
            return

        # Calculate chunk size in samples
        chunk_size = int(generator.sample_rate * chunk_size_ms / 1000)
        total_chunks = math.ceil(audio_tensor.size(0) / chunk_size)

        logger.info(f"Streaming audio in {total_chunks} chunks of {chunk_size_ms}ms each")

        # Send initial response with text but no audio yet
        socketio.emit('audio_response_start', {
            'type': 'audio_response_start',
            'text': text,
            'total_chunks': total_chunks
        }, room=client_id)

        # Stream each chunk
        for i in range(total_chunks):
            start_idx = i * chunk_size
            end_idx = min(start_idx + chunk_size, audio_tensor.size(0))

            # Extract chunk
            chunk = audio_tensor[start_idx:end_idx]

            # Encode chunk
            chunk_base64 = encode_audio_data(chunk)

            # Send chunk
            socketio.emit('audio_response_chunk', {
                'type': 'audio_response_chunk',
                'chunk_index': i,
                'total_chunks': total_chunks,
                'audio': chunk_base64,
                'is_last': i == total_chunks - 1
            }, room=client_id)

            # Brief pause between chunks to simulate streaming
            time.sleep(0.1)

        # Send completion message
        socketio.emit('audio_response_complete', {
            'type': 'audio_response_complete',
            'text': text
        }, room=client_id)

        logger.info(f"Audio streaming complete: {total_chunks} chunks sent")

    except Exception as e:
        logger.error(f"Error streaming audio to client: {e}")
        import traceback
        traceback.print_exc()
# Main server start
if __name__ == "__main__":
    print(f"\n{'='*60}")
    print(f"🔊 Sesame AI Voice Chat Server")
    print(f"{'='*60}")
    print(f"📡 Server Information:")
    print(f"   - Local URL: http://localhost:5000")
    print(f"   - Network URL: http://<your-ip-address>:5000")
    print(f"{'='*60}")
    print(f"🌐 Device: {device.upper()}")
    print(f"🧠 Models: Sesame CSM (TTS only)")
    print(f"🔧 Serving from: {os.path.join(base_dir, 'index.html')}")
    print(f"{'='*60}")
    print(f"Ready to receive connections! Press Ctrl+C to stop the server.\n")

    socketio.run(app, host="0.0.0.0", port=5000, debug=False)
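For reference, a minimal client sketch for the 'generate' event over Socket.IO, assuming the python-socketio client package is available; the event names and payload fields mirror the server handlers above, and the snippet is illustrative only, not part of this commit.

# Illustrative sketch only (assumes the python-socketio package); event names
# and payload fields mirror the server handlers above.
import base64
import socketio

sio = socketio.Client()

@sio.on('audio_response')
def on_audio_response(data):
    # The server returns a data URL ("data:audio/wav;base64,..."); strip the
    # prefix and write the raw WAV bytes to disk.
    wav_bytes = base64.b64decode(data['audio'].split(',', 1)[1])
    with open('response.wav', 'wb') as f:
        f.write(wav_bytes)
    print(f"Saved audio for: {data['text']}")

@sio.on('error')
def on_error(data):
    print(f"Server error: {data.get('message')}")

sio.connect('http://localhost:5000')
sio.emit('generate', {'text': 'Hello there!', 'speaker': 0})
sio.wait()  # block so the event callbacks above can fire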
Backend/voice-chat.js (new file): 852 lines
@@ -0,0 +1,852 @@
/**
|
||||||
|
* Sesame AI Voice Chat Client
|
||||||
|
*
|
||||||
|
* A web client that connects to a Sesame AI voice chat server and enables
|
||||||
|
* real-time voice conversation with an AI assistant.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Configuration constants
|
||||||
|
const SERVER_URL = window.location.hostname === 'localhost' ?
|
||||||
|
'http://localhost:5000' : window.location.origin;
|
||||||
|
const ENERGY_WINDOW_SIZE = 15;
|
||||||
|
const CLIENT_SILENCE_DURATION_MS = 750;
|
||||||
|
|
||||||
|
// DOM elements
|
||||||
|
const elements = {
|
||||||
|
conversation: null,
|
||||||
|
streamButton: null,
|
||||||
|
clearButton: null,
|
||||||
|
thresholdSlider: null,
|
||||||
|
thresholdValue: null,
|
||||||
|
visualizerCanvas: null,
|
||||||
|
visualizerLabel: null,
|
||||||
|
volumeLevel: null,
|
||||||
|
statusDot: null,
|
||||||
|
statusText: null,
|
||||||
|
speakerSelection: null,
|
||||||
|
autoPlayResponses: null,
|
||||||
|
showVisualizer: null
|
||||||
|
};
|
||||||
|
|
||||||
|
// Application state
|
||||||
|
const state = {
|
||||||
|
socket: null,
|
||||||
|
audioContext: null,
|
||||||
|
analyser: null,
|
||||||
|
microphone: null,
|
||||||
|
streamProcessor: null,
|
||||||
|
isStreaming: false,
|
||||||
|
isSpeaking: false,
|
||||||
|
silenceThreshold: 0.01,
|
||||||
|
energyWindow: [],
|
||||||
|
silenceTimer: null,
|
||||||
|
volumeUpdateInterval: null,
|
||||||
|
visualizerAnimationFrame: null,
|
||||||
|
currentSpeaker: 0
|
||||||
|
};
|
||||||
|
|
||||||
|
// Visualizer variables
|
||||||
|
let canvasContext = null;
|
||||||
|
let visualizerBufferLength = 0;
|
||||||
|
let visualizerDataArray = null;
|
||||||
|
|
||||||
|
// Initialize the application
|
||||||
|
function initializeApp() {
|
||||||
|
// Initialize the UI elements
|
||||||
|
initializeUIElements();
|
||||||
|
|
||||||
|
// Initialize socket.io connection
|
||||||
|
setupSocketConnection();
|
||||||
|
|
||||||
|
// Setup event listeners
|
||||||
|
setupEventListeners();
|
||||||
|
|
||||||
|
// Initialize visualizer
|
||||||
|
setupVisualizer();
|
||||||
|
|
||||||
|
// Show welcome message
|
||||||
|
addSystemMessage('Welcome to Sesame AI Voice Chat! Click "Start Conversation" to begin.');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize UI elements
|
||||||
|
function initializeUIElements() {
|
||||||
|
// Store references to UI elements
|
||||||
|
elements.conversation = document.getElementById('conversation');
|
||||||
|
elements.streamButton = document.getElementById('streamButton');
|
||||||
|
elements.clearButton = document.getElementById('clearButton');
|
||||||
|
elements.thresholdSlider = document.getElementById('thresholdSlider');
|
||||||
|
elements.thresholdValue = document.getElementById('thresholdValue');
|
||||||
|
elements.visualizerCanvas = document.getElementById('audioVisualizer');
|
||||||
|
elements.visualizerLabel = document.getElementById('visualizerLabel');
|
||||||
|
elements.volumeLevel = document.getElementById('volumeLevel');
|
||||||
|
elements.statusDot = document.getElementById('statusDot');
|
||||||
|
elements.statusText = document.getElementById('statusText');
|
||||||
|
elements.speakerSelection = document.getElementById('speakerSelect'); // Changed to match HTML
|
||||||
|
elements.autoPlayResponses = document.getElementById('autoPlayResponses');
|
||||||
|
elements.showVisualizer = document.getElementById('showVisualizer');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup Socket.IO connection
|
||||||
|
function setupSocketConnection() {
|
||||||
|
state.socket = io(SERVER_URL);
|
||||||
|
|
||||||
|
// Connection events
|
||||||
|
state.socket.on('connect', () => {
|
||||||
|
console.log('Connected to server');
|
||||||
|
updateConnectionStatus(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
state.socket.on('disconnect', () => {
|
||||||
|
console.log('Disconnected from server');
|
||||||
|
updateConnectionStatus(false);
|
||||||
|
|
||||||
|
// Stop streaming if active
|
||||||
|
if (state.isStreaming) {
|
||||||
|
stopStreaming(false);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
state.socket.on('error', (data) => {
|
||||||
|
console.error('Socket error:', data.message);
|
||||||
|
addSystemMessage(`Error: ${data.message}`);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Register message handlers
|
||||||
|
state.socket.on('audio_response', handleAudioResponse);
|
||||||
|
state.socket.on('transcription', handleTranscription);
|
||||||
|
state.socket.on('context_updated', handleContextUpdate);
|
||||||
|
state.socket.on('streaming_status', handleStreamingStatus);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup event listeners
|
||||||
|
function setupEventListeners() {
|
||||||
|
// Stream button
|
||||||
|
elements.streamButton.addEventListener('click', toggleStreaming);
|
||||||
|
|
||||||
|
// Clear button
|
||||||
|
elements.clearButton.addEventListener('click', clearConversation);
|
||||||
|
|
||||||
|
// Threshold slider
|
||||||
|
elements.thresholdSlider.addEventListener('input', updateThreshold);
|
||||||
|
|
||||||
|
// Speaker selection
|
||||||
|
elements.speakerSelection.addEventListener('change', () => {
|
||||||
|
state.currentSpeaker = parseInt(elements.speakerSelection.value, 10);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Visualizer toggle
|
||||||
|
elements.showVisualizer.addEventListener('change', toggleVisualizerVisibility);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup audio visualizer
|
||||||
|
function setupVisualizer() {
|
||||||
|
if (!elements.visualizerCanvas) return;
|
||||||
|
|
||||||
|
canvasContext = elements.visualizerCanvas.getContext('2d');
|
||||||
|
|
||||||
|
// Set canvas dimensions
|
||||||
|
elements.visualizerCanvas.width = elements.visualizerCanvas.offsetWidth;
|
||||||
|
elements.visualizerCanvas.height = elements.visualizerCanvas.offsetHeight;
|
||||||
|
|
||||||
|
// Initialize the visualizer
|
||||||
|
drawVisualizer();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update connection status UI
|
||||||
|
function updateConnectionStatus(isConnected) {
|
||||||
|
elements.statusDot.classList.toggle('active', isConnected);
|
||||||
|
elements.statusText.textContent = isConnected ? 'Connected' : 'Disconnected';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Toggle streaming state
|
||||||
|
function toggleStreaming() {
|
||||||
|
if (state.isStreaming) {
|
||||||
|
stopStreaming(true);
|
||||||
|
} else {
|
||||||
|
startStreaming();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start streaming audio to the server
|
||||||
|
function startStreaming() {
|
||||||
|
if (state.isStreaming) return;
|
||||||
|
|
||||||
|
// Request microphone access
|
||||||
|
navigator.mediaDevices.getUserMedia({ audio: true, video: false })
|
||||||
|
.then(stream => {
|
||||||
|
// Show processing state while setting up
|
||||||
|
elements.streamButton.innerHTML = '<i class="fas fa-spinner fa-spin"></i> Initializing...';
|
||||||
|
|
||||||
|
// Create audio context
|
||||||
|
state.audioContext = new (window.AudioContext || window.webkitAudioContext)();
|
||||||
|
|
||||||
|
// Create microphone source
|
||||||
|
state.microphone = state.audioContext.createMediaStreamSource(stream);
|
||||||
|
|
||||||
|
// Create analyser for visualizer
|
||||||
|
state.analyser = state.audioContext.createAnalyser();
|
||||||
|
state.analyser.fftSize = 256;
|
||||||
|
visualizerBufferLength = state.analyser.frequencyBinCount;
|
||||||
|
visualizerDataArray = new Uint8Array(visualizerBufferLength);
|
||||||
|
|
||||||
|
// Connect microphone to analyser
|
||||||
|
state.microphone.connect(state.analyser);
|
||||||
|
|
||||||
|
// Create script processor for audio processing
|
||||||
|
const bufferSize = 4096;
|
||||||
|
state.streamProcessor = state.audioContext.createScriptProcessor(bufferSize, 1, 1);
|
||||||
|
|
||||||
|
// Set up audio processing callback
|
||||||
|
state.streamProcessor.onaudioprocess = handleAudioProcess;
|
||||||
|
|
||||||
|
// Connect the processors
|
||||||
|
state.analyser.connect(state.streamProcessor);
|
||||||
|
state.streamProcessor.connect(state.audioContext.destination);
|
||||||
|
|
||||||
|
// Update UI
|
||||||
|
state.isStreaming = true;
|
||||||
|
elements.streamButton.innerHTML = '<i class="fas fa-microphone"></i> Listening...';
|
||||||
|
elements.streamButton.classList.add('recording');
|
||||||
|
|
||||||
|
// Initialize energy window
|
||||||
|
state.energyWindow = [];
|
||||||
|
|
||||||
|
// Start volume meter updates
|
||||||
|
state.volumeUpdateInterval = setInterval(updateVolumeMeter, 100);
|
||||||
|
|
||||||
|
// Start visualizer if enabled
|
||||||
|
if (elements.showVisualizer.checked && !state.visualizerAnimationFrame) {
|
||||||
|
drawVisualizer();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Show starting message
|
||||||
|
addSystemMessage('Listening... Speak clearly into your microphone.');
|
||||||
|
|
||||||
|
// Notify the server that we're starting
|
||||||
|
state.socket.emit('stream_audio', {
|
||||||
|
audio: '',
|
||||||
|
speaker: state.currentSpeaker
|
||||||
|
});
|
||||||
|
})
|
||||||
|
.catch(err => {
|
||||||
|
console.error('Error accessing microphone:', err);
|
||||||
|
addSystemMessage(`Error: ${err.message}. Please make sure your microphone is connected and you've granted permission.`);
|
||||||
|
elements.streamButton.innerHTML = '<i class="fas fa-microphone"></i> Start Conversation';
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop streaming audio
|
||||||
|
function stopStreaming(notifyServer = true) {
|
||||||
|
if (!state.isStreaming) return;
|
||||||
|
|
||||||
|
// Update UI first
|
||||||
|
elements.streamButton.innerHTML = '<i class="fas fa-microphone"></i> Start Conversation';
|
||||||
|
elements.streamButton.classList.remove('recording');
|
||||||
|
elements.streamButton.classList.remove('processing');
|
||||||
|
|
||||||
|
// Stop volume meter updates
|
||||||
|
if (state.volumeUpdateInterval) {
|
||||||
|
clearInterval(state.volumeUpdateInterval);
|
||||||
|
state.volumeUpdateInterval = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop all audio processing
|
||||||
|
if (state.streamProcessor) {
|
||||||
|
state.streamProcessor.disconnect();
|
||||||
|
state.streamProcessor = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.analyser) {
|
||||||
|
state.analyser.disconnect();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.microphone) {
|
||||||
|
state.microphone.disconnect();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close audio context
|
||||||
|
if (state.audioContext && state.audioContext.state !== 'closed') {
|
||||||
|
state.audioContext.close().catch(err => console.warn('Error closing audio context:', err));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup animation frames
|
||||||
|
if (state.visualizerAnimationFrame) {
|
||||||
|
cancelAnimationFrame(state.visualizerAnimationFrame);
|
||||||
|
state.visualizerAnimationFrame = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset state
|
||||||
|
state.isStreaming = false;
|
||||||
|
state.isSpeaking = false;
|
||||||
|
|
||||||
|
// Notify the server
|
||||||
|
if (notifyServer && state.socket && state.socket.connected) {
|
||||||
|
state.socket.emit('stop_streaming', {
|
||||||
|
speaker: state.currentSpeaker
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Show message
|
||||||
|
addSystemMessage('Conversation paused. Click "Start Conversation" to resume.');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle audio processing
|
||||||
|
function handleAudioProcess(event) {
|
||||||
|
const inputData = event.inputBuffer.getChannelData(0);
|
||||||
|
|
||||||
|
// Log audio buffer statistics
|
||||||
|
console.log(`Audio buffer: length=${inputData.length}, sample rate=${state.audioContext.sampleRate}Hz`);
|
||||||
|
|
||||||
|
// Calculate audio energy (volume level)
|
||||||
|
const energy = calculateAudioEnergy(inputData);
|
||||||
|
console.log(`Energy: ${energy.toFixed(6)}, threshold: ${state.silenceThreshold}`);
|
||||||
|
|
||||||
|
// Update energy window for averaging
|
||||||
|
updateEnergyWindow(energy);
|
||||||
|
|
||||||
|
// Calculate average energy
|
||||||
|
const avgEnergy = calculateAverageEnergy();
|
||||||
|
|
||||||
|
// Determine if audio is silent
|
||||||
|
const isSilent = avgEnergy < state.silenceThreshold;
|
||||||
|
console.log(`Silent: ${isSilent ? 'Yes' : 'No'}, avg energy: ${avgEnergy.toFixed(6)}`);
|
||||||
|
|
||||||
|
// Handle speech state based on silence
|
||||||
|
handleSpeechState(isSilent);
|
||||||
|
|
||||||
|
// Only send audio chunk if we detect speech
|
||||||
|
if (!isSilent) {
|
||||||
|
// Create a resampled version at 24kHz for the server
|
||||||
|
// Most WebRTC audio is 48kHz, but we want 24kHz for the model
|
||||||
|
const resampledData = downsampleBuffer(inputData, state.audioContext.sampleRate, 24000);
|
||||||
|
console.log(`Resampled audio: ${state.audioContext.sampleRate}Hz → 24000Hz, new length: ${resampledData.length}`);
|
||||||
|
|
||||||
|
// Send the audio chunk to the server
|
||||||
|
sendAudioChunk(resampledData, state.currentSpeaker);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup audio resources when done
|
||||||
|
function cleanupAudioResources() {
|
||||||
|
// Stop all audio processing
|
||||||
|
if (state.streamProcessor) {
|
||||||
|
state.streamProcessor.disconnect();
|
||||||
|
state.streamProcessor = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.analyser) {
|
||||||
|
state.analyser.disconnect();
|
||||||
|
state.analyser = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.microphone) {
|
||||||
|
state.microphone.disconnect();
|
||||||
|
state.microphone = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close audio context
|
||||||
|
if (state.audioContext && state.audioContext.state !== 'closed') {
|
||||||
|
state.audioContext.close().catch(err => console.warn('Error closing audio context:', err));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel all timers and animation frames
|
||||||
|
if (state.volumeUpdateInterval) {
|
||||||
|
clearInterval(state.volumeUpdateInterval);
|
||||||
|
state.volumeUpdateInterval = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.visualizerAnimationFrame) {
|
||||||
|
cancelAnimationFrame(state.visualizerAnimationFrame);
|
||||||
|
state.visualizerAnimationFrame = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.silenceTimer) {
|
||||||
|
clearTimeout(state.silenceTimer);
|
||||||
|
state.silenceTimer = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear conversation history
|
||||||
|
function clearConversation() {
|
||||||
|
if (elements.conversation) {
|
||||||
|
elements.conversation.innerHTML = '';
|
||||||
|
addSystemMessage('Conversation cleared.');
|
||||||
|
|
||||||
|
// Notify server to clear context
|
||||||
|
if (state.socket && state.socket.connected) {
|
||||||
|
state.socket.emit('clear_context');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate audio energy (volume)
|
||||||
|
function calculateAudioEnergy(buffer) {
|
||||||
|
let sum = 0;
|
||||||
|
for (let i = 0; i < buffer.length; i++) {
|
||||||
|
sum += buffer[i] * buffer[i];
|
||||||
|
}
|
||||||
|
return Math.sqrt(sum / buffer.length);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update energy window for averaging
|
||||||
|
function updateEnergyWindow(energy) {
|
||||||
|
state.energyWindow.push(energy);
|
||||||
|
if (state.energyWindow.length > ENERGY_WINDOW_SIZE) {
|
||||||
|
state.energyWindow.shift();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate average energy from window
|
||||||
|
function calculateAverageEnergy() {
|
||||||
|
if (state.energyWindow.length === 0) return 0;
|
||||||
|
|
||||||
|
const sum = state.energyWindow.reduce((a, b) => a + b, 0);
|
||||||
|
return sum / state.energyWindow.length;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the threshold from the slider
|
||||||
|
function updateThreshold() {
|
||||||
|
state.silenceThreshold = parseFloat(elements.thresholdSlider.value);
|
||||||
|
elements.thresholdValue.textContent = state.silenceThreshold.toFixed(3);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the volume meter display
|
||||||
|
function updateVolumeMeter() {
|
||||||
|
if (!state.isStreaming || !state.energyWindow.length) return;
|
||||||
|
|
||||||
|
const avgEnergy = calculateAverageEnergy();
|
||||||
|
|
||||||
|
// Scale energy to percentage (0-100)
|
||||||
|
// Typically, energy values will be very small (e.g., 0.001 to 0.1)
|
||||||
|
// So we multiply by a factor to make it more visible
|
||||||
|
const scaleFactor = 1000;
|
||||||
|
const percentage = Math.min(100, Math.max(0, avgEnergy * scaleFactor));
|
||||||
|
|
||||||
|
// Update volume meter width
|
||||||
|
elements.volumeLevel.style.width = `${percentage}%`;
|
||||||
|
|
||||||
|
// Change color based on level
|
||||||
|
if (percentage > 70) {
|
||||||
|
elements.volumeLevel.style.backgroundColor = '#ff5252';
|
||||||
|
} else if (percentage > 30) {
|
||||||
|
elements.volumeLevel.style.backgroundColor = '#4CAF50';
|
||||||
|
} else {
|
||||||
|
elements.volumeLevel.style.backgroundColor = '#4c84ff';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle speech/silence state transitions
function handleSpeechState(isSilent) {
    if (state.isSpeaking && isSilent) {
        // Transition from speaking to silence
        if (!state.silenceTimer) {
            state.silenceTimer = setTimeout(() => {
                // Only consider it real silence after a certain duration;
                // this prevents detecting brief pauses as the end of speech.
                state.isSpeaking = false;
                state.silenceTimer = null;
            }, CLIENT_SILENCE_DURATION_MS);
        }
    } else if (state.silenceTimer && !isSilent) {
        // User started speaking again, cancel the silence timer
        clearTimeout(state.silenceTimer);
        state.silenceTimer = null;
    }

    // Update speaking state for non-silent audio
    if (!isSilent) {
        state.isSpeaking = true;
    }
}
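// Illustrative sketch (not from this file): how these helpers could be wired
// together inside an audio-processing callback. `inputBuffer`, the speaker id 0
// and the 24 kHz target rate are assumptions made for the example only.
function exampleProcessAudioChunk(inputBuffer) {
    const samples = inputBuffer.getChannelData(0);

    // Rolling-average VAD: per-chunk RMS -> energy window -> threshold comparison
    updateEnergyWindow(calculateAudioEnergy(samples));
    const isSilent = calculateAverageEnergy() < state.silenceThreshold;
    handleSpeechState(isSilent);

    // Only ship audio while the user is speaking
    if (state.isSpeaking) {
        sendAudioChunk(downsampleBuffer(samples, inputBuffer.sampleRate, 24000), 0);
    }
}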
// Send audio chunk to server
function sendAudioChunk(audioData, speaker) {
    if (!state.socket || !state.socket.connected) {
        console.warn('Socket not connected');
        return;
    }

    console.log(`Preparing audio chunk: length=${audioData.length}, speaker=${speaker}`);

    // Check for NaN or invalid values
    let hasInvalidValues = false;
    for (let i = 0; i < audioData.length; i++) {
        if (isNaN(audioData[i]) || !isFinite(audioData[i])) {
            hasInvalidValues = true;
            console.warn(`Invalid audio value at index ${i}: ${audioData[i]}`);
            break;
        }
    }

    if (hasInvalidValues) {
        console.warn('Audio data contains invalid values. Creating silent audio.');
        audioData = new Float32Array(audioData.length).fill(0);
    }

    try {
        // Create WAV blob
        const wavData = createWavBlob(audioData, 24000);
        console.log(`WAV blob created: ${wavData.size} bytes`);

        const reader = new FileReader();

        reader.onloadend = function() {
            try {
                // Get base64 data
                const base64data = reader.result;
                console.log(`Base64 data created: ${base64data.length} bytes`);

                // Send to server
                state.socket.emit('stream_audio', {
                    audio: base64data,
                    speaker: speaker
                });
                console.log('Audio chunk sent to server');
            } catch (err) {
                console.error('Error preparing audio data:', err);
            }
        };

        reader.onerror = function() {
            console.error('Error reading audio data as base64');
        };

        reader.readAsDataURL(wavData);
    } catch (err) {
        console.error('Error creating WAV data:', err);
    }
}
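// Note (illustrative, not from this file): FileReader.readAsDataURL yields a
// full data URL, i.e. "data:audio/wav;base64,....", not bare base64, so whatever
// consumes the 'stream_audio' payload has to split off the prefix first.
// The literal below is just an example value.
const exampleDataUrl = 'data:audio/wav;base64,UklGRiQAAABXQVZF';
const exampleBase64 = exampleDataUrl.split(',')[1]; // -> "UklGRiQAAABXQVZF"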
// Create WAV blob from audio data with improved error handling
function createWavBlob(audioData, sampleRate) {
    // Validate input
    if (!audioData || audioData.length === 0) {
        console.warn('Empty audio data provided to createWavBlob');
        audioData = new Float32Array(1024).fill(0); // Create 1024 samples of silence
    }

    // Convert Float32 samples to 16-bit PCM and write them into the view
    function floatTo16BitPCM(output, offset, input) {
        for (let i = 0; i < input.length; i++, offset += 2) {
            // Ensure values are in the -1 to 1 range
            const s = Math.max(-1, Math.min(1, input[i]));
            // Convert to 16-bit PCM
            output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
        }
    }

    // Write an ASCII string into the WAV header
    function writeString(view, offset, string) {
        for (let i = 0; i < string.length; i++) {
            view.setUint8(offset + i, string.charCodeAt(i));
        }
    }

    try {
        // Create WAV file with header - careful with buffer sizes
        const buffer = new ArrayBuffer(44 + audioData.length * 2);
        const view = new DataView(buffer);

        // RIFF identifier
        writeString(view, 0, 'RIFF');
        // File length
        view.setUint32(4, 36 + audioData.length * 2, true);
        // WAVE identifier
        writeString(view, 8, 'WAVE');
        // fmt chunk identifier
        writeString(view, 12, 'fmt ');
        // fmt chunk length
        view.setUint32(16, 16, true);
        // Sample format (1 is PCM)
        view.setUint16(20, 1, true);
        // Mono channel
        view.setUint16(22, 1, true);
        // Sample rate
        view.setUint32(24, sampleRate, true);
        // Byte rate (sample rate * block align)
        view.setUint32(28, sampleRate * 2, true);
        // Block align (channels * bytes per sample)
        view.setUint16(32, 2, true);
        // Bits per sample
        view.setUint16(34, 16, true);
        // data chunk identifier
        writeString(view, 36, 'data');
        // data chunk length
        view.setUint32(40, audioData.length * 2, true);
        // Write the PCM samples
        floatTo16BitPCM(view, 44, audioData);

        // Create and return blob
        return new Blob([view], { type: 'audio/wav' });
    } catch (err) {
        console.error('Error in createWavBlob:', err);

        // Create a minimal valid WAV file with silence as fallback
        const fallbackSamples = new Float32Array(1024).fill(0);
        const fallbackBuffer = new ArrayBuffer(44 + fallbackSamples.length * 2);
        const fallbackView = new DataView(fallbackBuffer);

        writeString(fallbackView, 0, 'RIFF');
        fallbackView.setUint32(4, 36 + fallbackSamples.length * 2, true);
        writeString(fallbackView, 8, 'WAVE');
        writeString(fallbackView, 12, 'fmt ');
        fallbackView.setUint32(16, 16, true);
        fallbackView.setUint16(20, 1, true);
        fallbackView.setUint16(22, 1, true);
        fallbackView.setUint32(24, sampleRate, true);
        fallbackView.setUint32(28, sampleRate * 2, true);
        fallbackView.setUint16(32, 2, true);
        fallbackView.setUint16(34, 16, true);
        writeString(fallbackView, 36, 'data');
        fallbackView.setUint32(40, fallbackSamples.length * 2, true);
        floatTo16BitPCM(fallbackView, 44, fallbackSamples);

        return new Blob([fallbackView], { type: 'audio/wav' });
    }
}
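// Illustrative sanity check (not from this file) for the header produced above:
// read the fields back and confirm the layout matches 16-bit mono PCM at the
// requested rate. The function name and usage are assumptions for the example.
async function exampleInspectWavHeader(blob) {
    const view = new DataView(await blob.arrayBuffer());
    console.log({
        riff: String.fromCharCode(view.getUint8(0), view.getUint8(1), view.getUint8(2), view.getUint8(3)), // "RIFF"
        channels: view.getUint16(22, true),      // 1 (mono)
        sampleRate: view.getUint32(24, true),    // e.g. 24000
        bitsPerSample: view.getUint16(34, true), // 16
        dataBytes: view.getUint32(40, true)      // sample count * 2
    });
}
// exampleInspectWavHeader(createWavBlob(new Float32Array(1024), 24000));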
// Draw audio visualizer
function drawVisualizer() {
    if (!canvasContext) {
        return;
    }

    state.visualizerAnimationFrame = requestAnimationFrame(drawVisualizer);

    // Skip drawing if visualizer is hidden
    if (!elements.showVisualizer.checked) {
        if (elements.visualizerCanvas.style.opacity !== '0') {
            elements.visualizerCanvas.style.opacity = '0';
        }
        return;
    } else if (elements.visualizerCanvas.style.opacity !== '1') {
        elements.visualizerCanvas.style.opacity = '1';
    }

    // Get frequency data if available
    if (state.isStreaming && state.analyser) {
        try {
            state.analyser.getByteFrequencyData(visualizerDataArray);
        } catch (e) {
            console.warn('Error getting frequency data:', e);
        }
    } else {
        // Fade out when not streaming
        for (let i = 0; i < visualizerDataArray.length; i++) {
            visualizerDataArray[i] = Math.max(0, visualizerDataArray[i] - 5);
        }
    }

    // Clear canvas
    canvasContext.fillStyle = 'rgb(0, 0, 0)';
    canvasContext.fillRect(0, 0, elements.visualizerCanvas.width, elements.visualizerCanvas.height);

    // Draw gradient bars
    const width = elements.visualizerCanvas.width;
    const height = elements.visualizerCanvas.height;
    const barCount = Math.min(visualizerBufferLength, 64);
    const barWidth = width / barCount - 1;

    for (let i = 0; i < barCount; i++) {
        const index = Math.floor(i * visualizerBufferLength / barCount);
        const value = visualizerDataArray[index];

        // Use a logarithmic scale for better audio visualization;
        // this keeps low values visible while still maintaining the full range.
        const logFactor = 20;
        const scaledValue = Math.log(1 + (value / 255) * logFactor) / Math.log(1 + logFactor);
        const barHeight = scaledValue * height;

        // Position bars
        const x = i * (barWidth + 1);
        const y = height - barHeight;

        // Create color gradient based on frequency and amplitude
        const hue = i / barCount * 360; // Full color spectrum
        const saturation = 80 + (value / 255 * 20); // Higher values more saturated
        const lightness = 40 + (value / 255 * 20); // Dynamic brightness based on amplitude

        // Draw main bar
        canvasContext.fillStyle = `hsl(${hue}, ${saturation}%, ${lightness}%)`;
        canvasContext.fillRect(x, y, barWidth, barHeight);

        // Add reflection effect
        if (barHeight > 5) {
            const gradient = canvasContext.createLinearGradient(
                x, y,
                x, y + barHeight * 0.5
            );
            gradient.addColorStop(0, `hsla(${hue}, ${saturation}%, ${lightness + 20}%, 0.4)`);
            gradient.addColorStop(1, `hsla(${hue}, ${saturation}%, ${lightness}%, 0)`);
            canvasContext.fillStyle = gradient;
            canvasContext.fillRect(x, y, barWidth, barHeight * 0.5);

            // Add highlight on top of the bar for better 3D effect
            canvasContext.fillStyle = `hsla(${hue}, ${saturation - 20}%, ${lightness + 30}%, 0.7)`;
            canvasContext.fillRect(x, y, barWidth, 2);
        }
    }

    // Show/hide the label
    elements.visualizerLabel.style.opacity = (state.isStreaming) ? '0' : '0.7';
}
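// Illustrative check (not from this file) of the log scaling used in
// drawVisualizer: compared to a linear map, quiet frequency bins stay visible.
const exampleScale = v => Math.log(1 + (v / 255) * 20) / Math.log(1 + 20);
// exampleScale(255) ~= 1.00, exampleScale(64) ~= 0.59, exampleScale(8) ~= 0.16,
// whereas a linear v / 255 would give 1.00, 0.25 and 0.03.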
// Toggle visualizer visibility
function toggleVisualizerVisibility() {
    const isVisible = elements.showVisualizer.checked;
    elements.visualizerCanvas.style.opacity = isVisible ? '1' : '0';

    if (isVisible && state.isStreaming && !state.visualizerAnimationFrame) {
        drawVisualizer();
    }
}
// Handle audio response from server
function handleAudioResponse(data) {
    console.log('Received audio response');

    // Create message container
    const messageElement = document.createElement('div');
    messageElement.className = 'message ai';

    // Add text content if available
    if (data.text) {
        const textElement = document.createElement('p');
        textElement.textContent = data.text;
        messageElement.appendChild(textElement);
    }

    // Create and configure audio element
    const audioElement = document.createElement('audio');
    audioElement.controls = true;
    audioElement.className = 'audio-player';

    // Set audio source
    const audioSource = document.createElement('source');
    audioSource.src = data.audio;
    audioSource.type = 'audio/wav';

    // Add fallback text
    audioElement.textContent = 'Your browser does not support the audio element.';

    // Assemble audio element
    audioElement.appendChild(audioSource);
    messageElement.appendChild(audioElement);

    // Add timestamp
    const timeElement = document.createElement('span');
    timeElement.className = 'message-time';
    timeElement.textContent = new Date().toLocaleTimeString();
    messageElement.appendChild(timeElement);

    // Add to conversation
    elements.conversation.appendChild(messageElement);

    // Auto-scroll to bottom
    elements.conversation.scrollTop = elements.conversation.scrollHeight;

    // Auto-play if enabled
    if (elements.autoPlayResponses.checked) {
        audioElement.play()
            .catch(err => {
                console.warn('Auto-play failed:', err);
                addSystemMessage('Auto-play failed. Please click play to hear the response.');
            });
    }

    // Re-enable stream button after processing is complete
    if (state.isStreaming) {
        elements.streamButton.innerHTML = '<i class="fas fa-microphone"></i> Listening...';
        elements.streamButton.classList.add('recording');
        elements.streamButton.classList.remove('processing');
    }
}

// Handle transcription response from server
function handleTranscription(data) {
    console.log('Received transcription:', data.text);

    // Create message element
    const messageElement = document.createElement('div');
    messageElement.className = 'message user';

    // Add text content
    const textElement = document.createElement('p');
    textElement.textContent = data.text;
    messageElement.appendChild(textElement);

    // Add timestamp
    const timeElement = document.createElement('span');
    timeElement.className = 'message-time';
    timeElement.textContent = new Date().toLocaleTimeString();
    messageElement.appendChild(timeElement);

    // Add to conversation
    elements.conversation.appendChild(messageElement);

    // Auto-scroll to bottom
    elements.conversation.scrollTop = elements.conversation.scrollHeight;
}

// Handle context update from server
function handleContextUpdate(data) {
    console.log('Context updated:', data.message);
}

// Handle streaming status updates from server
function handleStreamingStatus(data) {
    console.log('Streaming status:', data.status);

    if (data.status === 'stopped') {
        // Reset UI if needed
        if (state.isStreaming) {
            stopStreaming(false); // Don't send to server since this came from the server
        }
    }
}

// Add a system message to the conversation
function addSystemMessage(message) {
    const messageElement = document.createElement('div');
    messageElement.className = 'message system';
    messageElement.textContent = message;
    elements.conversation.appendChild(messageElement);

    // Auto-scroll to bottom
    elements.conversation.scrollTop = elements.conversation.scrollHeight;
}
// Downsample audio buffer to target sample rate
function downsampleBuffer(buffer, originalSampleRate, targetSampleRate) {
    if (originalSampleRate === targetSampleRate) {
        return buffer;
    }

    const ratio = originalSampleRate / targetSampleRate;
    const newLength = Math.round(buffer.length / ratio);
    const result = new Float32Array(newLength);

    for (let i = 0; i < newLength; i++) {
        const pos = Math.round(i * ratio);
        result[i] = buffer[pos];
    }

    return result;
}
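// The picker above takes the nearest sample, which can alias and, for some
// length/ratio combinations, round past the end of the buffer. Below is a
// sketch of an alternative that clamps the index and averages each window --
// shown for illustration only, it is not what this file uses.
function downsampleByAveraging(buffer, originalSampleRate, targetSampleRate) {
    if (originalSampleRate === targetSampleRate) return buffer;
    const ratio = originalSampleRate / targetSampleRate;
    const newLength = Math.floor(buffer.length / ratio);
    const result = new Float32Array(newLength);
    for (let i = 0; i < newLength; i++) {
        const start = Math.floor(i * ratio);
        const end = Math.min(buffer.length, Math.floor((i + 1) * ratio));
        let sum = 0;
        for (let j = start; j < end; j++) sum += buffer[j];
        result[i] = sum / Math.max(1, end - start);
    }
    return result;
}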
// Initialize the application when DOM is fully loaded
document.addEventListener('DOMContentLoaded', initializeApp);
@@ -5,6 +5,7 @@
    "name": "my-app",
    "dependencies": {
      "@auth0/nextjs-auth0": "^4.3.0",
+     "mongoose": "^8.13.1",
      "next": "15.2.4",
      "react": "^19.1.0",
      "react-dom": "^19.1.0",
@@ -68,6 +69,8 @@
    "@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.33.5", "", { "os": "win32", "cpu": "x64" }, "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg=="],

+   "@mongodb-js/saslprep": ["@mongodb-js/saslprep@1.2.0", "", { "dependencies": { "sparse-bitfield": "^3.0.3" } }, "sha512-+ywrb0AqkfaYuhHs6LxKWgqbh3I72EpEgESCw37o+9qPx9WTCkgDm2B+eMrwehGtHBWHFU4GXvnSCNiFhhausg=="],
+
    "@next/env": ["@next/env@15.2.4", "", {}, "sha512-+SFtMgoiYP3WoSswuNmxJOCwi06TdWE733D+WPjpXIe4LXGULwEaofiiAy6kbS0+XjM5xF5n3lKuBwN2SnqD9g=="],

    "@next/swc-darwin-arm64": ["@next/swc-darwin-arm64@15.2.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-1AnMfs655ipJEDC/FHkSr0r3lXBgpqKo4K1kiwfUf3iE68rDFXZ1TtHdMvf7D0hMItgDZ7Vuq3JgNMbt/+3bYw=="],
@@ -130,6 +133,10 @@
    "@types/react-dom": ["@types/react-dom@19.0.4", "", { "peerDependencies": { "@types/react": "^19.0.0" } }, "sha512-4fSQ8vWFkg+TGhePfUzVmat3eC14TXYSsiiDSLI0dVLsrm9gZFABjPy/Qu6TKgl1tq1Bu1yDsuQgY3A3DOjCcg=="],

+   "@types/webidl-conversions": ["@types/webidl-conversions@7.0.3", "", {}, "sha512-CiJJvcRtIgzadHCYXw7dqEnMNRjhGZlYK05Mj9OyktqV8uVT8fD2BFOB7S1uwBE3Kj2Z+4UyPmFw/Ixgw/LAlA=="],
+
+   "@types/whatwg-url": ["@types/whatwg-url@11.0.5", "", { "dependencies": { "@types/webidl-conversions": "*" } }, "sha512-coYR071JRaHa+xoEvvYqvnIHaVqaYrLPbsufM9BF63HkwI5Lgmy2QR8Q5K/lYDYo5AK82wOvSOS0UsLTpTG7uQ=="],
+
    "@zag-js/accordion": ["@zag-js/accordion@1.7.0", "", { "dependencies": { "@zag-js/anatomy": "1.7.0", "@zag-js/core": "1.7.0", "@zag-js/dom-query": "1.7.0", "@zag-js/types": "1.7.0", "@zag-js/utils": "1.7.0" } }, "sha512-LNJOjLTW2KwrToXBrXIbNIAiISA94n0AdWp14H8RrskdokywmEGiC0GgWTGEJ7DNA6TGP6Ae5o9rJ4fHSmCsDQ=="],

    "@zag-js/anatomy": ["@zag-js/anatomy@1.7.0", "", {}, "sha512-fkRgH6vPCwykmRdV38uAJeTtJc8tayAnURfoovHAtB9bK0goagPbpdcYTNyGn8msul0h+KBloOtnw4obvX0nPw=="],
@@ -182,6 +189,8 @@
    "@zag-js/utils": ["@zag-js/utils@1.7.0", "", {}, "sha512-yIxvH5V27a1WuLgCxHX7qpdtFo8vTJaZLafBpSNfVYG4B8FaxTE+P7JAcpmAzs3UyXura/WfAY2eVWWVBpk9ZA=="],

+   "bson": ["bson@6.10.3", "", {}, "sha512-MTxGsqgYTwfshYWTRdmZRC+M7FnG1b4y7RO7p2k3X24Wq0yv1m77Wsj0BzlPzd/IowgESfsruQCUToa7vbOpPQ=="],
+
    "busboy": ["busboy@1.6.0", "", { "dependencies": { "streamsearch": "^1.1.0" } }, "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA=="],

    "caniuse-lite": ["caniuse-lite@1.0.30001707", "", {}, "sha512-3qtRjw/HQSMlDWf+X79N206fepf4SOOU6SQLMaq/0KkZLmSjPxAkBOQQ+FxbHKfHmYLZFfdWsO3KA90ceHPSnw=="],
@@ -198,6 +207,8 @@
    "csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="],

+   "debug": ["debug@4.4.0", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA=="],
+
    "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="],

    "detect-libc": ["detect-libc@2.0.3", "", {}, "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw=="],
@@ -212,6 +223,8 @@
    "jose": ["jose@5.10.0", "", {}, "sha512-s+3Al/p9g32Iq+oqXxkW//7jk2Vig6FF1CFqzVXoTUXt2qz89YWbL+OwS17NFYEvxC35n0FKeGO2LGYSxeM2Gg=="],

+   "kareem": ["kareem@2.6.3", "", {}, "sha512-C3iHfuGUXK2u8/ipq9LfjFfXFxAZMQJJq7vLS45r3D9Y2xQ/m4S8zaR4zMLFWh9AsNPXmcFfUDhTEO8UIC/V6Q=="],
+
    "lightningcss": ["lightningcss@1.29.2", "", { "dependencies": { "detect-libc": "^2.0.3" }, "optionalDependencies": { "lightningcss-darwin-arm64": "1.29.2", "lightningcss-darwin-x64": "1.29.2", "lightningcss-freebsd-x64": "1.29.2", "lightningcss-linux-arm-gnueabihf": "1.29.2", "lightningcss-linux-arm64-gnu": "1.29.2", "lightningcss-linux-arm64-musl": "1.29.2", "lightningcss-linux-x64-gnu": "1.29.2", "lightningcss-linux-x64-musl": "1.29.2", "lightningcss-win32-arm64-msvc": "1.29.2", "lightningcss-win32-x64-msvc": "1.29.2" } }, "sha512-6b6gd/RUXKaw5keVdSEtqFVdzWnU5jMxTUjA2bVcMNPLwSQ08Sv/UodBVtETLCn7k4S1Ibxwh7k68IwLZPgKaA=="],

    "lightningcss-darwin-arm64": ["lightningcss-darwin-arm64@1.29.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-cK/eMabSViKn/PG8U/a7aCorpeKLMlK0bQeNHmdb7qUnBkNPnL+oV5DjJUo0kqWsJUapZsM4jCfYItbqBDvlcA=="],
@@ -234,6 +247,20 @@
    "lightningcss-win32-x64-msvc": ["lightningcss-win32-x64-msvc@1.29.2", "", { "os": "win32", "cpu": "x64" }, "sha512-EdIUW3B2vLuHmv7urfzMI/h2fmlnOQBk1xlsDxkN1tCWKjNFjfLhGxYk8C8mzpSfr+A6jFFIi8fU6LbQGsRWjA=="],

+   "memory-pager": ["memory-pager@1.5.0", "", {}, "sha512-ZS4Bp4r/Zoeq6+NLJpP+0Zzm0pR8whtGPf1XExKLJBAczGMnSi3It14OiNCStjQjM6NU1okjQGSxgEZN8eBYKg=="],
+
+   "mongodb": ["mongodb@6.15.0", "", { "dependencies": { "@mongodb-js/saslprep": "^1.1.9", "bson": "^6.10.3", "mongodb-connection-string-url": "^3.0.0" }, "peerDependencies": { "@aws-sdk/credential-providers": "^3.188.0", "@mongodb-js/zstd": "^1.1.0 || ^2.0.0", "gcp-metadata": "^5.2.0", "kerberos": "^2.0.1", "mongodb-client-encryption": ">=6.0.0 <7", "snappy": "^7.2.2", "socks": "^2.7.1" }, "optionalPeers": ["@aws-sdk/credential-providers", "@mongodb-js/zstd", "gcp-metadata", "kerberos", "mongodb-client-encryption", "snappy", "socks"] }, "sha512-ifBhQ0rRzHDzqp9jAQP6OwHSH7dbYIQjD3SbJs9YYk9AikKEettW/9s/tbSFDTpXcRbF+u1aLrhHxDFaYtZpFQ=="],
+
+   "mongodb-connection-string-url": ["mongodb-connection-string-url@3.0.2", "", { "dependencies": { "@types/whatwg-url": "^11.0.2", "whatwg-url": "^14.1.0 || ^13.0.0" } }, "sha512-rMO7CGo/9BFwyZABcKAWL8UJwH/Kc2x0g72uhDWzG48URRax5TCIcJ7Rc3RZqffZzO/Gwff/jyKwCU9TN8gehA=="],
+
+   "mongoose": ["mongoose@8.13.1", "", { "dependencies": { "bson": "^6.10.3", "kareem": "2.6.3", "mongodb": "~6.15.0", "mpath": "0.9.0", "mquery": "5.0.0", "ms": "2.1.3", "sift": "17.1.3" } }, "sha512-sRqlXI+6jhr9/KicCOjet1VVPONFsOxTrh14tfueX5y3GJ2ihswc5ewUUojuwdSS/5koGXLIPmGivDSApVXflA=="],
+
+   "mpath": ["mpath@0.9.0", "", {}, "sha512-ikJRQTk8hw5DEoFVxHG1Gn9T/xcjtdnOKIU1JTmGjZZlg9LST2mBLmcX3/ICIbgJydT2GOc15RnNy5mHmzfSew=="],
+
+   "mquery": ["mquery@5.0.0", "", { "dependencies": { "debug": "4.x" } }, "sha512-iQMncpmEK8R8ncT8HJGsGc9Dsp8xcgYMVSbs5jgnm1lFHTZqMJTUWTDx1LBO8+mK3tPNZWFLBghQEIOULSTHZg=="],
+
+   "ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
+
    "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="],

    "next": ["next@15.2.4", "", { "dependencies": { "@next/env": "15.2.4", "@swc/counter": "0.1.3", "@swc/helpers": "0.5.15", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", "postcss": "8.4.31", "styled-jsx": "5.1.6" }, "optionalDependencies": { "@next/swc-darwin-arm64": "15.2.4", "@next/swc-darwin-x64": "15.2.4", "@next/swc-linux-arm64-gnu": "15.2.4", "@next/swc-linux-arm64-musl": "15.2.4", "@next/swc-linux-x64-gnu": "15.2.4", "@next/swc-linux-x64-musl": "15.2.4", "@next/swc-win32-arm64-msvc": "15.2.4", "@next/swc-win32-x64-msvc": "15.2.4", "sharp": "^0.33.5" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", "@playwright/test": "^1.41.2", "babel-plugin-react-compiler": "*", "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "sass": "^1.3.0" }, "optionalPeers": ["@opentelemetry/api", "@playwright/test", "babel-plugin-react-compiler", "sass"], "bin": { "next": "dist/bin/next" } }, "sha512-VwL+LAaPSxEkd3lU2xWbgEOtrM8oedmyhBqaVNmgKB+GvZlCy9rgaEc+y2on0wv+l0oSFqLtYD6dcC1eAedUaQ=="],
@@ -246,6 +273,8 @@
    "proxy-compare": ["proxy-compare@3.0.1", "", {}, "sha512-V9plBAt3qjMlS1+nC8771KNf6oJ12gExvaxnNzN/9yVRLdTv/lc+oJlnSzrdYDAvBfTStPCoiaCOTmTs0adv7Q=="],

+   "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="],
+
    "react": ["react@19.1.0", "", {}, "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg=="],

    "react-dom": ["react-dom@19.1.0", "", { "dependencies": { "scheduler": "^0.26.0" }, "peerDependencies": { "react": "^19.1.0" } }, "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g=="],
@@ -256,10 +285,14 @@
    "sharp": ["sharp@0.33.5", "", { "dependencies": { "color": "^4.2.3", "detect-libc": "^2.0.3", "semver": "^7.6.3" }, "optionalDependencies": { "@img/sharp-darwin-arm64": "0.33.5", "@img/sharp-darwin-x64": "0.33.5", "@img/sharp-libvips-darwin-arm64": "1.0.4", "@img/sharp-libvips-darwin-x64": "1.0.4", "@img/sharp-libvips-linux-arm": "1.0.5", "@img/sharp-libvips-linux-arm64": "1.0.4", "@img/sharp-libvips-linux-s390x": "1.0.4", "@img/sharp-libvips-linux-x64": "1.0.4", "@img/sharp-libvips-linuxmusl-arm64": "1.0.4", "@img/sharp-libvips-linuxmusl-x64": "1.0.4", "@img/sharp-linux-arm": "0.33.5", "@img/sharp-linux-arm64": "0.33.5", "@img/sharp-linux-s390x": "0.33.5", "@img/sharp-linux-x64": "0.33.5", "@img/sharp-linuxmusl-arm64": "0.33.5", "@img/sharp-linuxmusl-x64": "0.33.5", "@img/sharp-wasm32": "0.33.5", "@img/sharp-win32-ia32": "0.33.5", "@img/sharp-win32-x64": "0.33.5" } }, "sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw=="],

+   "sift": ["sift@17.1.3", "", {}, "sha512-Rtlj66/b0ICeFzYTuNvX/EF1igRbbnGSvEyT79McoZa/DeGhMyC5pWKOEsZKnpkqtSeovd5FL/bjHWC3CIIvCQ=="],
+
    "simple-swizzle": ["simple-swizzle@0.2.2", "", { "dependencies": { "is-arrayish": "^0.3.1" } }, "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg=="],

    "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="],

+   "sparse-bitfield": ["sparse-bitfield@3.0.3", "", { "dependencies": { "memory-pager": "^1.0.2" } }, "sha512-kvzhi7vqKTfkh0PZU+2D2PIllw2ymqJKujUcyPMd9Y75Nv4nPbGJZXNhxsgdQab2BmlDct1YnfQCguEvHr7VsQ=="],
+
    "streamsearch": ["streamsearch@1.1.0", "", {}, "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg=="],

    "styled-jsx": ["styled-jsx@5.1.6", "", { "dependencies": { "client-only": "0.0.1" }, "peerDependencies": { "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" } }, "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA=="],
@@ -270,6 +303,8 @@
    "tapable": ["tapable@2.2.1", "", {}, "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ=="],

+   "tr46": ["tr46@5.1.0", "", { "dependencies": { "punycode": "^2.3.1" } }, "sha512-IUWnUK7ADYR5Sl1fZlO1INDUhVhatWl7BtJWsIhwJ0UAK7ilzzIa8uIqOO/aYVWHZPJkKbEL+362wrzoeRF7bw=="],
+
    "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],

    "typescript": ["typescript@5.8.2", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ=="],
@@ -278,6 +313,10 @@
    "use-sync-external-store": ["use-sync-external-store@1.5.0", "", { "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-Rb46I4cGGVBmjamjphe8L/UnvJD+uPPtTkNvX5mZgqdbavhI4EbgIWJiIHXJ8bc/i9EQGPRh4DwEURJ552Do0A=="],

+   "webidl-conversions": ["webidl-conversions@7.0.0", "", {}, "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g=="],
+
+   "whatwg-url": ["whatwg-url@14.2.0", "", { "dependencies": { "tr46": "^5.1.0", "webidl-conversions": "^7.0.0" } }, "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw=="],
+
    "next/postcss": ["postcss@8.4.31", "", { "dependencies": { "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } }, "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ=="],
  }
}
@@ -10,6 +10,7 @@
  },
  "dependencies": {
    "@auth0/nextjs-auth0": "^4.3.0",
+   "mongoose": "^8.13.1",
    "next": "15.2.4",
    "react": "^19.1.0",
    "react-dom": "^19.1.0"
@@ -40,14 +40,29 @@ export default async function Home() {
            type="submit">Set codeword</button>
        </form>
        {/* form for adding contacts */}
-       <form className="flex flex-col gap-[32px] row-start-2 items-center sm:items-start" onSubmit={(e) => e.preventDefault()}>
+       <form className="space-y-5 flex flex-col gap-[32px] row-start-2 items-center sm:items-start" onSubmit={(e) => e.preventDefault()}>
          <input
            type="text"
            value={contacts}
            onChange={(e) => setContacts(e.target.value.split(","))}
-           placeholder="contacts (comma separated)"
+           placeholder="Write down an emergency contact"
            className="border border-gray-300 rounded-md p-2"
          />
+         <input
+           type="text"
+           value={contacts}
+           onChange={(e) => setContacts(e.target.value.split(","))}
+           placeholder="Write down an emergency contact"
+           className="border border-gray-300 rounded-md p-2"
+         />
+         <input
+           type="text"
+           value={contacts}
+           onChange={(e) => setContacts(e.target.value.split(","))}
+           placeholder="Write down an emergency contact"
+           className="border border-gray-300 rounded-md p-2"
+         />
+         <button type="button">Add</button>
          <button className="bg-slate-500 text-yellow-300 text-stretch-50% font-lg rounded-md p-2" type="submit">Set contacts</button>
        </form>
      </div>
@@ -76,14 +91,45 @@ export default async function Home() {
            type="submit">Set codeword</button>
        </form>
        {/* form for adding contacts */}
-       <form className="flex flex-col gap-[32px] row-start-2 items-center sm:items-start" onSubmit={(e) => e.preventDefault()}>
+       <form id="Contacts" className="space-y-5 flex flex-col gap-[32px] row-start-2 items-center sm:items-start" onSubmit={(e) => e.preventDefault()}>
          <input
            type="text"
            value={contacts}
            onChange={(e) => setContacts(e.target.value.split(","))}
-           placeholder="contacts (comma separated)"
+           placeholder="Write down an emergency contact"
            className="border border-gray-300 rounded-md p-2"
          />
+         <input
+           type="text"
+           value={contacts}
+           onChange={(e) => setContacts(e.target.value.split(","))}
+           placeholder="Write down an emergency contact"
+           className="border border-gray-300 rounded-md p-2"
+         />
+         <input
+           type="text"
+           value={contacts}
+           onChange={(e) => setContacts(e.target.value.split(","))}
+           placeholder="Write down an emergency contact"
+           className="border border-gray-300 rounded-md p-2"
+         />
+         <input
+           type="text"
+           value={contacts}
+           onChange={(e) => setContacts(e.target.value.split(","))}
+           placeholder="Write down an emergency contact"
+           className="text-input border border-gray-300 rounded-md p-2"
+         />
+         <button onClick={() => {
+           alert("Adding contact...");
+           let elem = document.getElementsByClassName("text-input")[0] as HTMLElement;
+           console.log("Element:", elem);
+           let d = elem.cloneNode(true) as HTMLElement;
+           document.getElementById("Contacts")?.appendChild(d);
+         }}
+           className="bg-emerald-500 text-fuchsia-300"
+           type="button">Add</button>
+
          <button className="bg-slate-500 text-yellow-300 text-stretch-50% font-lg rounded-md p-2" type="submit">Set contacts</button>
        </form>