Restore code and save recent updates

2026-01-25 03:31:01 +00:00
parent bae861c71f
commit 5ce0b4d278
54 changed files with 2963 additions and 2899 deletions

View File

@@ -1,24 +1,24 @@
# Use a lightweight Python image
FROM python:3.9-slim

# Set working directory inside the container
WORKDIR /app

# Copy requirements first (for better caching)
COPY requirements.txt .

# Install dependencies
# 'gunicorn' must be in your requirements.txt or installed here
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install gunicorn

# Copy the rest of the application
COPY . .

# Expose the internal port (Gunicorn's own default is 8000; we bind to 5000 below)
EXPOSE 5000

# Command to run the production server
# --workers 4: 4 worker processes
# --bind 0.0.0.0:5000: bind to all interfaces inside the container on port 5000
CMD ["gunicorn", "--workers", "4", "--bind", "0.0.0.0:5000", "app:app"]

View File

@@ -1,7 +1,7 @@
import os
from src import create_app

app = create_app()

if __name__ == "__main__":
    app.run(debug=True, port=5000, host="0.0.0.0")

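A minimal sketch of exercising the app factory above without starting a server, using Flask's built-in test client; it assumes the src package imports cleanly, and the "/" route on main_bp is a guess for illustration.

# Sketch: exercise create_app() through Flask's test client (no server needed).
# Assumes the src package imports cleanly; the "/" route is hypothetical.
from src import create_app

app = create_app()
with app.test_client() as client:
    response = client.get("/")  # hypothetical index route on main_bp
    print(response.status_code, response.data[:80])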
View File

@@ -1,7 +1,7 @@
flask
gunicorn
ultralytics
-opencv-python
+opencv-python-headless
transformers
torch
pandas

View File

@@ -3,60 +3,60 @@ import sys
import argparse
from pathlib import Path

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from dotenv import load_dotenv
load_dotenv()

from src.rag.ingest import process_file
from src.rag.store import ingest_documents
from src.mongo.metadata import is_file_processed, log_processed_file


def populate_from_dataset(dataset_dir, category=None):
    dataset_path = Path(dataset_dir)
    if not dataset_path.exists():
        print(f"Dataset directory not found: {dataset_dir}")
        return
    print(f"Scanning {dataset_dir}...")
    if category:
        print(f"Category: {category}")
    total_chunks = 0
    files_processed = 0
    for file_path in dataset_path.glob('*'):
        if file_path.is_file() and file_path.suffix.lower() in ['.csv', '.pdf', '.txt', '.xlsx']:
            if is_file_processed(file_path.name):
                print(f"Skipping {file_path.name} (already processed)")
                continue
            print(f"Processing {file_path.name}...")
            try:
                chunks = process_file(str(file_path))
                if chunks:
                    count = ingest_documents(chunks, source_file=file_path.name, category=category)
                    print(f" Ingested {count} chunks.")
                    if count > 0:
                        log_processed_file(file_path.name, category=category, chunk_count=count)
                        total_chunks += count
                        files_processed += 1
                else:
                    print(" No text found/extracted.")
            except Exception as e:
                print(f" Error processing file: {e}")
    print(f"\nFinished! Processed {files_processed} files. Total chunks ingested: {total_chunks}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Populate vector database from dataset files")
    parser.add_argument("--category", "-c", type=str, help="Category to assign to ingested documents")
    parser.add_argument("--dir", "-d", type=str, default=None, help="Dataset directory path")
    args = parser.parse_args()
    if args.dir:
        dataset_dir = args.dir
    else:
        dataset_dir = os.path.join(os.path.dirname(__file__), '../dataset')
    populate_from_dataset(dataset_dir, category=args.category)

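A sketch of driving the ingestion helper directly instead of via the CLI; the module name, dataset path, and category below are placeholders, since the script's file path is not shown in this diff.

# Sketch: call the populate helper directly.
# 'populate' as the module name, the path, and the category are assumptions.
from populate import populate_from_dataset

populate_from_dataset("./dataset", category="sustainability_reports")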
View File

@@ -1,20 +1,20 @@
from flask import Flask
from flask_cors import CORS

from .routes.main import main_bp
from .routes.rag import rag_bp
from .routes.gemini import gemini_bp


def create_app():
    app = Flask(__name__)
    CORS(app)
    app.register_blueprint(main_bp)
    app.register_blueprint(rag_bp, url_prefix='/api/rag')
    app.register_blueprint(gemini_bp, url_prefix='/api/gemini')
    from .routes.reports import reports_bp
    app.register_blueprint(reports_bp, url_prefix='/api/reports')
    from .routes.incidents import incidents_bp
    app.register_blueprint(incidents_bp, url_prefix='/api/incidents')
    return app

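A small sketch that builds the app and lists the routes registered by the blueprints above, using Flask's url_map; it only assumes the routes packages import without side effects.

# Sketch: build the app and print the registered blueprint routes.
from src import create_app

app = create_app()
for rule in app.url_map.iter_rules():
    print(sorted(rule.methods), rule.rule)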
View File

@@ -1,80 +1,80 @@
import chromadb

CHROMA_HOST = "http://chroma.sirblob.co"
COLLECTION_NAME = "rag_documents"

_client = None


def get_chroma_client():
    global _client
    if _client is None:
        _client = chromadb.HttpClient(host=CHROMA_HOST)
    return _client


def get_collection(collection_name=COLLECTION_NAME):
    client = get_chroma_client()
    return client.get_or_create_collection(name=collection_name)


def insert_documents(texts, embeddings, collection_name=COLLECTION_NAME, metadata_list=None):
    collection = get_collection(collection_name)
    ids = [f"doc_{i}_{hash(text)}" for i, text in enumerate(texts)]
    if metadata_list:
        collection.add(
            ids=ids,
            embeddings=embeddings,
            documents=texts,
            metadatas=metadata_list
        )
    else:
        collection.add(
            ids=ids,
            embeddings=embeddings,
            documents=texts
        )
    return len(texts)


def search_documents(query_embedding, collection_name=COLLECTION_NAME, num_results=5, filter_metadata=None):
    collection = get_collection(collection_name)
    query_params = {
        "query_embeddings": [query_embedding],
        "n_results": num_results
    }
    if filter_metadata:
        query_params["where"] = filter_metadata
    results = collection.query(**query_params)
    output = []
    if results and results["documents"]:
        for i, doc in enumerate(results["documents"][0]):
            score = results["distances"][0][i] if "distances" in results else None
            meta = results["metadatas"][0][i] if "metadatas" in results else {}
            output.append({
                "text": doc,
                "score": score,
                "metadata": meta
            })
    return output


def delete_documents_by_source(source_file, collection_name=COLLECTION_NAME):
    collection = get_collection(collection_name)
    results = collection.get(where={"source": source_file})
    if results["ids"]:
        collection.delete(ids=results["ids"])
        return len(results["ids"])
    return 0


def get_all_metadatas(collection_name=COLLECTION_NAME, limit=None):
    collection = get_collection(collection_name)
    # Only fetch metadatas to be lightweight
    if limit:
        results = collection.get(include=["metadatas"], limit=limit)
    else:
        results = collection.get(include=["metadatas"])
    return results["metadatas"] if results and "metadatas" in results else []

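A round-trip sketch for the helpers above: it assumes the Chroma server at CHROMA_HOST is reachable and uses a scratch collection name so the tiny 4-dimensional toy vectors do not clash with real data.

# Sketch: insert two toy documents, then query them back.
# Assumes the Chroma server at CHROMA_HOST is reachable; "scratch_demo" is a
# throwaway collection so the toy embedding size doesn't affect real data.
from src.chroma.vector_store import insert_documents, search_documents

texts = ["Brand A cut emissions 10% in 2023.", "Brand B planted 1M trees."]
vectors = [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]]
insert_documents(texts, vectors, collection_name="scratch_demo",
                 metadata_list=[{"source": "demo.txt"}, {"source": "demo.txt"}])

hits = search_documents([0.1, 0.2, 0.3, 0.4], collection_name="scratch_demo",
                        num_results=2)
for hit in hits:
    print(hit["score"], hit["text"])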
View File

@@ -1,47 +1,47 @@
from .config import (
    CV_DIR,
    DATA_DIR,
    MODELS_DIR,
    ULTRALYTICS_AVAILABLE,
    YOLO26_MODELS,
    SUPER_CATEGORIES,
    COMMON_BRANDS,
    COLORS,
    DEFAULT_CONF_THRESHOLD,
    DEFAULT_IOU_THRESHOLD,
    DEFAULT_IMG_SIZE,
)
from .detectors import (
    YOLO26Detector,
    HybridLogoDetector,
)
from .yolo_scanner import (
    start_scanner as start_yolo_scanner,
    detect_objects as detect_yolo_objects,
)
from .scanner import (
    start_interactive_capture as start_ollama_scanner,
    capture_and_analyze as capture_ollama_once,
)

__all__ = [
    "CV_DIR",
    "DATA_DIR",
    "MODELS_DIR",
    "ULTRALYTICS_AVAILABLE",
    "YOLO26_MODELS",
    "SUPER_CATEGORIES",
    "COMMON_BRANDS",
    "COLORS",
    "DEFAULT_CONF_THRESHOLD",
    "DEFAULT_IOU_THRESHOLD",
    "DEFAULT_IMG_SIZE",
    "YOLO26Detector",
    "HybridLogoDetector",
    "start_yolo_scanner",
    "detect_yolo_objects",
    "start_ollama_scanner",
    "capture_ollama_once",
]

__version__ = "2.0.0"

View File

@@ -1,4 +1,4 @@
from .cli import main

if __name__ == "__main__":
    main()

View File

@@ -1,2 +1,2 @@
print('API chain')

View File

@@ -1,47 +1,47 @@
#!/usr/bin/env python3
import argparse

from .config import YOLO26_MODELS
from .yolo_scanner import start_scanner as start_yolo, detect_objects
from .scanner import start_interactive_capture as start_ollama


def main():
    parser = argparse.ArgumentParser(
        description="Ollama and YOLO Logo Detection Scanner"
    )
    parser.add_argument("--model", "-m", type=str)
    parser.add_argument("--size", "-s", type=str, default="nano",
                        choices=["nano", "small", "medium", "large", "xlarge"])
    parser.add_argument("--logo-model", type=str)
    parser.add_argument("--yolo", action="store_true")
    parser.add_argument("--no-gui", action="store_true")
    parser.add_argument("--track", "-t", action="store_true")
    parser.add_argument("--hybrid", action="store_true")
    parser.add_argument("--image", "-i", type=str)
    args = parser.parse_args()
    if args.image:
        detections = detect_objects(
            args.image, model_size=args.size, hybrid_mode=args.hybrid
        )
        print(f"Found {len(detections)} detections:")
        for det in detections:
            print(f" {det['label']}: {det['confidence']:.2%}")
    elif args.yolo:
        start_yolo(
            model_path=args.model,
            model_size=args.size,
            logo_model_path=args.logo_model,
            use_gui=not args.no_gui,
            use_tracking=args.track,
            hybrid_mode=args.hybrid
        )
    else:
        start_ollama()


if __name__ == "__main__":
    main()

View File

@@ -2,60 +2,60 @@ import os
from pathlib import Path
from typing import Dict

CV_DIR = Path(__file__).parent
DATA_DIR = CV_DIR / "data"
MODELS_DIR = CV_DIR / "models"

DATA_DIR.mkdir(parents=True, exist_ok=True)
MODELS_DIR.mkdir(parents=True, exist_ok=True)

try:
    from ultralytics import YOLO
    ULTRALYTICS_AVAILABLE = True
except ImportError:
    ULTRALYTICS_AVAILABLE = False
    YOLO = None

YOLO26_MODELS: Dict[str, str] = {
    "nano": "yolo26n.pt",
    "small": "yolo26s.pt",
    "medium": "yolo26m.pt",
    "large": "yolo26l.pt",
    "xlarge": "yolo26x.pt",
}

SUPER_CATEGORIES: Dict[str, int] = {
    "Food": 932,
    "Clothes": 604,
    "Necessities": 432,
    "Others": 371,
    "Electronic": 224,
    "Transportation": 213,
    "Leisure": 111,
    "Sports": 66,
    "Medical": 47
}

COMMON_BRANDS = [
    "McDonalds", "Starbucks", "CocaCola", "Pepsi", "KFC", "BurgerKing",
    "Subway", "DunkinDonuts", "PizzaHut", "Dominos", "Nestle", "Heineken",
    "Nike", "Adidas", "Puma", "UnderArmour", "Levis", "HM", "Zara", "Gap",
    "Gucci", "LouisVuitton", "Chanel", "Versace", "Prada", "Armani",
    "Apple", "Samsung", "HP", "Dell", "Intel", "AMD", "Nvidia", "Microsoft",
    "Sony", "LG", "Huawei", "Xiaomi", "Lenovo", "Asus", "Acer",
    "BMW", "Mercedes", "Audi", "Toyota", "Honda", "Ford", "Chevrolet",
    "Volkswagen", "Tesla", "Porsche", "Ferrari", "Lamborghini", "Nissan",
    "Google", "Facebook", "Twitter", "Instagram", "YouTube", "Amazon",
    "Netflix", "Spotify", "Uber", "Airbnb", "PayPal", "Visa", "Mastercard"
]

COLORS = {
    "high_conf": (0, 255, 0),
    "medium_conf": (0, 255, 255),
    "low_conf": (0, 165, 255),
    "logo": (255, 0, 255),
}

DEFAULT_CONF_THRESHOLD = 0.25
DEFAULT_IOU_THRESHOLD = 0.45
DEFAULT_IMG_SIZE = 640

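A tiny sketch showing how the config module is typically consumed: look up a weights filename by size (with the "nano" fallback the detectors use) and report the defaults. Only names defined in the config above are used.

# Sketch: resolve a weights filename from the size table and show defaults.
from src.cv.config import YOLO26_MODELS, MODELS_DIR, DEFAULT_CONF_THRESHOLD

size = "small"
weights = YOLO26_MODELS.get(size, YOLO26_MODELS["nano"])
print(f"Model file: {weights} | models dir: {MODELS_DIR} | conf >= {DEFAULT_CONF_THRESHOLD}")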
View File

@@ -1,7 +1,7 @@
from .yolo26 import YOLO26Detector
from .hybrid import HybridLogoDetector

__all__ = [
    "YOLO26Detector",
    "HybridLogoDetector",
]

View File

@@ -1,154 +1,154 @@
import cv2
import numpy as np
import os
from typing import List, Dict, Optional

from ..config import (
    ULTRALYTICS_AVAILABLE,
    MODELS_DIR,
    COLORS,
    DEFAULT_CONF_THRESHOLD,
)
from .yolo26 import YOLO26Detector

if ULTRALYTICS_AVAILABLE:
    from ultralytics import YOLO


class HybridLogoDetector:
    def __init__(self,
                 coco_model_size: str = "nano",
                 logo_model_path: Optional[str] = None,
                 conf_threshold: float = DEFAULT_CONF_THRESHOLD,
                 device: str = "auto"):
        self.conf_threshold = conf_threshold
        self.device = device
        self.coco_detector = None
        self.logo_model = None
        if not ULTRALYTICS_AVAILABLE:
            raise RuntimeError("Ultralytics not installed. Run: pip install ultralytics")
        print("Loading YOLO26 COCO base model...")
        self.coco_detector = YOLO26Detector(
            model_size=coco_model_size,
            conf_threshold=conf_threshold,
            device=device
        )
        if logo_model_path and os.path.exists(logo_model_path):
            print(f"Loading logo model: {logo_model_path}")
            self.logo_model = YOLO(logo_model_path)
            print("Logo model loaded!")
        else:
            default_logo_model = MODELS_DIR / "logo_detector.pt"
            if default_logo_model.exists():
                print(f"Loading default logo model: {default_logo_model}")
                self.logo_model = YOLO(str(default_logo_model))
                print("Logo model loaded!")
            else:
                print("No logo model found.")
        print("Hybrid detector ready!")

    def detect(self,
               frame: np.ndarray,
               detect_objects: bool = True,
               detect_logos: bool = True,
               conf_threshold: Optional[float] = None) -> List[Dict]:
        conf = conf_threshold if conf_threshold is not None else self.conf_threshold
        all_detections = []
        if detect_objects and self.coco_detector:
            object_detections = self.coco_detector.detect(frame, conf_threshold=conf)
            for det in object_detections:
                det["type"] = "object"
            all_detections.extend(object_detections)
        if detect_logos and self.logo_model:
            logo_detections = self._detect_logos(frame, conf)
            for det in logo_detections:
                det["type"] = "logo"
            all_detections.extend(logo_detections)
        return all_detections

    def _detect_logos(self, frame: np.ndarray, conf_threshold: float) -> List[Dict]:
        if self.logo_model is None:
            return []
        results = self.logo_model(
            frame,
            conf=conf_threshold,
            device=self.device if self.device != "auto" else None,
            verbose=False
        )
        detections = []
        for result in results:
            boxes = result.boxes
            if boxes is None:
                continue
            for i in range(len(boxes)):
                xyxy = boxes.xyxy[i].cpu().numpy()
                x1, y1, x2, y2 = map(int, xyxy)
                conf_val = float(boxes.conf[i].cpu().numpy())
                class_id = int(boxes.cls[i].cpu().numpy())
                label = self.logo_model.names[class_id]
                detections.append({
                    "bbox": (x1, y1, x2, y2),
                    "label": label,
                    "confidence": conf_val,
                    "class_id": class_id,
                    "brand": label
                })
        return detections

    def draw_detections(self,
                        frame: np.ndarray,
                        detections: List[Dict],
                        show_labels: bool = True) -> np.ndarray:
        result = frame.copy()
        for det in detections:
            x1, y1, x2, y2 = det["bbox"]
            label = det["label"]
            conf = det["confidence"]
            det_type = det.get("type", "object")
            if det_type == "logo":
                color = COLORS["logo"]
            elif conf > 0.7:
                color = COLORS["high_conf"]
            elif conf > 0.5:
                color = COLORS["medium_conf"]
            else:
                color = COLORS["low_conf"]
            cv2.rectangle(result, (x1, y1), (x2, y2), color, 2)
            if show_labels:
                label_text = f"{label}: {conf:.2f}"
                (text_w, text_h), _ = cv2.getTextSize(
                    label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
                )
                cv2.rectangle(
                    result,
                    (x1, y1 - text_h - 8),
                    (x1 + text_w + 4, y1),
                    color,
                    -1
                )
                cv2.putText(
                    result,
                    label_text,
                    (x1 + 2, y1 - 4),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    (255, 255, 255) if det_type == "logo" else (0, 0, 0),
                    1
                )
        return result

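A sketch of running the hybrid detector on a single still image and saving an annotated copy; it requires ultralytics plus downloadable YOLO26 weights, and the image filename is a placeholder.

# Sketch: hybrid (COCO + logo) detection on one image.
# Requires ultralytics; "storefront.jpg" is a placeholder path.
import cv2
from src.cv.detectors import HybridLogoDetector

detector = HybridLogoDetector(coco_model_size="nano", conf_threshold=0.3)
image = cv2.imread("storefront.jpg")
if image is None:
    raise SystemExit("Could not read the example image")
detections = detector.detect(image)
for det in detections:
    print(det["type"], det["label"], f"{det['confidence']:.2f}")
cv2.imwrite("storefront_annotated.jpg", detector.draw_detections(image, detections))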
View File

@@ -1,186 +1,186 @@
import cv2
import numpy as np
import os
from typing import List, Dict, Optional

from ..config import (
    ULTRALYTICS_AVAILABLE,
    YOLO26_MODELS,
    COLORS,
    DEFAULT_CONF_THRESHOLD,
    DEFAULT_IOU_THRESHOLD,
)

if ULTRALYTICS_AVAILABLE:
    from ultralytics import YOLO


class YOLO26Detector:
    def __init__(self,
                 model_size: str = "nano",
                 model_path: Optional[str] = None,
                 conf_threshold: float = DEFAULT_CONF_THRESHOLD,
                 iou_threshold: float = DEFAULT_IOU_THRESHOLD,
                 device: str = "auto"):
        self.conf_threshold = conf_threshold
        self.iou_threshold = iou_threshold
        self.device = device
        self.model = None
        if not ULTRALYTICS_AVAILABLE:
            raise RuntimeError("Ultralytics not installed. Run: pip install ultralytics")
        if model_path and os.path.exists(model_path):
            model_name = model_path
        elif model_size in YOLO26_MODELS:
            model_name = YOLO26_MODELS[model_size]
        else:
            print(f"Unknown model size '{model_size}', defaulting to 'nano'")
            model_name = YOLO26_MODELS["nano"]
        print(f"Loading YOLO26 model: {model_name}")
        self.model = YOLO(model_name)
        print("YOLO26 model loaded successfully!")
        print(f"Classes: {len(self.model.names)} | Device: {device}")

    def detect(self,
               frame: np.ndarray,
               conf_threshold: Optional[float] = None,
               classes: Optional[List[int]] = None) -> List[Dict]:
        if self.model is None:
            return []
        conf = conf_threshold if conf_threshold is not None else self.conf_threshold
        results = self.model(
            frame,
            conf=conf,
            iou=self.iou_threshold,
            device=self.device if self.device != "auto" else None,
            classes=classes,
            verbose=False
        )
        detections = []
        for result in results:
            boxes = result.boxes
            if boxes is None:
                continue
            for i in range(len(boxes)):
                xyxy = boxes.xyxy[i].cpu().numpy()
                x1, y1, x2, y2 = map(int, xyxy)
                conf_val = float(boxes.conf[i].cpu().numpy())
                class_id = int(boxes.cls[i].cpu().numpy())
                label = self.model.names[class_id]
                detections.append({
                    "bbox": (x1, y1, x2, y2),
                    "label": label,
                    "confidence": conf_val,
                    "class_id": class_id
                })
        return detections

    def detect_and_track(self,
                         frame: np.ndarray,
                         conf_threshold: Optional[float] = None,
                         tracker: str = "bytetrack.yaml") -> List[Dict]:
        if self.model is None:
            return []
        conf = conf_threshold if conf_threshold is not None else self.conf_threshold
        results = self.model.track(
            frame,
            conf=conf,
            iou=self.iou_threshold,
            device=self.device if self.device != "auto" else None,
            tracker=tracker,
            persist=True,
            verbose=False
        )
        detections = []
        for result in results:
            boxes = result.boxes
            if boxes is None:
                continue
            for i in range(len(boxes)):
                xyxy = boxes.xyxy[i].cpu().numpy()
                x1, y1, x2, y2 = map(int, xyxy)
                conf_val = float(boxes.conf[i].cpu().numpy())
                class_id = int(boxes.cls[i].cpu().numpy())
                label = self.model.names[class_id]
                track_id = None
                if boxes.id is not None:
                    track_id = int(boxes.id[i].cpu().numpy())
                detections.append({
                    "bbox": (x1, y1, x2, y2),
                    "label": label,
                    "confidence": conf_val,
                    "class_id": class_id,
                    "track_id": track_id
                })
        return detections

    def draw_detections(self,
                        frame: np.ndarray,
                        detections: List[Dict],
                        show_labels: bool = True,
                        show_conf: bool = True) -> np.ndarray:
        result = frame.copy()
        for det in detections:
            x1, y1, x2, y2 = det["bbox"]
            label = det["label"]
            conf = det["confidence"]
            track_id = det.get("track_id")
            if conf > 0.7:
                color = COLORS["high_conf"]
            elif conf > 0.5:
                color = COLORS["medium_conf"]
            else:
                color = COLORS["low_conf"]
            cv2.rectangle(result, (x1, y1), (x2, y2), color, 2)
            if show_labels:
                label_parts = [label]
                if track_id is not None:
                    label_parts.append(f"ID:{track_id}")
                if show_conf:
                    label_parts.append(f"{conf:.2f}")
                label_text = " | ".join(label_parts)
                (text_w, text_h), baseline = cv2.getTextSize(
                    label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
                )
                cv2.rectangle(
                    result,
                    (x1, y1 - text_h - 8),
                    (x1 + text_w + 4, y1),
                    color,
                    -1
                )
                cv2.putText(
                    result,
                    label_text,
                    (x1 + 2, y1 - 4),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    (0, 0, 0),
                    1
                )
        return result

    def get_class_names(self) -> Dict[int, str]:
        return self.model.names if self.model else {}

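A short sketch of the tracking path above: grab a handful of webcam frames and count active ByteTrack IDs. It assumes ultralytics is installed and camera device 0 is available; the 30-frame limit is just for the demo.

# Sketch: track objects across a few webcam frames with ByteTrack.
# Assumes camera index 0 exists; 30 frames is an arbitrary demo length.
import cv2
from src.cv.detectors import YOLO26Detector

detector = YOLO26Detector(model_size="nano")
cap = cv2.VideoCapture(0)
try:
    for _ in range(30):
        ok, frame = cap.read()
        if not ok:
            break
        tracked = detector.detect_and_track(frame, tracker="bytetrack.yaml")
        ids = {d["track_id"] for d in tracked if d["track_id"] is not None}
        print(f"{len(tracked)} detections, {len(ids)} active track IDs")
finally:
    cap.release()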
View File

@@ -3,195 +3,195 @@ import json
import numpy as np
from datetime import datetime
from pathlib import Path
from typing import Dict, Optional

from ..ollama.detector import OllamaLogoDetector


def capture_and_analyze(model: str = "ministral-3:latest",
                        save_image: bool = True,
                        output_dir: Optional[str] = None) -> Dict:
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise RuntimeError("Could not access camera")
    print("Camera ready. Press SPACE to capture, Q to quit.")
    result = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        display = frame.copy()
        cv2.putText(display, "Press SPACE to capture | Q to quit",
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        cv2.imshow("Capture", display)
        key = cv2.waitKey(1) & 0xFF
        if key == ord(' '):
            print("Analyzing image...")
            if save_image:
                if output_dir is None:
                    output_dir = "./captures"
                Path(output_dir).mkdir(parents=True, exist_ok=True)
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                img_path = Path(output_dir) / f"capture_{timestamp}.jpg"
                cv2.imwrite(str(img_path), frame)
                print(f"Saved: {img_path}")
            detector = OllamaLogoDetector(model=model)
            result = detector.detect_from_numpy(frame)
            _display_results(result)
            break
        elif key == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    return result if result else {"logos_detected": [], "total_count": 0}


def start_interactive_capture(model: str = "ministral-3:latest",
                              save_images: bool = True,
                              output_dir: Optional[str] = None):
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise RuntimeError("Could not access camera")
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print("=" * 60)
    print("Ollama Logo Detection - Interactive Mode")
    print("=" * 60)
    print(f"Camera: {width}x{height}")
    print(f"Model: {model}")
    print("\nControls:")
    print(" SPACE - Capture and analyze")
    print(" S - Save frame only")
    print(" R - Show last results")
    print(" Q - Quit")
    print("=" * 60)
    detector = OllamaLogoDetector(model=model)
    last_result = None
    analyzing = False
    status_message = "Ready - Press SPACE to capture"
    if output_dir is None:
        output_dir = "./captures"
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        display = frame.copy()
        cv2.rectangle(display, (0, 0), (width, 40), (40, 40, 40), -1)
        cv2.putText(display, status_message, (10, 28),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        if last_result and last_result.get("logos_detected"):
            brands = [l.get("brand", "?") for l in last_result["logos_detected"]]
            brand_text = f"Detected: {', '.join(brands[:3])}"
            if len(brands) > 3:
                brand_text += f" +{len(brands)-3} more"
            cv2.rectangle(display, (0, height-35), (width, height), (40, 40, 40), -1)
            cv2.putText(display, brand_text, (10, height-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)
        cv2.imshow("Ollama Logo Detection", display)
        key = cv2.waitKey(1) & 0xFF
        if key == ord(' ') and not analyzing:
            analyzing = True
            status_message = "Analyzing with Ollama..."
            cv2.imshow("Ollama Logo Detection", display)
            cv2.waitKey(1)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            img_path = Path(output_dir) / f"capture_{timestamp}.jpg"
            if save_images:
                cv2.imwrite(str(img_path), frame)
            last_result = detector.detect_from_numpy(frame)
            json_path = Path(output_dir) / f"result_{timestamp}.json"
            with open(json_path, 'w') as f:
                json.dump(last_result, f, indent=2)
            count = last_result.get("total_count", 0)
            if count > 0:
                status_message = f"Found {count} logo(s)! Press R for details"
            else:
                status_message = "No logos detected. Try again!"
            print(f"\nCaptured: {img_path}")
            print(f"Results: {json_path}")
            _display_results(last_result)
            analyzing = False
        elif key == ord('s'):
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            img_path = Path(output_dir) / f"capture_{timestamp}.jpg"
            cv2.imwrite(str(img_path), frame)
            status_message = f"Saved: {img_path.name}"
            print(f"Saved: {img_path}")
        elif key == ord('r') and last_result:
            print("\n" + "=" * 40)
            print("Last Detection Results:")
            print("=" * 40)
            _display_results(last_result)
        elif key == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    print("\nGoodbye!")
    return last_result


def _display_results(result: Dict):
    print("\n" + "-" * 40)
    logos = result.get("logos_detected", [])
    count = result.get("total_count", len(logos))
    if count == 0:
        print("No logos or brands detected")
        if "description" in result:
            print(f"Description: {result['description']}")
    else:
        print(f"Detected {count} logo(s)/brand(s):\n")
        for i, logo in enumerate(logos, 1):
            brand = logo.get("brand", "Unknown")
            conf = logo.get("confidence", "unknown")
            loc = logo.get("location", "unknown")
            cat = logo.get("category", "")
            print(f" {i}. {brand}")
            print(f" Confidence: {conf}")
            print(f" Location: {loc}")
            if cat:
                print(f" Category: {cat}")
            print()
    if "error" in result:
        print(f"Error: {result['error']}")
    print("-" * 40)
    print("\nJSON Output:")
    print(json.dumps(result, indent=2))

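A one-shot usage sketch for capture_and_analyze above; it assumes a local Ollama server with the ministral-3 model pulled and a working camera, and simply prints whatever brands come back.

# Sketch: single capture + analysis via the Ollama-backed detector.
# Assumes a running Ollama server with ministral-3 and a usable camera.
from src.cv.scanner import capture_and_analyze

result = capture_and_analyze(model="ministral-3:latest", save_image=True,
                             output_dir="./captures")
for logo in result.get("logos_detected", []):
    print(logo.get("brand"), "-", logo.get("confidence"))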
View File

@@ -1,28 +1,28 @@
from .config import (
    CV_DIR,
    DATA_DIR,
    MODELS_DIR,
    ULTRALYTICS_AVAILABLE,
    YOLO26_MODELS,
    SUPER_CATEGORIES,
    COMMON_BRANDS,
    COLORS,
    DEFAULT_CONF_THRESHOLD,
    DEFAULT_IOU_THRESHOLD,
    DEFAULT_IMG_SIZE,
)
from .detectors import (
    YOLO26Detector,
    HybridLogoDetector,
)
from .yolo_scanner import (
    start_scanner as start_yolo_scanner,
    detect_objects as detect_yolo_objects,
)
from .scanner import (
    start_interactive_capture as start_ollama_scanner,
)

if __name__ == "__main__":
    from .cli import main
    main()

View File

@@ -1,166 +1,166 @@
import cv2
from pathlib import Path
from typing import List, Dict, Optional

from .config import (
    CV_DIR,
    ULTRALYTICS_AVAILABLE,
)
from .detectors import YOLO26Detector, HybridLogoDetector


def start_scanner(model_path: Optional[str] = None,
                  model_size: str = "nano",
                  logo_model_path: Optional[str] = None,
                  use_gui: bool = True,
                  use_tracking: bool = False,
                  hybrid_mode: bool = False):
    print("=" * 60)
    if hybrid_mode:
        print("YOLO26 Hybrid Scanner (COCO + Logos)")
    else:
        print("YOLO26 Object Detection Scanner")
    print("=" * 60)
    detector = None
    if hybrid_mode and ULTRALYTICS_AVAILABLE:
        try:
            detector = HybridLogoDetector(
                coco_model_size=model_size,
                logo_model_path=logo_model_path,
                conf_threshold=0.25,
                device="auto"
            )
        except Exception as e:
            print(f"Hybrid detector failed: {e}")
            hybrid_mode = False
    if detector is None and ULTRALYTICS_AVAILABLE:
        try:
            detector = YOLO26Detector(
                model_size=model_size,
                model_path=model_path,
                conf_threshold=0.25,
                device="auto"
            )
        except Exception as e:
            print(f"YOLO26 failed: {e}")
    if detector is None:
        print("Error: No detector available.")
        return
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Could not access camera.")
        return
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    writer = None
    output_path = CV_DIR / "output.mp4"
    print(f"Camera: {width}x{height} @ {fps:.1f}fps")
    print("Controls: q=quit | s=screenshot | t=tracking")
    if hybrid_mode:
        print(" o=objects | l=logos | b=both")
    frame_count = 0
    detect_objects_flag = True
    detect_logos_flag = True
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame_count += 1
            if hybrid_mode and isinstance(detector, HybridLogoDetector):
                detections = detector.detect(
                    frame,
                    detect_objects=detect_objects_flag,
                    detect_logos=detect_logos_flag
                )
            elif use_tracking and isinstance(detector, YOLO26Detector):
                detections = detector.detect_and_track(frame)
            else:
                detections = detector.detect(frame)
            result_frame = detector.draw_detections(frame, detections)
            mode_str = "HYBRID" if hybrid_mode else ("TRACK" if use_tracking else "DETECT")
            cv2.putText(result_frame, f"{mode_str} | {len(detections)} objects",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
            if use_gui:
                try:
                    cv2.imshow('YOLO26 Scanner', result_frame)
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord('q'):
                        break
                    elif key == ord('s'):
                        path = CV_DIR / f"screenshot_{frame_count}.jpg"
                        cv2.imwrite(str(path), result_frame)
                        print(f"Saved: {path}")
                    elif key == ord('t') and isinstance(detector, YOLO26Detector):
                        use_tracking = not use_tracking
                    elif key == ord('o') and hybrid_mode:
                        detect_objects_flag, detect_logos_flag = True, False
                    elif key == ord('l') and hybrid_mode:
                        detect_objects_flag, detect_logos_flag = False, True
                    elif key == ord('b') and hybrid_mode:
                        detect_objects_flag, detect_logos_flag = True, True
                except cv2.error:
                    use_gui = False
                    writer = cv2.VideoWriter(
                        str(output_path), cv2.VideoWriter_fourcc(*'mp4v'),
                        fps, (width, height)
                    )
            if not use_gui and writer:
                writer.write(result_frame)
    except KeyboardInterrupt:
        print("Stopping...")
    finally:
        cap.release()
        if writer:
            writer.release()
        cv2.destroyAllWindows()


def detect_objects(image_path: str,
                   model_size: str = "nano",
                   conf_threshold: float = 0.25,
                   save_output: bool = True,
                   hybrid_mode: bool = False) -> List[Dict]:
    if not ULTRALYTICS_AVAILABLE:
        raise RuntimeError("Ultralytics not installed")
    if hybrid_mode:
        detector = HybridLogoDetector(
            coco_model_size=model_size,
            conf_threshold=conf_threshold
        )
    else:
        detector = YOLO26Detector(
            model_size=model_size,
            conf_threshold=conf_threshold
        )
    image = cv2.imread(image_path)
    if image is None:
        raise ValueError(f"Could not load: {image_path}")
    detections = detector.detect(image)
    if save_output:
        result = detector.draw_detections(image, detections)
        output = Path(image_path).stem + "_detected.jpg"
        cv2.imwrite(output, result)
        print(f"Saved: {output}")
    return detections

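A sketch of the single-image path through the scanner helper above, optionally in hybrid (COCO + logo) mode; the image filename is a placeholder and ultralytics must be installed.

# Sketch: single-image detection via detect_objects; "shelf.jpg" is a placeholder.
from src.cv.yolo_scanner import detect_objects

detections = detect_objects("shelf.jpg", model_size="nano",
                            conf_threshold=0.3, hybrid_mode=True)
print(f"{len(detections)} detections")
for det in detections:
    print(f" {det['label']}: {det['confidence']:.2%}")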
View File

@@ -1,9 +1,9 @@
import os
from google import genai

from src.chroma.vector_store import search_documents
from src.rag.embeddings import get_embedding

GREENWASHING_ANALYSIS_PROMPT = """
You are an expert Environmental, Social, and Governance (ESG) Analyst specialized in detecting 'Greenwashing'.
Your task is to analyze the provided context from a company's data reports and determine if they are engaging in greenwashing.
@@ -21,123 +21,123 @@ Based on the context provided, give a final verdict:
- EVIDENCE: [Quote specific parts of the context if possible]
"""

def ask(prompt):
    client = genai.Client(api_key=os.environ.get("GOOGLE_API_KEY"))
    return client.models.generate_content(model="gemini-3-pro-preview", contents=prompt).text

def ask_gemini_with_rag(prompt, category=None):
    """Ask Gemini with RAG context from the vector database."""
    # Get embedding for the prompt
    query_embedding = get_embedding(prompt)

    # Search for relevant documents
    results = search_documents(query_embedding, num_results=5)

    # Build context from results
    context = ""
    for res in results:
        context += f"--- Document ---\n{res['text']}\n\n"

    # Create full prompt with context
    full_prompt = f"""You are a helpful sustainability assistant. Use the following context to answer the user's question.
If the context doesn't contain relevant information, you can use your general knowledge but mention that.

CONTEXT:
{context}

USER QUESTION: {prompt}

Please provide a helpful and concise response."""
    return ask(full_prompt)

def analyze(query, query_embedding, num_results=5, num_alternatives=3):
    try:
        results = search_documents(query_embedding, num_results=num_results + num_alternatives + 5)
    except Exception as e:
        print(f"Chroma error: {e}")
        results = []

    if not results:
        context = "No data found in database for this brand."
    else:
        context = "--- START OF REPORT CONTEXT ---\n"
        for res in results[:num_results]:
            context += f"RELEVANT DATA CHUNK: {res['text']}\n\n"
        context += "--- END OF REPORT CONTEXT ---\n"

    full_prompt = f"{GREENWASHING_ANALYSIS_PROMPT}\n\n{context}\n\nUSER QUERY/COMPANY FOCUS: {query}"
    analysis_text = ask(full_prompt)

    alternatives = []
    seen_texts = set()
    for res in results[num_results:]:
        text_preview = res['text'][:200]
        if text_preview not in seen_texts:
            seen_texts.add(text_preview)
            alternatives.append({"text": res['text'], "score": res.get('score'), "summary": text_preview})
            if len(alternatives) >= num_alternatives:
                break
    return {"analysis": analysis_text, "alternatives": alternatives}

def ask_gemini_with_rag(query, category=None, num_results=5):
    embedding = get_embedding(query)
    result = analyze(query, embedding, num_results=num_results)
    return result["analysis"]

def analyze_brand(brand_name):
    print(f"\n{'='*60}")
    print(f"Analyzing brand: {brand_name}")
    print('='*60)
    try:
        print("\n[1/3] Getting embedding for brand...")
        embedding = get_embedding(brand_name)
        print("[2/3] Querying Chroma database...")
        result = analyze(brand_name, embedding)
        print("[3/3] Gemini Analysis Complete!\n")
        print("-"*60)
        print("ANALYSIS:")
        print("-"*60)
        print(result["analysis"])
        print("\n" + "-"*60)
        print("ALTERNATIVES FROM DATABASE:")
        print("-"*60)
        if result["alternatives"]:
            for i, alt in enumerate(result["alternatives"], 1):
                print(f"\n{i}. {alt['summary']}...")
        else:
            print("No alternatives found in database.")
        print("\n" + "="*60)
        return result
    except Exception as e:
        print(f"\nError during analysis: {e}")
        return None

def scan_and_analyze():
    from src.cv.scanner import capture_and_analyze as cv_capture
    print("\n" + "="*60)
    print("CV + Gemini Greenwashing Scanner")
    print("="*60)
    print("Using camera to detect brands...")
    cv_result = cv_capture()
    logos = cv_result.get("logos_detected", [])
    if not logos:
        print("No brands detected. Try again!")
        return None
    brand = logos[0].get("brand", "Unknown")
    print(f"\nDetected brand: {brand}")
    return analyze_brand(brand)

if __name__ == "__main__":
    scan_and_analyze()
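Usage sketch (illustrative only): the calls below assume GOOGLE_API_KEY is set, the Ollama embedding host is reachable, and the vector store has already been populated; "ExampleBrand" is a made-up name.

from src.gemini import analyze_brand, ask_gemini_with_rag

# Prints the verdict plus alternative chunks and returns them as a dict (or None on error).
report = analyze_brand("ExampleBrand")
if report:
    print(report["analysis"][:300])

# Free-form question answered over the same vector store.
print(ask_gemini_with_rag("What environmental claims does ExampleBrand make?"))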
@@ -1,11 +1,11 @@
from google import genai
import os

def generate_content(prompt, model_name="gemini-2.0-flash-exp"):
    api_key = os.environ.get("GOOGLE_API_KEY")
    client = genai.Client(api_key=api_key)
    response = client.models.generate_content(
        model=model_name,
        contents=prompt,
    )
    return response.text
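Usage sketch (illustrative; assumes GOOGLE_API_KEY is exported and that this helper is importable — its module path is not shown in this diff):

# generate_content falls back to the default model name shown above.
summary = generate_content("Summarize the main risks of greenwashing in two sentences.")
print(summary)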
@@ -2,20 +2,20 @@ import os
from pymongo import MongoClient
from dotenv import load_dotenv

script_dir = os.path.dirname(os.path.abspath(__file__))
env_path = os.path.join(script_dir, '..', 'rag', '.env')
load_dotenv(env_path)

def get_database():
    uri = os.getenv("MONGO_URI")
    try:
        client = MongoClient(uri)
        db = client["my_rag_app"]
        print("SUCCESS: Connected to MongoDB Atlas!")
        return db
    except Exception as e:
        print(f"ERROR: Could not connect to MongoDB: {e}")
        return None

if __name__ == "__main__":
    get_database()
@@ -1,8 +1,8 @@
import os
from pymongo import MongoClient

def get_mongo_client():
    uri = os.environ.get("MONGO_URI")
    if not uri:
        raise ValueError("MONGO_URI environment variable not set")
    return MongoClient(uri)
@@ -1,62 +1,62 @@
from .connection import get_mongo_client
from datetime import datetime

DB_NAME = "hoya_metadata"

def get_datasets_collection():
    client = get_mongo_client()
    db = client.get_database(DB_NAME)
    return db["datasets"]

def get_categories_collection():
    client = get_mongo_client()
    db = client.get_database(DB_NAME)
    return db["categories"]

def is_file_processed(filename):
    collection = get_datasets_collection()
    return collection.find_one({"filename": filename}) is not None

def log_processed_file(filename, category=None, chunk_count=0):
    collection = get_datasets_collection()
    doc = {
        "filename": filename,
        "category": category,
        "chunk_count": chunk_count,
        "processed_at": datetime.utcnow(),
        "status": "processed"
    }
    collection.insert_one(doc)

def get_all_datasets():
    collection = get_datasets_collection()
    return list(collection.find({}, {"_id": 0}))

def get_datasets_by_category(category):
    collection = get_datasets_collection()
    return list(collection.find({"category": category}, {"_id": 0}))

def delete_dataset_record(filename):
    collection = get_datasets_collection()
    result = collection.delete_one({"filename": filename})
    return result.deleted_count > 0

def create_category(name, description=""):
    collection = get_categories_collection()
    if collection.find_one({"name": name}):
        return False
    collection.insert_one({
        "name": name,
        "description": description,
        "created_at": datetime.utcnow()
    })
    return True

def get_all_categories():
    collection = get_categories_collection()
    return list(collection.find({}, {"_id": 0}))

def delete_category(name):
    collection = get_categories_collection()
    result = collection.delete_one({"name": name})
    return result.deleted_count > 0
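Usage sketch (illustrative; assumes MONGO_URI points at a reachable cluster and that these helpers are imported from the metadata module above, whose path is not shown in this diff; the file and category names are made up):

create_category("apparel", description="Clothing and footwear brands")

if not is_file_processed("brand_report_2024.pdf"):
    # ...ingest the file elsewhere, then record the bookkeeping entry here
    log_processed_file("brand_report_2024.pdf", category="apparel", chunk_count=42)

print(get_datasets_by_category("apparel"))
print(get_all_categories())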
@@ -1,49 +1,49 @@
from .connection import get_mongo_client

def insert_rag_documents(documents, collection_name="rag_documents", db_name="vectors_db"):
    client = get_mongo_client()
    db = client.get_database(db_name)
    collection = db[collection_name]
    if documents:
        result = collection.insert_many(documents)
        return len(result.inserted_ids)
    return 0

def search_rag_documents(query_embedding, collection_name="rag_documents", db_name="vectors_db", num_results=5):
    client = get_mongo_client()
    db = client.get_database(db_name)
    collection = db[collection_name]
    pipeline = [
        {
            "$vectorSearch": {
                "index": "vector_index",
                "path": "embedding",
                "queryVector": query_embedding,
                "numCandidates": num_results * 10,
                "limit": num_results
            }
        },
        {
            "$project": {
                "_id": 0,
                "text": 1,
                "score": {"$meta": "vectorSearchScore"}
            }
        }
    ]
    return list(collection.aggregate(pipeline))

def is_file_processed(filename, log_collection="ingested_files", db_name="vectors_db"):
    client = get_mongo_client()
    db = client.get_database(db_name)
    collection = db[log_collection]
    return collection.find_one({"filename": filename}) is not None

def log_processed_file(filename, log_collection="ingested_files", db_name="vectors_db"):
    client = get_mongo_client()
    db = client.get_database(db_name)
    collection = db[log_collection]
    collection.insert_one({"filename": filename, "processed_at": 1})
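Note that the $vectorSearch stage above only works on MongoDB Atlas with a vector search index named "vector_index" defined on the "embedding" field of the target collection. A minimal end-to-end sketch (the module path in the second import is an assumption; the sample text is made up):

from src.rag.embeddings import get_embedding
from src.mongo.documents import insert_rag_documents, search_rag_documents  # hypothetical module path

text = "Our packaging is 100% recycled and plastic-free."
insert_rag_documents([{"text": text, "embedding": get_embedding(text)}])

hits = search_rag_documents(get_embedding("recycled packaging claims"), num_results=3)
for hit in hits:
    print(round(hit["score"], 3), hit["text"][:80])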
@@ -1,5 +1,5 @@
from .detector import OllamaLogoDetector

__all__ = [
    "OllamaLogoDetector",
]
@@ -1,4 +1,4 @@
from .cli import main

if __name__ == "__main__":
    main()
@@ -1,103 +1,103 @@
#!/usr/bin/env python3
import argparse
import json
import sys

from .detector import OllamaLogoDetector
from .camera import capture_and_analyze, start_interactive_capture

def main():
    parser = argparse.ArgumentParser(
        description="Detect logos and companies using Ollama vision models"
    )
    parser.add_argument("--image", "-i", type=str)
    parser.add_argument("--model", "-m", type=str, default="ministral-3:latest")
    parser.add_argument("--output", "-o", type=str)
    parser.add_argument("--host", type=str)
    parser.add_argument("--single", "-s", action="store_true")
    parser.add_argument("--no-save", action="store_true")
    parser.add_argument("--output-dir", type=str, default="./captures")
    args = parser.parse_args()

    try:
        if args.image:
            print(f"Analyzing: {args.image}")
            print(f"Model: {args.model}")
            detector = OllamaLogoDetector(model=args.model, host=args.host)
            result = detector.detect_from_file(args.image)
            _print_results(result)
            if args.output:
                with open(args.output, 'w') as f:
                    json.dump(result, f, indent=2)
                print(f"Results saved to: {args.output}")
        elif args.single:
            result = capture_and_analyze(
                model=args.model,
                save_image=not args.no_save,
                output_dir=args.output_dir
            )
            if args.output and result:
                with open(args.output, 'w') as f:
                    json.dump(result, f, indent=2)
                print(f"Results saved to: {args.output}")
        else:
            start_interactive_capture(
                model=args.model,
                save_images=not args.no_save,
                output_dir=args.output_dir
            )
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception as e:
        print(f"Error: {e}")
        sys.exit(1)

def _print_results(result: dict):
    print("\n" + "=" * 50)
    print("DETECTION RESULTS")
    print("=" * 50)
    logos = result.get("logos_detected", [])
    count = result.get("total_count", len(logos))
    if count == 0:
        print("\nNo logos or companies detected")
        if desc := result.get("description"):
            print(f"\nImage description: {desc}")
    else:
        print(f"\nFound {count} logo(s)/company(s):\n")
        for i, logo in enumerate(logos, 1):
            brand = logo.get("brand", "Unknown")
            conf = logo.get("confidence", "unknown")
            loc = logo.get("location", "unknown")
            cat = logo.get("category", "N/A")
            print(f" {i}. {brand}")
            print(f"    Confidence: {conf}")
            print(f"    Location: {loc}")
            print(f"    Category: {cat}")
            print()
    if "error" in result:
        print(f"\nError occurred: {result['error']}")
    if "raw_response" in result and result.get("parse_error"):
        print(f"\nParse error: {result['parse_error']}")
        print(f"Raw response:\n{result['raw_response'][:500]}...")
    print("=" * 50)
    print("\nRaw JSON:")
    print(json.dumps(result, indent=2))

if __name__ == "__main__":
    main()
@@ -2,19 +2,19 @@ import base64
import json
import re
from pathlib import Path
from typing import Dict, List, Optional, Union

try:
    import ollama
    OLLAMA_AVAILABLE = True
except ImportError:
    OLLAMA_AVAILABLE = False
    print("Ollama not installed. Run: pip install ollama")

DEFAULT_HOST = "https://ollama.sirblob.co"
DEFAULT_MODEL = "ministral-3:latest"

DEFAULT_PROMPT = """Analyze this image and identify ALL logos, brand names, and company names visible.

For each logo or brand you detect, provide:
1. The company/brand name
@@ -45,120 +45,120 @@ If no logos are found, return:
IMPORTANT: Return ONLY the JSON object, no other text."""

class OllamaLogoDetector:
    def __init__(self,
                 model: str = DEFAULT_MODEL,
                 host: str = DEFAULT_HOST):
        if not OLLAMA_AVAILABLE:
            raise RuntimeError("Ollama not installed. Run: pip install ollama")
        self.model = model
        self.host = host
        self.client = ollama.Client(host=host)
        try:
            models = self.client.list()
            model_names = [m['name'] for m in models.get('models', [])]
            model_base = model.split(':')[0]
            if not any(model_base in name for name in model_names):
                print(f"Model '{model}' not found. Available models: {model_names}")
                print(f"Pulling {model}...")
                self.client.pull(model)
                print(f"Model {model} ready!")
            else:
                print(f"Using Ollama model: {model}")
        except Exception as e:
            print(f"Could not verify model: {e}")
            print("Make sure Ollama is running: ollama serve")

    def detect_from_file(self,
                         image_path: str,
                         prompt: Optional[str] = None) -> Dict:
        path = Path(image_path)
        if not path.exists():
            raise FileNotFoundError(f"Image not found: {image_path}")
        with open(path, 'rb') as f:
            image_data = base64.b64encode(f.read()).decode('utf-8')
        return self._analyze_image(image_data, prompt)

    def detect_from_bytes(self,
                          image_bytes: bytes,
                          prompt: Optional[str] = None) -> Dict:
        image_data = base64.b64encode(image_bytes).decode('utf-8')
        return self._analyze_image(image_data, prompt)

    def detect_from_numpy(self,
                          image_array,
                          prompt: Optional[str] = None) -> Dict:
        import cv2
        success, buffer = cv2.imencode('.jpg', image_array)
        if not success:
            raise ValueError("Failed to encode image")
        return self.detect_from_bytes(buffer.tobytes(), prompt)

    def _analyze_image(self,
                       image_base64: str,
                       prompt: Optional[str] = None) -> Dict:
        if prompt is None:
            prompt = DEFAULT_PROMPT
        try:
            response = self.client.chat(
                model=self.model,
                messages=[{
                    'role': 'user',
                    'content': prompt,
                    'images': [image_base64]
                }],
                options={
                    'temperature': 0.1,
                }
            )
            content = response['message']['content']
            return self._parse_response(content)
        except Exception as e:
            return {
                "logos_detected": [],
                "total_count": 0,
                "error": str(e),
                "raw_response": None
            }

    def _parse_response(self, content: str) -> Dict:
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            pass
        json_patterns = [
            r'```json\s*([\s\S]*?)\s*```',
            r'```\s*([\s\S]*?)\s*```',
            r'\{[\s\S]*\}'
        ]
        for pattern in json_patterns:
            match = re.search(pattern, content)
            if match:
                try:
                    json_str = match.group(1) if '```' in pattern else match.group(0)
                    return json.loads(json_str)
                except json.JSONDecodeError:
                    continue
        return {
            "logos_detected": [],
            "total_count": 0,
            "raw_response": content,
            "parse_error": "Could not extract valid JSON from response"
        }

    def get_brands_list(self, result: Dict) -> List[str]:
        logos = result.get("logos_detected", [])
        return [logo.get("brand", "Unknown") for logo in logos]
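Usage sketch (illustrative; assumes the Ollama host above is reachable and "shelf.jpg" is a hypothetical local photo):

from src.ollama.detector import OllamaLogoDetector

detector = OllamaLogoDetector()                   # defaults to ministral-3 on the configured host
result = detector.detect_from_file("shelf.jpg")
print(detector.get_brands_list(result))           # e.g. ['BrandA', 'BrandB']
for logo in result.get("logos_detected", []):
    print(logo.get("brand"), logo.get("confidence"), logo.get("location"))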
@@ -1,32 +1,32 @@
import ollama
import os

client = ollama.Client(host="https://ollama.sirblob.co")
DEFAULT_MODEL = "nomic-embed-text:latest"

def get_embedding(text, model=DEFAULT_MODEL):
    try:
        response = client.embeddings(model=model, prompt=text)
        return response["embedding"]
    except Exception as e:
        print(f"Error getting embedding from Ollama: {e}")
        raise e

def get_embeddings_batch(texts, model=DEFAULT_MODEL, batch_size=50):
    all_embeddings = []
    for i in range(0, len(texts), batch_size):
        batch = texts[i:i + batch_size]
        try:
            response = client.embed(model=model, input=batch)
            if "embeddings" in response:
                all_embeddings.extend(response["embeddings"])
            else:
                raise ValueError("Unexpected response format from client.embed")
        except Exception as e:
            print(f"Error embedding batch {i}-{i+batch_size}: {e}")
            raise e
    return all_embeddings
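Usage sketch (illustrative; assumes the Ollama host above is reachable and serves the nomic-embed-text model):

from src.rag.embeddings import get_embedding, get_embeddings_batch

vec = get_embedding("bamboo toothbrush with compostable packaging")
print(len(vec))                                   # embedding dimensionality

vecs = get_embeddings_batch(["claim one", "claim two", "claim three"], batch_size=2)
print(len(vecs), len(vecs[0]))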
@@ -2,42 +2,42 @@ import os
from google import genai
from dotenv import load_dotenv

script_dir = os.path.dirname(os.path.abspath(__file__))
load_dotenv(os.path.join(script_dir, '.env'))

class GeminiClient:
    def __init__(self):
        self.api_key = os.getenv("GOOGLE_API_KEY")
        if not self.api_key:
            raise ValueError("No GOOGLE_API_KEY found in .env file!")
        self.client = genai.Client(api_key=self.api_key)
        self.model_name = "gemini-2.0-flash"

    def ask(self, prompt, context=""):
        try:
            if context:
                full_message = f"Use this information to answer: {context}\n\nQuestion: {prompt}"
            else:
                full_message = prompt
            response = self.client.models.generate_content(
                model=self.model_name,
                contents=full_message,
                config={
                    'system_instruction': 'You are a concise sustainability assistant. Your responses must be a single short paragraph, maximum 6 sentences long. Do not use bullet points or multiple sections.'
                }
            )
            return response.text
        except Exception as e:
            return f"Error talking to Gemini: {str(e)}"

if __name__ == "__main__":
    try:
        brain = GeminiClient()
        print("--- Testing Class Connection ---")
        print(brain.ask("Hello! Give me a 1-sentence coding tip."))
    except Exception as e:
        print(f"Failed to start Gemini: {e}")
@@ -3,90 +3,90 @@ from pypdf import PdfReader
import io
import os

def chunk_text(text, target_length=2000, overlap=100):
    if not text:
        return []
    chunks = []
    paragraphs = text.split('\n\n')
    current_chunk = ""
    for para in paragraphs:
        if len(current_chunk) + len(para) > target_length:
            if current_chunk:
                chunks.append(current_chunk.strip())
            if len(para) > target_length:
                start = 0
                while start < len(para):
                    end = start + target_length
                    chunks.append(para[start:end].strip())
                    start += (target_length - overlap)
                current_chunk = ""
            else:
                current_chunk = para
        else:
            if current_chunk:
                current_chunk += "\n\n" + para
            else:
                current_chunk = para
    if current_chunk:
        chunks.append(current_chunk.strip())
    return chunks

def load_csv(file_path):
    df = pd.read_csv(file_path)
    return df.apply(lambda x: ' | '.join(x.astype(str)), axis=1).tolist()

def load_pdf(file_path):
    reader = PdfReader(file_path)
    text_chunks = []
    for page in reader.pages:
        text = page.extract_text()
        if text:
            if len(text) > 4000:
                text_chunks.extend(chunk_text(text))
            else:
                text_chunks.append(text)
    return text_chunks

def load_txt(file_path):
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()
    return chunk_text(content)

def load_xlsx(file_path):
    all_rows = []
    try:
        sheets = pd.read_excel(file_path, sheet_name=None)
    except Exception as e:
        raise ValueError(f"Pandas read_excel failed: {e}")
    for sheet_name, df in sheets.items():
        if df.empty:
            continue
        df = df.fillna("")
        for row in df.values:
            row_items = [str(x) for x in row if str(x).strip() != ""]
            if row_items:
                row_str = f"Sheet: {str(sheet_name)} | " + " | ".join(row_items)
                if len(row_str) > 8000:
                    all_rows.extend(chunk_text(row_str))
                else:
                    all_rows.append(row_str)
    return all_rows

def process_file(file_path):
    ext = os.path.splitext(file_path)[1].lower()
    if ext == '.csv':
        return load_csv(file_path)
    elif ext == '.pdf':
        return load_pdf(file_path)
    elif ext == '.txt':
        return load_txt(file_path)
    elif ext == '.xlsx':
        return load_xlsx(file_path)
    else:
        raise ValueError(f"Unsupported file type: {ext}")
@@ -1,27 +1,27 @@
from .embeddings import get_embeddings_batch, get_embedding
from ..chroma.vector_store import insert_documents, search_documents

def ingest_documents(text_chunks, collection_name="rag_documents", source_file=None, category=None):
    embeddings = get_embeddings_batch(text_chunks)
    metadata_list = None
    if source_file or category:
        metadata_list = []
        for _ in text_chunks:
            meta = {}
            if source_file:
                meta["source"] = source_file
            if category:
                meta["category"] = category
            metadata_list.append(meta)
    return insert_documents(text_chunks, embeddings, collection_name=collection_name, metadata_list=metadata_list)

def vector_search(query_text, collection_name="rag_documents", num_results=5, category=None):
    query_embedding = get_embedding(query_text)
    filter_metadata = None
    if category:
        filter_metadata = {"category": category}
    return search_documents(query_embedding, collection_name=collection_name, num_results=num_results, filter_metadata=filter_metadata)
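Putting ingestion and retrieval together, a hedged end-to-end sketch (file name and category are made up, the ingest import path is assumed, and Chroma plus the Ollama embedding host are expected to be configured as above):

from src.rag.ingest import process_file  # assumed module path
# ingest_documents / vector_search come from the store module above (its path is not shown in this diff)

chunks = process_file("brand_report_2024.pdf")
ingest_documents(chunks, source_file="brand_report_2024.pdf", category="apparel")

for hit in vector_search("recycled packaging claims", num_results=3, category="apparel"):
    print(hit.get("score"), hit.get("text", "")[:80])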
@@ -1,62 +1,62 @@
from flask import Blueprint, request, jsonify
from src.rag.gemeni import GeminiClient
from src.gemini import ask_gemini_with_rag

gemini_bp = Blueprint('gemini', __name__)

brain = None

def get_brain():
    global brain
    if brain is None:
        brain = GeminiClient()
    return brain

@gemini_bp.route('/ask', methods=['POST'])
def ask():
    data = request.json
    prompt = data.get("prompt")
    context = data.get("context", "")
    if not prompt:
        return jsonify({"error": "No prompt provided"}), 400
    try:
        client = get_brain()
        response = client.ask(prompt, context)
        return jsonify({
            "status": "success",
            "reply": response
        })
    except Exception as e:
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500

@gemini_bp.route('/rag', methods=['POST'])
def rag():
    data = request.json
    prompt = data.get("prompt")
    category = data.get("category")
    if not prompt:
        return jsonify({"error": "No prompt provided"}), 400
    try:
        response = ask_gemini_with_rag(prompt, category=category)
        return jsonify({
            "status": "success",
            "reply": response
        })
    except Exception as e:
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500

@gemini_bp.route('/vision', methods=['POST'])
def vision():
    return jsonify({
        "status": "error",
        "message": "Vision endpoint not yet implemented"
    }), 501
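A client-side sketch of calling these routes (the URL prefix, host, and port under which gemini_bp is registered are not shown in this diff, so the address below is an assumption):

import requests

resp = requests.post(
    "http://localhost:5000/gemini/rag",           # assumed registration prefix
    json={"prompt": "Is 'carbon neutral by 2030' a credible claim?", "category": None},
    timeout=60,
)
print(resp.json().get("reply"))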
@@ -4,63 +4,110 @@ Uses structured outputs with Pydantic for reliable JSON responses
""" """
import base64 import base64
import os import os
import cv2
import numpy as np
from datetime import datetime from datetime import datetime
from flask import Blueprint, request, jsonify from flask import Blueprint ,request ,jsonify
from google import genai from google import genai
from pydantic import BaseModel, Field from pydantic import BaseModel ,Field
from typing import List, Optional, Literal from typing import List ,Optional ,Literal
from src.ollama.detector import OllamaLogoDetector from src .ollama .detector import OllamaLogoDetector
from src.chroma.vector_store import search_documents, insert_documents from src .chroma .vector_store import search_documents ,insert_documents
from src.rag.embeddings import get_embedding from src .rag .embeddings import get_embedding
from src.mongo.connection import get_mongo_client from src .mongo .connection import get_mongo_client
incidents_bp = Blueprint('incidents', __name__) incidents_bp =Blueprint ('incidents',__name__ )
# Initialize detector lazily
_detector = None
def get_detector(): _detector =None
def get_detector ():
global _detector global _detector
if _detector is None: if _detector is None :
_detector = OllamaLogoDetector() _detector =OllamaLogoDetector ()
return _detector return _detector
# ============= Pydantic Models for Structured Outputs ============= def compress_image (image_bytes :bytes ,max_width :int =800 ,quality :int =85 )->str :
"""
Compress image using OpenCV and return Base64 string
class GreenwashingAnalysis(BaseModel): Args:
image_bytes: Original image bytes
max_width: Maximum width for resized image
quality: JPEG quality (1-100)
Returns:
Base64 encoded compressed image
"""
try :
nparr =np .frombuffer (image_bytes ,np .uint8 )
img =cv2 .imdecode (nparr ,cv2 .IMREAD_COLOR )
if img is None :
raise ValueError ("Failed to decode image")
height ,width =img .shape [:2 ]
if width >max_width :
ratio =max_width /width
new_width =max_width
new_height =int (height *ratio )
img =cv2 .resize (img ,(new_width ,new_height ),interpolation =cv2 .INTER_AREA )
encode_param =[int (cv2 .IMWRITE_JPEG_QUALITY ),quality ]
_ ,buffer =cv2 .imencode ('.jpg',img ,encode_param )
compressed_base64 =base64 .b64encode (buffer ).decode ('utf-8')
return compressed_base64
except Exception as e :
print (f"Image compression error: {e }")
return base64 .b64encode (image_bytes ).decode ('utf-8')
class GreenwashingAnalysis (BaseModel ):
"""Structured output for greenwashing analysis""" """Structured output for greenwashing analysis"""
is_greenwashing: bool = Field(description="Whether this is a case of greenwashing") is_greenwashing :bool =Field (description ="Whether this is a case of greenwashing")
confidence: Literal["high", "medium", "low"] = Field(description="Confidence level of the analysis") confidence :Literal ["high","medium","low"]=Field (description ="Confidence level of the analysis")
verdict: str = Field(description="Brief one-sentence verdict") verdict :str =Field (description ="Brief one-sentence verdict")
reasoning: str = Field(description="Detailed explanation of why this is or isn't greenwashing") reasoning :str =Field (description ="Detailed explanation of why this is or isn't greenwashing")
severity: Literal["high", "medium", "low"] = Field(description="Severity of the greenwashing if detected") severity :Literal ["high","medium","low"]=Field (description ="Severity of the greenwashing if detected")
recommendations: str = Field(description="What consumers should know about this case") recommendations :str =Field (description ="What consumers should know about this case")
key_claims: List[str] = Field(description="List of specific environmental claims made by the company") key_claims :List [str ]=Field (description ="List of specific environmental claims made by the company")
red_flags: List[str] = Field(description="List of red flags or concerning practices identified") red_flags :List [str ]=Field (description ="List of red flags or concerning practices identified")
class LogoDetection(BaseModel): class LogoDetection (BaseModel ):
"""Structured output for logo detection from Ollama""" """Structured output for logo detection from Ollama"""
brand: str = Field(description="The company or brand name detected") brand :str =Field (description ="The company or brand name detected")
confidence: Literal["high", "medium", "low"] = Field(description="Confidence level of detection") confidence :Literal ["high","medium","low"]=Field (description ="Confidence level of detection")
location: str = Field(description="Location in image (e.g., center, top-left)") location :str =Field (description ="Location in image (e.g., center, top-left)")
category: str = Field(description="Product category if identifiable") category :str =Field (description ="Product category if identifiable")
class ImageAnalysis(BaseModel): class ImageAnalysis (BaseModel ):
"""Structured output for full image analysis""" """Structured output for full image analysis"""
logos_detected: List[LogoDetection] = Field(description="List of logos/brands detected in the image") logos_detected :List [LogoDetection ]=Field (description ="List of logos/brands detected in the image")
total_count: int = Field(description="Total number of logos detected") total_count :int =Field (description ="Total number of logos detected")
description: str = Field(description="Brief description of what's in the image") description :str =Field (description ="Brief description of what's in the image")
environmental_claims: List[str] = Field(description="Any environmental or eco-friendly claims visible in the image") environmental_claims :List [str ]=Field (description ="Any environmental or eco-friendly claims visible in the image")
packaging_description: str = Field(description="Description of the product packaging and design") packaging_description :str =Field (description ="Description of the product packaging and design")
# ============= Analysis Functions =============
GREENWASHING_ANALYSIS_PROMPT = """You are an expert at detecting greenwashing - misleading environmental claims by companies.
GREENWASHING_ANALYSIS_PROMPT ="""You are an expert at detecting greenwashing - misleading environmental claims by companies.
Analyze the following user-submitted report about a potential greenwashing incident: Analyze the following user-submitted report about a potential greenwashing incident:
@@ -81,48 +128,48 @@ Based on this information, determine if this is a valid case of greenwashing. Co
Provide your analysis in the structured format requested."""

def analyze_with_gemini(product_name: str, user_description: str, detected_brand: str,
                        image_description: str, context: str) -> GreenwashingAnalysis:
    """Send analysis request to Gemini with structured output"""
    api_key = os.environ.get("GOOGLE_API_KEY")
    if not api_key:
        raise ValueError("GOOGLE_API_KEY not set")
    prompt = GREENWASHING_ANALYSIS_PROMPT.format(
        product_name=product_name,
        user_description=user_description,
        detected_brand=detected_brand,
        image_description=image_description,
        context=context
    )
    client = genai.Client(api_key=api_key)
    # Use structured output with Pydantic schema
    response = client.models.generate_content(
        model="gemini-3-pro-preview",
        contents=prompt,
        config={
            "response_mime_type": "application/json",
            "response_json_schema": GreenwashingAnalysis.model_json_schema(),
        }
    )
    # Validate and parse the response
    analysis = GreenwashingAnalysis.model_validate_json(response.text)
    return analysis

def analyze_image_with_ollama(image_bytes: bytes) -> ImageAnalysis:
    """Analyze image using Ollama with structured output"""
    try:
        import ollama
        client = ollama.Client(host="https://ollama.sirblob.co")
        image_base64 = base64.b64encode(image_bytes).decode('utf-8')
        prompt = """Analyze this image for a greenwashing detection system.

Identify:
1. All visible logos, brand names, and company names
@@ -131,275 +178,318 @@ Identify:
Respond with structured JSON matching the schema provided."""
        response = client.chat(
            model="ministral-3:latest",
            messages=[{
                'role': 'user',
                'content': prompt,
                'images': [image_base64],
            }],
            format=ImageAnalysis.model_json_schema(),
            options={'temperature': 0.1}
        )
        # Validate and parse
        analysis = ImageAnalysis.model_validate_json(response['message']['content'])
        return analysis
    except Exception as e:
        print(f"Ollama structured analysis failed: {e}")
        # Fall back to basic detection
        detector = get_detector()
        result = detector.detect_from_bytes(image_bytes)
        # Convert to structured format
        logos = []
        for logo in result.get('logos_detected', []):
            logos.append(LogoDetection(
                brand=logo.get('brand', 'Unknown'),
                confidence=logo.get('confidence', 'low'),
                location=logo.get('location', 'unknown'),
                category=logo.get('category', 'unknown')
            ))
        return ImageAnalysis(
            logos_detected=logos,
            total_count=result.get('total_count', 0),
            description=result.get('description', 'No description available'),
            environmental_claims=[],
            packaging_description=""
        )

def save_to_mongodb(incident_data: dict) -> str:
    """Save incident to MongoDB and return the ID"""
    client = get_mongo_client()
    db = client["ethix"]
    collection = db["incidents"]
    result = collection.insert_one(incident_data)
    return str(result.inserted_id)

def save_to_chromadb(incident_data: dict, incident_id: str):
    """
    Save incident as context for the chatbot
    Includes verdict, full analysis, and environmental impact information
    """
    analysis = incident_data['analysis']

    # Create a rich text representation of the incident
    red_flags = "\n".join(f"- {flag}" for flag in analysis.get('red_flags', []))
    key_claims = "\n".join(f"- {claim}" for claim in analysis.get('key_claims', []))
    env_claims = "\n".join(f"- {claim}" for claim in incident_data.get('environmental_claims', []))

    text = f"""GREENWASHING INCIDENT REPORT #{incident_id}
Report Date: {incident_data['created_at']}
Company/Product: {incident_data['product_name']}
Detected Brand: {incident_data.get('detected_brand', 'Unknown brand')}
Status: {incident_data['status']}

=== VERDICT ===
{analysis['verdict']}
Greenwashing Detected: {'YES' if analysis['is_greenwashing'] else 'NO'}
Confidence Level: {analysis['confidence']}
Severity Assessment: {analysis['severity']}

=== USER COMPLAINT ===
{incident_data['user_description']}

=== IMAGE ANALYSIS ===
{incident_data.get('image_description', 'No image analysis available')}

=== ENVIRONMENTAL CLAIMS IDENTIFIED ===
{env_claims if env_claims else 'No specific environmental claims identified'}

=== DETAILED ANALYSIS & REASONING ===
{analysis['reasoning']}

=== KEY MARKETING CLAIMS ===
{key_claims if key_claims else 'No key claims identified'}

=== RED FLAGS IDENTIFIED ===
{red_flags if red_flags else 'No specific red flags identified'}

=== CONSUMER RECOMMENDATIONS ===
{analysis['recommendations']}

=== ENVIRONMENTAL IMPACT ASSESSMENT ===
This report highlights potential misleading environmental claims by {incident_data.get('detected_brand', 'the company')}.
Consumers should be aware that {analysis['severity']} severity greenwashing has been identified with {analysis['confidence']} confidence.
This incident has been documented for future reference and to help inform sustainable purchasing decisions.
"""

    # Get embedding for the incident
    embedding = get_embedding(text)

    # Store in ChromaDB with metadata
    metadata = {
        "type": "incident_report",
        "source": f"incident_{incident_id}",
        "product_name": incident_data['product_name'],
        "brand": incident_data.get('detected_brand', 'Unknown'),
        "severity": analysis['severity'],
        "confidence": analysis['confidence'],
        "is_greenwashing": True,
        "verdict": analysis['verdict'],
        "status": incident_data['status'],
        "created_at": incident_data['created_at'],
        "num_red_flags": len(analysis.get('red_flags', [])),
        "num_claims": len(analysis.get('key_claims', []))
    }
    insert_documents(
        texts=[text],
        embeddings=[embedding],
        metadata_list=[metadata]
    )
    print(f"✓ Incident #{incident_id} saved to ChromaDB for AI chat context")

# ============= API Endpoints =============

@incidents_bp.route('/submit', methods=['POST'])
def submit_incident():
    """
    Submit a greenwashing incident report

    Expects JSON with:
    - product_name: Name of the product/company
    - description: User's description of the misleading claim
    - report_type: 'product' or 'company'
    - image: Base64 encoded image (for product reports)
    - pdf_data: Base64 encoded PDF (for company reports)
    """
    data = request.json
    if not data:
        return jsonify({"error": "No data provided"}), 400

    product_name = data.get('product_name', '').strip()
    user_description = data.get('description', '').strip()
    report_type = data.get('report_type', 'product')
    image_base64 = data.get('image')

    if not product_name:
        return jsonify({"error": "Product name is required"}), 400
    if not user_description:
        return jsonify({"error": "Description is required"}), 400

    try:
        # Step 1: Analyze image with Ollama (structured output)
        detected_brand = "Unknown"
        image_description = "No image provided"
        environmental_claims = []
        compressed_image_base64 = None

        if report_type == 'product' and image_base64:
            try:
                # Remove data URL prefix if present
                if ',' in image_base64:
                    image_base64 = image_base64.split(',')[1]
                image_bytes = base64.b64decode(image_base64)

                print("Compressing image with OpenCV...")
                compressed_image_base64 = compress_image(image_bytes, max_width=600, quality=75)

                # Use structured image analysis
                image_analysis = analyze_image_with_ollama(image_bytes)
                if image_analysis.logos_detected:
                    detected_brand = image_analysis.logos_detected[0].brand
                image_description = image_analysis.description
                environmental_claims = image_analysis.environmental_claims
            except Exception as e:
                print(f"Image processing error: {e}")
                # Continue without image analysis

        # Step 2: Get relevant context from vector database
        search_query = f"{product_name} {detected_brand} environmental claims sustainability greenwashing"
        query_embedding = get_embedding(search_query)
        search_results = search_documents(query_embedding, num_results=5)
        context = ""
        for res in search_results:
            context += f"--- Document ---\n{res['text'][:500]}\n\n"
        if not context:
            context = "No prior information found about this company in our database."
        # Add environmental claims from image to context
        if environmental_claims:
            context += "\n--- Claims visible in submitted image ---\n"
            context += "\n".join(f"- {claim}" for claim in environmental_claims)

        # Step 3: Analyze with Gemini (structured output)
        analysis = analyze_with_gemini(
            product_name=product_name,
            user_description=user_description,
            detected_brand=detected_brand,
            image_description=image_description,
            context=context
        )
        # Convert Pydantic model to dict
        analysis_dict = analysis.model_dump()

        # Step 4: Prepare incident data
        incident_data = {
            "product_name": product_name,
            "user_description": user_description,
            "detected_brand": detected_brand,
            "image_description": image_description,
            "environmental_claims": environmental_claims,
            "analysis": analysis_dict,
            "is_greenwashing": analysis.is_greenwashing,
            "created_at": datetime.utcnow().isoformat(),
            "status": "confirmed" if analysis.is_greenwashing else "dismissed",
            "report_type": report_type
        }
        if compressed_image_base64:
            incident_data["image_base64"] = compressed_image_base64

        incident_id = None
        # Step 5: If greenwashing detected, save to databases
        if analysis.is_greenwashing:
            # Save to MongoDB
            incident_id = save_to_mongodb(incident_data)
            # Save to ChromaDB for chatbot context
            save_to_chromadb(incident_data, incident_id)

        return jsonify({
            "status": "success",
            "is_greenwashing": analysis.is_greenwashing,
            "incident_id": incident_id,
            "analysis": analysis_dict,
            "detected_brand": detected_brand,
            "environmental_claims": environmental_claims
        })
    except Exception as e:
        import traceback
        traceback.print_exc()
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500
@incidents_bp.route('/list', methods=['GET']) @incidents_bp .route ('/list',methods =['GET'])
def list_incidents(): def list_incidents ():
"""Get all confirmed greenwashing incidents""" """Get all confirmed greenwashing incidents"""
try: try :
client = get_mongo_client() client =get_mongo_client ()
db = client["ethix"] db =client ["ethix"]
collection = db["incidents"] collection =db ["incidents"]
# Get recent incidents with full analysis details
incidents = list(collection.find(
{"is_greenwashing": True},
{"_id": 1, "product_name": 1, "detected_brand": 1,
"user_description": 1, "analysis": 1, "created_at": 1}
).sort("created_at", -1).limit(50))
# Convert ObjectId to string
for inc in incidents:
inc["_id"] = str(inc["_id"])
return jsonify(incidents)
except Exception as e:
return jsonify({"error": str(e)}), 500
@incidents_bp.route('/<incident_id>', methods=['GET']) incidents =list (collection .find (
def get_incident(incident_id): {"is_greenwashing":True },
{"_id":1 ,"product_name":1 ,"detected_brand":1 ,
"user_description":1 ,"analysis":1 ,"created_at":1 ,
"image_base64":1 ,"report_type":1 }
).sort ("created_at",-1 ).limit (50 ))
for inc in incidents :
inc ["_id"]=str (inc ["_id"])
return jsonify (incidents )
except Exception as e :
return jsonify ({"error":str (e )}),500
@incidents_bp .route ('/<incident_id>',methods =['GET'])
def get_incident (incident_id ):
"""Get a specific incident by ID""" """Get a specific incident by ID"""
try: try :
from bson import ObjectId from bson import ObjectId
client = get_mongo_client() client =get_mongo_client ()
db = client["ethix"] db =client ["ethix"]
collection = db["incidents"] collection =db ["incidents"]
incident = collection.find_one({"_id": ObjectId(incident_id)}) incident =collection .find_one ({"_id":ObjectId (incident_id )})
if not incident: if not incident :
return jsonify({"error": "Incident not found"}), 404 return jsonify ({"error":"Incident not found"}),404
incident["_id"] = str(incident["_id"]) incident ["_id"]=str (incident ["_id"])
return jsonify(incident) return jsonify (incident )
except Exception as e: except Exception as e :
return jsonify({"error": str(e)}), 500 return jsonify ({"error":str (e )}),500

View File

@@ -1,7 +1,7 @@
from flask import Blueprint

main_bp = Blueprint('main', __name__)


@main_bp.route('/')
def index():
    return "Hello from the organized Flask App!"

View File

@@ -1,24 +1,24 @@
from flask import Blueprint, request, jsonify
from ..rag.store import vector_search, ingest_documents

rag_bp = Blueprint('rag', __name__)


@rag_bp.route('/ingest', methods=['POST'])
def ingest():
    data = request.json
    text_chunks = data.get('chunks', [])
    if not text_chunks:
        return jsonify({"error": "No chunks provided"}), 400
    count = ingest_documents(text_chunks)
    return jsonify({"message": f"Ingested {count} documents"}), 201


@rag_bp.route('/search', methods=['POST'])
def search():
    data = request.json
    query = data.get('query')
    if not query:
        return jsonify({"error": "No query provided"}), 400
    results = vector_search(query)
    return jsonify({"results": results}), 200

View File

@@ -1,233 +1,233 @@
from flask import Blueprint, jsonify, request
from src.chroma.vector_store import get_all_metadatas, search_documents
from src.rag.embeddings import get_embedding

reports_bp = Blueprint('reports', __name__)


@reports_bp.route('/', methods=['GET'])
def get_reports():
    try:
        # Fetch all metadatas to ensure we get diversity; 60k items is
        # manageable for a metadata-only fetch.
        metadatas = get_all_metadatas()

        unique_reports = {}

        for meta in metadatas:
            filename = meta.get('source') or meta.get('filename')
            if not filename:
                continue

            # Skip incident reports - these are user-submitted greenwashing reports
            if meta.get('type') == 'incident_report' or filename.startswith('incident_'):
                continue

            if filename not in unique_reports:
                # Attempt to extract info from the filename. Common patterns:
                #   2020-tesla-impact-report.pdf
                #   google-2023-environmental-report.pdf
                #   ghgp_data_2021.xlsx
                company_name = "Unknown"
                year = "N/A"
                sector = "Other"
                lower_name = filename.lower()

                # Extract the year
                import re
                year_match = re.search(r'20\d{2}', lower_name)
                if year_match:
                    year = year_match.group(0)

                # Extract the company (heuristics)
                if 'tesla' in lower_name:
                    company_name = "Tesla"
                    sector = "Automotive"
                elif 'google' in lower_name:
                    company_name = "Google"
                    sector = "Tech"
                elif 'apple' in lower_name:
                    company_name = "Apple"
                    sector = "Tech"
                elif 'microsoft' in lower_name:
                    company_name = "Microsoft"
                    sector = "Tech"
                elif 'amazon' in lower_name:
                    company_name = "Amazon"
                    sector = "Tech"
                elif 'boeing' in lower_name:
                    company_name = "Boeing"
                    sector = "Aerospace"
                elif 'ghgp' in lower_name:
                    company_name = "GHGP Data"
                    sector = "Data"
                elif 'salesforce' in lower_name:
                    company_name = "Salesforce"
                    sector = "Tech"
                elif 'hp ' in lower_name or 'hp-' in lower_name:
                    company_name = "HP"
                    sector = "Tech"
                else:
                    # Fallback: capitalize the first word of the filename
                    parts = re.split(r'[-_.]', filename)
                    if parts:
                        company_name = parts[0].capitalize()
                        if company_name.isdigit():  # Filename starts with the year
                            company_name = parts[1].capitalize() if len(parts) > 1 else "Unknown"

                unique_reports[filename] = {
                    'company_name': company_name,
                    'year': year,
                    'sector': sector,
                    'greenwashing_score': meta.get('greenwashing_score', 0),
                    'filename': filename,
                    'title': f"{company_name} {year} Report"
                }

        reports_list = list(unique_reports.values())
        return jsonify(reports_list)
    except Exception as e:
        print(f"Error fetching reports: {e}")
        import traceback
        traceback.print_exc()
        return jsonify({'error': str(e)}), 500


@reports_bp.route('/search', methods=['POST'])
def search_reports():
    data = request.json
    query = data.get('query', '')
    if not query:
        return jsonify([])

    try:
        import re

        # Get the embedding for the query
        query_embedding = get_embedding(query)

        # Search in Chroma - fetch more results than needed so we can filter
        results = search_documents(query_embedding, num_results=50)

        query_lower = query.lower()

        # Helper function to extract company info from a filename
        def extract_company_info(filename):
            company_name = "Unknown"
            year = "N/A"
            sector = "Other"
            lower_name = filename.lower()

            # Extract the year
            year_match = re.search(r'20\d{2}', lower_name)
            if year_match:
                year = year_match.group(0)

            # Extract the company (heuristics)
            if 'tesla' in lower_name:
                company_name = "Tesla"
                sector = "Automotive"
            elif 'google' in lower_name:
                company_name = "Google"
                sector = "Tech"
            elif 'apple' in lower_name:
                company_name = "Apple"
                sector = "Tech"
            elif 'microsoft' in lower_name:
                company_name = "Microsoft"
                sector = "Tech"
            elif 'amazon' in lower_name:
                company_name = "Amazon"
                sector = "Tech"
            elif 'boeing' in lower_name:
                company_name = "Boeing"
                sector = "Aerospace"
            elif 'ghgp' in lower_name:
                company_name = "GHGP Data"
                sector = "Data"
            elif 'salesforce' in lower_name:
                company_name = "Salesforce"
                sector = "Tech"
            elif 'hp ' in lower_name or 'hp-' in lower_name or lower_name.startswith('hp'):
                company_name = "HP"
                sector = "Tech"
            else:
                parts = re.split(r'[-_.]', filename)
                if parts:
                    company_name = parts[0].capitalize()
                    if company_name.isdigit():
                        company_name = parts[1].capitalize() if len(parts) > 1 else "Unknown"

            return company_name, year, sector

        output = []
        seen_filenames = set()

        for item in results:
            meta = item.get('metadata', {})
            text = item.get('text', '')
            filename = meta.get('source') or meta.get('filename', 'Unknown')

            # Skip duplicates
            if filename in seen_filenames:
                continue
            seen_filenames.add(filename)

            company_name, year, sector = extract_company_info(filename)

            # Calculate the match score - boost when the query matches the company or filename
            match_boost = 0
            if query_lower in filename.lower():
                match_boost = 1000  # Strong boost for a filename match
            if query_lower in company_name.lower():
                match_boost = 1000  # Strong boost for a company match

            # Semantic score (inverted distance, higher = better)
            semantic_score = 1 / (item.get('score', 1) + 0.001) if item.get('score') else 0

            combined_score = match_boost + semantic_score

            # Format the snippet
            snippet = text[:300] + "..." if len(text) > 300 else text

            output.append({
                'company_name': company_name,
                'year': year,
                'filename': filename,
                'sector': sector,
                'greenwashing_score': meta.get('greenwashing_score', 0),
                'snippet': snippet,
                'relevance_score': item.get('score'),
                '_combined_score': combined_score
            })

        # Sort by combined score (descending - higher is better)
        output.sort(key=lambda x: x.get('_combined_score', 0), reverse=True)

        # Remove the internal score field and limit the results
        for item in output:
            item.pop('_combined_score', None)
        return jsonify(output[:20])
    except Exception as e:
        print(f"Error searching reports: {e}")
        return jsonify({'error': str(e)}), 500


@reports_bp.route('/view/<path:filename>', methods=['GET'])
def view_report_file(filename):
    import os
    from flask import send_from_directory

    # The dataset directory lives at backend/dataset, two levels above src/routes
    current_dir = os.path.dirname(os.path.abspath(__file__))
    dataset_dir = os.path.join(current_dir, '..', '..', 'dataset')
    return send_from_directory(dataset_dir, filename)
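Both routes above lean on the same filename heuristic: the year is the first '20xx' token, and the company falls back to the first filename segment when none of the hard-coded brands match. A small standalone illustration of just the year match and the fallback branch (the brand lookup table is omitted here):

import re

def demo(filename: str):
    lower_name = filename.lower()
    year_match = re.search(r'20\d{2}', lower_name)
    year = year_match.group(0) if year_match else "N/A"
    parts = re.split(r'[-_.]', filename)
    company = parts[0].capitalize()
    if company.isdigit() and len(parts) > 1:  # filename starts with the year
        company = parts[1].capitalize()
    return company, year

print(demo("2020-tesla-impact-report.pdf"))  # ('Tesla', '2020')
print(demo("ghgp_data_2021.xlsx"))           # ('Ghgp', '2021') - the real route maps this to "GHGP Data"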

View File

@@ -30,6 +30,7 @@
"@sveltejs/kit": "^2.50.1", "@sveltejs/kit": "^2.50.1",
"@sveltejs/vite-plugin-svelte": "^5.1.1", "@sveltejs/vite-plugin-svelte": "^5.1.1",
"@tauri-apps/cli": "^2.9.6", "@tauri-apps/cli": "^2.9.6",
"@types/node": "^25.0.10",
"svelte": "^5.48.2", "svelte": "^5.48.2",
"svelte-check": "^4.3.5", "svelte-check": "^4.3.5",
"typescript": "~5.6.3", "typescript": "~5.6.3",

View File

@@ -9,14 +9,14 @@ const __dirname = path.dirname(__filename);
const app = express();
const PORT = process.env.PORT || 3000;

// Enable gzip compression
app.use(compression());

// Serve static files from the build directory (one level up from the server folder)
const buildPath = path.join(__dirname, '../build');
app.use(express.static(buildPath));

// Handle SPA routing: serve index.html for any unknown route
app.get(/.*/, (req, res) => {
  res.sendFile(path.join(buildPath, 'index.html'));
});

View File

@@ -1,4 +1,4 @@
// Learn more about Tauri commands at https://tauri.app/develop/calling-rust/
#[tauri::command]
fn greet(name: &str) -> String {
    format!("Hello, {}! You've been greeted from Rust!", name)

View File

@@ -1,4 +1,4 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

fn main() {

View File

@@ -201,7 +201,7 @@
    const config = activeConfig();
    if (!config.staticScene || config.scenes) {
        // Always use window scroll now
        scrollContainer = null;
        updateMeasurements();
    }

View File

@@ -1,5 +1,6 @@
<script lang="ts"> <script lang="ts">
import Icon from "@iconify/svelte"; import Icon from "@iconify/svelte";
import Pagination from "./Pagination.svelte";
let { let {
viewMode = $bindable(), viewMode = $bindable(),
@@ -7,24 +8,32 @@
selectedCategory = $bindable(), selectedCategory = $bindable(),
categories, categories,
onSearchInput, onSearchInput,
currentPage,
totalPages,
goToPage,
}: { }: {
viewMode: "company" | "user"; viewMode: "company" | "user";
searchQuery: string; searchQuery: string;
selectedCategory: string; selectedCategory: string;
categories: string[]; categories: string[];
onSearchInput: () => void; onSearchInput: () => void;
currentPage: number;
totalPages: number;
goToPage: (page: number) => void;
} = $props(); } = $props();
</script> </script>
<div <div
class="bg-black/80 backdrop-blur-[30px] border border-white/20 rounded-[32px] p-10 mb-10 shadow-[0_32px_64px_rgba(0,0,0,0.4)]" class="bg-black/60 backdrop-blur-2xl border border-white/15 rounded-4xl p-12 lg:p-14 mb-12 shadow-2xl shadow-black/50"
> >
<!-- Header content -->
<div class="mb-8 px-3 text-center"> <div class="mb-10 px-4 text-center">
<h1 class="text-white text-[42px] font-black m-0 tracking-[-2px]"> <h1
class="text-white text-[42px] lg:text-[48px] font-black m-0 tracking-[-2px]"
>
Sustainability Database Sustainability Database
</h1> </h1>
<p class="text-white/70 text-base mt-2 font-medium"> <p class="text-white/80 text-base lg:text-lg mt-3 font-medium">
{#if viewMode === "company"} {#if viewMode === "company"}
Search within verified company reports and impact assessments Search within verified company reports and impact assessments
{:else} {:else}
@@ -33,35 +42,35 @@
</p> </p>
</div> </div>
<!-- View Mode Toggle Switch -->
<div class="flex items-center justify-center gap-4 my-6"> <div class="flex items-center justify-center gap-5 my-8">
<span <span
class="flex items-center gap-1.5 text-sm font-semibold transition-all duration-300 {viewMode === class="flex items-center gap-2 text-sm lg:text-base font-semibold transition-all duration-300 {viewMode ===
'company' 'company'
? 'text-emerald-400' ? 'text-emerald-400'
: 'text-white/40'}" : 'text-white/50'}"
> >
<Icon icon="ri:building-2-line" width="16" /> <Icon icon="ri:building-2-line" width="16" />
Company Company
</span> </span>
<button <button
class="relative w-14 h-7 bg-white/15 border-none rounded-full cursor-pointer transition-all duration-300 p-0 hover:bg-white/20" class="relative w-16 h-8 bg-white/10 border-none rounded-full cursor-pointer transition-all duration-300 p-0 hover:bg-white/15 shadow-inner"
onclick={() => onclick={() =>
(viewMode = viewMode === "company" ? "user" : "company")} (viewMode = viewMode === "company" ? "user" : "company")}
aria-label="Toggle between company and user reports" aria-label="Toggle between company and user reports"
> >
<span <span
class="absolute top-1/2 -translate-y-1/2 w-[22px] h-[22px] bg-emerald-600 rounded-full transition-all duration-300 shadow-[0_2px_8px_rgba(34,197,94,0.4)] {viewMode === class="absolute top-1/2 -translate-y-1/2 w-6 h-6 bg-emerald-500 rounded-full transition-all duration-300 shadow-[0_2px_12px_rgba(34,197,94,0.5)] {viewMode ===
'user' 'user'
? 'left-[calc(100%-25px)]' ? 'left-[calc(100%-28px)]'
: 'left-[3px]'}" : 'left-1'}"
></span> ></span>
</button> </button>
<span <span
class="flex items-center gap-1.5 text-sm font-semibold transition-all duration-300 {viewMode === class="flex items-center gap-2 text-sm lg:text-base font-semibold transition-all duration-300 {viewMode ===
'user' 'user'
? 'text-emerald-400' ? 'text-emerald-400'
: 'text-white/40'}" : 'text-white/50'}"
> >
<Icon icon="ri:user-voice-line" width="16" /> <Icon icon="ri:user-voice-line" width="16" />
User Reports User Reports
@@ -69,33 +78,35 @@
</div> </div>
{#if viewMode === "company"} {#if viewMode === "company"}
<div class="relative max-w-[600px] mx-auto mb-8"> <div class="relative max-w-[40.625rem] mx-auto mb-10">
<div <div
class="absolute left-5 top-1/2 -translate-y-1/2 text-white/60 flex items-center pointer-events-none" class="absolute left-6 top-1/2 -translate-y-1/2 text-white/60 flex items-center pointer-events-none"
> >
<Icon icon="ri:search-line" width="20" /> <Icon icon="ri:search-line" width="22" />
</div> </div>
<input <input
type="text" type="text"
class="w-full bg-white/5 border border-white/10 rounded-full py-4 pl-[52px] pr-5 text-white text-base font-medium outline-none transition-all duration-200 focus:bg-white/10 focus:border-emerald-400 placeholder:text-white/40" class="w-full bg-black/30 border border-white/15 rounded-full py-4 lg:py-5 pl-14 pr-6 text-white text-base lg:text-lg font-medium outline-none transition-all duration-200 focus:bg-black/40 focus:border-emerald-400 placeholder:text-white/50 shadow-inner"
placeholder="Search for companies, topics (e.g., 'emissions')..." placeholder="Search for companies, topics (e.g., 'emissions')..."
bind:value={searchQuery} bind:value={searchQuery}
oninput={onSearchInput} oninput={onSearchInput}
/> />
</div> </div>
<div class="flex justify-center flex-wrap gap-3 mb-5"> <div class="flex justify-center flex-wrap gap-3.5 mb-6">
{#each categories as category} {#each categories as category}
<button <button
class="px-6 py-2.5 rounded-full text-sm font-semibold cursor-pointer transition-all duration-200 border {selectedCategory === class="px-7 py-3 rounded-full text-sm lg:text-base font-semibold cursor-pointer transition-all duration-200 border {selectedCategory ===
category category
? 'bg-emerald-500 border-emerald-500 text-emerald-950 shadow-[0_4px_15px_rgba(34,197,94,0.3)]' ? 'bg-emerald-500 border-emerald-500 text-emerald-950 shadow-[0_4px_20px_rgba(34,197,94,0.4)]'
: 'bg-white/5 border-white/10 text-white/70 hover:bg-white/10 hover:text-white hover:-translate-y-0.5'}" : 'bg-black/30 backdrop-blur-sm border-white/15 text-white/70 hover:bg-black/40 hover:text-white hover:-translate-y-0.5 hover:shadow-lg'}"
onclick={() => (selectedCategory = category)} onclick={() => (selectedCategory = category)}
> >
{category} {category}
</button> </button>
{/each} {/each}
</div> </div>
<Pagination {currentPage} {totalPages} {goToPage} />
{/if} {/if}
</div> </div>

View File

@@ -25,12 +25,13 @@
aria-label="Close modal" aria-label="Close modal"
> >
<div <div
class="bg-slate-900 border border-slate-700 rounded-[24px] w-full max-w-[1000px] h-[90vh] flex flex-col shadow-[0_24px_64px_rgba(0,0,0,0.5)] overflow-hidden outline-none" class="bg-slate-900 border border-slate-700 rounded-3xl w-full max-w-5xl h-[90vh] flex flex-col shadow-[0_24px_64px_rgba(0,0,0,0.5)] overflow-hidden outline-none"
onclick={(e) => e.stopPropagation()} onclick={(e) => e.stopPropagation()}
onkeydown={(e) => e.stopPropagation()} onkeydown={(e) => e.stopPropagation()}
transition:scale={{ duration: 300, start: 0.95 }} transition:scale={{ duration: 300, start: 0.95 }}
role="document" role="dialog"
tabindex="0" aria-modal="true"
tabindex="-1"
> >
<div <div
class="px-6 py-5 bg-slate-800 border-b border-slate-700 flex justify-between items-center shrink-0" class="px-6 py-5 bg-slate-800 border-b border-slate-700 flex justify-between items-center shrink-0"

View File

@@ -7,6 +7,7 @@
    detected_brand: string;
    user_description?: string;
    created_at: string;
    image_base64?: string;
    analysis: {
        verdict: string;
        confidence: string;
@@ -21,66 +22,82 @@
</script> </script>
<button <button
class="flex items-center gap-6 p-6 bg-black/80 backdrop-blur-[30px] border border-red-500/30 rounded-[24px] transition-all duration-300 shadow-[0_16px_48px_rgba(0,0,0,0.5)] w-full text-left cursor-pointer hover:bg-black/95 hover:border-red-500/60 hover:-translate-y-1 hover:scale-[1.01] hover:shadow-[0_24px_64px_rgba(0,0,0,0.6)] outline-none" class="flex items-center gap-6 lg:gap-8 p-7 lg:p-8 bg-black/60 backdrop-blur-2xl border border-red-500/30 rounded-3xl transition-all duration-300 shadow-2xl shadow-black/50 w-full text-left cursor-pointer hover:bg-black/70 hover:border-red-500/60 hover:-translate-y-1 hover:scale-[1.01] hover:shadow-[0_24px_72px_rgba(0,0,0,0.7)] outline-none group"
{onclick} {onclick}
> >
<div {#if incident.image_base64}
class="w-[52px] h-[52px] rounded-xe flex items-center justify-center shrink-0 rounded-[14px] <div
{incident.analysis?.severity === 'high' class="w-16 h-16 lg:w-20 lg:h-20 shrink-0 rounded-2xl overflow-hidden border border-white/10 group-hover:border-white/20 transition-colors"
? 'bg-red-500/20 text-red-500' >
: 'bg-amber-500/20 text-amber-500'}" <img
> src="data:image/jpeg;base64,{incident.image_base64}"
<Icon icon="ri:alert-fill" width="28" /> alt={incident.product_name}
</div> class="w-full h-full object-cover"
/>
</div>
{:else}
<div
class="w-14 h-14 lg:w-16 lg:h-16 rounded-xe flex items-center justify-center shrink-0 rounded-2xl transition-transform
{incident.analysis?.severity === 'high'
? 'bg-red-500/20 text-red-500'
: 'bg-amber-500/20 text-amber-500'}"
>
<Icon icon="ri:alert-fill" width="32" class="lg:w-9" />
</div>
{/if}
<div class="flex-1 min-w-0"> <div class="flex-1 min-w-0">
<div class="flex items-center gap-3 mb-1.5"> <div class="flex items-center gap-3 mb-2">
<h3 class="text-white text-[18px] font-bold m-0 italic"> <h3
class="text-white text-[19px] lg:text-[20px] font-bold m-0 italic"
>
{incident.product_name} {incident.product_name}
</h3> </h3>
{#if incident.detected_brand && incident.detected_brand !== "Unknown"} {#if incident.detected_brand && incident.detected_brand !== "Unknown"}
<span <span
class="bg-white/10 text-white/70 px-2.5 py-0.5 rounded-full text-[12px] font-semibold" class="bg-white/10 backdrop-blur-sm text-white/80 px-3 py-1 rounded-full text-[12px] lg:text-[13px] font-semibold"
>{incident.detected_brand}</span >{incident.detected_brand}</span
> >
{/if} {/if}
</div> </div>
<p class="text-white/70 text-sm m-0 mb-2.5 leading-tight italic"> <p
class="text-white/70 text-sm lg:text-base m-0 mb-3 leading-tight italic"
>
{incident.analysis?.verdict || "Greenwashing detected"} {incident.analysis?.verdict || "Greenwashing detected"}
</p> </p>
<div class="flex flex-wrap gap-2"> <div class="flex flex-wrap gap-2.5">
<span <span
class="flex items-center gap-1 px-2.5 py-1 rounded-full text-[11px] font-semibold capitalize class="flex items-center gap-1.5 px-3 py-1.5 rounded-full text-[11px] lg:text-[12px] font-semibold capitalize backdrop-blur-sm
{incident.analysis?.severity === 'high' {incident.analysis?.severity === 'high'
? 'bg-red-500/20 text-red-400' ? 'bg-red-500/20 text-red-400'
: incident.analysis?.severity === 'medium' : incident.analysis?.severity === 'medium'
? 'bg-amber-500/20 text-amber-400' ? 'bg-amber-500/20 text-amber-400'
: 'bg-emerald-500/20 text-emerald-400'}" : 'bg-emerald-500/20 text-emerald-400'}"
> >
<Icon icon="ri:error-warning-fill" width="14" /> <Icon icon="ri:error-warning-fill" width="15" />
{incident.analysis?.severity || "unknown"} severity {incident.analysis?.severity || "unknown"} severity
</span> </span>
<span <span
class="flex items-center gap-1 px-2.5 py-1 rounded-full text-[11px] font-semibold bg-indigo-500/20 text-indigo-300" class="flex items-center gap-1.5 px-3 py-1.5 rounded-full text-[11px] lg:text-[12px] font-semibold bg-indigo-500/20 text-indigo-300 backdrop-blur-sm"
> >
<Icon icon="ri:shield-check-fill" width="14" /> <Icon icon="ri:shield-check-fill" width="15" />
{incident.analysis?.confidence || "unknown"} confidence {incident.analysis?.confidence || "unknown"} confidence
</span> </span>
<span <span
class="flex items-center gap-1 px-2.5 py-1 rounded-full text-[11px] font-semibold bg-white/10 text-white/60" class="flex items-center gap-1.5 px-3 py-1.5 rounded-full text-[11px] lg:text-[12px] font-semibold bg-white/10 text-white/70 backdrop-blur-sm"
> >
<Icon icon="ri:calendar-line" width="14" /> <Icon icon="ri:calendar-line" width="15" />
{new Date(incident.created_at).toLocaleDateString()} {new Date(incident.created_at).toLocaleDateString()}
</span> </span>
</div> </div>
</div> </div>
<div <div
class="flex flex-col items-center gap-1 text-[11px] font-bold uppercase text-red-500" class="flex flex-col items-center gap-1.5 text-[11px] lg:text-[12px] font-bold uppercase text-red-400"
> >
<Icon icon="ri:spam-2-fill" width="24" class="text-red-500" /> <Icon icon="ri:spam-2-fill" width="26" class="text-red-400 lg:w-7" />
<span>Confirmed</span> <span>Confirmed</span>
</div> </div>
</button> </button>

View File

@@ -8,6 +8,7 @@
    detected_brand: string;
    user_description?: string;
    created_at: string;
    image_base64?: string;
    analysis: {
        verdict: string;
        confidence: string;
@@ -25,7 +26,7 @@
</script> </script>
<div <div
class="fixed inset-0 bg-black/60 backdrop-blur-md z-1000 flex justify-center items-center p-5 outline-none" class="fixed inset-0 bg-black/50 backdrop-blur-lg z-1000 flex justify-center items-center p-6 outline-none"
onclick={onclose} onclick={onclose}
onkeydown={(e) => e.key === "Escape" && onclose()} onkeydown={(e) => e.key === "Escape" && onclose()}
transition:fade={{ duration: 200 }} transition:fade={{ duration: 200 }}
@@ -33,8 +34,10 @@
tabindex="0" tabindex="0"
aria-label="Close modal" aria-label="Close modal"
> >
<div <div
class="max-w-[700px] max-h-[85vh] w-full overflow-y-auto bg-black/85 backdrop-blur-[40px] border border-white/20 rounded-[32px] flex flex-col shadow-[0_32px_128px_rgba(0,0,0,0.7)] scrollbar-hide outline-none" class="max-w-3xl max-h-[88vh] w-full overflow-y-auto bg-white/8 backdrop-blur-2xl border border-white/20 rounded-4xl flex flex-col shadow-2xl shadow-black/60 scrollbar-hide outline-none"
onclick={(e) => e.stopPropagation()} onclick={(e) => e.stopPropagation()}
onkeydown={(e) => e.stopPropagation()} onkeydown={(e) => e.stopPropagation()}
transition:scale={{ duration: 300, start: 0.95 }} transition:scale={{ duration: 300, start: 0.95 }}
@@ -42,105 +45,138 @@
tabindex="0" tabindex="0"
> >
<div <div
class="px-10 py-[30px] bg-white/3 border-b border-red-500/20 flex justify-between items-center shrink-0" class="px-10 lg:px-12 py-8 bg-white/5 border-b border-red-500/25 flex justify-between items-center shrink-0"
> >
<div class="flex flex-col"> <div class="flex flex-col gap-2">
<div class="flex items-center gap-3"> <div class="flex items-center gap-3">
<Icon <Icon
icon="ri:alert-fill" icon="ri:alert-fill"
width="28" width="30"
class="text-red-500" class="text-red-400"
/> />
<h2 class="m-0 text-white text-[28px] font-extrabold"> <h2
class="m-0 text-white text-[28px] lg:text-[32px] font-extrabold"
>
{incident.product_name} {incident.product_name}
</h2> </h2>
</div> </div>
{#if incident.detected_brand && incident.detected_brand !== "Unknown"} {#if incident.detected_brand && incident.detected_brand !== "Unknown"}
<span class="mt-1 text-white/50 text-sm font-medium" <span class="text-white/60 text-sm lg:text-base font-medium"
>Brand: {incident.detected_brand}</span >Brand: {incident.detected_brand}</span
> >
{/if} {/if}
</div> </div>
<button <button
class="bg-white/5 border border-white/10 text-white w-10 h-10 rounded-xl flex items-center justify-center cursor-pointer transition-all duration-200 hover:bg-white/10 hover:rotate-90" class="bg-white/5 border border-white/10 text-white w-11 h-11 rounded-xl flex items-center justify-center cursor-pointer transition-all duration-200 hover:bg-white/15 hover:rotate-90 hover:border-white/20"
onclick={onclose} onclick={onclose}
> >
<Icon icon="ri:close-line" width="24" /> <Icon icon="ri:close-line" width="28" />
</button> </button>
</div> </div>
<div class="p-10 flex flex-col gap-[30px]"> <div class="p-10 lg:p-12 flex flex-col gap-8">
<!-- Status Badges -->
<div class="flex flex-wrap gap-3"> {#if incident.image_base64}
<div
class="w-full bg-black/20 rounded-3xl overflow-hidden border border-white/10 relative group"
>
<img
src="data:image/jpeg;base64,{incident.image_base64}"
alt="Evidence"
class="w-full max-h-[400px] object-contain bg-black/40"
/>
<div
class="absolute top-4 left-4 bg-black/60 backdrop-blur-md px-3 py-1.5 rounded-full flex items-center gap-2 border border-white/10"
>
<Icon
icon="ri:camera-fill"
width="16"
class="text-white/80"
/>
<span
class="text-xs font-bold text-white uppercase tracking-wider"
>Evidence of Greenwashing</span
>
</div>
</div>
{/if}
<div class="flex flex-wrap gap-3.5">
<span <span
class="flex items-center gap-2 px-5 py-3 rounded-[14px] text-[11px] font-extrabold tracking-wider class="flex items-center gap-2 px-5 py-3 rounded-[14px] text-[11px] lg:text-[12px] font-extrabold tracking-wider backdrop-blur-sm
{incident.analysis?.severity === 'high' {incident.analysis?.severity === 'high'
? 'bg-red-500/20 text-red-400 border border-red-500/30' ? 'bg-red-500/25 text-red-300 border border-red-500/40'
: incident.analysis?.severity === 'medium' : incident.analysis?.severity === 'medium'
? 'bg-amber-500/20 text-amber-400 border border-amber-500/30' ? 'bg-amber-500/25 text-amber-300 border border-amber-500/40'
: 'bg-emerald-500/20 text-emerald-400 border border-emerald-500/30'} uppercase" : 'bg-emerald-500/25 text-emerald-300 border border-emerald-500/40'} uppercase"
> >
<Icon icon="ri:error-warning-fill" width="18" /> <Icon icon="ri:error-warning-fill" width="18" />
{incident.analysis?.severity || "UNKNOWN"} SEVERITY {incident.analysis?.severity || "UNKNOWN"} SEVERITY
</span> </span>
<span <span
class="flex items-center gap-2 px-5 py-3 rounded-[14px] text-[11px] font-extrabold tracking-wider bg-indigo-500/20 text-indigo-300 border border-indigo-500/30 uppercase" class="flex items-center gap-2 px-5 py-3 rounded-[14px] text-[11px] lg:text-[12px] font-extrabold tracking-wider bg-indigo-500/25 text-indigo-300 border border-indigo-500/40 uppercase backdrop-blur-sm"
> >
<Icon icon="ri:shield-check-fill" width="18" /> <Icon icon="ri:shield-check-fill" width="18" />
{incident.analysis?.confidence || "UNKNOWN"} CONFIDENCE {incident.analysis?.confidence || "UNKNOWN"} CONFIDENCE
</span> </span>
<span <span
class="flex items-center gap-2 px-5 py-3 rounded-[14px] text-[11px] font-extrabold tracking-wider bg-white/10 text-white/70 border border-white/10 uppercase" class="flex items-center gap-2 px-5 py-3 rounded-[14px] text-[11px] lg:text-[12px] font-extrabold tracking-wider bg-white/10 text-white/80 border border-white/20 uppercase backdrop-blur-sm"
> >
<Icon icon="ri:calendar-check-fill" width="18" /> <Icon icon="ri:calendar-check-fill" width="18" />
{new Date(incident.created_at).toLocaleDateString()} {new Date(incident.created_at).toLocaleDateString()}
</span> </span>
</div> </div>
<!-- Verdict -->
<div class="bg-white/4 border border-white/6 rounded-[20px] p-6"> <div
class="bg-white/5 backdrop-blur-sm border border-white/10 rounded-[20px] p-7"
>
<h3 <h3
class="flex items-center gap-[10px] text-white text-base font-bold mb-4" class="flex items-center gap-2.5 text-white text-base lg:text-lg font-bold mb-4"
> >
<Icon icon="ri:scales-3-fill" width="20" /> <Icon icon="ri:scales-3-fill" width="22" />
Verdict Verdict
</h3> </h3>
<p <p
class="text-amber-400 text-[18px] font-semibold m-0 leading-normal" class="text-amber-300 text-[18px] lg:text-[19px] font-semibold m-0 leading-normal"
> >
{incident.analysis?.verdict || "Greenwashing detected"} {incident.analysis?.verdict || "Greenwashing detected"}
</p> </p>
</div> </div>
<!-- Detailed Analysis -->
<div class="bg-white/4 border border-white/6 rounded-[20px] p-6"> <div
class="bg-white/5 backdrop-blur-sm border border-white/10 rounded-[20px] p-7"
>
<h3 <h3
class="flex items-center gap-[10px] text-white text-base font-bold mb-4" class="flex items-center gap-2.5 text-white text-base lg:text-lg font-bold mb-4"
> >
<Icon icon="ri:file-text-fill" width="20" /> <Icon icon="ri:file-text-fill" width="22" />
Detailed Analysis Detailed Analysis
</h3> </h3>
<p class="text-white/85 text-[15px] leading-[1.7] m-0"> <p
class="text-white/85 text-[15px] lg:text-base leading-[1.7] m-0"
>
{incident.analysis?.reasoning || {incident.analysis?.reasoning ||
"No detailed analysis available."} "No detailed analysis available."}
</p> </p>
</div> </div>
<!-- Red Flags -->
{#if incident.analysis?.red_flags && incident.analysis.red_flags.length > 0} {#if incident.analysis?.red_flags && incident.analysis.red_flags.length > 0}
<div <div
class="bg-white/4 border border-white/[0.06] rounded-[20px] p-6" class="bg-white/5 backdrop-blur-sm border border-white/10 rounded-[20px] p-7"
> >
<h3 <h3
class="flex items-center gap-[10px] text-red-400 text-base font-bold mb-4" class="flex items-center gap-2.5 text-red-300 text-base lg:text-lg font-bold mb-4"
> >
<Icon icon="ri:flag-fill" width="20" /> <Icon icon="ri:flag-fill" width="22" />
Red Flags Identified Red Flags Identified
</h3> </h3>
<ul class="list-none p-0 m-0 flex flex-col gap-3"> <ul class="list-none p-0 m-0 flex flex-col gap-3.5">
{#each incident.analysis.red_flags as flag} {#each incident.analysis.red_flags as flag}
<li <li
class="flex items-start gap-3 text-red-300/80 text-sm leading-[1.6]" class="flex items-start gap-3 text-red-200/80 text-sm lg:text-base leading-[1.6]"
> >
<Icon <Icon
icon="ri:error-warning-line" icon="ri:error-warning-line"
@@ -154,21 +190,21 @@
</div> </div>
{/if} {/if}
<!-- Key Claims -->
{#if incident.analysis?.key_claims && incident.analysis.key_claims.length > 0} {#if incident.analysis?.key_claims && incident.analysis.key_claims.length > 0}
<div <div
class="bg-white/4 border border-white/[0.06] rounded-[20px] p-6" class="bg-white/5 backdrop-blur-sm border border-white/10 rounded-[20px] p-7"
> >
<h3 <h3
class="flex items-center gap-[10px] text-white text-base font-bold mb-4" class="flex items-center gap-2.5 text-white text-base lg:text-lg font-bold mb-4"
> >
<Icon icon="ri:chat-quote-fill" width="20" /> <Icon icon="ri:chat-quote-fill" width="22" />
Environmental Claims Made Environmental Claims Made
</h3> </h3>
<ul class="list-none p-0 m-0 flex flex-col gap-3"> <ul class="list-none p-0 m-0 flex flex-col gap-3.5">
{#each incident.analysis.key_claims as claim} {#each incident.analysis.key_claims as claim}
<li <li
class="flex items-start gap-3 text-white/70 text-sm italic leading-[1.6]" class="flex items-start gap-3 text-white/75 text-sm lg:text-base italic leading-[1.6]"
> >
<Icon <Icon
icon="ri:double-quotes-l" icon="ri:double-quotes-l"
@@ -182,36 +218,38 @@
</div> </div>
{/if} {/if}
<!-- Recommendations -->
{#if incident.analysis?.recommendations} {#if incident.analysis?.recommendations}
<div <div
class="bg-emerald-500/8 border border-emerald-500/20 rounded-[20px] p-6" class="bg-emerald-500/12 backdrop-blur-sm border border-emerald-500/30 rounded-[20px] p-7"
> >
<h3 <h3
class="flex items-center gap-[10px] text-emerald-400 text-base font-bold mb-4" class="flex items-center gap-2.5 text-emerald-300 text-base lg:text-lg font-bold mb-4"
> >
<Icon icon="ri:lightbulb-fill" width="20" /> <Icon icon="ri:lightbulb-fill" width="22" />
Consumer Recommendations Consumer Recommendations
</h3> </h3>
<p class="text-emerald-300 text-[15px] leading-[1.6] m-0"> <p
class="text-emerald-200 text-[15px] lg:text-base leading-[1.6] m-0"
>
{incident.analysis.recommendations} {incident.analysis.recommendations}
</p> </p>
</div> </div>
{/if} {/if}
<!-- User's Original Report -->
{#if incident.user_description} {#if incident.user_description}
<div <div
class="bg-indigo-500/8 border border-indigo-500/20 rounded-[20px] p-6" class="bg-indigo-500/12 backdrop-blur-sm border border-indigo-500/30 rounded-[20px] p-7"
> >
<h3 <h3
class="flex items-center gap-[10px] text-indigo-400 text-base font-bold mb-4" class="flex items-center gap-2.5 text-indigo-300 text-base lg:text-lg font-bold mb-4"
> >
<Icon icon="ri:user-voice-fill" width="20" /> <Icon icon="ri:user-voice-fill" width="22" />
Original User Report Original User Report
</h3> </h3>
<p <p
class="text-indigo-200 text-[15px] italic leading-[1.6] m-0" class="text-indigo-200 text-[15px] lg:text-base italic leading-[1.6] m-0"
> >
"{incident.user_description}" "{incident.user_description}"
</p> </p>
@@ -222,7 +260,7 @@
</div> </div>
<style> <style>
/* Custom utility to hide scrollbar if tailwind plugin not present */
.scrollbar-hide { .scrollbar-hide {
scrollbar-width: none; scrollbar-width: none;
-ms-overflow-style: none; -ms-overflow-style: none;

View File

@@ -43,52 +43,52 @@
</script> </script>
<button <button
class="group flex items-center gap-6 bg-black/80 backdrop-blur-[30px] border border-white/20 rounded-3xl p-8 w-full text-left cursor-pointer transition-all duration-300 hover:bg-black/90 hover:border-emerald-500/60 hover:-translate-y-1 hover:scale-[1.01] hover:shadow-[0_24px_48px_rgba(0,0,0,0.5)] relative overflow-hidden outline-none" class="group flex items-center gap-6 lg:gap-8 bg-black/60 backdrop-blur-2xl border border-white/15 rounded-3xl p-7 lg:p-8 w-full text-left cursor-pointer transition-all duration-300 hover:bg-black/70 hover:border-emerald-500/50 hover:-translate-y-1 hover:scale-[1.01] hover:shadow-2xl hover:shadow-black/50 relative overflow-hidden outline-none"
onclick={() => openReport(report)} onclick={() => openReport(report)}
> >
<div <div
class="w-16 h-16 bg-emerald-500/10 rounded-[18px] flex items-center justify-center shrink-0 transition-all duration-300 group-hover:bg-emerald-500 group-hover:-rotate-6" class="w-16 h-16 lg:w-[4.375rem] lg:h-[4.375rem] bg-emerald-500/15 backdrop-blur-sm rounded-[1.125rem] flex items-center justify-center shrink-0 transition-all duration-300 group-hover:bg-emerald-500 group-hover:-rotate-6 group-hover:scale-110"
> >
<Icon icon={fileDetails.icon} width="32" class="text-white" /> <Icon icon={fileDetails.icon} width="32" class="text-white" />
</div> </div>
<div class="flex-1 min-w-0"> <div class="flex-1 min-w-0">
<div class="flex items-center gap-3 mb-2"> <div class="flex items-center gap-3 mb-2.5">
<h3 class="text-white text-xl font-extrabold m-0"> <h3 class="text-white text-xl lg:text-[22px] font-extrabold m-0">
{report.company_name} {report.company_name}
</h3> </h3>
<span <span
class="text-emerald-400 bg-emerald-500/10 px-2 py-0.5 rounded-md text-[12px] font-bold" class="text-emerald-300 bg-emerald-500/15 backdrop-blur-sm px-3 py-1 rounded-lg text-[12px] lg:text-[13px] font-bold"
>{report.year}</span >{report.year}</span
> >
</div> </div>
{#if report.snippet} {#if report.snippet}
<p <p
class="text-white/60 text-sm leading-relaxed mb-4 line-clamp-2 m-0" class="text-white/70 text-sm lg:text-base leading-relaxed mb-4 line-clamp-2 m-0"
> >
{@html report.snippet.replace( {@html report.snippet.replace(
new RegExp(searchQuery || "", "gi"), new RegExp(searchQuery || "", "gi"),
(match) => (match) =>
`<span class="text-emerald-400 bg-emerald-500/20 px-0.5 rounded font-semibold">${match}</span>`, `<span class="text-emerald-300 bg-emerald-500/25 px-1 rounded font-semibold">${match}</span>`,
)} )}
</p> </p>
{:else} {:else}
<p class="text-white/40 text-sm mb-4 m-0"> <p class="text-white/50 text-sm lg:text-base mb-4 m-0">
{report.sector} Sector • Impact Report {report.sector} Sector • Impact Report
</p> </p>
{/if} {/if}
<div class="flex flex-wrap gap-2"> <div class="flex flex-wrap gap-2.5">
<span <span
class="flex items-center gap-1.5 px-3 py-1.5 rounded-xl text-[11px] font-bold tracking-tight bg-white/5 text-white/60 max-w-[200px] truncate" class="flex items-center gap-1.5 px-3 py-1.5 rounded-xl text-[11px] lg:text-[12px] font-bold tracking-tight bg-white/10 backdrop-blur-sm text-white/70 max-w-xs truncate"
title={report.filename} title={report.filename}
> >
<Icon icon={fileDetails.icon} width="14" /> <Icon icon={fileDetails.icon} width="15" />
{fileDetails.type} {fileDetails.type}
</span> </span>
<span <span
class="flex items-center gap-1.5 px-3 py-1.5 rounded-xl text-[11px] font-bold tracking-tight bg-emerald-500/10 text-emerald-400" class="flex items-center gap-1.5 px-3 py-1.5 rounded-xl text-[11px] lg:text-[12px] font-bold tracking-tight bg-emerald-500/15 backdrop-blur-sm text-emerald-300"
> >
<Icon <Icon
icon="ri:checkbox-circle-fill" icon="ri:checkbox-circle-fill"
@@ -101,18 +101,18 @@
</div> </div>
{#if report.greenwashing_score} {#if report.greenwashing_score}
<div class="text-center ml-5"> <div class="text-center ml-5 lg:ml-6">
<div <div
class="w-[52px] h-[52px] rounded-xe flex items-center justify-center mb-1 rounded-[14px] {getScoreColor( class="w-14 h-14 lg:w-[3.875rem] lg:h-[3.875rem] rounded-xe flex items-center justify-center mb-2 rounded-2xl shadow-lg transition-transform group-hover:scale-110 {getScoreColor(
report.greenwashing_score, report.greenwashing_score,
)}" )}"
> >
<span class="text-emerald-950 text-[18px] font-black" <span class="text-emerald-950 text-[19px] lg:text-[21px] font-black"
>{Math.round(Number(report.greenwashing_score))}</span >{Math.round(Number(report.greenwashing_score))}</span
> >
</div> </div>
<span <span
class="text-white/40 text-[10px] font-extrabold uppercase tracking-widest" class="text-white/50 text-[10px] lg:text-[11px] font-extrabold uppercase tracking-widest"
>Trust Score</span >Trust Score</span
> >
</div> </div>

View File

@@ -0,0 +1,45 @@
<script lang="ts">
import Icon from "@iconify/svelte";
let {
inputText = $bindable(),
isLoading,
onSend,
} = $props<{
inputText: string;
isLoading: boolean;
onSend: () => void;
}>();
function handleKeyDown(event: KeyboardEvent) {
if (event.key === "Enter" && !event.shiftKey) {
event.preventDefault();
onSend();
}
}
</script>
<div
class="p-4 pb-8 md:pb-4 bg-[#051f18] md:bg-white/5 border-t border-[#1f473b] md:border-white/10 flex items-center gap-3 w-full"
>
<input
type="text"
class="flex-1 bg-[#0d2e25] md:bg-white/10 text-white p-3.5 px-5 border border-[#1f473b] md:border-white/10 rounded-full text-[15px] outline-none transition-all duration-200 placeholder-gray-500 focus:border-emerald-400 focus:bg-[#11382e] md:focus:bg-white/15"
placeholder="Ask about sustainability..."
bind:value={inputText}
onkeydown={handleKeyDown}
disabled={isLoading}
/>
<button
class="bg-emerald-500 w-12 h-12 rounded-full flex items-center justify-center text-white shadow-lg transition-all duration-200 hover:scale-105 hover:shadow-emerald-500/50 disabled:opacity-50 disabled:hover:scale-100 disabled:cursor-not-allowed"
onclick={onSend}
aria-label="Send message"
disabled={isLoading}
>
{#if isLoading}
<Icon icon="ri:loader-4-line" width="24" class="animate-spin" />
{:else}
<Icon icon="ri:send-plane-fill" width="24" />
{/if}
</button>
</div>

View File

@@ -0,0 +1,43 @@
<script lang="ts">
import { marked } from "marked";
let { text, sender } = $props<{
text: string;
sender: "user" | "ai";
}>();
</script>
<div
class="p-3 px-5 rounded-[20px] max-w-[85%] text-[15px] leading-relaxed transition-all duration-200
{sender === 'user'
? 'self-end bg-gradient-to-br from-emerald-500 to-emerald-600 text-white rounded-br-md shadow-lg shadow-emerald-500/20'
: 'self-start bg-teal-900/40 border border-white/10 text-white rounded-bl-md shadow-sm backdrop-blur-sm'}"
>
<div
class="message-content prose prose-invert prose-p:my-1 prose-headings:my-2 prose-strong:text-white prose-ul:my-1 max-w-none"
>
{@html marked.parse(text)}
</div>
</div>
<style>
:global(.message-content p) {
margin: 0 0 0.5rem 0;
}
:global(.message-content p:last-child) {
margin: 0;
}
:global(.message-content strong) {
font-weight: 700;
color: inherit;
}
:global(.message-content ul),
:global(.message-content ol) {
margin: 0.25rem 0 0.5rem 1.25rem;
padding: 0;
}
:global(.message-content li) {
margin-bottom: 0.25rem;
}
</style>

View File

@@ -0,0 +1,14 @@
<div
class="self-start bg-teal-900/40 border border-white/10 rounded-bl-md rounded-[20px] p-3 px-4 w-fit shadow-sm backdrop-blur-sm"
>
<div class="flex items-center gap-1">
<span
class="w-1.5 h-1.5 bg-emerald-400 rounded-full animate-bounce [animation-delay:-0.32s]"
></span>
<span
class="w-1.5 h-1.5 bg-emerald-400 rounded-full animate-bounce [animation-delay:-0.16s]"
></span>
<span class="w-1.5 h-1.5 bg-emerald-400 rounded-full animate-bounce"
></span>
</div>
</div>

View File

@@ -0,0 +1,73 @@
<script lang="ts">
import { onMount } from "svelte";
let canvasElement = $state<HTMLCanvasElement>();
onMount(() => {
if (!canvasElement) return;
const ctx = canvasElement.getContext("2d");
if (!ctx) return;
let frame = 0;
let animationId: number;
function animate() {
frame++;
if (!ctx || !canvasElement) return;
ctx.clearRect(0, 0, 40, 40);
const yOffset = Math.sin(frame * 0.05) * 3;
ctx.fillStyle = "#e0e0e0";
ctx.beginPath();
ctx.arc(20, 20, 18, 0, Math.PI * 2);
ctx.fill();
ctx.fillStyle = "#22c55e";
ctx.beginPath();
ctx.arc(20, 20 + yOffset, 14, 0, Math.PI * 2);
ctx.fill();
ctx.fillStyle = "#16a34a";
ctx.beginPath();
ctx.moveTo(20, 8 + yOffset);
ctx.quadraticCurveTo(28, 4 + yOffset, 24, 16 + yOffset);
ctx.closePath();
ctx.fill();
ctx.fillStyle = "white";
ctx.fillRect(14, 16 + yOffset, 3, 5);
ctx.fillRect(23, 16 + yOffset, 3, 5);
ctx.strokeStyle = "white";
ctx.lineWidth = 2;
ctx.beginPath();
ctx.arc(20, 26 + yOffset, 4, 0.2, Math.PI - 0.2);
ctx.stroke();
animationId = requestAnimationFrame(animate);
}
animate();
return () => {
if (animationId) cancelAnimationFrame(animationId);
};
});
</script>
<div class="relative w-10 h-10">
<canvas
bind:this={canvasElement}
width="40"
height="40"
class="w-10 h-10 drop-shadow-[0_4px_12px_rgba(16,185,129,0.4)]"
></canvas>
<div
class="absolute bottom-0.5 right-0.5 w-2.5 h-2.5 bg-emerald-400 border-2 border-[#051f18] rounded-full shadow-[0_0_8px_rgba(52,211,153,0.6)]"
></div>
</div>

View File

@@ -8,13 +8,12 @@
import IncidentCard from "$lib/components/catalogue/IncidentCard.svelte"; import IncidentCard from "$lib/components/catalogue/IncidentCard.svelte";
import CompanyModal from "$lib/components/catalogue/CompanyModal.svelte"; import CompanyModal from "$lib/components/catalogue/CompanyModal.svelte";
import IncidentModal from "$lib/components/catalogue/IncidentModal.svelte"; import IncidentModal from "$lib/components/catalogue/IncidentModal.svelte";
import Pagination from "$lib/components/catalogue/Pagination.svelte";
// View mode toggle
type ViewMode = "company" | "user"; type ViewMode = "company" | "user";
let viewMode = $state<ViewMode>("company"); let viewMode = $state<ViewMode>("company");
// Data Types
interface Report { interface Report {
company_name: string; company_name: string;
year: string | number; year: string | number;
@@ -31,6 +30,8 @@
detected_brand: string; detected_brand: string;
user_description?: string; user_description?: string;
created_at: string; created_at: string;
image_base64?: string;
report_type?: "product" | "company";
analysis: { analysis: {
verdict: string; verdict: string;
confidence: string; confidence: string;
@@ -48,7 +49,7 @@
let searchQuery = $state(""); let searchQuery = $state("");
let isLoading = $state(false); let isLoading = $state(false);
// Predefined categories
const categories = [ const categories = [
"All", "All",
"Tech", "Tech",
@@ -61,7 +62,7 @@
]; ];
let selectedCategory = $state("All"); let selectedCategory = $state("All");
// Fetching logic
async function fetchReports() { async function fetchReports() {
isLoading = true; isLoading = true;
try { try {
@@ -93,7 +94,7 @@
fetchIncidents(); fetchIncidents();
}); });
// Search
async function handleSearch() { async function handleSearch() {
if (!searchQuery.trim()) { if (!searchQuery.trim()) {
fetchReports(); fetchReports();
@@ -124,7 +125,7 @@
debounceTimer = setTimeout(handleSearch, 600); debounceTimer = setTimeout(handleSearch, 600);
} }
// Pagination & Filtering
let currentPage = $state(1); let currentPage = $state(1);
const itemsPerPage = 10; const itemsPerPage = 10;
@@ -154,11 +155,10 @@
function goToPage(page: number) { function goToPage(page: number) {
if (page >= 1 && page <= totalPages) { if (page >= 1 && page <= totalPages) {
currentPage = page; currentPage = page;
window.scrollTo({ top: 0, behavior: "smooth" });
} }
} }
// Modals
let selectedReport = $state<Report | null>(null); let selectedReport = $state<Report | null>(null);
let selectedIncident = $state<Incident | null>(null); let selectedIncident = $state<Incident | null>(null);
</script> </script>
@@ -172,7 +172,7 @@
</svelte:head> </svelte:head>
<div class="relative w-full min-h-screen overflow-x-hidden"> <div class="relative w-full min-h-screen overflow-x-hidden">
<!-- Modals -->
{#if selectedReport} {#if selectedReport}
<CompanyModal <CompanyModal
report={selectedReport} report={selectedReport}
@@ -193,7 +193,7 @@
</div> </div>
<div <div
class="relative z-10 px-6 pt-[100px] pb-[120px] max-w-[1000px] mx-auto" class="relative z-10 px-6 sm:px-8 lg:px-12 pt-16 pb-35 max-w-275 mx-auto"
> >
<CatalogueHeader <CatalogueHeader
bind:viewMode bind:viewMode
@@ -201,30 +201,35 @@
bind:selectedCategory
{categories}
{onSearchInput}
{currentPage}
{totalPages}
{goToPage}
/>
{#if isLoading}
<div class="flex flex-col items-center justify-center py-20 gap-4">
<div
class="flex flex-col items-center justify-center py-24 gap-5 bg-black/60 backdrop-blur-2xl rounded-3xl border border-white/15 shadow-2xl shadow-black/50 mt-8"
>
<Icon
icon="eos-icons:loading"
width="40"
width="48"
class="text-emerald-400"
/>
<p class="text-white/60 font-medium">
<p class="text-white/70 font-medium text-lg">
Syncing with database...
</p>
</div>
{:else if viewMode === "company"}
{#if filteredReports.length === 0}
<div
class="bg-black/40 backdrop-blur-md rounded-3xl p-12 text-center border border-white/10"
class="bg-black/60 backdrop-blur-2xl rounded-3xl p-16 text-center border border-white/15 shadow-2xl shadow-black/50 mt-8"
>
<p class="text-white/60 text-lg">
<p class="text-white/70 text-lg font-medium">
No reports found matching your criteria.
</p>
</div>
{:else}
<div class="flex flex-col gap-5">
<div class="flex flex-col gap-6 mt-8">
{#each paginatedReports as report}
<ReportCard
{report}
@@ -233,24 +238,25 @@
/>
{/each}
</div>
<Pagination bind:currentPage {totalPages} {goToPage} />
{/if}
{:else if incidents.length === 0}
<div
class="bg-black/40 backdrop-blur-md rounded-3xl p-20 text-center border border-white/10 flex flex-col items-center gap-4"
class="bg-black/60 backdrop-blur-2xl rounded-3xl p-20 text-center border border-white/15 shadow-2xl shadow-black/50 flex flex-col items-center gap-5 mt-8"
>
<Icon
icon="ri:file-warning-line"
width="48"
width="56"
class="text-white/30"
/>
<p class="text-white/60 text-lg">No user reports yet.</p>
<p class="text-white/70 text-lg font-medium">
No user reports yet.
</p>
<p class="text-white/40 text-sm">
<p class="text-white/50 text-sm">
Be the first to report greenwashing!
</p>
</div>
{:else}
<div class="flex flex-col gap-5">
<div class="flex flex-col gap-6 mt-8">
{#each incidents as incident}
<IncidentCard
{incident}
@@ -264,7 +270,7 @@
<style>
:global(body) {
background-color: #051010;
background-color: #0c0c0c;
color: white;
}
</style>
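For context on the pagination state referenced in this hunk (currentPage, itemsPerPage, totalPages, paginatedReports), a minimal sketch of how the derived values are typically computed with Svelte 5 runes; the actual derivation is outside the lines shown here, so treat it as an assumption rather than the committed code:
// Sketch only: assumes filteredReports is the category/search-filtered list used above.
const totalPages = $derived(Math.max(1, Math.ceil(filteredReports.length / itemsPerPage)));
const paginatedReports = $derived(
filteredReports.slice((currentPage - 1) * itemsPerPage, currentPage * itemsPerPage),
);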

View File

@@ -1,10 +1,18 @@
<script lang="ts">
import { onMount } from "svelte";
import ParallaxLandscape from "$lib/components/ParallaxLandscape.svelte";
import Icon from "@iconify/svelte";
import { marked } from "marked";
import ChatMessage from "$lib/components/chat/ChatMessage.svelte";
import ChatInput from "$lib/components/chat/ChatInput.svelte";
import Mascot from "$lib/components/chat/Mascot.svelte";
import LoadingBubble from "$lib/components/chat/LoadingBubble.svelte";
let messages = $state([
type Message = {
id: number;
text: string;
sender: "user" | "ai";
};
let messages = $state<Message[]>([
{
id: 1,
text: "Hello! I'm Ethix AI. Ask me anything about recycling, sustainability, or green products.",
@@ -12,9 +20,8 @@
},
]);
let inputText = $state("");
let canvasElement = $state<HTMLCanvasElement>();
let isLoading = $state(false);
let chatWindowFn: HTMLDivElement | undefined = $state();
let chatWindowFn = $state<HTMLDivElement>();
function scrollToBottom() {
if (chatWindowFn) {
@@ -25,7 +32,7 @@
}
$effect(() => {
// Dependencies to trigger scroll
messages;
isLoading;
scrollToBottom();
@@ -38,7 +45,7 @@
const userMsg = {
id: Date.now(),
text: userText,
sender: "user",
sender: "user" as const,
};
messages = [...messages, userMsg];
inputText = "";
@@ -49,12 +56,8 @@
"http://localhost:5000/api/gemini/ask", "http://localhost:5000/api/gemini/ask",
{ {
method: "POST", method: "POST",
headers: { headers: { "Content-Type": "application/json" },
"Content-Type": "application/json", body: JSON.stringify({ prompt: userText }),
},
body: JSON.stringify({
prompt: userText,
}),
}, },
); );
@@ -64,7 +67,7 @@
const aiMsg = {
id: Date.now() + 1,
text: data.reply,
sender: "ai",
sender: "ai" as const,
};
messages = [...messages, aiMsg];
} else {
@@ -74,7 +77,7 @@
const errorMsg = {
id: Date.now() + 1,
text: "Sorry, I'm having trouble connecting to my brain right now. Please try again later.",
sender: "ai",
sender: "ai" as const,
};
messages = [...messages, errorMsg];
console.error("Chat Error:", error);
@@ -82,63 +85,6 @@
isLoading = false;
}
}
function handleKeyDown(event: KeyboardEvent) {
if (event.key === "Enter" && !event.shiftKey) {
event.preventDefault();
sendMessage();
}
}
onMount(() => {
if (!canvasElement) return;
const ctx = canvasElement.getContext("2d");
if (!ctx) return;
let frame = 0;
function animate() {
frame++;
if (!ctx || !canvasElement) return;
ctx.clearRect(0, 0, 40, 40);
const yOffset = Math.sin(frame * 0.05) * 3; // Reduced amplitude
// Head
ctx.fillStyle = "#e0e0e0";
ctx.beginPath();
ctx.arc(20, 20, 18, 0, Math.PI * 2); // Center at 20,20, Radius 18
ctx.fill();
// Face/Visor
ctx.fillStyle = "#22c55e";
ctx.beginPath();
ctx.arc(20, 20 + yOffset, 14, 0, Math.PI * 2); // Center 20,20
ctx.fill();
// Reflection/Detail
ctx.fillStyle = "#16a34a";
ctx.beginPath();
ctx.moveTo(20, 8 + yOffset);
ctx.quadraticCurveTo(28, 4 + yOffset, 24, 16 + yOffset);
ctx.closePath();
ctx.fill();
// Eyes
ctx.fillStyle = "white";
ctx.fillRect(14, 16 + yOffset, 3, 5);
ctx.fillRect(23, 16 + yOffset, 3, 5);
// Smile
ctx.strokeStyle = "white";
ctx.lineWidth = 2;
ctx.beginPath();
ctx.arc(20, 26 + yOffset, 4, 0.2, Math.PI - 0.2);
ctx.stroke();
requestAnimationFrame(animate);
}
animate();
});
</script>
<svelte:head>
@@ -149,426 +95,68 @@
/>
</svelte:head>
<div class="page-wrapper"> <div class="fixed inset-0 w-full h-full overflow-hidden bg-[#0c0c0c]">
<div class="desktop-bg"> <div class="hidden md:block absolute inset-0 pointer-events-none">
<ParallaxLandscape /> <ParallaxLandscape />
</div> </div>
<div class="chat-container"> <div
<div class="chat-card"> class="relative z-10 max-w-6xl mx-auto h-full flex flex-col pt-24 pb-6 px-0 md:px-6 md:pt-20 md:pb-10"
<div class="header"> >
<div class="mascot-container"> <div
<canvas class="flex flex-col h-full bg-[#0d2e25] md:bg-black/40 md:backdrop-blur-xl border-x md:border border-[#1f473b] md:border-white/10 md:rounded-[32px] overflow-hidden shadow-2xl"
bind:this={canvasElement} >
width="40"
height="40" <div
class="mascot-canvas" class="p-3 px-5 border-b border-[#1f473b] md:border-white/10 bg-[#051f18] md:bg-transparent flex items-center gap-3 shrink-0"
></canvas> >
<div class="mascot-status-dot"></div> <Mascot />
</div> <div class="flex flex-col items-start gap-0.5">
<div class="header-text-center"> <h1
<h1 class="page-title">Ethix Assistant</h1> class="text-white text-base font-bold leading-tight m-0"
<div class="powered-by"> >
Ethix Assistant
</h1>
<div
class="flex items-center gap-1 text-[10px] font-semibold text-emerald-400 tracking-wide bg-emerald-500/10 px-2 py-0.5 rounded-xl border border-emerald-500/20"
>
<Icon icon="ri:shining-fill" width="10" /> <Icon icon="ri:shining-fill" width="10" />
<span>Powered by Gemini</span> <span>Powered by Gemini</span>
</div> </div>
</div> </div>
</div> </div>
<div class="chat-window">
<div class="messages-container" bind:this={chatWindowFn}> <div
class="flex-1 flex flex-col overflow-hidden bg-[#0d2e25] md:bg-transparent"
>
<div
class="flex-1 overflow-y-auto p-6 pb-5 flex flex-col gap-4 scroll-smooth scrollbar-none"
bind:this={chatWindowFn}
>
{#each messages as msg (msg.id)} {#each messages as msg (msg.id)}
<div <ChatMessage text={msg.text} sender={msg.sender} />
class="message"
class:user-message={msg.sender === "user"}
class:ai-message={msg.sender === "ai"}
>
<div class="message-content">
{@html marked.parse(msg.text)}
</div>
</div>
{/each} {/each}
{#if isLoading} {#if isLoading}
<div class="message ai-message loading-bubble"> <LoadingBubble />
<div class="typing-indicator">
<span></span>
<span></span>
<span></span>
</div>
</div>
{/if} {/if}
</div> </div>
<div class="input-container"> <ChatInput bind:inputText {isLoading} onSend={sendMessage} />
<input
type="text"
class="message-input"
placeholder="Ask about sustainability..."
bind:value={inputText}
onkeydown={handleKeyDown}
/>
<button
class="send-button"
onclick={sendMessage}
aria-label="Send message"
>
<Icon icon="ri:send-plane-fill" width="24" />
</button>
</div>
</div> </div>
</div> </div>
</div> </div>
</div> </div>
<style>
.scrollbar-none::-webkit-scrollbar {
display: none;
}
.scrollbar-none {
-ms-overflow-style: none;
scrollbar-width: none;
}
.page-wrapper {
width: 100%;
height: 100vh;
position: relative;
overflow: hidden;
}
.desktop-bg {
display: none;
}
.chat-container {
position: relative;
z-index: 10;
padding: 100px 24px 40px;
max-width: 800px;
margin: 0 auto;
height: 100vh;
box-sizing: border-box;
display: flex;
flex-direction: column;
}
.chat-card {
background: #0d2e25;
border: 1px solid #1f473b;
border-radius: 32px;
box-shadow: 0 16px 48px rgba(0, 0, 0, 0.3);
overflow: hidden;
display: flex;
flex-direction: column;
flex: 1;
min-height: 0;
}
.header {
padding: 12px 20px;
border-bottom: 1px solid #1f473b;
background: #051f18;
display: flex;
flex-direction: row; /* Horizontal Layout for Compactness */
align-items: center;
justify-content: flex-start;
gap: 12px;
}
.header-text-center {
display: flex;
flex-direction: column;
align-items: flex-start; /* Left Align Text */
gap: 2px;
}
.mascot-container {
position: relative;
width: 40px;
height: 40px;
}
.mascot-canvas {
width: 40px;
height: 40px;
filter: drop-shadow(0 4px 12px rgba(16, 185, 129, 0.4));
}
.mascot-status-dot {
position: absolute;
bottom: 2px;
right: 2px;
width: 10px;
height: 10px;
background: #34d399;
border: 2px solid #051f18;
border-radius: 50%;
box-shadow: 0 0 8px rgba(52, 211, 153, 0.6);
}
.page-title {
color: white;
font-size: 16px;
font-weight: 700;
margin: 0;
line-height: 1.2;
}
.powered-by {
display: inline-flex;
align-items: center;
gap: 4px;
font-size: 10px;
font-weight: 600;
color: #34d399;
letter-spacing: 0.5px;
background: rgba(34, 197, 94, 0.1);
padding: 2px 8px;
border-radius: 12px;
border: 1px solid rgba(34, 197, 94, 0.2);
}
.chat-window {
flex: 1;
display: flex;
flex-direction: column;
overflow: hidden;
background: #0d2e25;
}
.messages-container {
flex: 1;
overflow-y: auto;
padding: 24px;
padding-bottom: 20px;
display: flex;
flex-direction: column;
gap: 16px;
scroll-behavior: smooth;
}
.message {
padding: 12px 18px;
border-radius: 20px;
max-width: 85%;
font-size: 15px;
line-height: 1.5;
}
/* Message Content Markdown Styles */
.message-content :global(p) {
margin: 0 0 8px 0;
}
.message-content :global(p:last-child) {
margin: 0;
}
.message-content :global(strong) {
font-weight: 700;
color: inherit;
}
.message-content :global(ul),
.message-content :global(ol) {
margin: 4px 0 8px 20px;
padding: 0;
}
.message-content :global(li) {
margin-bottom: 4px;
}
.message-content :global(h1),
.message-content :global(h2),
.message-content :global(h3) {
font-weight: 700;
font-size: 1.1em;
margin: 8px 0 4px 0;
}
.user-message {
align-self: flex-end;
background: linear-gradient(135deg, #10b981, #059669);
border-bottom-right-radius: 6px;
color: white;
font-weight: 500;
box-shadow: 0 4px 12px rgba(16, 185, 129, 0.2);
}
.ai-message {
align-self: flex-start;
background: #134e4a; /* Teal-900 for better visibility */
border-bottom-left-radius: 6px;
color: white; /* White text for contrast */
border: 1px solid rgba(255, 255, 255, 0.1);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
}
/* Loading Bubble */
.loading-bubble {
padding: 12px 16px;
width: fit-content;
}
.typing-indicator {
display: flex;
align-items: center;
gap: 4px;
}
.typing-indicator span {
width: 6px;
height: 6px;
background-color: #34d399;
border-radius: 50%;
display: inline-block;
animation: bounce 1.4s infinite ease-in-out both;
}
.typing-indicator span:nth-child(1) {
animation-delay: -0.32s;
}
.typing-indicator span:nth-child(2) {
animation-delay: -0.16s;
}
@keyframes bounce {
0%,
80%,
100% {
transform: scale(0);
}
40% {
transform: scale(1);
}
}
.input-container {
padding: 16px 20px;
padding-bottom: 30px;
background: #051f18;
border-top: 1px solid #1f473b;
display: flex;
align-items: center;
gap: 12px;
}
.message-input {
flex: 1;
background: #0d2e25;
color: white;
padding: 14px 20px;
border: 1px solid #1f473b;
border-radius: 50px;
font-size: 15px;
outline: none;
transition: all 0.2s;
}
.message-input:focus {
border-color: #34d399;
background: #11382e;
}
.message-input::placeholder {
color: #6b7280;
}
.send-button {
background: #10b981;
width: 48px;
height: 48px;
border-radius: 50%;
border: none;
display: flex;
justify-content: center;
align-items: center;
cursor: pointer;
transition: all 0.2s ease;
flex-shrink: 0;
color: white;
}
.send-button:hover {
transform: scale(1.08);
box-shadow: 0 0 20px rgba(16, 185, 129, 0.5);
}
@media (min-width: 768px) {
.desktop-bg {
display: block;
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
pointer-events: none;
}
.chat-container {
padding-top: 100px;
height: auto;
max-width: 1200px;
}
.chat-card {
background: rgba(0, 0, 0, 0.4);
backdrop-filter: blur(20px);
-webkit-backdrop-filter: blur(20px);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 32px;
box-shadow: 0 16px 48px rgba(0, 0, 0, 0.3);
height: 85vh;
max-height: 900px;
}
.header {
background: transparent;
border-bottom: 1px solid rgba(255, 255, 255, 0.08);
}
.chat-window {
background: transparent;
}
.ai-message {
background: rgba(255, 255, 255, 0.1);
border: 1px solid rgba(255, 255, 255, 0.1);
}
.input-container {
background: rgba(255, 255, 255, 0.05);
border-top: 1px solid rgba(255, 255, 255, 0.08);
padding-bottom: 16px;
}
.message-input {
background: rgba(255, 255, 255, 0.08);
border: 1px solid rgba(255, 255, 255, 0.1);
}
.message-input:focus {
background: rgba(255, 255, 255, 0.12);
border-color: rgba(34, 197, 94, 0.5);
}
}
@media (max-width: 767px) {
.chat-container {
padding: 0;
max-width: 100%;
height: 100vh;
display: flex;
flex-direction: column;
}
.chat-card {
height: 100%;
max-height: none;
border-radius: 0;
border: none;
}
.header {
padding: 12px 16px;
padding-top: 50px; /* Safe area for some mobile devices */
}
.messages-container {
padding-bottom: 20px;
}
.input-container {
padding-bottom: 120px;
}
} }
</style>
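The hunk above replaces the inline message markup, typing indicator, and input row with the ChatMessage, LoadingBubble, and ChatInput components. Their prop contracts are not part of this excerpt; judging only from how they are instantiated here, a sketch of what they would declare in Svelte 5 (assumed, not the committed component code):
// ChatMessage.svelte (assumed): used as <ChatMessage text={msg.text} sender={msg.sender} />
let { text, sender }: { text: string; sender: "user" | "ai" } = $props();
// ChatInput.svelte (assumed): used as <ChatInput bind:inputText {isLoading} onSend={sendMessage} />
let { inputText = $bindable(""), isLoading = false, onSend }: { inputText?: string; isLoading?: boolean; onSend: () => void } = $props();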

View File

@@ -30,7 +30,11 @@
{#if item}
<div class="safe-area">
<div class="header">
<button class="back-button" on:click={() => window.history.back()}>
<button
class="back-button"
onclick={() => window.history.back()}
aria-label="Go back"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 320 512"
@@ -65,7 +69,9 @@
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 512 512"
fill={item.impact === "High" ? "#ef4444" : "#22c55e"}
fill={item.impact === "High"
? "#ef4444"
: "#22c55e"}
class="alert-icon"
>
<path
@@ -76,17 +82,21 @@
<h3>Analysis Result</h3>
<p>
{#if item.impact === "High"}
This item takes 450+ years to decompose. Consider switching
to sustainable alternatives immediately.
This item takes 450+ years to decompose.
Consider switching to sustainable
alternatives immediately.
{:else}
This item is eco-friendly or easily recyclable. Good choice!
This item is eco-friendly or easily
recyclable. Good choice!
{/if}
</p>
</div>
</div>
<div class="alternatives-section">
<h3 class="alternatives-title">Recommended Alternatives</h3>
<h3 class="alternatives-title">
Recommended Alternatives
</h3>
<div class="alternatives-scroll">
<div class="alternative-card glass">
<div class="alt-header">
@@ -142,7 +152,10 @@
</div>
{#if item.impact !== "Low"}
<button class="report-button" on:click={navigateToReport}>
<button
class="report-button"
onclick={navigateToReport}
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 448 512"
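Note that this file also migrates event handlers from the Svelte 4 directive syntax to the Svelte 5 attribute syntax; behaviour is unchanged, only the syntax differs, as the back button shows:
<!-- Svelte 4 directive form (old) -->
<button class="back-button" on:click={() => window.history.back()}>
<!-- Svelte 5 attribute form (new) -->
<button class="back-button" onclick={() => window.history.back()} aria-label="Go back">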

View File

@@ -4,6 +4,9 @@
let productName = $state("");
let description = $state("");
let image = $state<string | null>(null);
let reportType = $state<"product" | "company">("product");
let pdfData = $state<string | null>(null);
let pdfName = $state<string | null>(null);
let submitted = $state(false);
let isLoading = $state(false);
let analysisResult = $state<any>(null);
@@ -40,6 +43,25 @@
input.click();
}
async function pickPdf() {
const input = document.createElement("input");
input.type = "file";
input.accept = "application/pdf";
input.onchange = (e: Event) => {
const target = e.target as HTMLInputElement;
const file = target.files?.[0];
if (file) {
pdfName = file.name;
const reader = new FileReader();
reader.onload = (event) => {
pdfData = event.target?.result as string;
};
reader.readAsDataURL(file);
}
};
input.click();
}
const progressSteps = [
{ id: 1, label: "Scanning image...", icon: "ri:camera-lens-line" },
{
@@ -65,7 +87,7 @@
error = null;
currentStep = 1;
// Simulate progress steps
const stepInterval = setInterval(() => {
if (currentStep < progressSteps.length) {
currentStep++;
@@ -83,18 +105,20 @@
body: JSON.stringify({
product_name: productName,
description: description,
image: image, // Base64 encoded
report_type: reportType,
image: reportType === "product" ? image : null,
pdf_data: reportType === "company" ? pdfData : null,
}),
},
);
clearInterval(stepInterval);
currentStep = progressSteps.length; // Complete all steps
currentStep = progressSteps.length;
const data = await response.json();
if (data.status === "success") {
// Brief pause to show completion
await new Promise((r) => setTimeout(r, 500));
analysisResult = data;
submitted = true;
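Putting the payload changes above together, the JSON body the backend now receives has roughly this shape (a sketch inferred from this hunk only; no fields beyond those visible are assumed):
type ReportPayload = {
product_name: string;
description: string;
report_type: "product" | "company";
image: string | null; // base64 data URL, only for product reports
pdf_data: string | null; // base64 data URL, only for company reports
};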
@@ -200,10 +224,28 @@
<div class="glass-card form-card"> <div class="glass-card form-card">
<div class="form-content"> <div class="form-content">
<div class="form-group"> <div class="report-type-toggle">
<label class="label" for="productName" <button
>Product Name</label class="toggle-option"
class:active={reportType === "product"}
onclick={() => (reportType = "product")}
> >
Product Incident
</button>
<button
class="toggle-option"
class:active={reportType === "company"}
onclick={() => (reportType = "company")}
>
Company Report
</button>
</div>
<div class="form-group">
<label class="label" for="productName">
{reportType === "product"
? "Product Name"
: "Company Name"}
</label>
<div class="input-wrapper"> <div class="input-wrapper">
<iconify-icon <iconify-icon
icon="ri:price-tag-3-line" icon="ri:price-tag-3-line"
@@ -213,7 +255,9 @@
type="text" type="text"
id="productName" id="productName"
class="input" class="input"
placeholder="e.g. 'Eco-Friendly' Water Bottle" placeholder={reportType === "product"
? "e.g. 'Eco-Friendly' Water Bottle"
: "e.g. Acme Corp"}
bind:value={productName} bind:value={productName}
/> />
</div> </div>
@@ -238,35 +282,70 @@
</div>
<div class="form-group">
<span class="label">Evidence (Photo)</span>
<button
class="image-picker"
onclick={pickImage}
type="button"
>
{#if image}
<img
src={image}
alt="Evidence"
class="picked-image"
/>
<div class="change-overlay">
<iconify-icon
icon="ri:camera-switch-line"
width="24"
></iconify-icon>
</div>
{:else}
<div class="picker-placeholder">
<iconify-icon
icon="ri:camera-add-line"
width="32"
style="color: rgba(255,255,255,0.4);"
></iconify-icon>
<p>Upload Photo</p>
</div>
{/if}
</button>
<span class="label">
{reportType === "product"
? "Evidence (Photo)"
: "Company Report (PDF)"}
</span>
{#if reportType === "product"}
<button
class="image-picker"
onclick={pickImage}
type="button"
>
{#if image}
<img
src={image}
alt="Evidence"
class="picked-image"
/>
<div class="change-overlay">
<iconify-icon
icon="ri:camera-switch-line"
width="24"
></iconify-icon>
</div>
{:else}
<div class="picker-placeholder">
<iconify-icon
icon="ri:camera-add-line"
width="32"
style="color: rgba(255,255,255,0.4);"
></iconify-icon>
<p>Upload Photo</p>
</div>
{/if}
</button>
{:else}
<button
class="image-picker pdf-picker"
onclick={pickPdf}
type="button"
>
{#if pdfData}
<div class="picker-placeholder active-pdf">
<iconify-icon
icon="ri:file-pdf-2-fill"
width="48"
style="color: #ef4444;"
></iconify-icon>
<p class="pdf-name">{pdfName}</p>
<span class="change-text"
>Click to change</span
>
</div>
{:else}
<div class="picker-placeholder">
<iconify-icon
icon="ri:file-upload-line"
width="32"
style="color: rgba(255,255,255,0.4);"
></iconify-icon>
<p>Upload PDF Report</p>
</div>
{/if}
</button>
{/if}
</div>
{#if error}
@@ -410,6 +489,51 @@
gap: 16px;
}
.report-type-toggle {
display: flex;
background: rgba(255, 255, 255, 0.05);
border-radius: 12px;
padding: 4px;
border: 1px solid rgba(255, 255, 255, 0.1);
}
.toggle-option {
flex: 1;
background: transparent;
border: none;
color: rgba(255, 255, 255, 0.6);
padding: 10px;
border-radius: 8px;
font-weight: 600;
font-size: 13px;
cursor: pointer;
transition: all 0.2s;
}
.toggle-option:hover {
color: white;
background: rgba(255, 255, 255, 0.05);
}
.toggle-option.active {
background: rgba(34, 197, 94, 0.15);
color: #4ade80;
}
.pdf-name {
color: white !important;
font-size: 14px;
text-align: center;
padding: 0 20px;
word-break: break-all;
}
.change-text {
font-size: 12px;
color: rgba(255, 255, 255, 0.4);
margin-top: 4px;
}
.form-group {
display: flex;
flex-direction: column;