Demo Fixes 6
@@ -112,6 +112,15 @@ def load_models():
        torch_dtype=torch.bfloat16
    )
    models.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")

    # Configure all special tokens
    models.tokenizer.pad_token = models.tokenizer.eos_token
    models.tokenizer.padding_side = "left"  # For causal language modeling

    # Inform the model about the pad token
    if hasattr(models.llm.config, "pad_token_id") and models.llm.config.pad_token_id is None:
        models.llm.config.pad_token_id = models.tokenizer.pad_token_id

    logger.info("Llama 3.2 model loaded successfully")
    socketio.emit('model_status', {'model': 'llm', 'status': 'loaded'})
    progress = 100
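For context, the pad-token setup in this hunk follows the standard transformers pattern for decoder-only models: Llama ships without a pad token, and generation continues from the last input token, so any padding has to sit on the left. A minimal standalone sketch of the same pattern (model name and prompts are illustrative; the app's models, logger, and socketio objects are omitted):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-1B", torch_dtype=torch.bfloat16
)

# Reuse EOS as the pad token and pad on the left, so every row of a
# batch ends at a real token that generation can continue from.
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"
model.config.pad_token_id = tokenizer.pad_token_id

batch = tokenizer(["Hello", "A longer prompt"], return_tensors="pt", padding=True)
out = model.generate(**batch, max_new_tokens=20)
print(tokenizer.batch_decode(out[:, batch["input_ids"].shape[1]:], skip_special_tokens=True))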
@@ -392,6 +401,11 @@ def process_audio_and_respond(session_id, data):
    prompt = f"{conversation_history}Assistant: "

    # Generate response with Llama
    try:
        # Ensure pad token is set
        if models.tokenizer.pad_token is None:
            models.tokenizer.pad_token = models.tokenizer.eos_token

        input_tokens = models.tokenizer(
            prompt,
            return_tensors="pt",
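The hunk cuts off mid-call: everything after return_tensors="pt" lies outside the diff, as does the generate step that produces the generated_ids and input_ids the next hunk decodes. A hypothetical sketch of how that elided section commonly looks (argument names and values here are assumptions, not the commit's code):

        # Hypothetical continuation -- not the commit's actual code.
        input_tokens = models.tokenizer(
            prompt,
            return_tensors="pt",
            padding=True,     # assumed; pairs with the left padding set at load time
            truncation=True,  # assumed; guards against overlong conversation histories
        )
        input_ids = input_tokens["input_ids"].to(models.llm.device)

        generated_ids = models.llm.generate(
            input_ids,
            attention_mask=input_tokens["attention_mask"].to(models.llm.device),
            max_new_tokens=256,                          # assumed value
            pad_token_id=models.tokenizer.pad_token_id,  # avoids the missing-pad warning
        )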
@@ -417,6 +431,11 @@ def process_audio_and_respond(session_id, data):
            generated_ids[0][input_ids.shape[1]:],
            skip_special_tokens=True
        ).strip()
    except Exception as e:
        logger.error(f"Error generating response: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        response_text = "I'm sorry, I encountered an error while processing your request."

    # Synthesize speech
    with app.app_context():
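The slice in this hunk strips the echoed prompt: generate returns the prompt tokens followed by the new tokens, so decoding from index input_ids.shape[1] onward keeps only the model's reply. A toy illustration with made-up token IDs:

import torch

input_ids = torch.tensor([[101, 42, 7]])            # 3 prompt tokens (made-up IDs)
generated_ids = torch.tensor([[101, 42, 7, 9, 5]])  # prompt + 2 generated tokens

new_tokens = generated_ids[0][input_ids.shape[1]:]  # tensor([9, 5]) -- reply only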