DeadPool1236 committed on
Commit
b16478b
·
verified ·
1 Parent(s): 5bd3c40

Upload 14 files

Browse files
Files changed (14) hide show
  1. .dockerignore +24 -0
  2. Agent.py +1189 -0
  3. Dockerfile +33 -0
  4. Index.py +192 -0
  5. app.py +449 -0
  6. audio_processor.py +301 -0
  7. config.json +13 -0
  8. conversations.json +0 -0
  9. index.html +161 -0
  10. language_utils.py +28 -0
  11. rag_system.log +18 -0
  12. requirements.txt +22 -0
  13. script.js +463 -0
  14. styles.css +1344 -0
.dockerignore ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .git
2
+ .gitignore
3
+ *.log
4
+ __pycache__
5
+ *.pyc
6
+ *.pyo
7
+ *.pyd
8
+ .Python
9
+ env
10
+ pip-log.txt
11
+ .tox
12
+ .coverage
13
+ .cache
14
+ nosetests.xml
15
+ coverage.xml
16
+ *.cover
17
+ .hypothesis/
18
+ .pytest_cache/
19
+ .DS_Store
20
+ rag_system.log
21
+ test_openrouter_api.py
22
+ DataSet/
23
+ *.tmp
24
+ temp/
Agent.py ADDED
@@ -0,0 +1,1189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import requests
4
+ import time
5
+ import pickle
6
+ import hashlib
7
+ import random
8
+ import re
9
+ from typing import List, Optional, Any
10
+ import logging
11
+ from dotenv import load_dotenv
12
+ from datetime import datetime
13
+
14
+ # === LANGUAGE DETECTION IMPORTS ===
15
+ from langdetect import detect, DetectorFactory
16
+ DetectorFactory.seed = 0
17
+ # ==================================
18
+
19
+ # === OPENAI CLIENT IMPORT ===
20
+ from openai import OpenAI
21
+ # ============================
22
+
23
# === HUGGING FACE DETECTION ===
# The Docker sentinel file or the SPACE_ID env var marks a Hugging Face Space.
IS_HUGGING_FACE = os.path.exists('/.dockerenv') or 'SPACE_ID' in os.environ

if IS_HUGGING_FACE:
    print("🚀 Hugging Face Space detected")
    # Downstream model selection keys off this env flag.
    os.environ['FORCE_FREE_MODEL'] = 'true'

# Load environment variables
if not IS_HUGGING_FACE:
    # Local runs read secrets from config/.env; Spaces use repository secrets.
    env_path = os.path.join("config", ".env")
    print(f"🔍 Looking for .env file at: {env_path}")
    if os.path.exists(env_path):
        load_dotenv(env_path)
        print("✅ .env file loaded successfully")
    else:
        print(f"❌ .env file not found at: {env_path}")
else:
    print("✅ Hugging Face environment - using repository secrets")

# === CACHE CONFIGURATION ===
# On-disk pickle used by ResponseCache below.
CACHE_DIR = "cache"
RESPONSE_CACHE_FILE = os.path.join(CACHE_DIR, "response_cache.pkl")
45
+
46
class ResponseCache:
    """Disk-backed response cache keyed by (query, context) to avoid repeated LLM calls.

    Entries expire after ``ttl_seconds`` (previously a hard-coded 24 hours,
    now a constructor parameter with the same default). The whole cache is
    persisted to ``RESPONSE_CACHE_FILE`` as a pickle on every write.
    """

    # Default time-to-live for cached responses: 24 hours.
    DEFAULT_TTL_SECONDS = 24 * 3600

    def __init__(self, ttl_seconds: int = DEFAULT_TTL_SECONDS):
        # Maps cache key -> (unix timestamp, response text).
        self.cache = {}
        self.ttl_seconds = ttl_seconds
        self.load_cache()

    def get_cache_key(self, query: str, context_chunks: List[Any]) -> str:
        """Generate a unique cache key from the query and retrieved context.

        MD5 is used only as a fast fingerprint here, not for security.
        Only the first 100 characters of each chunk participate, so
        near-identical contexts map to the same entry.
        """
        query_hash = hashlib.md5(query.encode()).hexdigest()
        if context_chunks:
            context_text = "".join(
                chunk.text[:100] for chunk in context_chunks if hasattr(chunk, 'text')
            )
            context_hash = hashlib.md5(context_text.encode()).hexdigest()
        else:
            context_hash = "no_context"
        return f"{query_hash}_{context_hash}"

    def get(self, key: str) -> Optional[str]:
        """Return the cached response for ``key``, or None if absent or expired."""
        if key in self.cache:
            cached_time, response = self.cache[key]
            if time.time() - cached_time < self.ttl_seconds:
                return response
            # Expired: evict from the in-memory cache (the pruned state is
            # persisted on the next set()).
            del self.cache[key]
        return None

    def set(self, key: str, response: str) -> None:
        """Cache ``response`` under ``key`` with the current timestamp and persist."""
        self.cache[key] = (time.time(), response)
        self.save_cache()

    def save_cache(self) -> None:
        """Persist the whole cache to disk as a pickle."""
        os.makedirs(CACHE_DIR, exist_ok=True)
        with open(RESPONSE_CACHE_FILE, 'wb') as f:
            pickle.dump(self.cache, f)

    def load_cache(self) -> None:
        """Load the cache from disk; start empty if missing or unreadable."""
        try:
            if os.path.exists(RESPONSE_CACHE_FILE):
                with open(RESPONSE_CACHE_FILE, 'rb') as f:
                    self.cache = pickle.load(f)
                print(f"✅ Loaded response cache with {len(self.cache)} entries")
        except Exception as e:
            # A corrupt or version-incompatible pickle must not break startup.
            print(f"⚠️ Could not load cache: {e}")
            self.cache = {}

# Initialize cache globally
response_cache = ResponseCache()
97
+
98
# === Conversation Logger ===
class ConversationLogger:
    """Append-only conversation log stored as a JSON array on disk.

    ROBUSTNESS FIX: previously a corrupted log file made ``json.load`` raise
    on every call, so logging silently failed forever. The log is now reset
    to an empty list when it is unreadable or not a JSON list.
    """

    def __init__(self, log_file: str = "conversations.json"):
        self.log_file = log_file
        self.ensure_log_file()

    def ensure_log_file(self) -> None:
        """Create the log file with an empty JSON array if it does not exist."""
        if not os.path.exists(self.log_file):
            with open(self.log_file, 'w', encoding='utf-8') as f:
                json.dump([], f, indent=2)

    def _read_log(self) -> list:
        """Read the existing log; recover with an empty list if it is corrupt."""
        try:
            with open(self.log_file, 'r', encoding='utf-8') as f:
                conversations = json.load(f)
            if isinstance(conversations, list):
                return conversations
            # Valid JSON but wrong top-level type - start over.
            print(f"⚠️ {self.log_file} did not contain a list - resetting log")
        except (json.JSONDecodeError, FileNotFoundError, OSError) as e:
            print(f"⚠️ Could not read {self.log_file} ({e}) - resetting log")
        return []

    def log_conversation(self, user_input: str, llm_response: str, language: str, response_type: str) -> None:
        """Append one conversation entry to the JSON log file."""
        try:
            conversations = self._read_log()

            conversations.append({
                "user_input": user_input,
                "llm_response": llm_response,
                "language": language,
                "response_type": response_type,
            })

            with open(self.log_file, 'w', encoding='utf-8') as f:
                # ensure_ascii=False keeps Urdu text readable in the file.
                json.dump(conversations, f, indent=2, ensure_ascii=False)

            print(f"💾 Conversation logged to {self.log_file}")

        except Exception as e:
            # Logging must never crash the chat flow.
            print(f"❌ Error logging conversation: {e}")

# Initialize global logger
conversation_logger = ConversationLogger()
140
+
141
# === Centralized Configuration System ===
class Config:
    """Centralized configuration - loads from config.json and .env

    Initialization order matters: settings are loaded from disk, the dataset
    path is auto-corrected, Hugging Face overrides are applied, and only then
    are the individual UPPER_CASE attributes fanned out from ``settings``.
    """

    def __init__(self):
        # Rotating pool of API keys; current_key_index tracks the active one.
        self.api_keys = []
        self.current_key_index = 0
        self.settings = self._load_config_file()
        self._correct_dataset_path()

        if IS_HUGGING_FACE:
            self._apply_hugging_face_optimizations()

        self.SUPPORTED_LANGUAGES = ["english", "urdu"]
        self.DEFAULT_LANGUAGE = "english"

        # Apply settings
        self.MODEL_PROVIDER = self.settings["model_provider"]
        self.MODEL_ID = self.settings["model_id"]
        self.API_KEYS_FOLDER = self.settings["api_keys_folder"]
        self.INDEX_PATH = self.settings["index_path"]
        self.DATASET_PATH = self.settings["dataset_path"]
        self.SIMILARITY_TOP_K = self.settings["similarity_top_k"]
        self.TEMPERATURE = self.settings["temperature"]
        self.MAX_TOKENS = self.settings["max_tokens"]
        self.FALLBACK_MESSAGE = self.settings["fallback_message"]

        self.api_keys = self._load_api_keys()
        self.api_key = self._get_current_api_key()

        self._validate_config()

    def _correct_dataset_path(self):
        """Automatically find the correct dataset path"""
        # Probe likely locations; the first hit wins and is written back into
        # settings so the later DATASET_PATH fan-out sees the corrected path.
        original_path = self.settings["dataset_path"]
        possible_paths = [
            original_path,
            f"DataSet/{original_path}",
            f"data/{original_path}",
            "DataSet/breast_cancer.json",
            "breast_cancer.json"
        ]

        for path in possible_paths:
            if os.path.exists(path):
                if path != original_path:
                    print(f"🔄 Using dataset at: {path}")
                    self.settings["dataset_path"] = path
                else:
                    print(f"✅ Dataset found at: {path}")
                return

        # Non-fatal: callers must cope with a missing dataset.
        print(f"❌ Dataset not found in any location")

    def _apply_hugging_face_optimizations(self):
        """Apply optimizations for Hugging Face deployment"""
        # Smaller responses + fewer retrieved chunks keep free-tier latency low.
        print("🔧 Applying Hugging Face optimizations...")
        self.settings["max_tokens"] = 300
        self.settings["similarity_top_k"] = 3
        self.settings["model_id"] = "meta-llama/llama-3.3-70b-instruct:free"

    def _load_config_file(self):
        """Load configuration from config/config.json file"""
        config_file = os.path.join("config", "config.json")
        # Defaults double as the schema; config.json overrides key-by-key.
        default_config = {
            "model_provider": "openrouter",
            "model_id": "deepseek/deepseek-r1:free",
            "api_keys_folder": "config",
            "index_path": "cancer_index_store",
            "dataset_path": "breast_cancer.json",
            "similarity_top_k": 5,
            "temperature": 0.2,
            "max_tokens": 350,
            "fallback_message": "Sorry, I don't know the answer."
        }

        try:
            if os.path.exists(config_file):
                with open(config_file, 'r', encoding='utf-8') as f:
                    loaded_config = json.load(f)
                merged_config = {**default_config, **loaded_config}

                if IS_HUGGING_FACE:
                    # Force the free model regardless of what config.json says.
                    merged_config["model_id"] = "meta-llama/llama-3.3-70b-instruct:free"
                    merged_config["max_tokens"] = 300

                return merged_config
            else:
                # First run: materialize the defaults so they can be edited.
                os.makedirs(os.path.dirname(config_file), exist_ok=True)
                with open(config_file, 'w', encoding='utf-8') as f:
                    json.dump(default_config, f, indent=4)
                print("📁 Created default config file")
                return default_config
        except Exception as e:
            # Any read/parse failure falls back to the in-code defaults.
            print(f"❌ Error loading config: {e}")
            return default_config

    def _load_api_keys(self) -> List[str]:
        """Load API keys from environment variables"""
        api_keys = []
        print("🔍 Checking for API keys in environment variables...")

        # Up to five keys are supported for rotation on rate limits.
        keys_to_check = ["API_KEY", "API_KEY_2", "API_KEY_3", "API_KEY_4", "API_KEY_5"]

        for key_name in keys_to_check:
            key_value = os.getenv(key_name)
            if key_value and key_value.strip():
                api_keys.append(key_value.strip())
                print(f"✅ Found {key_name}")

        return api_keys

    def _get_current_api_key(self) -> str:
        """Get current active API key"""
        # Empty string (not None) signals "no key" to request-building code.
        if self.api_keys and self.current_key_index < len(self.api_keys):
            return self.api_keys[self.current_key_index]
        return ""

    def rotate_to_next_key(self) -> bool:
        """Rotate to next API key if available"""
        # Intended to be called after a rate-limit/auth failure; returns False
        # when the pool is exhausted.
        if self.current_key_index < len(self.api_keys) - 1:
            self.current_key_index += 1
            self.api_key = self._get_current_api_key()
            print(f"🔄 Rotated to API key {self.current_key_index + 1}")
            return True
        else:
            print("❌ No more API keys available")
            return False

    def _validate_config(self):
        """Validate configuration"""
        # Only warns - the app still starts without keys (retrieval-only mode).
        if not self.api_keys:
            print("❌ No API keys found in environment variables")
            if IS_HUGGING_FACE:
                print("💡 Please add API keys in Hugging Face Space Settings → Repository secrets")
        else:
            print(f"✅ Found {len(self.api_keys)} API key(s)")

# Initialize configuration
config = Config()
281
+
282
# === Setup Logging ===
# Console-only logging on Hugging Face (ephemeral filesystem);
# console + rag_system.log file when running locally.
_log_handlers = [logging.StreamHandler()]
if not IS_HUGGING_FACE:
    _log_handlers.append(logging.FileHandler('rag_system.log'))

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=_log_handlers
)
298
+
299
# === FAST INDEX LOADING ===
def load_index_fast():
    """Load the persisted vector index and build a retriever from it.

    Returns:
        (index, retriever) on success, (None, None) on any failure.
    """
    try:
        from llama_index.core import StorageContext, load_index_from_storage
        from llama_index.embeddings.huggingface import HuggingFaceEmbedding

        print(f"🔍 Loading index from: {config.INDEX_PATH}")
        if not os.path.exists(config.INDEX_PATH):
            print(f"❌ Index path doesn't exist: {config.INDEX_PATH}")
            return None, None

        # Must match the embedding model used when the index was built.
        embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
        storage_context = StorageContext.from_defaults(persist_dir=config.INDEX_PATH)
        # BUGFIX: VectorStoreIndex.from_documents([]) creates a NEW empty index
        # and ignores the persisted nodes; load_index_from_storage restores the
        # index that was actually persisted under INDEX_PATH.
        index = load_index_from_storage(storage_context, embed_model=embed_model)
        retriever = index.as_retriever(similarity_top_k=config.SIMILARITY_TOP_K)
        print("✅ Index loaded successfully")
        return index, retriever

    except Exception as e:
        print(f"❌ Failed to load index: {e}")
        import traceback
        traceback.print_exc()
        return None, None
327
+
328
def load_index():
    """Compatibility wrapper kept for callers that use the old name."""
    index_and_retriever = load_index_fast()
    return index_and_retriever
330
+
331
+ # === Enhanced RAG System Class ===
332
+ class BreastCancerRAGSystem:
333
+ """Enhanced RAG system for breast cancer information with emotional support"""
334
+
335
def __init__(self, index, retriever):
    """Store the vector index/retriever and start a fresh conversation history.

    Args:
        index: loaded llama-index vector index (may be None if loading failed).
        retriever: retriever derived from that index (may be None).
    """
    self.index = index
    self.retriever = retriever
    # Rolling history of the current chat session (not persisted here).
    self.conversation_history = []

    # Retrieval still works without keys, but LLM calls will fail.
    if not config.api_keys:
        logging.error("🚫 System initialized without API key - LLM features will not work")
342
+
343
def get_predefined_questions(self, language: str = "english") -> List[dict]:
    """Return suggested daily-routine questions for breast cancer patients.

    Args:
        language: "urdu" selects the Urdu question set; any other value
            (including the default) selects the English set.

    Returns:
        A list of dicts with "question", "category" and "icon" keys.
    """
    # (question, category, font-awesome icon) triples, expanded to dicts below.
    english_triples = [
        ("What are some gentle exercises I can do during recovery?",
         "exercise", "fas fa-walking"),
        ("How do I deal with anxiety about my next treatment?",
         "emotional", "fas fa-heart"),
        ("When can I expect my hair to grow back after treatment?",
         "appearance", "fas fa-user"),
        ("How do I talk to my family about my diagnosis?",
         "emotional", "fas fa-users"),
        ("What are the signs of infection I should watch for?",
         "symptoms", "fas fa-exclamation-triangle"),
    ]

    urdu_triples = [
        ("کیموتھراپی کے دوران تھکاوٹ کیسے کم کریں؟",
         "symptoms", "fas fa-bed"),
        ("ریکوری کے دوران ہلکی پھلکی ورزشیں کون سی ہیں؟",
         "exercise", "fas fa-walking"),
        ("اگلے علاج کے بارے میں پریشانی کیسے دور کریں؟",
         "emotional", "fas fa-heart"),
        ("کیموتھراپی کے بعد متلی کے لیے کون سی غذائیں مفید ہیں؟",
         "nutrition", "fas fa-apple-alt"),
        ("ماسٹکٹومی کے بعد درد کیسے منظم کریں؟",
         "pain", "fas fa-hand-holding-heart"),
    ]

    chosen = urdu_triples if language == "urdu" else english_triples
    return [
        {"question": question, "category": category, "icon": icon}
        for question, category, icon in chosen
    ]
403
+
404
def detect_language(self, text: str) -> str:
    """Classify ``text`` as 'urdu' or 'english'.

    Any Arabic-script character (Urdu uses the Arabic script blocks)
    short-circuits to 'urdu'; otherwise langdetect decides. Any detection
    failure (e.g. empty or symbol-only text) falls back to 'english'.
    """
    try:
        # Arabic / Arabic Supplement / Arabic Extended-A script ranges.
        urdu_pattern = re.compile(r'[\u0600-\u06FF\u0750-\u077F\u08A0-\u08FF]+')
        if urdu_pattern.search(text):
            return 'urdu'
        detected_lang = detect(text)
        return 'urdu' if detected_lang == 'ur' else 'english'
    except Exception:
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; langdetect's LangDetectException lands here.
        return 'english'
414
+
415
+ def _clean_urdu_text(self, text: str) -> str:
416
+ """Advanced cleaning for Urdu text with comprehensive spelling correction"""
417
+ if not text or not text.strip():
418
+ return text
419
+
420
+ # Comprehensive spelling correction dictionary
421
+ spelling_corrections = {
422
+ # Character repetition fixes
423
+ 'مجہے': 'مجھے',
424
+ 'پروگرہوں': 'پروگرام',
425
+ 'کہےنسر': 'کینسر',
426
+ 'ڈڈاکٹر': 'ڈاکٹر',
427
+ 'ہےہ': 'ہے',
428
+ 'مہےں': 'میں',
429
+ 'ہےں': 'ہیں',
430
+ 'ھے': 'ہے',
431
+ 'ھوں': 'ہوں',
432
+ 'ھیں': 'ہیں',
433
+ 'ےے': 'ے',
434
+ 'ںں': 'ں',
435
+ 'ہہ': 'ہ',
436
+ 'یی': 'ی',
437
+
438
+ # Common phrase corrections
439
+ 'ے لہےے': 'کے لیے',
440
+ 'کا ے لہےے': 'کے لیے',
441
+ 'و ہےہ': 'کو',
442
+ 'ہےقہےن': 'یقین',
443
+ 'اکہےلے': 'اکیلے',
444
+ 'نہہےں': 'نہیں',
445
+ 'ہہےں': 'ہیں',
446
+ 'کا ے': 'کے',
447
+ 'ساتھ ہہےں': 'ساتھ ہیں',
448
+ 'تجوہےز': 'تجویز',
449
+ 'ضرورہے': 'ضروری',
450
+ 'بارے مہےں': 'بارے میں',
451
+ 'کرہےں': 'کریں',
452
+ 'بہترہےن': 'بہترین',
453
+ 'ہے مدد': 'کی مدد',
454
+ 'خوشہے': 'خوشی',
455
+ 'ترجہےح': 'ترجیح',
456
+ 'جسے سے': 'جس سے',
457
+
458
+ # Medical term corrections
459
+ 'برہےسٹ': 'بریسٹ',
460
+ 'کہےموتھراپہے': 'کیموتھراپی',
461
+ 'متلہے': 'متلی',
462
+ 'غذائہےں': 'غذائیں',
463
+ 'چربہے': 'چربی',
464
+ 'ہلکے': 'ہلکی',
465
+ 'آسانہے': 'آسانی',
466
+ 'ہائہےڈرہےٹنگ': 'ہائیڈریٹنگ',
467
+ 'ہائہےڈرہےٹڈ': 'ہائیڈریٹڈ',
468
+
469
+ # Grammar and structure fixes
470
+ 'کرنےے': 'کرنے',
471
+ 'ہونےے': 'ہونے',
472
+ 'سکتےے': 'سکتے',
473
+ 'سکتیی': 'سکتی',
474
+ 'والےے': 'والے',
475
+ 'والیی': 'والی',
476
+ 'کہے': 'کے',
477
+ 'ہےے': 'ہے',
478
+
479
+ # Common word fixes
480
+ 'ام ': 'ہوں ',
481
+ 'می ': 'میں ',
482
+ 'آپ ک': 'آپ کا ',
483
+ 'دوران ': 'دوران ',
484
+ 'عام ': 'عام ',
485
+ 'مسئل ': 'مسئلہ ',
486
+ 'اس ': 'اس ',
487
+ 'کو ': 'کو ',
488
+ 'کرن ': 'کرنے ',
489
+ 'س ': 'سے ',
490
+ 'طریق ': 'طریقے ',
491
+ 'بتا ': 'بتا ',
492
+ 'سکتی ': 'سکتی ',
493
+ 'اکٹر': 'ڈاکٹر',
494
+ 'اکیل': 'اکیلے',
495
+ 'میش': 'میں',
496
+ 'وتی': 'ہوتی',
497
+ 'لکی': 'ہلکی',
498
+ 'بتر': 'بہتر',
499
+ 'محفوظ ر': 'محفوظ رکھتی ہے',
500
+ 'رشت': 'رشتہ داروں',
501
+ }
502
+
503
+ # Apply spelling corrections iteratively
504
+ for wrong, correct in spelling_corrections.items():
505
+ text = text.replace(wrong, correct)
506
+
507
+ # Fix common grammatical patterns using regex for better coverage
508
+ import re
509
+
510
+ # Fix character repetition patterns
511
+ repetition_patterns = [
512
+ (r'ہہ', 'ہ'),
513
+ (r'یی', 'ی'),
514
+ (r'ےے', 'ے'),
515
+ (r'ںں', 'ں'),
516
+ (r'کک', 'ک'),
517
+ (r'گگ', 'گ'),
518
+ ]
519
+
520
+ for pattern, replacement in repetition_patterns:
521
+ text = re.sub(pattern, replacement, text)
522
+
523
+ # Fix common grammatical patterns
524
+ grammatical_fixes = [
525
+ ('ک دوران', 'کے دوران'),
526
+ ('ک بار', 'کے بارے'),
527
+ ('ک بعد', 'کے بعد'),
528
+ ('ک لی', 'کے لیے'),
529
+ ('ک ساتھ', 'کے ساتھ'),
530
+ ('ک طور', 'کے طور'),
531
+ ('ک ذریع', 'کے ذریعے'),
532
+ ('ک مطابق', 'کے مطابق'),
533
+ ]
534
+
535
+ for wrong, correct in grammatical_fixes:
536
+ text = text.replace(wrong, correct)
537
+
538
+ # Fix spacing and punctuation issues
539
+ text = re.sub(r'\s+', ' ', text) # Multiple spaces to single space
540
+ text = re.sub(r' \.', '.', text) # Space before period
541
+ text = re.sub(r' ،', '،', text) # Space before comma
542
+ text = re.sub(r' ', ' ', text) # Double spaces
543
+ text = re.sub(r'۔۔', '۔', text) # Double periods
544
+
545
+ # Ensure sentence completion and structure
546
+ sentences = text.split('۔')
547
+ cleaned_sentences = []
548
+
549
+ for sentence in sentences:
550
+ sentence = sentence.strip()
551
+ if sentence and len(sentence.split()) >= 2: # At least 2 words
552
+ # Ensure sentence starts properly (no hanging characters)
553
+ if sentence and sentence[0] in [' ', '،', '۔']:
554
+ sentence = sentence[1:].strip()
555
+ if sentence:
556
+ cleaned_sentences.append(sentence)
557
+
558
+ # Reconstruct text with proper punctuation
559
+ if cleaned_sentences:
560
+ text = '۔ '.join(cleaned_sentences) + '۔'
561
+ else:
562
+ text = text.strip()
563
+
564
+ # Final normalization
565
+ text = text.strip()
566
+
567
+ return text
568
+
569
+ def _detect_emotional_needs(self, user_query: str, language: str = "english") -> dict:
570
+ """Enhanced emotional need detection with better Urdu support"""
571
+ query_lower = user_query.lower()
572
+
573
+ # Emotional triggers in both languages
574
+ emotional_triggers_english = [
575
+ "scared", "afraid", "worried", "anxious", "fear", "nervous", "stressed",
576
+ "overwhelmed", "depressed", "sad", "lonely", "alone", "hopeless",
577
+ "can't cope", "struggling", "difficult", "hard time", "suffering",
578
+ "terrified", "panic", "breakdown", "crying", "tears", "misery"
579
+ ]
580
+
581
+ emotional_triggers_urdu = [
582
+ "خوف", "ڈر", "پریشانی", "فکر", "تنہائی", "اداسی", "مایوسی", "تکلیف",
583
+ "گھبراہٹ", "بے چینی", "بے بسی", "رونا", "آنسو", "دکھ", "غم",
584
+ "ہمت", "طاقت", "حوصلہ", "پرسکون", "سکون", "چین"
585
+ ]
586
+
587
+ # Information triggers
588
+ info_triggers_english = [
589
+ "what", "how", "when", "where", "which", "why",
590
+ "treatment", "medication", "exercise", "diet", "symptoms",
591
+ "pain", "side effects", "recovery", "diagnosis", "procedure"
592
+ ]
593
+
594
+ info_triggers_urdu = [
595
+ "کیا", "کیسے", "کب", "کہاں", "کون سا", "کیوں", "کس طرح",
596
+ "علاج", "دوا", "ورزش", "غذا", "علامات", "درد", "مراحل",
597
+ "طریقہ", "عمل", "تفصیل", "معلومات"
598
+ ]
599
+
600
+ if language == "urdu":
601
+ emotional_triggers = emotional_triggers_urdu
602
+ info_triggers = info_triggers_urdu
603
+ else:
604
+ emotional_triggers = emotional_triggers_english
605
+ info_triggers = info_triggers_english
606
+
607
+ emotional_score = sum(1 for trigger in emotional_triggers if trigger in query_lower)
608
+ info_score = sum(1 for trigger in info_triggers if trigger in query_lower)
609
+
610
+ return {
611
+ "needs_emotional_support": emotional_score > 0,
612
+ "needs_information": info_score > 0,
613
+ "emotional_score": emotional_score,
614
+ "info_score": info_score
615
+ }
616
+
617
+ def _add_emotional_support(self, response: str, user_query: str, language: str = "english") -> str:
618
+ """Add natural emotional support integrated into the response"""
619
+ emotional_needs = self._detect_emotional_needs(user_query, language)
620
+
621
+ # Always add some level of emotional support, but more if detected
622
+ if language == "urdu":
623
+ if emotional_needs["needs_emotional_support"]:
624
+ # Strong emotional support phrases
625
+ support_phrases = [
626
+ "آپ کی طاقت قابلِ تعریف ہے، اور میں آپ کے ساتھ ہوں۔",
627
+ "یہ مشکل وقت ہے، لیکن آپ اکیلے نہیں ہیں۔ ہم مل کر اس کا سامنا کریں گے۔",
628
+ "آپ کی ہمت اور صبر کو سلام، بہتر دن ضرور آئیں گے۔",
629
+ ]
630
+ else:
631
+ # Gentle emotional support phrases
632
+ support_phrases = [
633
+ "آپ کی صحت اور خوشی ہماری پہلی ترجیح ہے۔",
634
+ "یقین رکھیں، ہر طوفان کے بعد سکون ضرور آتا ہے۔",
635
+ "آپ جیسے بہادر لوگ ہی دنیا کو روشن کرتے ہیں۔",
636
+ ]
637
+ else:
638
+ if emotional_needs["needs_emotional_support"]:
639
+ # Strong emotional support phrases
640
+ support_phrases = [
641
+ "Your strength is truly admirable, and I'm here with you every step of the way.",
642
+ "This is a challenging time, but you're not alone. We'll face this together.",
643
+ "I want you to know how much courage you're showing, and better days will come.",
644
+ ]
645
+ else:
646
+ # Gentle emotional support phrases
647
+ support_phrases = [
648
+ "Your wellbeing and happiness are my top priority right now.",
649
+ "Please remember that after every storm comes calm.",
650
+ "People like you, with such resilience, truly light up the world.",
651
+ ]
652
+
653
+ # Choose a support phrase that fits naturally
654
+ support_text = random.choice(support_phrases)
655
+
656
+ # Integrate support naturally - for Urdu, place at beginning for impact
657
+ if language == "urdu":
658
+ if support_text not in response:
659
+ # Check if response already has emotional content
660
+ if not any(phrase in response for phrase in ['طاقت', 'ہمت', 'حوصلہ', 'سکون', 'خوشی']):
661
+ return f"{support_text}\n\n{response}"
662
+ else:
663
+ if support_text not in response:
664
+ # Check if response already has emotional content
665
+ if not any(phrase in response for phrase in ['strength', 'courage', 'hope', 'together', 'proud']):
666
+ return f"{support_text}\n\n{response}"
667
+
668
+ return response
669
+
670
+ def retrieve_relevant_chunks(self, user_query: str, language: str = "english") -> List[Any]:
671
+ """Retrieve relevant chunks with language-specific prioritization"""
672
+ if not hasattr(self, 'retriever') or self.retriever is None:
673
+ print("❌ Retriever not available")
674
+ return []
675
+
676
+ try:
677
+ if language == "urdu":
678
+ print("🔍 Prioritizing Urdu content for Urdu query...")
679
+ from llama_index.core.vector_stores import MetadataFilter, MetadataFilters
680
+ urdu_filter = MetadataFilter(key="language", value="urdu")
681
+ urdu_results = self.retriever.retrieve(
682
+ user_query,
683
+ filters=MetadataFilters(filters=[urdu_filter])
684
+ )
685
+
686
+ quality_threshold = 0.5
687
+ high_quality_urdu = [
688
+ result for result in urdu_results
689
+ if hasattr(result, 'score') and result.score >= quality_threshold
690
+ ]
691
+
692
+ if high_quality_urdu:
693
+ print(f"✅ Found {len(high_quality_urdu)} high-quality Urdu chunks")
694
+ return high_quality_urdu[:5]
695
+ elif urdu_results:
696
+ print(f"⚠️ Using {len(urdu_results)} lower-confidence Urdu chunks")
697
+ return urdu_results[:3]
698
+
699
+ print("🔍 No Urdu content found, searching all content...")
700
+
701
+ retrieval_results = self.retriever.retrieve(user_query)
702
+ quality_threshold = 0.5
703
+ high_quality_results = [
704
+ result for result in retrieval_results
705
+ if hasattr(result, 'score') and result.score >= quality_threshold
706
+ ]
707
+
708
+ if not high_quality_results and retrieval_results:
709
+ print("⚠️ Using lower confidence results")
710
+ return retrieval_results[:3]
711
+
712
+ print(f"✅ Retrieved {len(high_quality_results)} relevant chunks")
713
+ return high_quality_results[:5]
714
+
715
+ except Exception as e:
716
+ print(f"❌ Retrieval error: {e}")
717
+ return []
718
+
719
def build_concise_prompt(self, user_query: str, context_chunks: List[Any], language: str = "english") -> str:
    """Build prompt for concise, targeted responses with emotional intelligence.

    Uses at most the first two context chunks, truncated to ~100 words each,
    and selects an Urdu or English prompt template based on ``language``.
    The emotional-needs analysis is embedded into the prompt so the model
    can balance information vs. comfort.
    """

    context_text = ""
    if context_chunks:
        context_parts = []
        # Only the top 2 chunks, first ~100 words each, keep the prompt short.
        for i, chunk in enumerate(context_chunks[:2]):
            chunk_text = chunk.text if hasattr(chunk, 'text') else str(chunk)
            key_points = " ".join(chunk_text.split()[:100])
            context_parts.append(f"CONTEXT {i+1}: {key_points}")
        context_text = "\n".join(context_parts)

    # Analyze emotional and information needs
    needs_analysis = self._detect_emotional_needs(user_query, language)

    if language == "urdu":
        # Urdu template: heavy spelling/grammar enforcement because the model
        # tends to emit the exact misspellings listed below.
        prompt = f"""
# WELL BEING AGENT - BREAST CANCER SUPPORT
# CRITICAL: RESPOND ONLY IN URDU LANGUAGE USING CORRECT URDU SPELLING AND GRAMMAR
# ABSOLUTELY NO HINDI, ARABIC, OR OTHER LANGUAGES - PURE URDU ONLY

## PATIENT'S QUERY:
"{user_query}"

## EMOTIONAL ANALYSIS:
- Needs Emotional Support: {'YES' if needs_analysis['needs_emotional_support'] else 'NO'}
- Needs Information: {'YES' if needs_analysis['needs_information'] else 'NO'}

## CONTEXT (USE IF RELEVANT):
{context_text if context_text else "General breast cancer knowledge"}

## CRITICAL SPELLING RULES - MUST FOLLOW:
1. ✅ "مجھے" ❌ "مجہے"
2. ✅ "پروگرام" ❌ "پروگرہوں"
3. ✅ "کینسر" ❌ "کہےنسر"
4. ✅ "ڈاکٹر" ❌ "ڈڈاکٹر"
5. ✅ "ہے" ❌ "ہےہ"
6. ✅ "میں" ❌ "مہےں"
7. ✅ "کے لیے" ❌ "کا ے لہےے"
8. ✅ "جس سے" ❌ "جسے سے"

## RESPONSE REQUIREMENTS - URDU:
1. **LANGUAGE:** صرف اردو میں جواب دیں، درست ہجے اور قواعد کا استعمال کریں
2. **EMOTIONAL TONE:** ہمدردانہ، گرمجوش، اور امید بخش انداز اپنائیں
3. **CONTENT:** اگر معلومات درکار ہوں تو واضح، درست معلومات دیں
4. **SUPPORT:** جذباتی مدد قدرتی طور پر پیش کریں، الگ سے ذکر نہ کریں
5. **LENGTH:** 4-6 جملے، مختصر مگر جامع
6. **SPELLING:** درست اردو ہجے استعمال کریں، غلط ہجے سے پرہیز کریں
7. **COMPLETENESS:** مکمل جملے لکھیں، ادھورے جملے نہ چھوڑیں

## آپ کا گرمجوش، درست اردو میں اور مکمل جواب:
"""
    else:
        # English template: same structure, no spelling-enforcement section.
        prompt = f"""
# WELL BEING AGENT - BREAST CANCER SUPPORT

## PATIENT'S QUERY:
"{user_query}"

## EMOTIONAL ANALYSIS:
- Needs Emotional Support: {'YES' if needs_analysis['needs_emotional_support'] else 'NO'}
- Needs Information: {'YES' if needs_analysis['needs_information'] else 'NO'}

## CONTEXT (USE IF RELEVANT):
{context_text if context_text else "General breast cancer knowledge"}

## RESPONSE REQUIREMENTS:

1. **TONE:** Warm, compassionate, and hopeful
2. **CONTENT:** Provide accurate information if needed
3. **SUPPORT:** Integrate emotional support naturally without explicitly stating it
4. **LENGTH:** 4-6 sentences, concise but comprehensive
5. **FOCUS:** Be caring and present with the patient
6. **COMPLETENESS:** Write complete sentences, no incomplete thoughts

## YOUR COMPASSIONATE RESPONSE:
"""

    return prompt.strip()
798
+
799
def build_urdu_prompt(self, user_query: str, context_chunks: List[Any]) -> str:
    """Build detailed prompt for Urdu responses with strong language enforcement.

    Uses up to three context chunks, each labeled with its source topic
    (from chunk metadata when available).

    BUGFIX: the "EXAMPLES OF CORRECT URDU" section contained mojibake
    (U+FFFD replacement characters) in "حاصل ��رنا"; restored to "حاصل کرنا".
    """
    context_text = ""
    if context_chunks:
        context_parts = []
        for i, chunk in enumerate(context_chunks[:3]):
            chunk_text = chunk.text if hasattr(chunk, 'text') else str(chunk)
            # Topic label comes from the chunk's metadata when present.
            source_topic = chunk.metadata.get('topic', 'General Information') if hasattr(chunk, 'metadata') else 'General Information'
            context_parts.append(f"SOURCE {i+1} - {source_topic}:\n{chunk_text}")
        context_text = "\n\n".join(context_parts)

    urdu_prompt = f"""
# WELL BEING AGENT - BREAST CANCER SUPPORT
# CRITICAL: RESPOND ONLY IN URDU LANGUAGE WITH PERFECT SPELLING
# ABSOLUTELY NO HINDI, ARABIC, OR ENGLISH - PURE URDU ONLY

## YOUR ROLE IN URDU:
آپ بریسٹ کینسر کی سپیشلائزڈ ویل بینگ ایجنٹ ہیں۔ آپ مریضوں کو نہ صرف طبی معلومات بلکہ قدرتی طور پر جذباتی مدد اور ہمت بھی فراہم کرتی ہیں۔

## AVAILABLE CONTEXT:
{context_text if context_text else "General breast cancer knowledge"}

## USER'S QUESTION (IN URDU):
"{user_query}"

## CRITICAL SPELLING RULES - MUST FOLLOW:
1. ✅ "مجھے" ❌ "مجہے"
2. ✅ "پروگرام" ❌ "پروگرہوں"
3. ✅ "کینسر" ❌ "کہےنسر"
4. ✅ "ڈاکٹر" ❌ "ڈڈاکٹر"
5. ✅ "ہے" ❌ "ہےہ"
6. ✅ "میں" ❌ "مہےں"
7. ✅ "کے لیے" ❌ "کا ے لہےے"
8. ✅ "جس سے" ❌ "جسے سے"

## RESPONSE REQUIREMENTS - URDU:
1. **LANGUAGE ENFORCEMENT:** صرف اور صرف اردو میں جواب دیں
2. **SPELLING ACCURACY:** درست اردو ہجے استعمال کریں، عام غلطیوں سے پرہیز کریں
3. **EMOTIONAL INTEGRATION:** جذباتی مدد کو قدرتی انداز میں پیش کریں
4. **COMPASSIONATE TONE:** گرمجوش، ہمدردانہ، اور امید بخش انداز
5. **INFORMATION ACCURACY:** سیاق و سباق کے مطابق درست معلومات دیں
6. **COMPLETE SENTENCES:** مکمل جملے لکھیں، ادھورے جملے نہ چھوڑیں

## EXAMPLES OF CORRECT URDU:
- ✅ "بریسٹ کینسر کے بارے میں معلومات حاصل کرنا ایک اہم قدم ہے۔"
- ✅ "میں آپ کو درست معلومات فراہم کرنے کی کوشش کروں گی۔"
- ✅ "آپ کے سوال کا جواب دینے میں مجھے خوشی ہو رہی ہے۔"

## آپ کا درست ہجے، مکمل جملوں اور ہمدردانہ انداز میں جواب:
"""
    return urdu_prompt.strip()
850
+
851
    def build_enhanced_prompt(self, user_query: str, context_chunks: List[Any]) -> str:
        """Build prompt for English responses with emotional intelligence.

        Args:
            user_query: The patient's question (English).
            context_chunks: Retrieved index nodes; only the top 3 are inlined
                as numbered SOURCE sections.

        Returns:
            A stripped prompt embedding the context, the question, and an
            emotional-needs analysis produced by ``_detect_emotional_needs``.
        """
        context_text = ""
        if context_chunks:
            context_parts = []
            # Cap at 3 chunks to keep the prompt within the token budget.
            for i, chunk in enumerate(context_chunks[:3]):
                # Chunks may be llama_index nodes (.text/.metadata) or plain strings.
                chunk_text = chunk.text if hasattr(chunk, 'text') else str(chunk)
                source_topic = chunk.metadata.get('topic', 'General Information') if hasattr(chunk, 'metadata') else 'General Information'
                context_parts.append(f"SOURCE {i+1} - {source_topic}:\n{chunk_text}")
            context_text = "\n\n".join(context_parts)

        # Analyze emotional needs
        # (expected to return a dict with 'needs_emotional_support' and
        # 'needs_information' boolean flags — defined elsewhere in this class)
        needs_analysis = self._detect_emotional_needs(user_query, "english")

        prompt = f"""
# WELL BEING AGENT - BREAST CANCER SUPPORT

## YOUR ROLE
You are a compassionate Well Being Agent specializing in breast cancer support. You provide supportive information, emotional comfort, and evidence-based guidance.

## EMOTIONAL ANALYSIS:
- Patient Needs Emotional Support: {'YES' if needs_analysis['needs_emotional_support'] else 'NO'}
- Patient Needs Information: {'YES' if needs_analysis['needs_information'] else 'NO'}

## RESPONSE GUIDELINES
- **Tone**: Warm, supportive, compassionate, and hopeful
- **Emotional Integration**: Naturally incorporate emotional support without explicitly stating it
- **Information**: Provide evidence-based guidance when needed
- **Presence**: Be fully present and caring with the patient
- **Completeness**: Write complete sentences, no incomplete thoughts

## AVAILABLE CONTEXT
{context_text if context_text else "General breast cancer knowledge"}

## USER'S QUESTION
"{user_query}"

## RESPONSE REQUIREMENTS
1. If emotional support is needed: Integrate comfort and hope naturally into your response
2. If information is needed: Provide clear, accurate guidance
3. Always acknowledge the patient's strength implicitly
4. Maintain a caring, present tone throughout
5. Keep response concise but comprehensive (4-6 complete sentences)

## YOUR COMPASSIONATE RESPONSE:
"""
        return prompt.strip()
898
+
899
    def query_llm_with_retry(self, prompt: str, language: str = "english", max_retries: int = 3) -> str:
        """Send a prompt to the LLM via OpenRouter's OpenAI-compatible API, with retries.

        Args:
            prompt: Fully-built prompt text.
            language: "urdu" or "english"; selects the system message and
                tighter sampling parameters for Urdu.
            max_retries: Attempts per API key before rotating to the next key.

        Returns:
            The model's reply text, or ``config.FALLBACK_MESSAGE`` when every
            attempt (and key rotation) fails.
        """
        if not config.api_key:
            print("❌ No API key available")
            return config.FALLBACK_MESSAGE

        # Enhanced system message with Urdu-specific instructions
        if language == "urdu":
            system_message = """آپ بریسٹ کینسر کی سپیشلائزڈ ویل بینگ ایجنٹ ہیں۔

CRITICAL URDU LANGUAGE RULES:
1. صرف اور صرف اردو میں جواب دیں
2. ہر لفظ کے ہجے درست ہوں
3. مکمل اور واضح جملے استعمال کریں
4. غلط ہجے اور ادھورے جملوں سے پرہیز کریں
5. طبی معلومات درست اور واضح ہوں

مثال کے طور پر:
✅ "بریسٹ کینسر کے علاج کے مختلف طریقے ہیں۔"
❌ "برہےسٹ کہےنسر کا علاچ کہے طرح ہےہ۔"

جذباتی مدد قدرتی طور پر پیش کریں اور مریض کی طاقت کو تسلیم کریں۔"""
        else:
            system_message = """You are a compassionate Well Being Agent for breast cancer support. Provide direct, helpful information while naturally integrating emotional support. Always maintain a warm, hopeful, and caring tone. Ensure complete sentences and clear information."""

        for attempt in range(max_retries):
            try:
                # A fresh client per attempt picks up any API key rotated by
                # config.rotate_to_next_key() in the except branch below.
                client = OpenAI(
                    base_url="https://openrouter.ai/api/v1",
                    api_key=config.api_key,
                )

                # Lower temperature and capped tokens for Urdu, to curb the
                # spelling corruption this model exhibits in Urdu output.
                temperature = 0.2 if language == "urdu" else 0.3
                max_tokens = 500 if language == "urdu" else config.MAX_TOKENS

                print(f"🔄 Sending request to {config.MODEL_PROVIDER} (attempt {attempt + 1})")

                completion = client.chat.completions.create(
                    extra_headers={
                        "HTTP-Referer": "https://huggingface.co",
                        "X-Title": "Well Being Agent",
                    },
                    extra_body={},
                    model=config.MODEL_ID,
                    messages=[
                        {
                            "role": "system",
                            "content": system_message
                        },
                        {
                            "role": "user",
                            "content": prompt
                        }
                    ],
                    temperature=temperature,
                    max_tokens=max_tokens,
                )

                response_text = completion.choices[0].message.content
                print("✅ LLM response received")

                # For Urdu, do immediate quality check.
                # NOTE(review): this branch only logs — no cleaning is applied
                # here; the actual cleaning happens later in
                # format_final_response(). Confirm that is intended.
                if language == "urdu":
                    if self._is_urdu_response_corrupted(response_text):
                        print("⚠️ Urdu response appears corrupted, applying enhanced cleaning")

                return response_text

            except Exception as e:
                print(f"❌ Request failed: {e}")
                if "429" in str(e):
                    # Rate limited: exponential backoff, then retry same key.
                    wait_time = 2 ** attempt
                    print(f"⏳ Rate limited. Waiting {wait_time} seconds...")
                    time.sleep(wait_time)
                    continue
                elif "401" in str(e) or "402" in str(e):
                    # Auth/credit problem: rotate to the next configured key.
                    print(f"❌ API key issue")
                    if config.rotate_to_next_key():
                        continue
                    else:
                        return config.FALLBACK_MESSAGE

                if attempt == max_retries - 1:
                    # Last attempt failed: rotate key and recurse one full round.
                    if config.rotate_to_next_key():
                        return self.query_llm_with_retry(prompt, language, max_retries)
                    return config.FALLBACK_MESSAGE
                time.sleep(1)

        return config.FALLBACK_MESSAGE
990
+
991
+ def _is_urdu_response_corrupted(self, text: str) -> bool:
992
+ """Check if Urdu response has common corruption patterns"""
993
+ corruption_indicators = [
994
+ 'ہےہ', 'مہےں', 'کہے', 'پروگرہوں', 'ڈڈاکٹر', 'کا ے لہےے', 'جسے سے'
995
+ ]
996
+
997
+ for indicator in corruption_indicators:
998
+ if indicator in text:
999
+ return True
1000
+
1001
+ # Check for excessive character repetition
1002
+ import re
1003
+ if re.search(r'(.)\1\1', text): # Three repeated characters
1004
+ return True
1005
+
1006
+ return False
1007
+
1008
+ def _verify_language_compliance(self, text: str, expected_language: str) -> str:
1009
+ """Verify and correct language compliance"""
1010
+ if expected_language == "urdu":
1011
+ # Check for common incorrect language patterns
1012
+ hindi_pattern = re.compile(r'[\u0900-\u097F]+') # Hindi characters
1013
+ arabic_pattern = re.compile(r'[\uFE70-\uFEFF]+') # Arabic specific characters
1014
+
1015
+ if hindi_pattern.search(text):
1016
+ print("⚠️ Hindi detected in Urdu response, applying correction...")
1017
+ # Add Urdu language reminder
1018
+ return text + "\n\nبراہ کرم صرف اردو میں جواب دیں۔"
1019
+
1020
+ if arabic_pattern.search(text):
1021
+ print("⚠️ Arabic detected in Urdu response, applying correction...")
1022
+ # Add Urdu language reminder
1023
+ return text + "\n\nبراہ کرم صرف اردو میں جواب دیں۔"
1024
+
1025
+ return text
1026
+
1027
    def format_final_response(self, llm_answer: str, language: str = "english") -> str:
        """Clean an LLM answer and append a healthcare-team reminder.

        Args:
            llm_answer: Raw text returned by the LLM.
            language: 'urdu' triggers extra Urdu-specific cleaning.

        Returns:
            The cleaned answer, guaranteed to end with a language-appropriate
            reminder to consult the healthcare team. The containment check
            before appending makes this method idempotent.
        """
        cleaned_answer = llm_answer.strip()

        # Enhanced Urdu text cleaning (helper defined elsewhere in this class).
        if language == 'urdu':
            print("🧹 Applying advanced Urdu text cleaning...")
            cleaned_answer = self._clean_urdu_text(cleaned_answer)

        # Verify language compliance (may append an Urdu-only reminder).
        cleaned_answer = self._verify_language_compliance(cleaned_answer, language)

        if language == 'urdu':
            gentle_reminder = "\n\nاپنی صحت کی دیکھ بھال ٹیم سے اپنے خدشات پر بات کرنا یاد رکھیں۔"
        else:
            gentle_reminder = "\n\nRemember to discuss any concerns with your healthcare team."

        # Only append if not already present, so repeated calls don't duplicate it.
        if gentle_reminder not in cleaned_answer:
            cleaned_answer += gentle_reminder

        return cleaned_answer.strip()
1047
+
1048
    def get_enhanced_answer(self, user_query: str, language: str = None, response_type: str = "text") -> str:
        """Main entry point: answer a user query via cache or RAG + LLM.

        Pipeline: detect language → retrieve chunks → check response cache →
        build prompt (concise vs. detailed) → query LLM → clean/format →
        add emotional support → cache, log, and record in history.

        Args:
            user_query: The user's question, any supported language.
            language: Force 'urdu'/'english'; None auto-detects.
            response_type: Caller-supplied tag ('text'/'audio'); only logged.

        Returns:
            The final formatted answer string.
        """
        print(f"🔍 Processing query: '{user_query}' (Type: {response_type})")

        if language is None:
            language = self.detect_language(user_query)
            print(f"🌐 Detected language: {language}")

        # Special handling for problematic Urdu queries (patterns seen from
        # mis-transcribed speech input); currently only logs a warning.
        if language == "urdu":
            problematic_patterns = ['اوج ایک انسر', 'اصلاح ملکم', 'نعم']
            if any(pattern in user_query for pattern in problematic_patterns):
                print("⚠️ Detected problematic query pattern, applying enhanced Urdu handling")

        chunks = self.retrieve_relevant_chunks(user_query, language)

        # Cache key depends on both the query and the retrieved chunks.
        cache_key = response_cache.get_cache_key(user_query, chunks)
        cached_response = response_cache.get(cache_key)

        if cached_response:
            print("✅ Using cached response")
            final_answer = cached_response
        else:
            # Choose the detailed prompt only when the user explicitly asks
            # for more depth; otherwise use the concise prompt.
            query_lower = user_query.lower()
            wants_details = any(phrase in query_lower for phrase in [
                "give details", "more detail", "explain more", "tell me more",
                "elaborate", "in detail", "detailed", "comprehensive"
            ])

            if language == 'urdu':
                if wants_details:
                    prompt = self.build_urdu_prompt(user_query, chunks)
                else:
                    prompt = self.build_concise_prompt(user_query, chunks, language)
            else:
                if wants_details:
                    prompt = self.build_enhanced_prompt(user_query, chunks)
                else:
                    prompt = self.build_concise_prompt(user_query, chunks, language)

            llm_answer = self.query_llm_with_retry(prompt, language)

            # Enhanced cleaning and validation for Urdu: compare word counts
            # before/after cleaning to flag over-aggressive truncation.
            if language == 'urdu':
                original_length = len(llm_answer.strip().split())
                llm_answer = self.format_final_response(llm_answer, language)
                cleaned_length = len(llm_answer.strip().split())

                if cleaned_length < 5:  # Too short
                    print("⚠️ Urdu response too short, may be incomplete")
                elif cleaned_length < original_length * 0.7:  # Significant reduction
                    print("⚠️ Significant text reduction during cleaning")

            # NOTE(review): for Urdu this re-applies format_final_response to
            # an already-formatted answer; harmless because the method is
            # idempotent, but the double call looks unintentional.
            final_answer = self.format_final_response(llm_answer, language)

            # Always add emotional support naturally.
            final_answer = self._add_emotional_support(final_answer, user_query, language)

            response_cache.set(cache_key, final_answer)
            print("💾 Response cached for future use")

        # Log conversation to JSON (runs for both cached and fresh answers).
        conversation_logger.log_conversation(
            user_input=user_query,
            llm_response=final_answer,
            language=language,
            response_type=response_type
        )

        # In-memory history for this process lifetime.
        self.conversation_history.append({
            "query": user_query,
            "answer": final_answer,
            "language": language,
            "response_type": response_type,
            "timestamp": time.time()
        })

        return final_answer
1126
+
1127
# === Pre-load Index at Module Level ===
# Loading the vector index at import time means both the CLI below and any
# importer of this module share a single warmed-up index and RAG instance.
print("🚀 Starting Well Being Agent with optimized loading...")
_start_time = time.time()

print("🔄 Loading vector index...")
index, retriever = load_index_fast()

_load_time = time.time() - _start_time
print(f"✅ System ready in {_load_time:.2f} seconds")

# Create global RAG system instance (used by interactive_chat below).
rag_system = BreastCancerRAGSystem(index, retriever)
1139
+
1140
+ # === Interactive Chat Mode ===
1141
def interactive_chat():
    """Run a blocking terminal chat loop against the global RAG system.

    Commands: 'quit'/'exit'/'q' leave the loop, 'topics' lists topic hints,
    'cache' shows the cached-response count. EOF (closed stdin, e.g. in a
    non-interactive container) and Ctrl-C now exit cleanly instead of
    crashing with an uncaught EOFError/KeyboardInterrupt.
    """
    print("💬 Well Being Agent - Breast Cancer Support")
    print("=" * 50)
    print("Type 'quit' to exit, 'topics' to see available topics, 'cache' for cache stats")
    print("=" * 50)

    global rag_system

    while True:
        try:
            user_input = input("\n❓ Your question: ").strip()
        except (EOFError, KeyboardInterrupt):
            # stdin closed or user interrupted — leave the loop gracefully.
            print()
            break
        if user_input.lower() in ['quit', 'exit', 'q']:
            break
        elif user_input.lower() == 'topics':
            print("\n📚 Available topics: Fertility, Treatment, Symptoms, Diagnosis, etc.")
            continue
        elif user_input.lower() == 'cache':
            print(f"\n📊 Cache stats: {len(response_cache.cache)} cached responses")
            continue
        elif not user_input:
            continue

        print("🤔 Thinking...")
        start_time = time.time()
        answer = rag_system.get_enhanced_answer(user_input)
        response_time = time.time() - start_time
        print(f"\n💡 {answer}")
        print(f"⏱️ Response time: {response_time:.2f} seconds")
1168
+
1169
+ # === Main Function ===
1170
def main():
    """CLI entry point: print configuration, verify API keys, start chat.

    Exits early (with a hint for Hugging Face Spaces) when no API keys are
    configured, since the LLM backend would be unusable.
    """
    print("🏥 Well Being Agent - Breast Cancer Support System")
    print("=" * 50)
    print(f"📋 Current Configuration:")
    print(f" Model: {config.MODEL_ID}")
    print(f" Provider: {config.MODEL_PROVIDER}")
    print(f" Index: {config.INDEX_PATH}")
    print(f" Cache: {len(response_cache.cache)} responses loaded")
    print("=" * 50)

    if not config.api_keys:
        print("❌ API keys not configured.")
        if IS_HUGGING_FACE:
            print("💡 Add API keys in Space Settings → Repository secrets")
        return

    interactive_chat()


if __name__ == "__main__":
    main()
Dockerfile ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.12-slim

WORKDIR /app

# Install system dependencies including audio libraries.
# Fix: curl is required by the HEALTHCHECK below but is NOT included in
# python:*-slim images — without it the health check always fails and the
# container is reported unhealthy.
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    git \
    curl \
    ffmpeg \
    portaudio19-dev \
    && rm -rf /var/lib/apt/lists/* \
    && apt-get clean

# Copy requirements first for better layer caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application files
COPY . .

# Create necessary directories
RUN mkdir -p static/audio config cache

# Expose port (Hugging Face uses 7860)
EXPOSE 7860

# Health check
# NOTE(review): confirm the app actually serves a /health route.
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1

# Start command for Hugging Face
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
Index.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Index.py - Multilingual version (English + Urdu) for creating embeddings and vector index
2
+ import os
3
+ import json
4
+ import logging
5
+ from pathlib import Path
6
+
7
+ # === Centralized Configuration System ===
8
class Config:
    """Centralized configuration - loads from config.json.

    Attributes:
        settings: Merged dict of defaults and values from config/config.json.
        INDEX_PATH: Directory where the vector index is persisted.
        DATASET_PATH: Path to the breast-cancer JSON dataset (with fallback
            search over common locations when missing).
    """

    def __init__(self):
        # Load settings from config.json
        self.settings = self._load_config_file()

        # Apply settings
        self.INDEX_PATH = self.settings["index_path"]
        self.DATASET_PATH = "DataSet/breast_cancer.json"  # Fixed path
        self._validate_config()

    def _load_config_file(self):
        """Load configuration from config/config.json file.

        Creates the file with defaults when missing; on any error falls back
        to the in-code defaults. File values override defaults key-by-key.
        """
        config_file = os.path.join("config", "config.json")
        default_config = {
            "model_provider": "openrouter",
            "model_id": "meta-llama/llama-3.3-70b-instruct:free",
            "api_keys_folder": "config",
            "index_path": "cancer_index_store",
            "dataset_path": "DataSet/breast_cancer.json",
            "similarity_top_k": 5,
            "temperature": 0.2,
            "max_tokens": 350,
            "combine_sources": True,
            "fallback_message": "Sorry, I don't know the answer.",
            "strict_breast_cancer_only": True
        }

        try:
            if os.path.exists(config_file):
                with open(config_file, 'r', encoding='utf-8') as f:
                    loaded_config = json.load(f)
                # File values win over defaults for shared keys.
                merged_config = {**default_config, **loaded_config}
                logging.info("✅ Configuration loaded from config/config.json")
                return merged_config
            else:
                # First run: write the defaults so users have a file to edit.
                os.makedirs(os.path.dirname(config_file), exist_ok=True)
                with open(config_file, 'w', encoding='utf-8') as f:
                    json.dump(default_config, f, indent=4)
                logging.info("📁 Created default config/config.json file")
                return default_config
        except Exception as e:
            logging.error(f"❌ Error loading config/config.json: {e}")
            logging.info("🔄 Using default configuration")
            return default_config

    def _validate_config(self):
        """Validate configuration.

        When the configured dataset path is missing, probes a few common
        alternative locations and adopts the first one that exists.
        """
        if not os.path.exists(self.DATASET_PATH):
            logging.error(f"❌ Dataset file not found: {self.DATASET_PATH}")
            possible_locations = [
                "DataSet/breast_cancer.json",
                "breast_cancer.json",
                "data/breast_cancer.json",
                "../DataSet/breast_cancer.json"
            ]
            for location in possible_locations:
                if os.path.exists(location):
                    logging.info(f"💡 Found dataset at: {location}")
                    self.DATASET_PATH = location
                    return
        else:
            # NOTE(review): reconstructed indentation attaches this else to
            # the `if` (dataset found at the configured path) — confirm it
            # was not intended as a for/else.
            logging.info(f"✅ Dataset found: {self.DATASET_PATH}")
        logging.info(f"✅ Index will be stored at: {self.INDEX_PATH}")
73
+
74
# === Setup Logging ===
# Fix: logging must be configured BEFORE Config() is instantiated —
# Config.__init__ emits logging.info() calls, and with no handler configured
# the root logger's last-resort handler drops INFO-level messages, so the
# configuration messages were silently lost.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)

# Initialize configuration
config = Config()
83
+
84
def create_vector_index():
    """
    Creates embeddings and builds vector index from dataset.
    Supports both English and Urdu text.

    Steps: load JSON dataset → wrap entries as Documents → chunk into nodes
    (512 chars, 50 overlap) → embed with a multilingual sentence-transformer
    → persist under config.INDEX_PATH → smoke-test retrieval in both
    languages.

    Returns:
        True on success, False on any failure (missing dataset, import
        errors, embedding/persist failures).
    """
    try:
        # Heavy imports kept local so the module can be imported without
        # llama_index installed.
        from llama_index.core import VectorStoreIndex, Document, StorageContext
        from llama_index.embeddings.huggingface import HuggingFaceEmbedding
        from llama_index.core.node_parser import SimpleNodeParser

        print("🚀 Starting Multilingual Vector Index Creation...")
        print("=" * 60)

        # Check dataset
        if not os.path.exists(config.DATASET_PATH):
            print(f"❌ Dataset not found: {config.DATASET_PATH}")
            return False

        # Load dataset
        print(f"📖 Loading dataset from: {config.DATASET_PATH}")
        with open(config.DATASET_PATH, 'r', encoding='utf-8') as f:
            dataset = json.load(f)
        print(f"✅ Loaded dataset with {len(dataset)} entries")

        # Normalize dataset for multilingual consistency: accept entries
        # keyed by 'content', 'text', or 'answer'; everything else becomes
        # metadata on the Document.
        documents = []
        for item in dataset:
            if isinstance(item, dict):
                text = item.get('content') or item.get('text') or item.get('answer') or str(item)
                if not text or len(text.strip()) < 10:
                    continue  # skip empty / trivially short entries
                metadata = {k: v for k, v in item.items() if k not in ['content', 'text', 'answer']}
                documents.append(Document(text=text.strip(), metadata=metadata))
            else:
                documents.append(Document(text=str(item)))

        print(f"✅ Created {len(documents)} documents for embedding")

        # === Multilingual embedding model ===
        # Supports 50+ languages including Urdu + English
        multilingual_model = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
        print(f"🔧 Loading embedding model: {multilingual_model}")
        embed_model = HuggingFaceEmbedding(model_name=multilingual_model)

        # Create node parser (512-char chunks, 50-char overlap)
        node_parser = SimpleNodeParser.from_defaults(chunk_size=512, chunk_overlap=50)

        # Parse documents
        print("🔨 Parsing documents into nodes...")
        nodes = node_parser.get_nodes_from_documents(documents)
        print(f"✅ Created {len(nodes)} nodes")

        # Build index
        print("🏗️ Building multilingual vector index...")
        index = VectorStoreIndex(nodes=nodes, embed_model=embed_model, show_progress=True)

        # Persist to disk so Agent.py can load it without re-embedding.
        os.makedirs(config.INDEX_PATH, exist_ok=True)
        print(f"💾 Saving index to: {config.INDEX_PATH}")
        index.storage_context.persist(persist_dir=config.INDEX_PATH)

        print("✅ Multilingual vector index created successfully!")
        print(f"📁 Index location: {config.INDEX_PATH}")
        print(f"📊 Total nodes embedded: {len(nodes)}")

        # Test retrieval in both languages (smoke test only — results are
        # counted, not validated).
        retriever = index.as_retriever(similarity_top_k=2)
        print("🔍 Testing bilingual retrieval:")
        en_test = retriever.retrieve("What are the symptoms of breast cancer?")
        ur_test = retriever.retrieve("بریسٹ کینسر کی علامات کیا ہیں؟")
        print(f"✅ English test retrieved {len(en_test)} results")
        print(f"✅ Urdu test retrieved {len(ur_test)} results")

        print("\n🎉 Multilingual index ready for RAG pipeline!")
        return True

    except Exception as e:
        print(f"❌ Failed to create multilingual vector index: {e}")
        import traceback; traceback.print_exc()
        return False
164
+
165
def check_index_exists():
    """Report whether a persisted vector index already exists on disk."""
    found = os.path.exists(config.INDEX_PATH)
    if found:
        print(f"✅ Index already exists at: {config.INDEX_PATH}")
    else:
        print(f"❌ Index not found at: {config.INDEX_PATH}")
    return found
172
+
173
def main():
    """CLI entry point: (re)build the multilingual vector index.

    Prompts for confirmation before overwriting an existing index, then
    delegates to create_vector_index() and prints next steps on success.
    """
    print("🏥 Well Being Agent - Multilingual Index Creator")
    print("=" * 60)

    if check_index_exists():
        # Rebuilding is expensive (re-embeds the whole dataset) — confirm.
        response = input("Index already exists. Recreate? (y/n): ").strip().lower()
        if response != 'y':
            print("Operation cancelled.")
            return

    success = create_vector_index()
    if success:
        print("\n🎯 Next steps:")
        print("1️⃣ Run Agent.py for RAG operations")
        print("2️⃣ Run app.py for web interface")
    else:
        print("\n💥 Index creation failed!")


if __name__ == "__main__":
    main()
app.py ADDED
@@ -0,0 +1,449 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py - Enhanced with voice query support (Text responses only)
2
+ from fastapi import FastAPI, HTTPException, UploadFile, File
3
+ from fastapi.staticfiles import StaticFiles
4
+ from fastapi.responses import FileResponse, JSONResponse, HTMLResponse
5
+ from fastapi.middleware.cors import CORSMiddleware
6
+ from pydantic import BaseModel
7
+ from contextlib import asynccontextmanager
8
+ import sys
9
+ import os
10
+ import asyncio
11
+ import time
12
+ import uuid
13
+ from typing import Optional
14
+ import logging
15
+
16
+ # Set up logging
17
+ logging.basicConfig(level=logging.INFO)
18
+ logger = logging.getLogger(__name__)
19
+
20
+ # Add current directory to path to fix import issues
21
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
22
+
23
+ # Global RAG system instance
24
+ rag_system = None
25
+
26
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifespan context manager for startup/shutdown events.

    On startup: imports the Agent module, loads the vector index, builds the
    global RAG system, and runs a smoke-test query. All failures are logged
    but never raised, so the web server still starts (endpoints then return
    an "initializing" message while rag_system is None).
    """
    # Startup
    global rag_system
    try:
        logger.info("🚀 Starting Well Being Agent...")

        # Import here to avoid circular imports.
        # NOTE(review): Agent.py (as visible) defines load_index_fast();
        # confirm it also exports a load_index symbol, otherwise this
        # import fails and rag_system stays None.
        from Agent import load_index, BreastCancerRAGSystem

        logger.info("📋 Loading configuration and index...")

        # Add delay to ensure everything is loaded
        await asyncio.sleep(2)

        index, retriever = load_index()
        if index and retriever:
            rag_system = BreastCancerRAGSystem(index, retriever)
            logger.info("✅ RAG System initialized successfully")

            # Test the system with one end-to-end query; failure is non-fatal.
            try:
                test_answer = rag_system.get_enhanced_answer("Hello, are you working?")
                logger.info(f"✅ System test successful: {test_answer[:50]}...")
            except Exception as e:
                logger.warning(f"⚠️ System test failed: {e}")

        else:
            logger.error("❌ Failed to load index - system will not work properly")
            rag_system = None

    except Exception as e:
        logger.error(f"❌ Startup error: {e}")
        import traceback
        traceback.print_exc()

    yield

    # Shutdown
    logger.info("🛑 Shutting down Well Being Agent...")
67
+
68
# FastAPI application; startup/shutdown handled by the lifespan manager above.
app = FastAPI(
    title="Well Being Agent - Breast Cancer Support",
    description="AI-powered breast cancer support system providing evidence-based information and emotional support",
    version="1.0.0",
    lifespan=lifespan
)

# Add CORS middleware — wide open ("*") so the static frontend can call the
# API from any origin (e.g. when served from a different host/port).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
83
+
84
class QueryRequest(BaseModel):
    """Request body for POST /ask-query."""
    query: str  # the user's question
    language: str = "auto"  # "auto" = detect language from the query text
    response_type: str = "text"  # response modality tag passed to the RAG system
88
+
89
class QueryResponse(BaseModel):
    """Response body for POST /ask-query."""
    answer: str  # the generated answer text
    status: str  # "success" or "error"
    language: str = "english"  # language the answer is written in
93
+
94
class VoiceResponse(BaseModel):
    """Response body for POST /voice-query (text-only voice answers)."""
    text: str  # the generated answer text
    language: str = "english"  # language detected from the speech input
    status: str = "success"  # "success" or "error"
98
+
99
# Create directories if they don't exist (audio files are written here by
# the audio processor and served under /static).
os.makedirs("static/audio", exist_ok=True)
logger.info(f"📁 Created directory structure: static/audio")
logger.info(f"📁 Current working directory: {os.getcwd()}")

# Serve static files; a failure here is logged but non-fatal so the API
# endpoints still come up.
try:
    app.mount("/static", StaticFiles(directory="static"), name="static")
    logger.info("✅ Static files mounted successfully")
except Exception as e:
    logger.error(f"❌ Failed to mount static files: {e}")
110
+
111
+ # Frontend serving
112
@app.get("/")
async def serve_frontend():
    """Serve the main frontend page.

    Returns index.html when present; otherwise an inline diagnostic HTML
    page (so a missing frontend is visible instead of a bare 404), and a
    JSON 500 on any other error.
    """
    try:
        if not os.path.exists('index.html'):
            logger.error("❌ index.html not found!")
            # Diagnostic fallback page with cwd and static-dir status.
            fallback_html = """
            <!DOCTYPE html>
            <html>
            <head>
                <title>Well Being Agent - System Running</title>
                <style>
                    body { font-family: Arial, sans-serif; margin: 40px; background: #f5f5f5; }
                    .container { max-width: 800px; margin: 0 auto; background: white; padding: 30px; border-radius: 15px; box-shadow: 0 5px 15px rgba(0,0,0,0.1); }
                    .status { color: green; font-weight: bold; }
                    .error { color: red; }
                </style>
            </head>
            <body>
                <div class="container">
                    <h1>🚀 Well Being Agent - Backend Running</h1>
                    <p class="status">✅ Server is running successfully</p>
                    <p class="error">⚠️ index.html file not found</p>
                    <p>Current directory: """ + os.getcwd() + """</p>
                    <p>Static audio directory: """ + str(os.path.exists('static/audio')) + """</p>
                </div>
            </body>
            </html>
            """
            return HTMLResponse(content=fallback_html, status_code=200)

        return FileResponse('index.html')

    except Exception as e:
        logger.error(f"❌ Error serving frontend: {e}")
        return JSONResponse(
            {"error": "Frontend serving failed", "details": str(e)},
            status_code=500
        )
151
+
152
@app.get("/styles.css")
async def serve_css():
    """Serve the stylesheet.

    Returns the file with a proper CSS media type, a JSON 404 when missing,
    or a JSON 500 on unexpected errors. Fix: the caught exception was bound
    to ``e`` but silently discarded — it is now logged so serving failures
    are diagnosable.
    """
    try:
        if os.path.exists('styles.css'):
            return FileResponse('styles.css', media_type='text/css')
        else:
            return JSONResponse({"error": "CSS file not found"}, status_code=404)
    except Exception as e:
        logger.error(f"❌ Error serving CSS file: {e}")
        return JSONResponse({"error": "CSS serving failed"}, status_code=500)
162
+
163
@app.get("/script.js")
async def serve_js():
    """Serve the frontend JavaScript file.

    Returns the file with a JS media type, a JSON 404 when missing, or a
    JSON 500 on unexpected errors. Fix: the caught exception was bound to
    ``e`` but silently discarded — it is now logged so serving failures are
    diagnosable.
    """
    try:
        if os.path.exists('script.js'):
            return FileResponse('script.js', media_type='application/javascript')
        else:
            return JSONResponse({"error": "JavaScript file not found"}, status_code=404)
    except Exception as e:
        logger.error(f"❌ Error serving JavaScript file: {e}")
        return JSONResponse({"error": "JavaScript serving failed"}, status_code=500)
173
+
174
@app.post("/ask-query", response_model=QueryResponse)
async def ask_query(request: QueryRequest):
    """Main endpoint for processing queries.

    Validates input, resolves the language ("auto" → detection), delegates
    to the RAG system, and always returns HTTP 200 with an in-band status
    field — errors are reported as user-readable answer text, never raised.
    """
    try:
        # System may still be initializing (lifespan startup in progress).
        if not rag_system:
            return QueryResponse(
                answer="I'm currently initializing. Please wait a moment and try again.",
                status="error",
                language="english"
            )

        if not request.query or not request.query.strip():
            return QueryResponse(
                answer="Please enter a question about breast cancer support.",
                status="error",
                language="english"
            )

        # Determine language: auto-detect unless the client forced one.
        if request.language == "auto":
            detected_language = rag_system.detect_language(request.query)
        else:
            detected_language = request.language

        logger.info(f"🌐 Processing query in {detected_language}, Type: {request.response_type}")

        # Process the query with response type
        answer = rag_system.get_enhanced_answer(
            user_query=request.query,
            language=detected_language,
            response_type=request.response_type
        )

        return QueryResponse(
            answer=answer,
            status="success",
            language=detected_language
        )

    except Exception as e:
        logger.error(f"Error processing query: {e}")
        return QueryResponse(
            answer="I apologize, but I'm having trouble processing your request right now. Please try again in a moment.",
            status="error",
            language="english"
        )
220
+
221
@app.post("/voice-query", response_model=VoiceResponse)
async def process_voice_query(
    file: UploadFile = File(...),
    language: str = "auto"  # Auto-detect language from speech
):
    """Process voice query and return TEXT response only (English & Urdu).

    Pipeline: validate audio upload → speech-to-text (with language
    detection) → RAG answer with response_type="text". Client errors raise
    HTTPException; internal failures return HTTP 200 with status="error".
    """
    try:
        # Validate file type via the declared MIME type.
        if not file.content_type or not file.content_type.startswith('audio/'):
            raise HTTPException(status_code=400, detail="File must be an audio file")

        logger.info(f"🎤 Processing voice query - Language preference: {language}")

        # Import audio processor lazily so a missing optional dependency
        # degrades to an error response instead of breaking app import.
        try:
            from audio_processor import audio_processor
        except ImportError as e:
            logger.error(f"❌ Failed to import audio_processor: {e}")
            return VoiceResponse(
                text="Audio processing service is currently unavailable.",
                status="error",
                language="english"
            )

        # Convert speech to text with language detection.
        # (Expected to return a dict with 'text' and optional 'language'.)
        stt_result = await audio_processor.speech_to_text(file, language)

        if not stt_result or not stt_result.get('text'):
            raise HTTPException(status_code=400, detail="Could not transcribe audio")

        query_text = stt_result['text']
        detected_language = stt_result.get('language', 'english')

        logger.info(f"📝 Transcribed text ({detected_language}): {query_text}")

        # Process the query through RAG system (may still be initializing).
        if not rag_system:
            return VoiceResponse(
                text="System is initializing. Please try again in a moment.",
                status="error",
                language=detected_language
            )

        # ✅ Always use TEXT response type for voice queries
        answer = rag_system.get_enhanced_answer(
            user_query=query_text,
            language=detected_language,  # Use detected language
            response_type="text"  # Always text response
        )

        logger.info(f"✅ Voice query processed successfully - Response in {detected_language}")

        return VoiceResponse(
            text=answer,  # Always return text
            language=detected_language,
            status="success"
        )

    except HTTPException:
        # Re-raise intentional client errors untouched.
        raise
    except Exception as e:
        logger.error(f"Error processing voice query: {e}")
        return VoiceResponse(
            text="Sorry, I encountered an error processing your voice message.",
            status="error",
            language="english"
        )
288
+
289
# Audio serving endpoint (kept for any future use)
@app.get("/audio/{filename}")
async def serve_audio_direct(filename: str):
    """Serve a generated audio file from static/audio.

    Fixes over the previous version:
    - HTTPExceptions are re-raised instead of being swallowed by the broad
      handler (a 404 used to be rewrapped as a 500).
    - The untrusted filename is reduced to its base name so a request
      cannot traverse outside static/audio.
    - The garbled route/f-string placeholders were restored to {filename}.
    """
    try:
        # Strip any directory components from untrusted input.
        safe_name = os.path.basename(filename)
        audio_path = os.path.join("static", "audio", safe_name)
        logger.info(f"🔍 Direct audio request for: {safe_name}")

        if not os.path.exists(audio_path):
            logger.error(f"❌ Audio file not found: {audio_path}")
            raise HTTPException(status_code=404, detail=f"Audio file {safe_name} not found")

        # Determine content type from the extension; default to MP3.
        if safe_name.endswith('.wav'):
            media_type = "audio/wav"
        else:
            media_type = "audio/mpeg"

        logger.info(f"🔊 Serving audio file: {audio_path}")
        return FileResponse(audio_path, media_type=media_type, filename=safe_name)

    except HTTPException:
        # Preserve intentional HTTP errors (e.g. the 404 above).
        raise
    except Exception as e:
        logger.error(f"❌ Error serving audio file: {e}")
        raise HTTPException(status_code=500, detail="Error serving audio file")
315
+
316
+ @app.get("/debug-audio")
317
+ async def debug_audio():
318
+ """Debug endpoint to check audio file locations"""
319
+ import glob
320
+
321
+ audio_info = {
322
+ "current_directory": os.getcwd(),
323
+ "static_directory_exists": os.path.exists("static"),
324
+ "static_audio_exists": os.path.exists("static/audio"),
325
+ "audio_files_in_static_audio": [],
326
+ "static_files_mounted": True
327
+ }
328
+
329
+ # Check static/audio directory
330
+ if os.path.exists("static/audio"):
331
+ audio_files = glob.glob("static/audio/*.mp3") + glob.glob("static/audio/*.wav")
332
+ audio_info["audio_files_in_static_audio"] = [
333
+ {
334
+ "name": os.path.basename(f),
335
+ "size": os.path.getsize(f),
336
+ "path": f,
337
+ "absolute_path": os.path.abspath(f),
338
+ }
339
+ for f in audio_files
340
+ ]
341
+
342
+ return JSONResponse(audio_info)
343
+
344
+ @app.get("/predefined-questions")
345
+ async def get_predefined_questions(language: str = "english"):
346
+ """Get predefined questions for breast cancer patients"""
347
+ try:
348
+ if not rag_system:
349
+ return JSONResponse({
350
+ "questions": [],
351
+ "status": "system_initializing"
352
+ })
353
+
354
+ questions = rag_system.get_predefined_questions(language)
355
+ return JSONResponse({
356
+ "questions": questions,
357
+ "status": "success",
358
+ "language": language
359
+ })
360
+
361
+ except Exception as e:
362
+ logger.error(f"Error getting predefined questions: {e}")
363
+ return JSONResponse({
364
+ "questions": [],
365
+ "status": "error"
366
+ })
367
+ @app.get("/health")
368
+ async def health_check():
369
+ """Health check endpoint"""
370
+ health_status = {
371
+ "status": "healthy" if rag_system else "initializing",
372
+ "rag_system_loaded": rag_system is not None,
373
+ "service": "Well Being Agent - Breast Cancer Support",
374
+ "version": "1.0.0"
375
+ }
376
+
377
+ return JSONResponse(health_status)
378
+
379
+ @app.get("/info")
380
+ async def system_info():
381
+ """System information endpoint"""
382
+ info = {
383
+ "name": "Well Being Agent",
384
+ "description": "AI-powered breast cancer support system",
385
+ "version": "1.0.0",
386
+ "status": "ready" if rag_system else "initializing",
387
+ "features": [
388
+ "Breast cancer information",
389
+ "Treatment guidance",
390
+ "Fertility options",
391
+ "Recovery timelines",
392
+ "Emotional support",
393
+ "Multilingual support (English/Urdu)",
394
+ "Voice query support (Text responses)"
395
+ ]
396
+ }
397
+ return JSONResponse(info)
398
+
399
# Debug endpoint to check file existence
@app.get("/debug-files")
async def debug_files():
    """Check if required files exist"""
    # Probe each frontend/backend asset relative to the working directory.
    tracked = ['index.html', 'styles.css', 'script.js', 'Agent.py', 'audio_processor.py']
    report = {name: os.path.exists(name) for name in tracked}
    report['current_directory'] = os.getcwd()
    return JSONResponse(report)
412
+
413
async def cleanup_old_audio_files():
    """Background task: delete generated audio files older than one hour.

    Runs forever, sweeping static/audio once per hour. Fix: the per-file
    log line's filename placeholder was restored (it had been lost, so the
    log recorded literal text instead of the removed file's name).
    """
    while True:
        try:
            audio_dir = os.path.join("static", "audio")
            if os.path.exists(audio_dir):
                current_time = time.time()
                for filename in os.listdir(audio_dir):
                    file_path = os.path.join(audio_dir, filename)
                    if os.path.isfile(file_path):
                        # Delete files older than 1 hour (measured from creation time).
                        if current_time - os.path.getctime(file_path) > 3600:
                            os.remove(file_path)
                            logger.info(f"🧹 Cleaned up old audio file: {filename}")
        except Exception as e:
            logger.error(f"Error cleaning up audio files: {e}")

        # Sleep an hour between sweeps.
        await asyncio.sleep(3600)
431
+
432
# Start cleanup task when app starts
@app.on_event("startup")
async def start_cleanup_task():
    """Kick off the hourly audio-cleanup loop as a fire-and-forget task."""
    asyncio.create_task(cleanup_old_audio_files())
436
+
437
# Fallback route for SPA
@app.get("/{full_path:path}")
async def serve_frontend_fallback(full_path: str):
    """Fallback to serve index.html for SPA routing.

    Fix: only regular files that resolve INSIDE the application directory
    are served directly; anything else (including path-traversal requests
    such as "../…") falls back to index.html instead of exposing arbitrary
    files reachable from the working directory.
    """
    if full_path:
        base_dir = os.path.abspath(os.getcwd())
        candidate = os.path.abspath(os.path.join(base_dir, full_path))
        # The resolved path must stay inside the project root and be a file.
        if candidate.startswith(base_dir + os.sep) and os.path.isfile(candidate):
            return FileResponse(candidate)
    return FileResponse('index.html')
444
+
445
+ if __name__ == "__main__":
446
+ import uvicorn
447
+ port = int(os.environ.get("PORT", 8000))
448
+ logger.info(f"🌐 Starting Well Being Agent Server on port {port}...")
449
+ uvicorn.run(app, host="0.0.0.0", port=port, log_level="info")
audio_processor.py ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # audio_processor.py - FREE TTS and STT for English AND Urdu voice notes
2
+ import os
3
+ import tempfile
4
+ import logging
5
+ import time
6
+ from typing import Optional, Dict, Any
7
+ from fastapi import HTTPException, UploadFile
8
+ import uuid
9
+ import re
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
class AudioProcessor:
    """FREE Audio processing system for STT and TTS functionality (English + Urdu ONLY).

    STT strategy: try a local Whisper model first, then fall back to
    SpeechRecognition's Google Web API. Every transcription is passed
    through a strict language filter that only ever reports "english"
    or "urdu" (Hindi and Arabic are deliberately mapped to English).
    """

    def __init__(self):
        # The only languages this processor will ever report.
        self.supported_languages = ["english", "urdu"]
        logger.info("🎵 FREE Audio Processor initialized - Supporting English & Urdu ONLY")

    async def speech_to_text(self, audio_file: UploadFile, language: str = "auto") -> Dict[str, Any]:
        """
        Convert speech to text using FREE STT services for English AND Urdu ONLY.

        Args:
            audio_file: uploaded audio (the frontend records webm).
            language: "english", "urdu", or "auto" to let the backend decide.

        Returns:
            Dict with 'text', 'language' (clamped to english/urdu),
            'service', and 'confidence'.

        Raises:
            HTTPException: 400 when no STT backend produced a result,
                           500 on unexpected failures.
        """
        try:
            logger.info(f"🎤 Converting speech to text - Language: {language}")

            audio_content = await audio_file.read()

            # Preferred backend: local Whisper (handles both languages).
            stt_result = await self._try_whisper_stt(audio_content, language)
            if stt_result:
                # Clamp the detected language to the supported pair.
                detected_language = self._strict_detect_language_from_text(stt_result["text"])
                if detected_language not in ["english", "urdu"]:
                    logger.warning(f"⚠️ Detected non-supported language: {detected_language}, treating as English")
                    detected_language = "english"

                stt_result["language"] = detected_language
                return stt_result

            # Fallback: SpeechRecognition with Google Web API (mainly English).
            stt_result = await self._try_speech_recognition(audio_content)
            if stt_result:
                detected_language = self._strict_detect_language_from_text(stt_result["text"])
                if detected_language not in ["english", "urdu"]:
                    detected_language = "english"

                stt_result["language"] = detected_language
                return stt_result

            raise HTTPException(status_code=400, detail="No FREE STT service available")

        except HTTPException:
            # Bug fix: the broad handler below used to catch the 400 raised
            # just above and rewrap it as a 500; propagate HTTP errors as-is.
            raise
        except Exception as e:
            logger.error(f"❌ STT Error: {e}")
            raise HTTPException(status_code=500, detail=f"Speech recognition failed: {str(e)}")

    async def _try_whisper_stt(self, audio_content: bytes, language: str = "auto") -> Optional[Dict[str, Any]]:
        """Try the local Whisper model with strict language filtering.

        Returns None when Whisper is not installed or transcription fails,
        so the caller can fall through to the next backend.
        """
        try:
            import whisper

            # Whisper needs a real file on disk; mirror the upload to a temp file.
            with tempfile.NamedTemporaryFile(suffix=".webm", delete=False) as temp_audio:
                temp_audio.write(audio_content)
                temp_audio_path = temp_audio.name

            try:
                logger.info("🔊 Using local Whisper (English/Urdu)...")
                model = whisper.load_model("base")

                # Constrain Whisper only when the caller pinned a supported
                # language; "auto" lets Whisper guess (filtered afterwards).
                whisper_language = None
                if language == "urdu":
                    whisper_language = "urdu"
                elif language == "english":
                    whisper_language = "english"

                result = model.transcribe(temp_audio_path, language=whisper_language)

                # Apply strict language detection on the transcript itself.
                detected_language = self._strict_detect_language_from_text(result["text"])

                return {
                    "text": result["text"].strip(),
                    "language": detected_language,
                    "service": "local_whisper",
                    "confidence": 0.8
                }
            finally:
                # Always remove the temp file, even if transcription failed.
                if os.path.exists(temp_audio_path):
                    os.unlink(temp_audio_path)

        except ImportError:
            logger.warning("Whisper not available for local STT")
            return None
        except Exception as e:
            logger.warning(f"Local Whisper STT failed: {e}")
            return None

    async def _try_speech_recognition(self, audio_content: bytes) -> Optional[Dict[str, Any]]:
        """Try SpeechRecognition with Google Web API (mainly English).

        Returns None when the dependencies are missing or recognition fails.
        """
        try:
            import speech_recognition as sr
            from pydub import AudioSegment
            import io

            # Convert webm to wav in memory for SpeechRecognition.
            audio = AudioSegment.from_file(io.BytesIO(audio_content), format="webm")
            wav_data = io.BytesIO()
            audio.export(wav_data, format="wav")
            wav_data.seek(0)

            recognizer = sr.Recognizer()

            with sr.AudioFile(wav_data) as source:
                audio_data = recognizer.record(source)
                text = recognizer.recognize_google(audio_data)

            # Apply strict language detection on the transcript.
            detected_language = self._strict_detect_language_from_text(text)

            return {
                "text": text,
                "language": detected_language,
                "service": "google_web_api",
                "confidence": 0.7
            }

        except ImportError:
            logger.warning("SpeechRecognition not available")
            return None
        except Exception as e:
            logger.warning(f"SpeechRecognition failed: {e}")
            return None

    def _strict_detect_language_from_text(self, text: str) -> str:
        """
        Strict language detection that only identifies Urdu or English.
        Specifically excludes Hindi, Arabic, and other languages.

        Scoring: Urdu-only code points, Urdu function words, and common Urdu
        postpositions each contribute to a weighted confidence score;
        Devanagari or Arabic presentation forms short-circuit to English.
        """
        try:
            text = text.strip()
            if not text:
                return "english"  # Default to English for empty text

            # === STRICT URDU DETECTION ===
            # Urdu-specific code points (excluding Arabic/Hindi overlaps).
            # Collapsed from fourteen redundant single-character "ranges"
            # into one character class — the match set is unchanged.
            urdu_specific_class = (
                '[\u0679\u067E\u0686\u0688\u0691\u0698\u06A9\u06AF'
                '\u06BA\u06BE\u06C1\u06C2\u06CC\u06D2]'
            )
            urdu_char_count = len(re.findall(urdu_specific_class, text))

            # Common Urdu words that are distinct from Hindi/Arabic.
            urdu_specific_words = [
                'ہے', 'ہیں', 'ہوں', 'کیا', 'کے', 'کو', 'سے', 'پر', 'میں',
                'اور', 'لیکن', 'اگر', 'تو', 'بھی', 'ہی', 'تھا', 'تھی',
                'تھے', 'ہو', 'رہا', 'رہی', 'رہے', 'دیں', 'دی', 'دو', 'دیجیے',
                'برائے', 'کےلیے', 'کےساتھ', 'کےبعد', 'کےپاس', 'کےنیچے'
            ]
            urdu_word_count = sum(1 for word in urdu_specific_words if word in text)

            # Common Urdu sentence structures (postpositions/particles).
            urdu_indicators = [
                ' کا ', ' کی ', ' کے ', ' کو ', ' سے ', ' پر ', ' میں ', ' نے ',
                ' ہی ', ' بھی ', ' تو ', ' اگر ', ' لیکن ', ' اور ', ' یا '
            ]
            urdu_structure_count = sum(1 for indicator in urdu_indicators if indicator in text)

            # === HINDI EXCLUSION ===
            hindi_char_count = len(re.findall(r'[\u0900-\u097F]', text))  # Devanagari range
            hindi_specific_words = ['है', 'हो', 'की', 'के', 'को', 'से', 'में', 'ना', 'नी', 'ने']
            hindi_word_count = sum(1 for word in hindi_specific_words if word in text)

            # === ARABIC EXCLUSION ===
            # Arabic presentation forms (excluding common Urdu-Arabic overlaps).
            arabic_char_count = len(re.findall(r'[\uFE70-\uFEFF]', text))

            # === ENGLISH DETECTION ===
            english_words = [
                'the', 'and', 'you', 'that', 'was', 'for', 'are', 'with', 'his', 'they',
                'this', 'have', 'from', 'one', 'had', 'word', 'but', 'not', 'what', 'all',
                'were', 'when', 'your', 'can', 'said', 'there', 'each', 'which', 'she', 'do',
                'how', 'their', 'will', 'other', 'about', 'out', 'many', 'then', 'them', 'these'
            ]
            text_lower = text.lower()
            english_score = sum(1 for word in english_words if word in text_lower)

            # === LANGUAGE DECISION LOGIC ===

            # First, exclude Hindi and Arabic.
            if hindi_char_count > 2 or hindi_word_count > 1:
                logger.info("🔍 Hindi detected, treating as English")
                return "english"

            if arabic_char_count > 2:
                logger.info("🔍 Arabic detected, treating as English")
                return "english"

            # Then detect Urdu with a weighted confidence score.
            urdu_confidence_score = (
                urdu_char_count * 2 +
                urdu_word_count * 3 +
                urdu_structure_count * 1.5
            )

            # Strong Urdu detection threshold.
            if urdu_confidence_score >= 5:
                logger.info(f"🔍 Urdu detected (confidence: {urdu_confidence_score})")
                return "urdu"

            # English detection.
            if english_score >= 3 or len(text.split()) >= 4:
                logger.info(f"🔍 English detected (score: {english_score})")
                return "english"

            # Some Urdu indicators, but below the confident threshold.
            if urdu_confidence_score >= 2:
                logger.info(f"🔍 Weak Urdu signals, treating as Urdu (confidence: {urdu_confidence_score})")
                return "urdu"

            # Default to English.
            logger.info("🔍 Defaulting to English")
            return "english"

        except Exception as e:
            logger.error(f"❌ Language detection error: {e}")
            return "english"  # Safe default

    def _detect_language_from_text(self, text: str) -> str:
        """Legacy method for backward compatibility"""
        return self._strict_detect_language_from_text(text)

    async def text_to_speech(self, text: str, language: str = "english") -> Optional[Dict[str, Any]]:
        """
        Convert text to speech using FREE TTS services.
        NOTE: Kept for potential future use, but currently disabled for
        responses — always returns None.
        """
        try:
            logger.info(f"🔊 TTS requested for {language}: {text[:50]}...")
            return None  # Disable TTS for now

        except Exception as e:
            logger.error(f"❌ TTS Error: {e}")
            return None

    async def cleanup_old_audio_files(self, max_age_hours: int = 1):
        """Clean up generated TTS audio files older than `max_age_hours`.

        Fix: the per-file log line's filename placeholder was restored (it
        had been lost, logging literal text instead of the removed file).
        """
        try:
            audio_dir = os.path.join("static", "audio")
            if not os.path.exists(audio_dir):
                return

            current_time = time.time()
            deleted_count = 0

            for filename in os.listdir(audio_dir):
                # Only touch files this module generated (tts_*.mp3/.wav).
                if filename.startswith("tts_") and (filename.endswith(".mp3") or filename.endswith(".wav")):
                    file_path = os.path.join(audio_dir, filename)
                    if os.path.isfile(file_path):
                        # Age is measured from creation time, in hours.
                        file_age_hours = (current_time - os.path.getctime(file_path)) / 3600
                        if file_age_hours > max_age_hours:
                            os.remove(file_path)
                            deleted_count += 1
                            logger.info(f"🧹 Cleaned up old audio file: {filename}")

            if deleted_count > 0:
                logger.info(f"🧹 Cleaned up {deleted_count} old audio file(s)")

        except Exception as e:
            logger.error(f"Error cleaning up audio files: {e}")
299
+
300
+ # Global audio processor instance
301
+ audio_processor = AudioProcessor()
config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_provider": "openrouter",
3
+ "model_id": "deepseek/deepseek-r1:free",
4
+ "api_keys_folder": "config",
5
+ "index_path": "cancer_index_store",
6
+ "dataset_path": "DataSet/breast_cancer.json",
7
+ "similarity_top_k": 5,
8
+ "temperature": 0.7,
9
+ "max_tokens": 1024,
10
+ "combine_sources": true,
11
+ "fallback_message": "Sorry, I don't know the answer.",
12
+ "strict_breast_cancer_only": true
13
+ }
conversations.json ADDED
The diff for this file is too large to render. See raw diff
 
index.html ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Well Being Agent - Breast Cancer Support</title>
7
+
8
+ <!-- Optimized Urdu Fonts -->
9
+ <link rel="preconnect" href="https://fonts.googleapis.com">
10
+ <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
11
+ <link href="https://fonts.googleapis.com/css2?family=Noto+Nastaliq+Urdu:wght@400;500;600;700&family=Noto+Naskh+Arabic:wght@400;500;600;700&family=Scheherazade+New:wght@400;500;600;700&family=Lateef:wght@400;500;600;700&family=Amiri:wght@400;500;600;700&family=Poppins:wght@300;400;500;600;700&family=Playfair+Display:wght@400;500;600&display=swap" rel="stylesheet">
12
+
13
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
14
+ <link rel="stylesheet" href="styles.css">
15
+ </head>
16
+ <body>
17
+ <!-- Animated Background Elements -->
18
+ <div class="floating-elements">
19
+ <div class="floating-element el-1"></div>
20
+ <div class="floating-element el-2"></div>
21
+ <div class="floating-element el-3"></div>
22
+ <div class="floating-element el-4"></div>
23
+ </div>
24
+
25
+ <div class="container">
26
+ <!-- Header Section -->
27
+ <header class="header">
28
+ <div class="header-content">
29
+ <div class="logo-container">
30
+ <div class="logo">
31
+ <i class="fas fa-heartbeat"></i>
32
+ </div>
33
+ <h1 class="title">Well Being Agent</h1>
34
+ </div>
35
+ <p class="subtitle">Breast Cancer Support System</p>
36
+ <p class="compassion-message">Providing compassionate, evidence-based support for breast cancer patients and their families</p>
37
+ </div>
38
+ </header>
39
+
40
+ <!-- Main Content -->
41
+ <main class="main-content">
42
+ <!-- Quick Questions Section -->
43
+ <aside class="quick-questions">
44
+ <div class="section-header">
45
+ <h2 class="section-title">Quick Questions</h2>
46
+ <div class="decoration-line"></div>
47
+ </div>
48
+
49
+ <!-- Tab Navigation -->
50
+ <div class="tabs-container">
51
+ <div class="tabs">
52
+ <button class="tab-btn active" data-tab="english">
53
+ <i class="fas fa-language"></i> English Questions
54
+ </button>
55
+ <button class="tab-btn" data-tab="urdu">
56
+ <i class="fas fa-language"></i> اردو سوالات
57
+ </button>
58
+ </div>
59
+
60
+ <!-- English Questions Tab -->
61
+ <div class="tab-content active" id="english-tab">
62
+ <div class="questions-grid" id="english-questions">
63
+ <!-- Recent English questions will appear here dynamically -->
64
+ <div class="empty-state">
65
+ <i class="fas fa-comments"></i>
66
+ <p>No recent English questions yet. Start chatting to see them here!</p>
67
+ </div>
68
+ </div>
69
+ </div>
70
+
71
+ <!-- Urdu Questions Tab -->
72
+ <div class="tab-content" id="urdu-tab">
73
+ <div class="questions-grid urdu-questions" id="urdu-questions">
74
+ <!-- Recent Urdu questions will appear here dynamically -->
75
+ <div class="empty-state">
76
+ <i class="fas fa-comments"></i>
77
+ <p>ابھی تک کوئی اردو سوالات نہیں ہیں۔ بات چیت شروع کریں!</p>
78
+ </div>
79
+ </div>
80
+ </div>
81
+ </div>
82
+ </aside>
83
+
84
+ <!-- Chat Section -->
85
+ <section class="chat-section">
86
+ <div class="chat-container">
87
+ <!-- Chat Header -->
88
+ <div class="chat-header">
89
+ <div class="chat-profile">
90
+ <div class="profile-avatar">
91
+ <i class="fas fa-robot"></i>
92
+ <div class="online-indicator"></div>
93
+ </div>
94
+ <div class="profile-info">
95
+ <h3>Well Being Assistant</h3>
96
+ <p>Always here to support you</p>
97
+ </div>
98
+ </div>
99
+ <div class="chat-stats">
100
+ <div class="stat">
101
+ <span class="stat-number" id="messageCount">0</span>
102
+ <span class="stat-label">Messages</span>
103
+ </div>
104
+ <div class="stat">
105
+ <span class="stat-number" id="activeLanguage">English</span>
106
+ <span class="stat-label">Language</span>
107
+ </div>
108
+ </div>
109
+ </div>
110
+
111
+ <!-- Chat Messages -->
112
+ <div class="chat-messages" id="chatMessages">
113
+ <div class="message system-message">
114
+ <div class="message-avatar">
115
+ <i class="fas fa-robot"></i>
116
+ </div>
117
+ <div class="message-content">
118
+ <p>Hello! I'm your Well Being Agent. I'm here to provide supportive, evidence-based information about breast cancer. How can I help you today?</p>
119
+ <span class="message-time" id="welcomeTime"></span>
120
+ </div>
121
+ </div>
122
+ </div>
123
+
124
+ <!-- Typing Indicator -->
125
+ <div class="typing-indicator" id="typingIndicator">
126
+ <div class="typing-dots">
127
+ <span></span>
128
+ <span></span>
129
+ <span></span>
130
+ </div>
131
+ <p>Thinking of the best way to help you...</p>
132
+ </div>
133
+
134
+ <!-- Chat Input -->
135
+ <div class="chat-input-container">
136
+ <div class="input-wrapper">
137
+ <input type="text" id="userInput" placeholder="Ask about breast cancer support, treatment options, or recovery...">
138
+
139
+ <!-- 🎤 New voice recording button -->
140
+ <button id="voiceButton" class="voice-button">
141
+ <i class="fas fa-microphone"></i>
142
+ </button>
143
+ <!-- ✉️ Existing send button -->
144
+ <button id="sendButton" class="send-button">
145
+ <i class="fas fa-paper-plane"></i>
146
+ <span class="button-text">Send</span>
147
+ </button>
148
+ </div>
149
+ <div class="input-hint">
150
+ <i class="fas fa-lightbulb"></i>
151
+ <span>Current language: <strong id="currentLanguageDisplay">English</strong> - You can ask about symptoms, treatments, recovery, or emotional support</span>
152
+ </div>
153
+ </div>
154
+ </div>
155
+ </section>
156
+ </main>
157
+ </div>
158
+
159
+ <script src="script.js"></script>
160
+ </body>
161
+ </html>
language_utils.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # language_utils.py
2
+ import re
3
+ from langdetect import detect, LangDetectError
4
+
5
def detect_query_language(text: str) -> str:
    """
    Detect if text is English or Urdu.

    Returns: 'english' or 'urdu'

    NOTE(review): the module-level import `from langdetect import
    LangDetectError` is broken — langdetect exposes `LangDetectException`,
    not `LangDetectError`, so that name raises ImportError. This
    implementation no longer references the broken name and treats any
    detection failure (including empty/undetectable input) as English.
    """
    try:
        # Arabic-script code points (which cover Urdu) are a reliable
        # signal on their own — check them before statistical detection.
        if re.search(r'[\u0600-\u06FF\u0750-\u077F\u08A0-\u08FF]', text):
            return 'urdu'

        # Latin-script (or mixed) input: fall back to langdetect.
        return 'urdu' if detect(text) == 'ur' else 'english'

    except Exception:
        # langdetect raises on empty/ambiguous input; default to English.
        return 'english'
24
+
25
def is_urdu_text(text: str) -> bool:
    """Check if text contains Urdu characters"""
    # Arabic block plus the Supplement / Extended-A ranges used by Urdu script.
    return re.search(r'[\u0600-\u06FF\u0750-\u077F\u08A0-\u08FF]', text) is not None
rag_system.log ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-11-10 22:28:46,853 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
2
+ 2025-11-10 22:30:05,985 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
3
+ 2025-11-10 22:49:31,018 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
4
+ 2025-11-10 23:05:34,645 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
5
+ 2025-11-11 17:57:12,543 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
6
+ 2025-11-11 19:35:15,603 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
7
+ 2025-11-11 19:35:28,651 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
8
+ 2025-11-11 21:33:02,584 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
9
+ 2025-11-11 23:10:45,825 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
10
+ 2025-11-11 23:33:40,189 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
11
+ 2025-11-12 00:13:26,990 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
12
+ 2025-11-12 18:01:31,774 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
13
+ 2025-11-12 18:43:39,916 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
14
+ 2025-11-12 19:08:33,936 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
15
+ 2025-11-12 19:24:28,082 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
16
+ 2025-11-12 20:50:23,292 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
17
+ 2025-11-12 20:58:54,128 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
18
+ 2025-11-12 22:19:25,719 - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-MiniLM-L6-v2
requirements.txt ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ python-dotenv
4
+ requests
5
+ pydantic
6
+ langdetect
7
+ torch
8
+ transformers
9
+ sentence-transformers
10
+ llama-index-core
11
+ llama-index-embeddings-huggingface
12
+ gtts
13
+ pyttsx3
14
+ SpeechRecognition
15
+ pydub
16
+ openai-whisper
17
+ python-multipart
18
+ python-dateutil
19
+ huggingface_hub
20
+ openai
21
+ numpy
22
+ pillow
script.js ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // script.js - Voice queries now return text responses only
2
+ // DOM Elements
3
+ const chatMessages = document.getElementById('chatMessages');
4
+ const userInput = document.getElementById('userInput');
5
+ const sendButton = document.getElementById('sendButton');
6
+ const voiceButton = document.getElementById('voiceButton');
7
+ const typingIndicator = document.getElementById('typingIndicator');
8
+ const messageCount = document.getElementById('messageCount');
9
+ const activeLanguage = document.getElementById('activeLanguage');
10
+ const currentLanguageDisplay = document.getElementById('currentLanguageDisplay');
11
+ const welcomeTime = document.getElementById('welcomeTime');
12
+ const englishQuestions = document.getElementById('english-questions');
13
+ const urduQuestions = document.getElementById('urdu-questions');
14
+
15
+ // State Management
16
+ let currentLanguage = 'english';
17
+ let messageCounter = 0;
18
+ let mediaRecorder;
19
+ let audioChunks = [];
20
+ let isRecording = false;
21
+
22
+ // Initialize
23
// Bootstrap the app once the DOM is ready.
document.addEventListener('DOMContentLoaded', () => {
    initializeApp();
    setupEventListeners();
    setWelcomeTime();
    initializeWelcomeMessage();
});
29
+
30
async function initializeApp() {
    // Sync the UI chrome with the current language before fetching data.
    updateInputPlaceholder();
    updateLanguageDisplay();

    // Predefined questions come from the backend; render once loaded.
    await loadPredefinedQuestions();
    updateQuestionsDisplay();
}
36
+
37
function setupEventListeners() {
    // Send message on button click.
    sendButton.addEventListener('click', sendMessage);

    // Send message on Enter key.
    userInput.addEventListener('keypress', (e) => {
        if (e.key === 'Enter') sendMessage();
    });

    // Tab switching: each tab button carries its target panel in data-tab.
    document.querySelectorAll('.tab-btn').forEach((btn) => {
        btn.addEventListener('click', () => switchTab(btn.dataset.tab));
    });

    // Voice button toggles the recording state.
    if (voiceButton) {
        voiceButton.addEventListener('click', async () => {
            isRecording ? stopRecording() : startRecording();
        });
    }

    userInput.focus();
}
69
+
70
function initializeWelcomeMessage() {
    // Make sure the pre-rendered welcome bubble is fully visible.
    const welcome = document.querySelector('.system-message');
    if (!welcome) return;
    welcome.style.opacity = '1';
    welcome.style.transform = 'translateY(0) scale(1)';
}
78
+
79
function setWelcomeTime() {
    // Stamp the welcome bubble with the current local HH:MM.
    const options = { hour: '2-digit', minute: '2-digit' };
    welcomeTime.textContent = new Date().toLocaleTimeString([], options);
}
86
+
87
/**
 * Activate the tab button and panel for `tabId` ('english' | 'urdu')
 * and keep the chat language in sync with the selection.
 */
function switchTab(tabId) {
    // Highlight only the chosen tab button.
    for (const btn of document.querySelectorAll('.tab-btn')) {
        btn.classList.toggle('active', btn.getAttribute('data-tab') === tabId);
    }

    // Show only the matching content panel.
    for (const panel of document.querySelectorAll('.tab-content')) {
        panel.classList.toggle('active', panel.id === `${tabId}-tab`);
    }

    // Any non-'urdu' tab maps to English.
    const newLanguage = tabId === 'urdu' ? 'urdu' : 'english';
    if (currentLanguage !== newLanguage) {
        currentLanguage = newLanguage;
        updateLanguageDisplay();
        updateInputPlaceholder();
    }
}
101
+
102
/** Reflect the current language in the two header language labels. */
function updateLanguageDisplay() {
    const urduActive = currentLanguage === 'urdu';
    activeLanguage.textContent = urduActive ? 'اردو' : 'English';
    currentLanguageDisplay.textContent = urduActive ? 'Urdu' : 'English';
}
106
+
107
/** Switch the input's placeholder text and text direction to match the language. */
function updateInputPlaceholder() {
    const urduActive = currentLanguage === 'urdu';
    userInput.placeholder = urduActive
        ? 'بریسٹ کینسر کے بارے میں پوچھیں...'
        : 'Ask about breast cancer support, treatment options, or recovery...';
    userInput.style.direction = urduActive ? 'rtl' : 'ltr';
    userInput.style.textAlign = urduActive ? 'right' : 'left';
}
118
+
119
/**
 * Fetch the predefined starter questions for both languages and cache
 * them on `window` for updateQuestionsDisplay().
 *
 * Fixes over the original:
 *  - the two requests now run in parallel (Promise.all) instead of serially;
 *  - a failure for one language no longer prevents the other from loading
 *    (the original wrapped both fetches in a single try/catch);
 *  - non-2xx responses are reported instead of being JSON-parsed blindly.
 */
async function loadPredefinedQuestions() {
    // Fetch one language's questions; errors are logged, never thrown,
    // so a partial failure degrades gracefully.
    const loadLanguage = async (language, targetKey) => {
        try {
            const response = await fetch(`/predefined-questions?language=${language}`);
            if (!response.ok) {
                throw new Error(`HTTP ${response.status}`);
            }
            const data = await response.json();
            if (data.status === 'success') {
                window[targetKey] = data.questions;
            }
        } catch (error) {
            console.error('Error loading predefined questions:', error);
        }
    };

    await Promise.all([
        loadLanguage('english', 'predefinedEnglishQuestions'),
        loadLanguage('urdu', 'predefinedUrduQuestions')
    ]);
}
136
+
137
/** Re-render both question panels from the cached predefined questions. */
function updateQuestionsDisplay() {
    const panels = [[englishQuestions, 'english'], [urduQuestions, 'urdu']];
    for (const [container, language] of panels) {
        updateQuestionList(container, language);
    }
}
141
+
142
/**
 * Render the cached predefined questions for one language into `container`,
 * or an empty-state placeholder when none are available.
 */
function updateQuestionList(container, language) {
    container.innerHTML = '';
    const questions = language === 'urdu'
        ? window.predefinedUrduQuestions
        : window.predefinedEnglishQuestions;

    // No data yet (fetch pending or failed): show a friendly placeholder.
    if (!questions || questions.length === 0) {
        const emptyState = document.createElement('div');
        emptyState.className = 'empty-state';
        emptyState.innerHTML = `
            <i class="fas fa-comments"></i>
            <p>${language === 'urdu' ? 'ابھی تک کوئی اردو سوالات نہیں ہیں۔' : 'No questions available yet.'}</p>
        `;
        container.appendChild(emptyState);
        return;
    }

    for (const questionData of questions) {
        container.appendChild(createQuestionCard(questionData, language));
    }
}
163
+
164
/**
 * Build one clickable question card; clicking it submits the predefined
 * question as if the user had typed it.
 * NOTE(review): questionData.question is inserted as HTML — assumed to be
 * trusted content served by our own backend; confirm if that ever changes.
 */
function createQuestionCard(questionData, language) {
    const card = document.createElement('button');
    card.className = `question-card ${language === 'urdu' ? 'urdu-text' : ''} predefined-card`;
    card.setAttribute('data-question', questionData.question);

    const icon = questionData.icon || 'fas fa-question-circle';
    const category = questionData.category || 'general';
    card.innerHTML = `
        <div class="card-icon ${category}">
            <i class="${icon}"></i>
        </div>
        <div class="card-content">
            <h3>${questionData.question}</h3>
        </div>
        <div class="card-arrow">
            <i class="fas fa-chevron-right"></i>
        </div>
    `;

    card.addEventListener('click', () => {
        userInput.value = questionData.question;
        sendMessage();
    });

    return card;
}
186
+
187
/**
 * Submit the typed message to /ask-query and append the reply to the chat.
 * Input controls are disabled for the duration of the request and always
 * re-enabled afterwards (finally).
 */
async function sendMessage() {
    const message = userInput.value.trim();
    if (!message) return;

    // Echo the user's message immediately and lock the input.
    addMessageToChat(message, 'user', currentLanguage);
    userInput.value = '';
    userInput.disabled = true;
    sendButton.disabled = true;
    showTypingIndicator();

    try {
        const payload = { query: message, language: currentLanguage, response_type: 'text' };
        const response = await fetch('/ask-query', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(payload)
        });
        const data = await response.json();
        hideTypingIndicator();

        if (data.status === 'success') {
            addMessageToChat(data.answer, 'system', data.language);
            updateMessageCount();
        } else {
            addMessageToChat("I'm having trouble processing your request. Please try again.", 'system', 'english');
        }
    } catch (error) {
        // Network failure or non-JSON response.
        console.error('Error:', error);
        hideTypingIndicator();
        addMessageToChat("Connection issue. Please try again.", 'system', 'english');
    } finally {
        userInput.disabled = false;
        sendButton.disabled = false;
        userInput.focus();
    }
}
222
+
223
/**
 * Post-process model-generated Urdu text: repair common character-level
 * garbling, fix a set of frequently misspelled words, and patch dropped
 * grammar particles, then normalize whitespace and punctuation.
 *
 * The replacement table is applied in insertion order — some later keys
 * (e.g. 'ہےں') only match correctly after earlier entries have run, so
 * do not reorder the entries.
 *
 * @param {string} text - raw Urdu text from the backend
 * @returns {string} cleaned text, trimmed
 */
function cleanUrduText(text) {
    const urduFixes = {
        // Character fixes — duplicated or swapped letters produced by the model
        'ہےہ': 'ہے',
        'مہےں': 'میں',
        'ہےں': 'ہیں',
        'ھے': 'ہے',
        'ھوں': 'ہوں',
        'ھیں': 'ہیں',
        'ےے': 'ے',
        'ںں': 'ں',
        'ہہ': 'ہ',
        'یی': 'ی',

        // Word fixes — specific misspellings observed in responses
        'مجہے': 'مجھے',
        'پروگرہوں': 'پروگرام',
        'کہےنسر': 'کینسر',
        'ڈڈاکٹر': 'ڈاکٹر',
        'کا ے لہےے': 'کے لیے',
        'جسے سے': 'جس سے',
        'اکٹر': 'ڈاکٹر',
        'اکیل': 'اکیلے',
        'میش': 'میں',
        'وتی': 'ہوتی',
        'لکی': 'ہلکی',
        'بتر': 'بہتر',

        // Grammar fixes — restore the dropped 'ے' of the particle 'کے'
        'ک دوران': 'کے دوران',
        'ک بار': 'کے بارے',
        'ک بعد': 'کے بعد',
        'ک لی': 'کے لیے',
        'ک ساتھ': 'کے ساتھ',
        'ک طور': 'کے طور',
        'ک ذریع': 'کے ذریعے',
        'ک مطابق': 'کے مطابق'
    };

    let cleanedText = text;

    // Apply all fixes; keys are escaped so they match literally.
    Object.keys(urduFixes).forEach(wrong => {
        const regex = new RegExp(escapeRegExp(wrong), 'g');
        cleanedText = cleanedText.replace(regex, urduFixes[wrong]);
    });

    // Fix spacing issues: collapse runs of whitespace and detach-before-punctuation gaps.
    cleanedText = cleanedText.replace(/\s+/g, ' ');
    cleanedText = cleanedText.replace(/ \./g, '.');
    cleanedText = cleanedText.replace(/ ،/g, '،');
    // NOTE(review): this replaces a space with a space (no-op) — the pattern
    // likely contained a non-breaking or zero-width character that was lost
    // in transit; confirm the intended source character.
    cleanedText = cleanedText.replace(/ /g, ' ');
    // Collapse doubled Urdu full stops.
    cleanedText = cleanedText.replace(/۔۔/g, '۔');

    return cleanedText.trim();
}
279
+
280
/**
 * Escape every regex metacharacter so `text` can be embedded verbatim
 * inside a RegExp pattern.
 */
function escapeRegExp(text) {
    const metachars = /[.*+?^${}()|[\]\\]/g;
    return text.replace(metachars, '\\$&');
}
283
+
284
/**
 * Append a chat bubble for `sender` ('user' | 'system') containing `message`.
 * Urdu messages get a cleanup pass, RTL styling, and a language badge.
 */
function addMessageToChat(message, sender, language = 'english') {
    const isUrdu = language === 'urdu';

    // Clean Urdu text before displaying.
    if (isUrdu) {
        message = cleanUrduText(message);
    }

    const bubble = document.createElement('div');
    bubble.className = `message ${sender}-message`;
    if (isUrdu) {
        bubble.classList.add('urdu-text');
    }

    const timestamp = new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
    const avatarIcon = sender === 'user' ? 'fas fa-user' : 'fas fa-robot';
    const badge = isUrdu ? '<div class="language-badge">اردو</div>' : '';

    bubble.innerHTML = `
        <div class="message-avatar">
            <i class="${avatarIcon}"></i>
        </div>
        <div class="message-content ${isUrdu ? 'urdu-text' : ''}">
            <p>${formatMessage(message)}</p>
            <span class="message-time">${timestamp}</span>
            ${badge}
        </div>
    `;

    chatMessages.appendChild(bubble);
    scrollToBottom();

    // Reveal with the same transition used by the pre-rendered welcome message.
    setTimeout(() => {
        bubble.style.opacity = '1';
        bubble.style.transform = 'translateY(0) scale(1)';
    }, 10);
}
319
+
320
/**
 * Convert a plain-text message into HTML that is safe to assign via
 * innerHTML.
 *
 * Fix: the original injected the raw text, so any '<', '>' or '&' in a
 * user-typed or model-generated message was parsed as markup (a self-XSS /
 * broken-rendering vector). HTML-significant characters are now escaped
 * before newlines are converted to <br>.
 *
 * @param {string} message - plain text, possibly multi-line
 * @returns {string} escaped HTML with newlines as <br>
 */
function formatMessage(message) {
    const escaped = String(message)
        .replace(/&/g, '&amp;')   // must run first so later entities survive
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;');
    return escaped.replace(/\n/g, '<br>');
}
323
+
324
// Show the animated "assistant is typing" indicator and keep it in view.
function showTypingIndicator() {
    typingIndicator.style.display = 'flex';
    scrollToBottom();
}
328
+
329
// Hide the "assistant is typing" indicator once a response (or error) arrives.
function hideTypingIndicator() {
    typingIndicator.style.display = 'none';
}
332
+
333
/** Bump the reply counter shown in the chat header stats. */
function updateMessageCount() {
    messageCounter += 1;
    messageCount.textContent = messageCounter;
}
337
+
338
/**
 * Smooth-scroll the chat pane to its newest message. The short delay lets
 * the just-appended DOM node finish layout before scrolling.
 */
function scrollToBottom() {
    const scrollDown = () => {
        chatMessages.scrollTo({ top: chatMessages.scrollHeight, behavior: 'smooth' });
    };
    setTimeout(scrollDown, 100);
}
343
+
344
// Voice Recording Feature - captures mic audio, posts it to /voice-query,
// and renders the answer as a text reply only.
/**
 * Request microphone access and start recording a voice note; the onstop
 * handler uploads the recording and displays the server's text answer.
 *
 * Fix over the original: the MediaStream tracks are now stopped once the
 * recording ends — previously the microphone stayed live (browser "mic in
 * use" indicator on) after every recording.
 */
async function startRecording() {
    try {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        mediaRecorder = new MediaRecorder(stream);
        audioChunks = [];

        mediaRecorder.ondataavailable = e => audioChunks.push(e.data);
        mediaRecorder.onstop = async () => {
            // Release the microphone as soon as recording has finished.
            stream.getTracks().forEach(track => track.stop());

            const audioBlob = new Blob(audioChunks, { type: 'audio/webm' });
            const formData = new FormData();
            formData.append('file', audioBlob, 'voiceNote.webm');
            formData.append('language', currentLanguage); // Pass current tab language

            // Echo the user's voice note in the chat, then wait for the reply.
            addUserVoiceMessageToChat(audioBlob);
            showTypingIndicator();

            try {
                const response = await fetch('/voice-query', {
                    method: 'POST',
                    body: formData
                });
                const data = await response.json();
                hideTypingIndicator();

                if (data.status === 'success') {
                    // Always show a text response for voice queries.
                    if (data.text && data.text.trim() !== '') {
                        addMessageToChat(data.text, 'system', data.language);
                    } else {
                        // Fallback message when the server returned no text.
                        const fallbackMessage = data.language === 'urdu'
                            ? "میں آپ کی آواز کا پیغام سمجھ گئی ہوں۔ آپ کیسے مدد کر سکتی ہوں؟"
                            : "I've processed your voice message. How can I help you further?";
                        addMessageToChat(fallbackMessage, 'system', data.language);
                    }

                    updateMessageCount();
                } else {
                    const errorMessage = currentLanguage === 'urdu'
                        ? "معذرت، آپ کی آواز کا پیغام پروسیس نہیں کر سکی۔"
                        : "Sorry, couldn't process your voice message.";
                    addMessageToChat(errorMessage, 'system', currentLanguage);
                }
            } catch (err) {
                console.error('Voice query error:', err);
                hideTypingIndicator();
                const errorMessage = currentLanguage === 'urdu'
                    ? "آواز کے پروسیس میں خرابی۔"
                    : "Error processing voice input.";
                addMessageToChat(errorMessage, 'system', currentLanguage);
            }
        };

        mediaRecorder.start();
        isRecording = true;
        voiceButton.classList.add('recording');
        voiceButton.innerHTML = '<i class="fas fa-stop"></i>';
    } catch (err) {
        // getUserMedia rejected: permissions denied or no capture device.
        console.error('Microphone access error:', err);
        const errorMessage = currentLanguage === 'urdu'
            ? 'براہ کرم آواز ریکارڈ کرنے کے لیے مائیکروفون کی رسائی کی اجازت دیں۔'
            : 'Please allow microphone access to record voice messages.';
        alert(errorMessage);
    }
}
411
+
412
/**
 * Append the user's recorded voice note to the chat as a playable <audio>
 * bubble. The blob URL must stay alive so the note can be replayed, so it
 * is intentionally not revoked here.
 */
function addUserVoiceMessageToChat(audioBlob) {
    const bubble = document.createElement('div');
    bubble.className = `message user-message audio-message`;

    const timestamp = new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
    const audioUrl = URL.createObjectURL(audioBlob);

    bubble.innerHTML = `
        <div class="message-avatar">
            <i class="fas fa-user"></i>
        </div>
        <div class="message-content">
            <div class="audio-message-container user-audio">
                <div class="audio-player-wrapper">
                    <audio controls class="voice-note-player">
                        <source src="${audioUrl}" type="audio/webm">
                        Your browser does not support the audio element.
                    </audio>
                </div>
                <div class="audio-duration">Your voice message</div>
            </div>
            <span class="message-time">${timestamp}</span>
        </div>
    `;

    chatMessages.appendChild(bubble);
    scrollToBottom();

    // Same reveal transition as text bubbles.
    setTimeout(() => {
        bubble.style.opacity = '1';
        bubble.style.transform = 'translateY(0) scale(1)';
    }, 10);
}
445
+
446
/**
 * Stop an in-progress recording (which triggers mediaRecorder.onstop)
 * and restore the mic button to its idle state.
 */
function stopRecording() {
    const recorderActive = mediaRecorder && mediaRecorder.state !== 'inactive';
    if (recorderActive) {
        mediaRecorder.stop();
    }
    isRecording = false;
    voiceButton.classList.remove('recording');
    voiceButton.innerHTML = '<i class="fas fa-microphone"></i>';
}
454
+
455
// Logo animation: every 5 seconds, wiggle the header logo
// (+5° at t=0, −5° at t=1s, back to 0° at t=2s).
const logo = document.querySelector('.logo');
if (logo) {
    setInterval(() => {
        logo.style.transform = 'rotate(5deg)';
        setTimeout(() => { logo.style.transform = 'rotate(-5deg)'; }, 1000);
        setTimeout(() => { logo.style.transform = 'rotate(0deg)'; }, 2000);
    }, 5000);
}
styles.css ADDED
@@ -0,0 +1,1344 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Design tokens: the palette is built around shades of pink with soft
   layered shadows and springy cubic-bezier transitions. */
:root {
    --primary-pink: #ff6b93;
    --primary-pink-light: #ff8fa9;
    --primary-pink-lighter: #ffb6c8;
    --primary-pink-lightest: #ffe8ee;
    --secondary-pink: #f8c9d4;
    --accent-pink: #ff4d7a;
    --white: #ffffff;
    --white-soft: #fefefe;
    --white-softer: #fafafa;
    --gray-light: #f5f5f5;
    --gray-medium: #e0e0e0;
    --gray-dark: #666666;
    --text-dark: #333333;
    --text-secondary: #666666;
    --shadow-soft: 0 8px 32px rgba(255, 107, 147, 0.1);
    --shadow-medium: 0 15px 35px rgba(255, 107, 147, 0.15);
    --shadow-strong: 0 20px 50px rgba(255, 107, 147, 0.2);
    --border-radius: 24px;
    --border-radius-small: 16px;
    --transition: all 0.4s cubic-bezier(0.175, 0.885, 0.32, 1.1);
    --transition-fast: all 0.2s ease;
}

/* Global reset */
* {
    margin: 0;
    padding: 0;
    box-sizing: border-box;
}

body {
    font-family: 'Poppins', sans-serif;
    background: linear-gradient(135deg, #fff5f7 0%, #ffffff 100%);
    color: var(--text-dark);
    min-height: 100vh;
    overflow-x: hidden;
    position: relative;
}

/* Decorative pink circles drifting behind the page content (z-index: -1). */
.floating-elements {
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    z-index: -1;
    overflow: hidden;
}

.floating-element {
    position: absolute;
    border-radius: 50%;
    background: linear-gradient(135deg, var(--primary-pink-lightest), var(--primary-pink-lighter));
    opacity: 0.4;
    animation: float 20s infinite linear;
}

/* Four circles of varying size/position; durations and directions differ
   so their motion never visibly repeats in sync. */
.floating-element.el-1 {
    width: 200px;
    height: 200px;
    top: 10%;
    left: 5%;
    animation-duration: 25s;
}

.floating-element.el-2 {
    width: 150px;
    height: 150px;
    top: 60%;
    right: 10%;
    animation-duration: 20s;
    animation-direction: reverse;
}

.floating-element.el-3 {
    width: 100px;
    height: 100px;
    bottom: 20%;
    left: 15%;
    animation-duration: 30s;
}

.floating-element.el-4 {
    width: 120px;
    height: 120px;
    top: 20%;
    right: 20%;
    animation-duration: 35s;
    animation-direction: reverse;
}

/* Gentle drift-and-spin cycle for the background circles. */
@keyframes float {
    0% {
        transform: translateY(0) rotate(0deg);
    }
    33% {
        transform: translateY(-30px) rotate(120deg);
    }
    66% {
        transform: translateY(20px) rotate(240deg);
    }
    100% {
        transform: translateY(0) rotate(360deg);
    }
}
106
+
107
/* Page shell */
.container {
    max-width: 1400px;
    margin: 0 auto;
    padding: 2rem;
    width: 100%;
}

/* Header: logo + title + tagline, fades in on load. */
.header {
    text-align: center;
    margin-bottom: 3rem;
    animation: fadeInUp 1s ease-out;
}

.header-content {
    max-width: 800px;
    margin: 0 auto;
    width: 100%;
}

.logo-container {
    display: flex;
    align-items: center;
    justify-content: center;
    gap: 1rem;
    margin-bottom: 1rem;
    flex-wrap: wrap;
}

/* Round gradient logo badge with a continuous pulse. */
.logo {
    width: 70px;
    height: 70px;
    background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
    border-radius: 50%;
    display: flex;
    align-items: center;
    justify-content: center;
    font-size: 2rem;
    color: var(--white);
    box-shadow: var(--shadow-strong);
    animation: pulse 2s infinite;
    flex-shrink: 0;
}

@keyframes pulse {
    0% {
        transform: scale(1);
        box-shadow: var(--shadow-strong);
    }
    50% {
        transform: scale(1.05);
        box-shadow: 0 0 30px rgba(255, 107, 147, 0.4);
    }
    100% {
        transform: scale(1);
        box-shadow: var(--shadow-strong);
    }
}

/* Gradient-filled headline; clamp() keeps type fluid across viewports. */
.title {
    font-family: 'Playfair Display', serif;
    font-size: clamp(2.5rem, 5vw, 3.5rem);
    font-weight: 600;
    background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
    margin-bottom: 0.5rem;
    line-height: 1.2;
}

.subtitle {
    font-size: clamp(1.1rem, 2.5vw, 1.4rem);
    color: var(--text-secondary);
    margin-bottom: 1rem;
    font-weight: 400;
}

.compassion-message {
    font-size: clamp(0.9rem, 2vw, 1.1rem);
    color: var(--text-secondary);
    line-height: 1.6;
    max-width: 600px;
    margin: 0 auto;
}

/* Two-column layout: question list (left) and chat panel (right, wider). */
.main-content {
    display: grid;
    grid-template-columns: 1fr 1.5fr;
    gap: 2.5rem;
    animation: fadeInUp 1s ease-out 0.3s both;
    width: 100%;
}

@keyframes fadeInUp {
    from {
        opacity: 0;
        transform: translateY(30px);
    }
    to {
        opacity: 1;
        transform: translateY(0);
    }
}

.quick-questions {
    animation: slideInLeft 1s ease-out 0.5s both;
    width: 100%;
}

@keyframes slideInLeft {
    from {
        opacity: 0;
        transform: translateX(-30px);
    }
    to {
        opacity: 1;
        transform: translateX(0);
    }
}

.section-header {
    margin-bottom: 2rem;
}

.section-title {
    font-family: 'Playfair Display', serif;
    font-size: clamp(1.5rem, 3vw, 1.8rem);
    font-weight: 600;
    color: var(--text-dark);
    margin-bottom: 0.5rem;
}

/* Short gradient underline beneath section titles. */
.decoration-line {
    width: 60px;
    height: 4px;
    background: linear-gradient(to right, var(--primary-pink), var(--accent-pink));
    border-radius: 2px;
}

/* Language tabs (English / Urdu) above the question list. */
.tabs-container {
    margin-top: 1rem;
    width: 100%;
}

.tabs {
    display: flex;
    background: var(--white);
    border-radius: 12px;
    padding: 0.5rem;
    margin-bottom: 1.5rem;
    box-shadow: var(--shadow-soft);
    width: 100%;
}

.tab-btn {
    flex: 1;
    padding: 0.75rem 1rem;
    border: none;
    background: transparent;
    color: var(--text-secondary);
    border-radius: 8px;
    cursor: pointer;
    transition: var(--transition);
    font-weight: 500;
    font-size: clamp(0.8rem, 1.5vw, 0.9rem);
    min-width: 0;
    white-space: nowrap;
    overflow: hidden;
    text-overflow: ellipsis;
}

.tab-btn.active {
    background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
    color: var(--white);
    box-shadow: var(--shadow-soft);
}

.tab-btn:hover:not(.active) {
    background: var(--primary-pink-lightest);
    color: var(--primary-pink);
}

/* Panels are hidden unless flagged active by script.js's switchTab(). */
.tab-content {
    display: none;
}

.tab-content.active {
    display: block;
    animation: fadeIn 0.5s ease-in-out;
}

@keyframes fadeIn {
    from { opacity: 0; transform: translateY(10px); }
    to { opacity: 1; transform: translateY(0); }
}
302
+
303
/* Predefined-question cards (built dynamically by createQuestionCard()). */
.questions-grid {
    display: flex;
    flex-direction: column;
    gap: 1rem;
    width: 100%;
}

.question-card {
    background: var(--white);
    border: none;
    border-radius: var(--border-radius-small);
    padding: 1.5rem;
    display: flex;
    align-items: center;
    gap: 1rem;
    cursor: pointer;
    transition: var(--transition);
    box-shadow: var(--shadow-soft);
    text-align: left;
    position: relative;
    overflow: hidden;
    width: 100%;
}

/* Shine sweep: a translucent band that slides across the card on hover. */
.question-card::before {
    content: '';
    position: absolute;
    top: 0;
    left: -100%;
    width: 100%;
    height: 100%;
    background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.4), transparent);
    transition: var(--transition);
}

.question-card:hover::before {
    left: 100%;
}

.question-card:hover {
    transform: translateY(-5px);
    box-shadow: var(--shadow-medium);
}

.card-icon {
    width: 50px;
    height: 50px;
    background: linear-gradient(135deg, var(--primary-pink-lightest), var(--secondary-pink));
    border-radius: 12px;
    display: flex;
    align-items: center;
    justify-content: center;
    font-size: 1.2rem;
    color: var(--primary-pink);
    flex-shrink: 0;
    transition: var(--transition);
}

.question-card:hover .card-icon {
    transform: scale(1.1);
    background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
    color: var(--white);
}

.card-content {
    flex: 1;
    min-width: 0;
}

.card-content h3 {
    font-size: clamp(0.9rem, 1.5vw, 1rem);
    font-weight: 600;
    color: var(--text-dark);
    margin-bottom: 0.25rem;
    line-height: 1.4;
    word-wrap: break-word;
}

.card-arrow {
    color: var(--gray-medium);
    transition: var(--transition);
    flex-shrink: 0;
}

.question-card:hover .card-arrow {
    color: var(--primary-pink);
    transform: translateX(5px);
}

/* Accent stripe distinguishing server-provided predefined questions. */
.predefined-card {
    border-left: 4px solid var(--primary-pink);
    background: linear-gradient(135deg, var(--white), var(--white-soft));
}

/* Per-category icon colors; !important overrides the default .card-icon
   gradient and the hover recolor above. Category names match the
   `questionData.category` values emitted by the backend. */
.card-icon.symptoms {
    background: linear-gradient(135deg, #4CAF50, #45a049) !important;
    color: white !important;
}

.card-icon.pain {
    background: linear-gradient(135deg, #FF6B6B, #FF5252) !important;
    color: white !important;
}

.card-icon.emotional {
    background: linear-gradient(135deg, #FFD93D, #FFCD38) !important;
    color: white !important;
}

.card-icon.exercise {
    background: linear-gradient(135deg, #6BCF7F, #4CAF50) !important;
    color: white !important;
}

.card-icon.nutrition {
    background: linear-gradient(135deg, #A78BFA, #8B5CF6) !important;
    color: white !important;
}

.card-icon.general {
    background: linear-gradient(135deg, #4FC3F7, #29B6F6) !important;
    color: white !important;
}
426
+
427
/* ===== OPTIMIZED URDU LANGUAGE STYLING - MATCHING ENGLISH SIZE ===== */
/* Nastaliq fonts render tall, so line-height is raised while font-size
   stays at 1em to keep Urdu visually in step with the English UI. */
.urdu-text {
    font-family: 'Noto Nastaliq Urdu', 'Noto Naskh Arabic', 'Scheherazade New', 'Lateef', 'Amiri', serif !important;
    direction: rtl !important;
    text-align: right !important;
    line-height: 1.8 !important;
    font-size: 1em !important;
    font-weight: 400 !important;
    letter-spacing: 0 !important;
    word-spacing: normal !important;
}

/* Urdu chat messages */
.message.urdu-text .message-content p {
    font-family: 'Noto Nastaliq Urdu', 'Scheherazade New', 'Lateef', serif !important;
    font-size: 1em !important;
    line-height: 2.0 !important;
    text-align: right !important;
    direction: rtl !important;
    margin: 0 !important;
    padding: 0 !important;
    font-feature-settings: "kern" 1, "liga" 1, "clig" 1, "calt" 1;
}

/* Urdu input field — matched via the inline direction/text-align styles
   that script.js's updateInputPlaceholder() sets on #userInput. */
#userInput[style*="direction: rtl"],
#userInput[style*="text-align: right"] {
    font-family: 'Noto Nastaliq Urdu', 'Noto Naskh Arabic', 'Scheherazade New', 'Segoe UI', Tahoma, sans-serif !important;
    font-size: 1em !important;
    direction: rtl !important;
    text-align: right !important;
    line-height: 1.8 !important;
}

/* Urdu questions section */
#urdu-tab .questions-grid,
.urdu-questions .question-card {
    font-family: 'Noto Nastaliq Urdu', 'Scheherazade New', 'Lateef', Tahoma, sans-serif !important;
    font-size: 1em !important;
    line-height: 1.8 !important;
    direction: rtl !important;
    text-align: right !important;
}

/* Urdu empty states */
#urdu-tab .empty-state {
    font-family: 'Noto Nastaliq Urdu', 'Scheherazade New', 'Lateef', Tahoma, sans-serif !important;
    direction: rtl !important;
    font-size: 1em !important;
    line-height: 1.8 !important;
}

/* Badge appended under Urdu chat bubbles by addMessageToChat(). */
.language-badge {
    font-family: 'Noto Naskh Arabic', 'Segoe UI', Tahoma, sans-serif !important;
    font-size: 0.75em !important;
    font-weight: 600 !important;
    background: linear-gradient(135deg, #667eea, #764ba2) !important;
    color: white !important;
    padding: 4px 10px !important;
    border-radius: 12px !important;
    margin-top: 8px !important;
    display: inline-block !important;
}

/* Enhanced text rendering for Urdu and any RTL content. */
.urdu-text,
[lang="ur"],
[dir="rtl"] {
    text-rendering: optimizeLegibility !important;
    -webkit-font-smoothing: antialiased !important;
    -moz-osx-font-smoothing: grayscale !important;
    font-feature-settings: "kern" 1, "liga" 1, "clig" 1, "calt" 1;
}

/* Urdu tab button */
.tab-btn[data-tab="urdu"] {
    font-family: 'Noto Naskh Arabic', 'Segoe UI', Tahoma, sans-serif !important;
    font-weight: 600 !important;
    font-size: 0.9em !important;
}

/* RTL variants of the question cards: accent stripe and hover motion
   are mirrored to the right-hand side. */
.urdu-text .card-content {
    text-align: right !important;
    direction: rtl !important;
}

.urdu-text.question-card {
    border-left: none !important;
    border-right: 4px solid var(--primary-pink) !important;
}

.urdu-text.question-card:hover {
    transform: translateX(-5px) translateY(-5px) !important;
}
524
+
525
/* Chat panel (right column): header with profile/stats, scrollable
   message list, and the message bubbles themselves. */
.chat-section {
    animation: slideInRight 1s ease-out 0.5s both;
    width: 100%;
}

@keyframes slideInRight {
    from {
        opacity: 0;
        transform: translateX(30px);
    }
    to {
        opacity: 1;
        transform: translateX(0);
    }
}

/* Fixed-height card so the message list scrolls internally. */
.chat-container {
    background: var(--white);
    border-radius: var(--border-radius);
    box-shadow: var(--shadow-strong);
    overflow: hidden;
    height: 700px;
    display: flex;
    flex-direction: column;
    position: relative;
    width: 100%;
}

.chat-header {
    background: linear-gradient(135deg, var(--white-soft), var(--white-softer));
    padding: 1.5rem 2rem;
    display: flex;
    justify-content: space-between;
    align-items: center;
    border-bottom: 1px solid var(--gray-light);
    flex-wrap: wrap;
    gap: 1rem;
}

.chat-profile {
    display: flex;
    align-items: center;
    gap: 1rem;
    flex: 1;
    min-width: 0;
}

.profile-avatar {
    position: relative;
    width: 60px;
    height: 60px;
    background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
    border-radius: 50%;
    display: flex;
    align-items: center;
    justify-content: center;
    font-size: 1.5rem;
    color: var(--white);
    box-shadow: var(--shadow-soft);
    flex-shrink: 0;
}

/* Green "online" dot pinned to the avatar's corner. */
.online-indicator {
    position: absolute;
    bottom: 4px;
    right: 4px;
    width: 14px;
    height: 14px;
    background: #4CAF50;
    border: 2px solid var(--white);
    border-radius: 50%;
}

.profile-info {
    min-width: 0;
}

.profile-info h3 {
    font-size: clamp(1.1rem, 2vw, 1.3rem);
    font-weight: 600;
    color: var(--text-dark);
    white-space: nowrap;
    overflow: hidden;
    text-overflow: ellipsis;
}

.profile-info p {
    color: var(--text-secondary);
    font-size: clamp(0.8rem, 1.5vw, 0.9rem);
    white-space: nowrap;
    overflow: hidden;
    text-overflow: ellipsis;
}

/* Counters (e.g. messages exchanged) in the chat header. */
.chat-stats {
    display: flex;
    gap: 1.5rem;
    flex-shrink: 0;
}

.stat {
    text-align: center;
}

.stat-number {
    display: block;
    font-size: clamp(1rem, 1.5vw, 1.1rem);
    font-weight: 600;
    color: var(--primary-pink);
}

.stat-label {
    font-size: clamp(0.7rem, 1.2vw, 0.8rem);
    color: var(--text-secondary);
}

/* Scrollable conversation area. */
.chat-messages {
    flex: 1;
    overflow-y: auto;
    padding: 2rem;
    display: flex;
    flex-direction: column;
    gap: 1.5rem;
    background: var(--white-softer);
    width: 100%;
}

/* One avatar + bubble row; springs into view when appended. */
.message {
    display: flex;
    gap: 1rem;
    max-width: 85%;
    animation: messageAppear 0.4s cubic-bezier(0.175, 0.885, 0.32, 1.275);
    width: fit-content;
}

@keyframes messageAppear {
    from {
        opacity: 0;
        transform: translateY(20px) scale(0.95);
    }
    to {
        opacity: 1;
        transform: translateY(0) scale(1);
    }
}

/* User rows sit on the right with the avatar after the bubble. */
.user-message {
    align-self: flex-end;
    flex-direction: row-reverse;
}

.system-message {
    align-self: flex-start;
}

.message-avatar {
    width: 45px;
    height: 45px;
    border-radius: 50%;
    display: flex;
    align-items: center;
    justify-content: center;
    font-size: 1.1rem;
    flex-shrink: 0;
    margin-top: 0.5rem;
}

.user-message .message-avatar {
    background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
    color: var(--white);
}

.system-message .message-avatar {
    background: linear-gradient(135deg, var(--primary-pink-lightest), var(--secondary-pink));
    color: var(--primary-pink);
}

.message-content {
    background: var(--white);
    padding: 1.2rem 1.5rem;
    border-radius: 20px;
    box-shadow: var(--shadow-soft);
    position: relative;
    transition: var(--transition-fast);
    max-width: 100%;
    word-wrap: break-word;
}

/* Flattened inner corner points toward each speaker's avatar. */
.user-message .message-content {
    background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
    color: var(--white);
    border-bottom-right-radius: 5px;
}

.system-message .message-content {
    background: var(--white);
    color: var(--text-dark);
    border-bottom-left-radius: 5px;
}

/* Bubble-tail anchor (positioned per-side by rules later in the file). */
.message-content::before {
    content: '';
    position: absolute;
    bottom: 0;
    width: 15px;
    height: 15px;
}
733
+ .user-message .message-content::before {
734
+ right: -8px;
735
+ background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
736
+ clip-path: polygon(0 0, 100% 0, 100% 100%);
737
+ }
738
+
739
+ .system-message .message-content::before {
740
+ left: -8px;
741
+ background: var(--white);
742
+ clip-path: polygon(0 0, 100% 0, 0 100%);
743
+ }
744
+
745
+ .message-content p {
746
+ line-height: 1.6;
747
+ margin-bottom: 0.5rem;
748
+ word-wrap: break-word;
749
+ }
750
+
751
+ .message-time {
752
+ font-size: 0.75rem;
753
+ opacity: 0.7;
754
+ }
755
+
756
+ .typing-indicator {
757
+ display: none;
758
+ align-items: center;
759
+ gap: 1rem;
760
+ padding: 1rem 2rem;
761
+ background: var(--white-softer);
762
+ border-top: 1px solid var(--gray-light);
763
+ width: 100%;
764
+ }
765
+
766
+ .typing-dots {
767
+ display: flex;
768
+ gap: 4px;
769
+ }
770
+
771
+ .typing-dots span {
772
+ width: 8px;
773
+ height: 8px;
774
+ background: var(--primary-pink);
775
+ border-radius: 50%;
776
+ animation: typing 1.4s infinite ease-in-out;
777
+ }
778
+
779
+ .typing-dots span:nth-child(1) { animation-delay: -0.32s; }
780
+ .typing-dots span:nth-child(2) { animation-delay: -0.16s; }
781
+
782
+ @keyframes typing {
783
+ 0%, 80%, 100% {
784
+ transform: scale(0.8);
785
+ opacity: 0.5;
786
+ }
787
+ 40% {
788
+ transform: scale(1);
789
+ opacity: 1;
790
+ }
791
+ }
792
+
793
+ .typing-indicator p {
794
+ color: var(--text-secondary);
795
+ font-size: clamp(0.8rem, 1.5vw, 0.9rem);
796
+ }
797
+
798
+ .chat-input-container {
799
+ padding: 1.5rem 2rem;
800
+ border-top: 1px solid var(--gray-light);
801
+ background: var(--white);
802
+ width: 100%;
803
+ }
804
+
805
+ .input-wrapper {
806
+ display: flex;
807
+ gap: 1rem;
808
+ margin-bottom: 0.5rem;
809
+ width: 100%;
810
+ }
811
+
812
+ #userInput {
813
+ flex: 1;
814
+ border: none;
815
+ outline: none;
816
+ padding: 1.2rem 1.5rem;
817
+ font-size: clamp(0.9rem, 1.5vw, 1rem);
818
+ background: var(--white-softer);
819
+ border-radius: 50px;
820
+ box-shadow: inset 0 2px 10px rgba(0, 0, 0, 0.05);
821
+ transition: var(--transition);
822
+ border: 2px solid transparent;
823
+ min-width: 0;
824
+ }
825
+
826
+ #userInput:focus {
827
+ border-color: var(--primary-pink-light);
828
+ box-shadow: 0 0 0 3px rgba(255, 107, 147, 0.1), inset 0 2px 10px rgba(0, 0, 0, 0.05);
829
+ }
830
+
831
+ .send-button {
832
+ display: flex;
833
+ align-items: center;
834
+ gap: 0.5rem;
835
+ padding: 1.2rem 1.8rem;
836
+ background: linear-gradient(135deg, var(--primary-pink), var(--accent-pink));
837
+ color: var(--white);
838
+ border: none;
839
+ border-radius: 50px;
840
+ cursor: pointer;
841
+ transition: var(--transition);
842
+ font-weight: 500;
843
+ box-shadow: var(--shadow-soft);
844
+ flex-shrink: 0;
845
+ }
846
+
847
+ .send-button:hover {
848
+ transform: translateY(-2px);
849
+ box-shadow: var(--shadow-medium);
850
+ }
851
+
852
+ .send-button:active {
853
+ transform: translateY(0);
854
+ }
855
+
856
+ .voice-button {
857
+ display: flex;
858
+ align-items: center;
859
+ justify-content: center;
860
+ padding: 1.2rem 1.4rem;
861
+ background: linear-gradient(135deg, var(--primary-pink-lightest), var(--secondary-pink));
862
+ color: var(--primary-pink);
863
+ border: none;
864
+ border-radius: 50%;
865
+ cursor: pointer;
866
+ transition: var(--transition);
867
+ box-shadow: var(--shadow-soft);
868
+ }
869
+
870
+ .voice-button.recording {
871
+ background: linear-gradient(135deg, #ff4d7a, #ff6b93);
872
+ color: white;
873
+ animation: pulse 1s infinite;
874
+ }
875
+
876
+ .voice-button:hover {
877
+ transform: scale(1.1);
878
+ }
879
+
880
+ .input-hint {
881
+ display: flex;
882
+ align-items: center;
883
+ gap: 0.5rem;
884
+ font-size: clamp(0.7rem, 1.2vw, 0.8rem);
885
+ color: var(--text-secondary);
886
+ padding-left: 1rem;
887
+ flex-wrap: wrap;
888
+ }
889
+
890
+ .input-hint i {
891
+ color: var(--primary-pink);
892
+ }
893
+
894
+ .chat-messages::-webkit-scrollbar {
895
+ width: 6px;
896
+ }
897
+
898
+ .chat-messages::-webkit-scrollbar-track {
899
+ background: rgba(0, 0, 0, 0.05);
900
+ border-radius: 3px;
901
+ }
902
+
903
+ .chat-messages::-webkit-scrollbar-thumb {
904
+ background: var(--primary-pink-light);
905
+ border-radius: 3px;
906
+ }
907
+
908
+ .chat-messages::-webkit-scrollbar-thumb:hover {
909
+ background: var(--primary-pink);
910
+ }
911
+
912
+ .empty-state {
913
+ text-align: center;
914
+ padding: 3rem 2rem;
915
+ color: var(--text-secondary);
916
+ width: 100%;
917
+ }
918
+
919
+ .empty-state i {
920
+ font-size: clamp(2.5rem, 5vw, 3rem);
921
+ margin-bottom: 1rem;
922
+ color: var(--primary-pink-light);
923
+ }
924
+
925
+ .empty-state p {
926
+ font-size: clamp(0.9rem, 1.5vw, 1rem);
927
+ line-height: 1.5;
928
+ }
929
+
930
+ #activeLanguage {
931
+ color: var(--primary-pink);
932
+ font-weight: 600;
933
+ }
934
+
935
+ #currentLanguageDisplay {
936
+ color: var(--primary-pink);
937
+ font-weight: 600;
938
+ }
939
+
940
+ .audio-message {
941
+ margin: 10px 0;
942
+ }
943
+
944
+ .audio-message-container {
945
+ background: #f8f9fa;
946
+ border-radius: 18px;
947
+ padding: 12px 16px;
948
+ max-width: 280px;
949
+ border: 1px solid #e9ecef;
950
+ }
951
+
952
+ .user-audio .audio-message-container {
953
+ background: #007bff;
954
+ color: white;
955
+ }
956
+
957
+ .audio-player-wrapper {
958
+ display: flex;
959
+ align-items: center;
960
+ gap: 12px;
961
+ }
962
+
963
+ .voice-note-player {
964
+ flex: 1;
965
+ height: 32px;
966
+ border-radius: 16px;
967
+ background: white;
968
+ border: 1px solid #ddd;
969
+ }
970
+
971
+ .user-audio .voice-note-player {
972
+ background: #0056b3;
973
+ border-color: #004085;
974
+ }
975
+
976
+ .audio-visualizer {
977
+ display: flex;
978
+ align-items: center;
979
+ gap: 2px;
980
+ height: 20px;
981
+ }
982
+
983
+ .audio-wave {
984
+ width: 3px;
985
+ height: 8px;
986
+ background: #6c757d;
987
+ border-radius: 2px;
988
+ transition: all 0.3s ease;
989
+ }
990
+
991
+ .audio-visualizer.playing .audio-wave {
992
+ background: #007bff;
993
+ }
994
+
995
+ .user-audio .audio-wave {
996
+ background: rgba(255, 255, 255, 0.6);
997
+ }
998
+
999
+ .user-audio .audio-visualizer.playing .audio-wave {
1000
+ background: white;
1001
+ }
1002
+
1003
+ .audio-duration {
1004
+ font-size: 11px;
1005
+ color: #6c757d;
1006
+ margin-top: 4px;
1007
+ text-align: center;
1008
+ }
1009
+
1010
+ .user-audio .audio-duration {
1011
+ color: rgba(255, 255, 255, 0.8);
1012
+ }
1013
+
1014
+ @keyframes pulse {
1015
+ 0% {
1016
+ height: 8px;
1017
+ opacity: 0.7;
1018
+ }
1019
+ 100% {
1020
+ height: 16px;
1021
+ opacity: 1;
1022
+ }
1023
+ }
1024
+
1025
+ .manual-play-required .audio-player-wrapper::before {
1026
+ content: "Click to play";
1027
+ position: absolute;
1028
+ top: -20px;
1029
+ left: 50%;
1030
+ transform: translateX(-50%);
1031
+ background: #ffc107;
1032
+ color: #212529;
1033
+ padding: 4px 8px;
1034
+ border-radius: 4px;
1035
+ font-size: 10px;
1036
+ white-space: nowrap;
1037
+ }
1038
+
1039
+ .message {
1040
+ opacity: 0;
1041
+ transform: translateY(20px) scale(0.95);
1042
+ transition: all 0.3s ease;
1043
+ }
1044
+
1045
+ .message.show {
1046
+ opacity: 1;
1047
+ transform: translateY(0) scale(1);
1048
+ }
1049
+
1050
+ #voiceButton.recording {
1051
+ background: #dc3545;
1052
+ animation: pulse 1.5s infinite;
1053
+ }
1054
+
1055
+ @keyframes pulse {
1056
+ 0% {
1057
+ box-shadow: 0 0 0 0 rgba(220, 53, 69, 0.7);
1058
+ }
1059
+ 70% {
1060
+ box-shadow: 0 0 0 10px rgba(220, 53, 69, 0);
1061
+ }
1062
+ 100% {
1063
+ box-shadow: 0 0 0 0 rgba(220, 53, 69, 0);
1064
+ }
1065
+ }
1066
+
1067
+ @media (max-width: 768px) {
1068
+ .urdu-text {
1069
+ font-size: 0.95em;
1070
+ line-height: 1.7;
1071
+ }
1072
+
1073
+ .chat-messages .message.urdu-text .message-content p {
1074
+ font-size: 0.95em;
1075
+ line-height: 1.9;
1076
+ }
1077
+
1078
+ .audio-message-container {
1079
+ max-width: 220px;
1080
+ }
1081
+
1082
+ .voice-note-player {
1083
+ height: 28px;
1084
+ }
1085
+ }
1086
+ @media (max-width: 1200px) {
1087
+ .container {
1088
+ padding: 1.5rem;
1089
+ }
1090
+
1091
+ .main-content {
1092
+ gap: 2rem;
1093
+ }
1094
+
1095
+ .chat-container {
1096
+ height: 650px;
1097
+ }
1098
+ }
1099
+
1100
+ @media (max-width: 1024px) {
1101
+ .main-content {
1102
+ grid-template-columns: 1fr;
1103
+ gap: 2rem;
1104
+ }
1105
+
1106
+ .chat-container {
1107
+ height: 600px;
1108
+ }
1109
+
1110
+ .chat-header {
1111
+ padding: 1.25rem;
1112
+ }
1113
+
1114
+ .chat-messages {
1115
+ padding: 1.5rem;
1116
+ }
1117
+ }
1118
+
1119
+ @media (max-width: 768px) {
1120
+ .container {
1121
+ padding: 1rem;
1122
+ }
1123
+
1124
+ .logo-container {
1125
+ flex-direction: column;
1126
+ text-align: center;
1127
+ gap: 0.5rem;
1128
+ }
1129
+
1130
+ .title {
1131
+ font-size: 2.5rem;
1132
+ }
1133
+
1134
+ .chat-header {
1135
+ padding: 1rem;
1136
+ flex-direction: column;
1137
+ gap: 1rem;
1138
+ align-items: flex-start;
1139
+ }
1140
+
1141
+ .chat-profile {
1142
+ width: 100%;
1143
+ }
1144
+
1145
+ .chat-stats {
1146
+ align-self: stretch;
1147
+ justify-content: space-around;
1148
+ }
1149
+
1150
+ .message {
1151
+ max-width: 95%;
1152
+ }
1153
+
1154
+ .chat-messages {
1155
+ padding: 1rem;
1156
+ gap: 1rem;
1157
+ }
1158
+
1159
+ .chat-input-container {
1160
+ padding: 1rem;
1161
+ }
1162
+
1163
+ .input-wrapper {
1164
+ flex-direction: column;
1165
+ }
1166
+
1167
+ .send-button {
1168
+ align-self: flex-end;
1169
+ width: fit-content;
1170
+ padding: 1rem 1.5rem;
1171
+ }
1172
+
1173
+ .tabs {
1174
+ flex-direction: column;
1175
+ gap: 0.5rem;
1176
+ }
1177
+
1178
+ .tab-btn {
1179
+ padding: 1rem;
1180
+ text-align: center;
1181
+ }
1182
+
1183
+ .question-card {
1184
+ padding: 1.25rem;
1185
+ }
1186
+
1187
+ .card-icon {
1188
+ width: 45px;
1189
+ height: 45px;
1190
+ font-size: 1.1rem;
1191
+ }
1192
+
1193
+ .empty-state {
1194
+ padding: 2rem 1rem;
1195
+ }
1196
+
1197
+ .profile-avatar {
1198
+ width: 50px;
1199
+ height: 50px;
1200
+ font-size: 1.3rem;
1201
+ }
1202
+ }
1203
+
1204
+ @media (max-width: 480px) {
1205
+ .container {
1206
+ padding: 0.75rem;
1207
+ }
1208
+
1209
+ .header {
1210
+ margin-bottom: 2rem;
1211
+ }
1212
+
1213
+ .logo {
1214
+ width: 60px;
1215
+ height: 60px;
1216
+ font-size: 1.7rem;
1217
+ }
1218
+
1219
+ .title {
1220
+ font-size: 2rem;
1221
+ }
1222
+
1223
+ .subtitle {
1224
+ font-size: 1rem;
1225
+ }
1226
+
1227
+ .compassion-message {
1228
+ font-size: 0.85rem;
1229
+ }
1230
+
1231
+ .main-content {
1232
+ gap: 1.5rem;
1233
+ }
1234
+
1235
+ .section-header {
1236
+ margin-bottom: 1.5rem;
1237
+ }
1238
+
1239
+ .section-title {
1240
+ font-size: 1.3rem;
1241
+ }
1242
+
1243
+ .chat-container {
1244
+ height: 550px;
1245
+ border-radius: 20px;
1246
+ }
1247
+
1248
+ .chat-messages {
1249
+ padding: 0.75rem;
1250
+ gap: 0.75rem;
1251
+ }
1252
+
1253
+ .message-content {
1254
+ padding: 1rem 1.25rem;
1255
+ }
1256
+
1257
+ .message-avatar {
1258
+ width: 40px;
1259
+ height: 40px;
1260
+ font-size: 1rem;
1261
+ }
1262
+
1263
+ .question-card {
1264
+ padding: 1rem;
1265
+ gap: 0.75rem;
1266
+ }
1267
+
1268
+ .card-icon {
1269
+ width: 40px;
1270
+ height: 40px;
1271
+ font-size: 1rem;
1272
+ }
1273
+
1274
+ .floating-element {
1275
+ display: none;
1276
+ }
1277
+
1278
+ .floating-element.el-1,
1279
+ .floating-element.el-2 {
1280
+ display: block;
1281
+ width: 100px;
1282
+ height: 100px;
1283
+ }
1284
+ }
1285
+
1286
+ @media (max-width: 360px) {
1287
+ .container {
1288
+ padding: 0.5rem;
1289
+ }
1290
+
1291
+ .chat-container {
1292
+ height: 500px;
1293
+ }
1294
+
1295
+ .chat-header {
1296
+ padding: 0.75rem;
1297
+ }
1298
+
1299
+ .profile-avatar {
1300
+ width: 45px;
1301
+ height: 45px;
1302
+ font-size: 1.2rem;
1303
+ }
1304
+
1305
+ .chat-stats {
1306
+ gap: 1rem;
1307
+ }
1308
+
1309
+ .question-card {
1310
+ flex-direction: column;
1311
+ text-align: center;
1312
+ gap: 0.5rem;
1313
+ }
1314
+
1315
+ .card-content h3 {
1316
+ text-align: center;
1317
+ }
1318
+
1319
+ .urdu-text.question-card {
1320
+ flex-direction: column;
1321
+ }
1322
+
1323
+ .urdu-text .card-content {
1324
+ text-align: center;
1325
+ }
1326
+ }
1327
+
1328
+ @media print {
1329
+ .floating-elements,
1330
+ .send-button,
1331
+ .typing-indicator,
1332
+ .chat-input-container {
1333
+ display: none !important;
1334
+ }
1335
+
1336
+ .chat-container {
1337
+ box-shadow: none;
1338
+ height: auto;
1339
+ }
1340
+
1341
+ .chat-messages {
1342
+ overflow: visible;
1343
+ }
1344
+ }