rajkumarrawal commited on
Commit
91a4bb5
·
1 Parent(s): e9d56b1

refactor: normalize whitespace and docstrings in autonomous_engine.py

Browse files

Clean up trailing spaces, consistent blank line spacing, and fix docstring formatting (e.g., removed stray block markers) while preserving behavior.

Files changed (1) hide show
  1. autonomous_engine.py +234 -250
autonomous_engine.py CHANGED
@@ -38,18 +38,18 @@ def validate_input(func):
38
  async def wrapper(*args, **kwargs):
39
  if not args:
40
  return await func(*args, **kwargs)
41
-
42
  # Check if this is an instance method (first arg is likely self)
43
  # For instance methods, the user input is typically the second argument
44
  user_input_idx = 1 if len(args) > 1 and hasattr(args[0], func.__name__) else 0
45
-
46
  if user_input_idx >= len(args):
47
  return await func(*args, **kwargs)
48
-
49
  # Basic input validation
50
  if len(str(args[user_input_idx] if args else "")) > 10000: # 10KB limit
51
  raise ValidationError("Input too large")
52
-
53
  # Sanitize input (remove potentially dangerous patterns)
54
  sanitized_input = str(args[user_input_idx] if args else "").strip()
55
  dangerous_patterns = [
@@ -59,15 +59,15 @@ def validate_input(func):
59
  r'eval\s*\(',
60
  r'exec\s*\('
61
  ]
62
-
63
  for pattern in dangerous_patterns:
64
  if re.search(pattern, sanitized_input, re.IGNORECASE):
65
  raise SecurityError(f"Dangerous content detected: {pattern}")
66
-
67
  # Replace the user input argument with sanitized version
68
  new_args = list(args)
69
  new_args[user_input_idx] = sanitized_input
70
-
71
  return await func(*new_args, **kwargs)
72
  return wrapper
73
 
@@ -75,17 +75,17 @@ def validate_input(func):
75
  def rate_limit(calls_per_minute: int = 60):
76
  """Decorator to implement rate limiting."""
77
  calls = []
78
-
79
  def decorator(func):
80
  @wraps(func)
81
  async def wrapper(*args, **kwargs):
82
  now = datetime.utcnow()
83
  # Remove calls older than 1 minute
84
  calls[:] = [call for call in calls if (now - call).seconds < 60]
85
-
86
  if len(calls) >= calls_per_minute:
87
  raise SecurityError("Rate limit exceeded")
88
-
89
  calls.append(now)
90
  return await func(*args, **kwargs)
91
  return wrapper
@@ -131,7 +131,7 @@ class Task:
131
  created_at: datetime = field(default_factory=datetime.utcnow)
132
  started_at: Optional[datetime] = None
133
  completed_at: Optional[datetime] = None
134
-
135
  def __post_init__(self):
136
  """Validate task data."""
137
  if not self.id or not isinstance(self.id, str):
@@ -140,12 +140,12 @@ class Task:
140
  raise ValidationError("Estimated duration must be positive")
141
  if not self.title.strip():
142
  raise ValidationError("Task title cannot be empty")
143
-
144
  @property
145
  def can_execute(self) -> bool:
146
  """Check if task can be executed (all dependencies completed)."""
147
  return self.status == TaskStatus.PENDING
148
-
149
  def to_dict(self) -> Dict[str, Any]:
150
  """Convert task to dictionary for serialization."""
151
  return {
@@ -169,7 +169,7 @@ class Plan:
169
  created_at: datetime = field(default_factory=datetime.utcnow)
170
  estimated_completion: Optional[datetime] = None
171
  actual_completion: Optional[datetime] = None
172
-
173
  def __post_init__(self):
174
  """Validate plan data."""
175
  if not self.id or not isinstance(self.id, str):
@@ -178,36 +178,36 @@ class Plan:
178
  raise ValidationError("Plan title cannot be empty")
179
  if not self.tasks:
180
  raise ValidationError("Plan must contain at least one task")
181
-
182
  @property
183
  def task_count(self) -> int:
184
  """Get total number of tasks."""
185
  return len(self.tasks)
186
-
187
  @property
188
  def critical_path(self) -> List[str]:
189
  """Calculate critical path (longest dependency chain)."""
190
  # Build dependency graph
191
  graph = defaultdict(list)
192
  in_degree = defaultdict(int)
193
-
194
  for task in self.tasks:
195
  for dep in task.dependencies:
196
  graph[dep].append(task.id)
197
  in_degree[task.id] += 1
198
- in_degree.setdefault(task.id, 0)
199
-
200
  # Find critical path using topological sort with duration tracking
201
  queue = deque([task_id for task_id, degree in in_degree.items() if degree == 0])
202
  durations = {task_id: 0 for task_id in in_degree}
203
-
204
  while queue:
205
  current = queue.popleft()
206
-
207
  # Get current task duration
208
  current_task = next(t for t in self.tasks if t.id == current)
209
  current_duration = durations[current]
210
-
211
  for neighbor in graph[current]:
212
  # Update duration if path through current is longer
213
  durations[neighbor] = max(
@@ -217,11 +217,11 @@ class Plan:
217
  in_degree[neighbor] -= 1
218
  if in_degree[neighbor] == 0:
219
  queue.append(neighbor)
220
-
221
  # Return path for longest duration
222
  max_duration_task = max(durations.items(), key=lambda x: x[1])[0]
223
  return [max_duration_task]
224
-
225
  def to_dict(self) -> Dict[str, Any]:
226
  """Convert plan to dictionary for serialization."""
227
  return {
@@ -239,13 +239,13 @@ class Plan:
239
 
240
  class TaskDependencyGraph:
241
  """Efficient task dependency management using adjacency lists."""
242
-
243
  def __init__(self, tasks: List[Task]):
244
  self.tasks = {task.id: task for task in tasks}
245
  self.graph = defaultdict(set)
246
  self.reverse_graph = defaultdict(set)
247
  self._build_graph()
248
-
249
  def _build_graph(self) -> None:
250
  """Build adjacency lists for efficient traversal."""
251
  for task in self.tasks.values():
@@ -253,11 +253,11 @@ class TaskDependencyGraph:
253
  if dep in self.tasks:
254
  self.graph[dep].add(task.id)
255
  self.reverse_graph[task.id].add(dep)
256
-
257
  def can_execute(self, task_id: str, completed_tasks: Set[str]) -> bool:
258
  """Efficiently check if task can be executed."""
259
  return all(dep in completed_tasks for dep in self.reverse_graph.get(task_id, set()))
260
-
261
  def get_executable_tasks(self, completed_tasks: Set[str]) -> List[str]:
262
  """Get all tasks that can be executed given completed tasks."""
263
  return [
@@ -268,13 +268,13 @@ class TaskDependencyGraph:
268
 
269
  class CachedReasoningEngine:
270
  """Reasoning engine with intelligent caching."""
271
-
272
  def __init__(self, agent_name: str):
273
  self.agent_name = agent_name
274
  self.logger = logging.getLogger(f"{__name__}.{agent_name}")
275
  self.knowledge_base = {}
276
  self.decision_history = deque(maxlen=1000) # Keep last 1000 decisions
277
-
278
  @lru_cache(maxsize=1000)
279
  def _analyze_input_hash(self, user_input_hash: str) -> Dict[str, Any]:
280
  """Cached analysis to avoid recomputing identical requests."""
@@ -283,17 +283,17 @@ class CachedReasoningEngine:
283
  "analysis_id": user_input_hash,
284
  "timestamp": datetime.utcnow()
285
  }
286
-
287
  def analyze_situation(self, user_input: str, context: Dict[str, Any]) -> Dict[str, Any]:
288
  """Analyze situation with caching and optimization."""
289
  # Use hash for caching identical inputs
290
  input_hash = hashlib.md5(user_input.encode()).hexdigest()
291
-
292
  # Check cache first
293
  cached_result = self._analyze_input_hash(input_hash)
294
  if cached_result.get("cached"):
295
  self.logger.info(f"Using cached analysis for input hash: {input_hash[:8]}")
296
-
297
  analysis = {
298
  "intent": self._extract_intent_optimized(user_input),
299
  "entities": self._extract_entities_optimized(user_input),
@@ -305,12 +305,12 @@ class CachedReasoningEngine:
305
  "cache_key": input_hash,
306
  "analysis_timestamp": datetime.utcnow().isoformat()
307
  }
308
-
309
  # Store in knowledge base
310
  self.knowledge_base[input_hash] = analysis
311
-
312
  return analysis
313
-
314
  def _extract_intent_optimized(self, user_input: str) -> Dict[str, Any]:
315
  """Optimized intent extraction using compiled regex patterns."""
316
  intent_patterns = {
@@ -320,22 +320,22 @@ class CachedReasoningEngine:
320
  "problem_solving": re.compile(r'\b(fix|solve|resolve|troubleshoot|debug)\b', re.IGNORECASE),
321
  "creative_work": re.compile(r'\b(create|design|generate|write|build|develop)\b', re.IGNORECASE)
322
  }
323
-
324
  user_input_lower = user_input.lower()
325
  detected_intents = []
326
-
327
  # Use vectorized pattern matching
328
  for intent_type, pattern in intent_patterns.items():
329
  if pattern.search(user_input_lower):
330
  detected_intents.append(intent_type)
331
-
332
  return {
333
  "primary": detected_intents[0] if detected_intents else "general",
334
  "secondary": detected_intents[1:] if len(detected_intents) > 1 else [],
335
  "confidence": min(0.8 if detected_intents else 0.3, len(detected_intents) * 0.2 + 0.3),
336
  "pattern_matches": len(detected_intents)
337
  }
338
-
339
  def _extract_entities_optimized(self, user_input: str) -> List[Dict[str, Any]]:
340
  """Optimized entity extraction using pre-compiled patterns."""
341
  # Pre-compiled patterns for better performance
@@ -346,7 +346,7 @@ class CachedReasoningEngine:
346
  "email": re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'),
347
  "url": re.compile(r'https?://[^\s]+')
348
  }
349
-
350
  entities = []
351
  for entity_type, pattern in patterns.items():
352
  matches = pattern.findall(user_input)
@@ -356,35 +356,35 @@ class CachedReasoningEngine:
356
  "value": match[0] if isinstance(match, tuple) else match,
357
  "confidence": 0.9 if entity_type in ["email", "url"] else 0.7
358
  })
359
-
360
  return entities
361
-
362
  def _assess_complexity_optimized(self, user_input: str) -> Dict[str, Any]:
363
  """Optimized complexity assessment using word frequency analysis."""
364
  complexity_weights = {
365
  "high": 3, "medium": 2, "low": 1
366
  }
367
-
368
  complexity_keywords = {
369
  "high": ["plan", "strategy", "campaign", "project", "initiative", "comprehensive", "optimize"],
370
  "medium": ["create", "develop", "implement", "organize", "schedule", "improve"],
371
  "low": ["update", "check", "show", "find", "search", "simple"]
372
  }
373
-
374
  user_input_lower = user_input.lower()
375
  words = re.findall(r'\b\w+\b', user_input_lower)
376
-
377
  complexity_score = 0
378
  level_scores = defaultdict(int)
379
-
380
  for word in words:
381
  for level, keywords in complexity_keywords.items():
382
  if word in keywords:
383
  level_scores[level] += complexity_weights[level]
384
  complexity_score += complexity_weights[level]
385
-
386
  detected_level = max(level_scores.items(), key=lambda x: x[1])[0] if level_scores else "low"
387
-
388
  return {
389
  "level": detected_level,
390
  "score": min(complexity_score, 10),
@@ -393,7 +393,7 @@ class CachedReasoningEngine:
393
  "word_count": len(words),
394
  "keyword_matches": sum(level_scores.values())
395
  }
396
-
397
  def _identify_constraints_optimized(self, user_input: str, context: Dict[str, Any]) -> List[Dict[str, Any]]:
398
  """Optimized constraint identification."""
399
  constraint_patterns = {
@@ -402,10 +402,10 @@ class CachedReasoningEngine:
402
  "resources": {"keywords": ["limited", "small", "minimal", "basic", "few"], "severity": "medium"},
403
  "quality": {"keywords": ["high", "premium", "professional", "enterprise"], "severity": "high"}
404
  }
405
-
406
  constraints = []
407
  user_input_lower = user_input.lower()
408
-
409
  for constraint_type, config in constraint_patterns.items():
410
  if any(keyword in user_input_lower for keyword in config["keywords"]):
411
  constraints.append({
@@ -414,9 +414,9 @@ class CachedReasoningEngine:
414
  "severity": config["severity"],
415
  "keyword_match": next(k for k in config["keywords"] if k in user_input_lower)
416
  })
417
-
418
  return constraints
419
-
420
  def _identify_opportunities_optimized(self, user_input: str, context: Dict[str, Any]) -> List[Dict[str, Any]]:
421
  """Optimized opportunity identification."""
422
  opportunity_patterns = {
@@ -425,10 +425,10 @@ class CachedReasoningEngine:
425
  "efficiency": {"keywords": ["optimize", "streamline", "automate", "simplify"], "impact": "medium"},
426
  "competitive": {"keywords": ["advantage", "edge", "better", "superior", "leading"], "impact": "high"}
427
  }
428
-
429
  opportunities = []
430
  user_input_lower = user_input.lower()
431
-
432
  for opportunity_type, config in opportunity_patterns.items():
433
  if any(keyword in user_input_lower for keyword in config["keywords"]):
434
  opportunities.append({
@@ -437,9 +437,9 @@ class CachedReasoningEngine:
437
  "potential_impact": config["impact"],
438
  "keyword_match": next(k for k in config["keywords"] if k in user_input_lower)
439
  })
440
-
441
  return opportunities
442
-
443
  def _assess_risks_optimized(self, user_input: str, context: Dict[str, Any]) -> List[Dict[str, Any]]:
444
  """Optimized risk assessment."""
445
  risk_patterns = {
@@ -448,10 +448,10 @@ class CachedReasoningEngine:
448
  "timeline": {"keywords": ["urgent", "deadline", "quickly", "asap"], "probability": "high", "impact": "high"},
449
  "quality": {"keywords": ["basic", "simple", "minimal"], "probability": "medium", "impact": "medium"}
450
  }
451
-
452
  risks = []
453
  user_input_lower = user_input.lower()
454
-
455
  for risk_type, config in risk_patterns.items():
456
  if any(keyword in user_input_lower for keyword in config["keywords"]):
457
  risks.append({
@@ -461,9 +461,9 @@ class CachedReasoningEngine:
461
  "impact": config["impact"],
462
  "keyword_match": next(k for k in config["keywords"] if k in user_input_lower)
463
  })
464
-
465
  return risks
466
-
467
  def _calculate_success_probability_optimized(self, user_input: str, context: Dict[str, Any]) -> float:
468
  """Optimized success probability calculation."""
469
  base_probability = 0.8
@@ -472,14 +472,14 @@ class CachedReasoningEngine:
472
  "constraint_penalty": 0,
473
  "opportunity_bonus": 0
474
  }
475
-
476
  # Calculate complexity penalty
477
  complexity = self._assess_complexity_optimized(user_input)
478
  if complexity["level"] == "high":
479
  adjustments["complexity_penalty"] = 0.2
480
  elif complexity["level"] == "medium":
481
  adjustments["complexity_penalty"] = 0.1
482
-
483
  # Calculate constraint penalty
484
  constraints = self._identify_constraints_optimized(user_input, context)
485
  for constraint in constraints:
@@ -487,14 +487,14 @@ class CachedReasoningEngine:
487
  adjustments["constraint_penalty"] += 0.15
488
  else:
489
  adjustments["constraint_penalty"] += 0.05
490
-
491
  # Calculate opportunity bonus
492
  opportunities = self._identify_opportunities_optimized(user_input, context)
493
  adjustments["opportunity_bonus"] = len(opportunities) * 0.05
494
-
495
  # Apply adjustments
496
  final_probability = base_probability - adjustments["complexity_penalty"] - adjustments["constraint_penalty"] + adjustments["opportunity_bonus"]
497
-
498
  return max(0.1, min(0.95, final_probability))
499
 
500
 
@@ -504,7 +504,7 @@ class CachedReasoningEngine:
504
 
505
  class TaskFactory:
506
  """Factory class for creating standardized tasks."""
507
-
508
  TASK_TEMPLATES = {
509
  "complex_task": [
510
  {
@@ -518,28 +518,28 @@ class TaskFactory:
518
  "title": "Strategy Development",
519
  "description": "Develop comprehensive strategy and approach",
520
  "priority": Priority.HIGH,
521
- "dependencies": ["assessment"],
522
  "duration": 45
523
  },
524
  {
525
  "title": "Implementation Planning",
526
  "description": "Create detailed implementation roadmap",
527
  "priority": Priority.MEDIUM,
528
- "dependencies": ["strategy"],
529
  "duration": 30
530
  },
531
  {
532
  "title": "Execution & Monitoring",
533
  "description": "Execute plan and monitor progress",
534
  "priority": Priority.HIGH,
535
- "dependencies": ["planning"],
536
  "duration": 60
537
  },
538
  {
539
  "title": "Review & Optimization",
540
  "description": "Review results and optimize for better outcomes",
541
  "priority": Priority.MEDIUM,
542
- "dependencies": ["execution"],
543
  "duration": 20
544
  }
545
  ],
@@ -555,21 +555,21 @@ class TaskFactory:
555
  "title": "Solution Generation",
556
  "description": "Generate multiple solution options",
557
  "priority": Priority.HIGH,
558
- "dependencies": ["analysis"],
559
  "duration": 25
560
  },
561
  {
562
  "title": "Solution Evaluation",
563
  "description": "Evaluate solutions and select the best approach",
564
  "priority": Priority.HIGH,
565
- "dependencies": ["generation"],
566
  "duration": 15
567
  },
568
  {
569
  "title": "Implementation",
570
  "description": "Implement the chosen solution",
571
  "priority": Priority.HIGH,
572
- "dependencies": ["evaluation"],
573
  "duration": 30
574
  }
575
  ],
@@ -583,7 +583,7 @@ class TaskFactory:
583
  }
584
  ]
585
  }
586
-
587
  @classmethod
588
  def create_task(cls, template: Dict[str, Any], task_id: str, agent_name: str) -> Task:
589
  """Create a task from template with validation."""
@@ -597,13 +597,13 @@ class TaskFactory:
597
  assigned_agent=agent_name,
598
  estimated_duration=template["duration"]
599
  )
600
-
601
  @classmethod
602
  def create_tasks_from_analysis(cls, analysis: Dict[str, Any], user_input: str, agent_name: str) -> List[Task]:
603
  """Create tasks based on analysis results."""
604
  intent = analysis.get("intent", {})
605
  primary_intent = intent.get("primary", "general")
606
-
607
  # Select appropriate template
608
  if primary_intent in cls.TASK_TEMPLATES:
609
  template_key = primary_intent
@@ -611,68 +611,47 @@ class TaskFactory:
611
  template_key = "simple_request"
612
  else:
613
  template_key = "complex_task" # Default fallback
614
-
615
  # Generate unique task IDs
616
  tasks = []
617
- task_counter = 1
618
 
619
- for template in cls.TASK_TEMPLATES[template_key]:
620
- # Create dependency mapping
621
- task_deps = []
622
- for dep in template["dependencies"]:
623
- if dep == "assessment":
624
- task_deps.append(f"task_{task_counter - 5}")
625
- elif dep == "strategy":
626
- task_deps.append(f"task_{task_counter - 4}")
627
- elif dep == "planning":
628
- task_deps.append(f"task_{task_counter - 3}")
629
- elif dep == "execution":
630
- task_deps.append(f"task_{task_counter - 2}")
631
- elif dep == "analysis":
632
- task_deps.append(f"task_{task_counter - 4}")
633
- elif dep == "generation":
634
- task_deps.append(f"task_{task_counter - 3}")
635
- elif dep == "evaluation":
636
- task_deps.append(f"task_{task_counter - 2}")
637
-
638
- # Create task with updated dependencies
639
- task_template = template.copy()
640
- task_template["dependencies"] = task_deps
641
-
642
- task = cls.create_task(task_template, f"task_{task_counter}", agent_name)
643
  tasks.append(task)
644
- task_counter += 1
645
-
646
  return tasks
647
 
648
 
649
  class OptimizedPlanningEngine:
650
  """Planning engine with performance optimizations and validation."""
651
-
652
  def __init__(self, agent_name: str):
653
  self.agent_name = agent_name
654
  self.logger = logging.getLogger(f"{__name__}.{agent_name}")
655
  self.plans = {}
656
  self.execution_history = []
657
-
658
  def create_plan(self, analysis: Dict[str, Any], user_input: str) -> Plan:
659
  """Create a comprehensive execution plan with validation."""
660
  try:
661
  # Generate plan ID with timestamp
662
  plan_id = f"plan_{self.agent_name}_{datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')}"
663
-
664
  # Generate tasks using factory
665
  tasks = TaskFactory.create_tasks_from_analysis(analysis, user_input, self.agent_name)
666
-
667
  # Generate success criteria based on intent
668
  success_criteria = self._generate_success_criteria(analysis, user_input)
669
-
670
  # Generate fallback strategies based on risks
671
  fallback_strategies = self._generate_fallback_strategies(analysis)
672
-
673
  # Calculate estimated completion time
674
  estimated_completion = self._calculate_completion_time(tasks)
675
-
676
  # Create plan with validation
677
  plan = Plan(
678
  id=plan_id,
@@ -684,23 +663,23 @@ class OptimizedPlanningEngine:
684
  fallback_strategies=tuple(fallback_strategies), # Immutable tuple
685
  estimated_completion=estimated_completion
686
  )
687
-
688
  # Store plan
689
  self.plans[plan_id] = plan
690
-
691
  self.logger.info(f"Created plan {plan_id} with {len(tasks)} tasks")
692
-
693
  return plan
694
-
695
  except Exception as e:
696
  self.logger.error(f"Failed to create plan: {e}")
697
  raise ValidationError(f"Plan creation failed: {e}")
698
-
699
  def _generate_success_criteria(self, analysis: Dict[str, Any], user_input: str) -> List[str]:
700
  """Generate success criteria based on analysis."""
701
  intent = analysis.get("intent", {})
702
  primary_intent = intent.get("primary", "general")
703
-
704
  criteria_templates = {
705
  "complex_task": [
706
  "All objectives clearly defined and measurable",
@@ -730,14 +709,14 @@ class OptimizedPlanningEngine:
730
  "No errors or issues encountered"
731
  ]
732
  }
733
-
734
  return criteria_templates.get(primary_intent, criteria_templates["general"])
735
-
736
  def _generate_fallback_strategies(self, analysis: Dict[str, Any]) -> List[str]:
737
  """Generate fallback strategies based on identified risks."""
738
  risks = analysis.get("risks", [])
739
  strategies = []
740
-
741
  # Risk-specific fallbacks
742
  risk_fallbacks = {
743
  "technical": "If technical issues arise, simplify approach and focus on core functionality",
@@ -745,12 +724,12 @@ class OptimizedPlanningEngine:
745
  "timeline": "If time constraints become critical, reduce scope and focus on essential deliverables",
746
  "quality": "If quality standards cannot be met, adjust expectations and deliver best possible outcome"
747
  }
748
-
749
  for risk in risks:
750
  risk_type = risk.get("type", "")
751
  if risk_type in risk_fallbacks:
752
  strategies.append(risk_fallbacks[risk_type])
753
-
754
  # General fallbacks
755
  strategies.extend([
756
  "If initial approach fails, pivot to alternative strategy",
@@ -758,20 +737,20 @@ class OptimizedPlanningEngine:
758
  "If requirements change, adapt plan dynamically",
759
  "If user feedback indicates issues, implement immediate corrections"
760
  ])
761
-
762
  return strategies
763
-
764
  def _generate_plan_title(self, user_input: str) -> str:
765
  """Generate a descriptive plan title."""
766
  # Use first 50 characters of user input, cleaning it up
767
  clean_input = re.sub(r'[^\w\s]', '', user_input)[:50].strip()
768
-
769
  if not clean_input:
770
  return f"Execution Plan for {self.agent_name}"
771
-
772
  # Capitalize first letter of each word
773
  title = ' '.join(word.capitalize() for word in clean_input.split())
774
-
775
  # Add appropriate prefix based on content
776
  if any(word in user_input.lower() for word in ["plan", "strategy"]):
777
  return f"Strategic Plan: {title}..."
@@ -781,17 +760,17 @@ class OptimizedPlanningEngine:
781
  return f"Creation Plan: {title}..."
782
  else:
783
  return f"Execution Plan: {title}..."
784
-
785
  def _calculate_completion_time(self, tasks: List[Task]) -> datetime:
786
  """Calculate realistic completion time with buffer."""
787
  total_minutes = sum(task.estimated_duration for task in tasks)
788
-
789
  # Add coordination and review buffer (20%)
790
  buffered_minutes = int(total_minutes * 1.2)
791
-
792
  # Add minimum buffer of 5 minutes
793
  final_minutes = max(buffered_minutes, 5)
794
-
795
  return datetime.utcnow() + timedelta(minutes=final_minutes)
796
 
797
 
@@ -806,7 +785,7 @@ class ExecutionError(Exception):
806
 
807
  class ExecutionContext:
808
  """Context manager for execution tracking."""
809
-
810
  def __init__(self, execution_id: str, plan_id: str):
811
  self.execution_id = execution_id
812
  self.plan_id = plan_id
@@ -815,7 +794,7 @@ class ExecutionContext:
815
  self.adaptations_made = []
816
  self.metrics = {}
817
  self.task_results = {}
818
-
819
  def log_decision(self, decision_type: str, task_id: str, decision: str) -> None:
820
  """Log an execution decision with timestamp."""
821
  self.decisions_made.append({
@@ -824,7 +803,7 @@ class ExecutionContext:
824
  "task_id": task_id,
825
  "decision": decision
826
  })
827
-
828
  def log_adaptation(self, adaptation_type: str, task_id: str, adaptation: str) -> None:
829
  """Log an execution adaptation with timestamp."""
830
  self.adaptations_made.append({
@@ -833,7 +812,7 @@ class ExecutionContext:
833
  "task_id": task_id,
834
  "adaptation": adaptation
835
  })
836
-
837
  @property
838
  def execution_time_minutes(self) -> float:
839
  """Calculate execution time in minutes."""
@@ -842,7 +821,7 @@ class ExecutionContext:
842
 
843
  class OptimizedExecutionEngine:
844
  """Execution engine with improved error handling and efficiency."""
845
-
846
  def __init__(self, agent_name: str):
847
  self.agent_name = agent_name
848
  self.logger = logging.getLogger(f"{__name__}.{agent_name}")
@@ -850,54 +829,59 @@ class OptimizedExecutionEngine:
850
  self.execution_metrics = {}
851
  self.max_retries = 3
852
  self.retry_delay = 1.0 # seconds
853
-
854
  @asynccontextmanager
855
  async def execution_context(self, plan: Plan):
856
  """Context manager for execution tracking."""
857
  execution_id = f"exec_{plan.id}_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
858
  context = ExecutionContext(execution_id, plan.id)
859
-
860
  self.active_executions[execution_id] = context
861
-
862
  try:
863
  yield context
864
  finally:
865
  del self.active_executions[execution_id]
866
-
867
  async def execute_plan(self, plan: Plan) -> Dict[str, Any]:
868
  """Execute plan with comprehensive error handling and retry logic."""
869
  async with self.execution_context(plan) as context:
870
  try:
871
  self.logger.info(f"Starting execution of plan {plan.id}")
872
-
873
  # Create efficient dependency graph
874
  dependency_graph = TaskDependencyGraph(plan.tasks)
875
  completed_tasks = set()
876
  failed_tasks = []
877
-
878
  # Execute tasks using efficient dependency checking
879
- while True:
 
 
 
 
 
880
  # Get executable tasks
881
  executable_tasks = dependency_graph.get_executable_tasks(completed_tasks)
882
-
883
  if not executable_tasks:
884
  # No more tasks can be executed
885
  break
886
-
887
  # Execute tasks (can be parallelized in future)
888
- for task_id in executable_tasks:
889
  task = next(t for t in plan.tasks if t.id == task_id)
890
-
891
  try:
892
  task_result = await self._execute_task_with_retry(
893
  task, context, max_retries=self.max_retries
894
  )
895
-
896
  if task_result["success"]:
897
  # Task completed successfully - track in completed set
898
  completed_tasks.add(task_id)
899
  context.task_results[task_id] = task_result
900
-
901
  self.logger.info(f"Task {task_id} completed successfully")
902
  else:
903
  # Task failed, try fallback
@@ -905,33 +889,33 @@ class OptimizedExecutionEngine:
905
  fallback_result = await self._handle_task_failure(
906
  task, plan, context, task_result
907
  )
908
-
909
  if fallback_result["success"]:
910
  # Fallback succeeded - track in completed set
911
  completed_tasks.add(task_id)
912
  context.task_results[task_id] = fallback_result
913
-
914
  self.logger.info(f"Task {task_id} completed via fallback")
915
  else:
916
  # Critical failure - attempt plan adaptation
917
  self.logger.warning(f"Task {task_id} failed completely, attempting plan adaptation")
918
-
919
  # Attempt plan adaptation
920
  adaptation_result = await self._adapt_plan(
921
  plan, task, context
922
  )
923
-
924
  if not adaptation_result["success"]:
925
  self.logger.error(f"Critical failure in plan execution")
926
  break
927
-
928
  except Exception as e:
929
  self.logger.error(f"Unexpected error executing task {task_id}: {e}")
930
  failed_tasks.append(task_id)
931
-
932
  # Calculate final metrics
933
  success_rate = len(completed_tasks) / len(plan.tasks) if plan.tasks else 0
934
-
935
  execution_result = {
936
  "success": len(failed_tasks) == 0,
937
  "completed_tasks": len(completed_tasks),
@@ -944,14 +928,14 @@ class OptimizedExecutionEngine:
944
  "execution_id": context.execution_id,
945
  "plan_id": plan.id
946
  }
947
-
948
  # Store metrics
949
  self.execution_metrics[context.execution_id] = execution_result
950
-
951
  self.logger.info(f"Execution completed: {success_rate:.1%} success rate")
952
-
953
  return execution_result
954
-
955
  except Exception as e:
956
  self.logger.error(f"Execution failed with error: {e}")
957
  return {
@@ -960,7 +944,7 @@ class OptimizedExecutionEngine:
960
  "execution_time_minutes": context.execution_time_minutes,
961
  "execution_id": context.execution_id
962
  }
963
-
964
  async def _execute_task_with_retry(self, task: Task, context: ExecutionContext, max_retries: int = 3) -> Dict[str, Any]:
965
  """Execute task with retry logic and exponential backoff."""
966
  for attempt in range(max_retries + 1):
@@ -980,26 +964,26 @@ class OptimizedExecutionEngine:
980
  delay = self.retry_delay * (2 ** attempt)
981
  self.logger.warning(f"Task {task.id} failed (attempt {attempt + 1}), retrying in {delay}s")
982
  await asyncio.sleep(delay)
983
-
984
  # Should not reach here
985
  return {"success": False, "error": "Max retries exceeded"}
986
-
987
  async def _execute_task(self, task: Task, context: ExecutionContext) -> Dict[str, Any]:
988
  """Execute a single task with improved error handling."""
989
  # Log execution decision
990
  context.log_decision("task_execution", task.id, f"Executing task: {task.title}")
991
-
992
  start_time = datetime.utcnow()
993
-
994
  try:
995
  # Simulate realistic task execution time
996
  await asyncio.sleep(min(task.estimated_duration / 60.0, 0.1)) # Max 0.1s for demo
997
-
998
  # Generate task-specific result based on title patterns
999
  result = await self._generate_task_result(task)
1000
-
1001
  self.logger.info(f"Task {task.id} executed successfully")
1002
-
1003
  return {
1004
  "success": True,
1005
  "result": result,
@@ -1007,7 +991,7 @@ class OptimizedExecutionEngine:
1007
  "started_at": start_time.isoformat(),
1008
  "completed_at": datetime.utcnow().isoformat()
1009
  }
1010
-
1011
  except Exception as e:
1012
  self.logger.error(f"Task {task.id} execution failed: {e}")
1013
  return {
@@ -1016,11 +1000,11 @@ class OptimizedExecutionEngine:
1016
  "duration": (datetime.utcnow() - start_time).total_seconds() / 60,
1017
  "started_at": start_time.isoformat()
1018
  }
1019
-
1020
  async def _generate_task_result(self, task: Task) -> str:
1021
  """Generate task-specific results using templates."""
1022
  title_lower = task.title.lower()
1023
-
1024
  result_templates = {
1025
  "assessment": """
1026
  Assessment Completed for {title}:
@@ -1036,7 +1020,7 @@ Key Findings:
1036
  • Multiple approaches evaluated
1037
  • Resource requirements assessed
1038
  • Timeline implications identified
1039
- """,
1040
  "strategy": """
1041
  Strategic Planning Completed for {title}:
1042
 
@@ -1051,7 +1035,7 @@ Strategic Elements:
1051
  • Phased implementation approach
1052
  • Contingency plans prepared
1053
  • Performance tracking framework
1054
- """,
1055
  "implementation": """
1056
  Implementation Completed for {title}:
1057
 
@@ -1066,7 +1050,7 @@ Execution Results:
1066
  • Quality standards maintained
1067
  • Timeline adherence achieved
1068
  • Stakeholder expectations fulfilled
1069
- """,
1070
  "review": """
1071
  Review and Optimization Completed for {title}:
1072
 
@@ -1081,9 +1065,9 @@ Optimization Results:
1081
  • Process refinements recommended
1082
  • Best practices captured
1083
  • Future enhancement opportunities noted
1084
- """
1085
  }
1086
-
1087
  # Select template based on title
1088
  if "assessment" in title_lower or "analysis" in title_lower:
1089
  template = result_templates["assessment"]
@@ -1112,14 +1096,14 @@ Task Outcome:
1112
  β€’ Expected results delivered
1113
  β€’ No issues encountered
1114
  β€’ Ready for next phase
1115
- """
1116
-
1117
- async def _handle_task_failure(self, task: Task, plan: Plan, context: ExecutionContext,
1118
- original_result: Dict[str, Any]) -> Dict[str, Any]:
1119
  """Handle task failures using intelligent fallback strategies."""
1120
- context.log_adaptation("failure_handling", task.id,
1121
- f"Applying fallback strategy for failed task: {task.title}")
1122
-
1123
  # Try fallback strategies in order
1124
  for strategy in plan.fallback_strategies:
1125
  try:
@@ -1136,11 +1120,11 @@ Task Outcome:
1136
  assigned_agent=task.assigned_agent,
1137
  estimated_duration=simplified_duration
1138
  )
1139
-
1140
  result = await self._execute_task(simplified_task, context)
1141
  if result["success"]:
1142
  return result
1143
-
1144
  elif "pivot" in strategy.lower():
1145
  # Alternative approach
1146
  return {
@@ -1148,7 +1132,7 @@ Task Outcome:
1148
  "result": f"Successfully pivoted to alternative approach for: {task.title}",
1149
  "duration": 5
1150
  }
1151
-
1152
  elif "adapt" in strategy.lower():
1153
  # Dynamic adaptation
1154
  return {
@@ -1156,38 +1140,38 @@ Task Outcome:
1156
  "result": f"Dynamically adapted approach for: {task.title}",
1157
  "duration": 10
1158
  }
1159
-
1160
  except Exception as e:
1161
  self.logger.warning(f"Fallback strategy failed for task {task.id}: {e}")
1162
  continue
1163
-
1164
  # All fallbacks failed
1165
  return {
1166
- "success": False,
1167
  "error": "All fallback strategies exhausted",
1168
  "original_error": original_result.get("error")
1169
  }
1170
-
1171
  async def _adapt_plan(self, plan: Plan, failed_task: Task, context: ExecutionContext) -> Dict[str, Any]:
1172
  """Adapt plan when critical failures occur."""
1173
  context.log_adaptation("plan_adaptation", failed_task.id,
1174
- "Plan adapted due to critical task failure")
1175
-
1176
  # Find dependent tasks
1177
  dependent_tasks = [
1178
  task for task in plan.tasks
1179
  if failed_task.id in task.dependencies
1180
  ]
1181
-
1182
  # Calculate impact
1183
  tasks_to_remove = [failed_task.id] + [task.id for task in dependent_tasks]
1184
-
1185
  # Create new plan with remaining tasks (immutable approach)
1186
  remaining_tasks = [
1187
  task for task in plan.tasks
1188
  if task.id not in tasks_to_remove
1189
  ]
1190
-
1191
  if not remaining_tasks:
1192
  self.logger.error("Plan cannot continue - all tasks failed")
1193
  return {
@@ -1206,9 +1190,9 @@ Task Outcome:
1206
  fallback_strategies=plan.fallback_strategies,
1207
  created_at=plan.created_at
1208
  )
1209
-
1210
  self.logger.info(f"Plan adapted - removed {len(tasks_to_remove)} tasks, {len(remaining_tasks)} remaining")
1211
-
1212
  return {
1213
  "success": True,
1214
  "message": f"Plan adapted - removed {len(tasks_to_remove)} failed tasks, {len(remaining_tasks)} tasks remaining",
@@ -1223,22 +1207,22 @@ Task Outcome:
1223
 
1224
  class RefactoredAutonomousAgent:
1225
  """Main autonomous agent class with enhanced security, performance, and documentation."""
1226
-
1227
  def __init__(self, agent_name: str):
1228
  """
1229
  Initialize the autonomous agent with optimized components.
1230
-
1231
  Args:
1232
  agent_name: Unique identifier for the agent instance
1233
  """
1234
  self.agent_name = agent_name
1235
  self.logger = logging.getLogger(f"{__name__}.{agent_name}")
1236
-
1237
  # Initialize optimized engines
1238
  self.reasoning_engine = CachedReasoningEngine(agent_name)
1239
  self.planning_engine = OptimizedPlanningEngine(agent_name)
1240
  self.execution_engine = OptimizedExecutionEngine(agent_name)
1241
-
1242
  # Performance tracking
1243
  self.performance_metrics = {
1244
  "requests_processed": 0,
@@ -1246,28 +1230,28 @@ class RefactoredAutonomousAgent:
1246
  "failed_executions": 0,
1247
  "average_response_time": 0.0
1248
  }
1249
-
1250
  self.logger.info(f"Autonomous agent {agent_name} initialized")
1251
-
1252
  @rate_limit(calls_per_minute=100) # Rate limit: 100 requests per minute
1253
  @validate_input # Validate and sanitize input
1254
  async def process_request(self, user_input: str, context: Dict[str, Any] = None) -> Dict[str, Any]:
1255
  """
1256
  Process user request with comprehensive autonomous behavior.
1257
-
1258
  This method orchestrates the complete autonomous workflow:
1259
  1. Analyze the situation and extract insights
1260
  2. Create a detailed execution plan
1261
  3. Execute the plan with error handling
1262
  4. Compile comprehensive results
1263
-
1264
  Args:
1265
  user_input: The user's request or command
1266
  context: Additional context information (optional)
1267
-
1268
  Returns:
1269
  Dict containing complete analysis, plan, execution results, and summary
1270
-
1271
  Raises:
1272
  ValidationError: If input validation fails
1273
  SecurityError: If security checks fail
@@ -1275,42 +1259,42 @@ class RefactoredAutonomousAgent:
1275
  """
1276
  if context is None:
1277
  context = {}
1278
-
1279
  start_time = datetime.utcnow()
1280
  self.performance_metrics["requests_processed"] += 1
1281
-
1282
  try:
1283
  self.logger.info(f"Processing request: {user_input[:100]}...")
1284
-
1285
  # Step 1: Reasoning and Analysis
1286
  self.logger.debug("Starting situation analysis")
1287
  analysis = await self._analyze_situation_async(user_input, context)
1288
-
1289
  # Step 2: Planning
1290
  self.logger.debug("Creating execution plan")
1291
  plan = await self._create_plan_async(analysis, user_input)
1292
-
1293
  # Step 3: Execution
1294
  self.logger.debug("Executing plan")
1295
  execution_result = await self._execute_plan_async(plan)
1296
-
1297
  # Step 4: Compile Response
1298
  response = await self._compile_response_async(
1299
  user_input, analysis, plan, execution_result
1300
  )
1301
-
1302
  # Update performance metrics
1303
  response_time = (datetime.utcnow() - start_time).total_seconds()
1304
  self._update_performance_metrics(response_time, execution_result["success"])
1305
-
1306
  self.logger.info(f"Request processed successfully in {response_time:.2f}s")
1307
-
1308
  return response
1309
-
1310
  except (ValidationError, SecurityError, ExecutionError) as e:
1311
  self.logger.error(f"Processing failed: {e}")
1312
  self.performance_metrics["failed_executions"] += 1
1313
-
1314
  return {
1315
  "agent_name": self.agent_name,
1316
  "user_input": user_input,
@@ -1319,28 +1303,28 @@ class RefactoredAutonomousAgent:
1319
  "success": False,
1320
  "processing_time": (datetime.utcnow() - start_time).total_seconds()
1321
  }
1322
-
1323
  async def _analyze_situation_async(self, user_input: str, context: Dict[str, Any]) -> Dict[str, Any]:
1324
  """Asynchronous situation analysis with performance optimization."""
1325
  # For CPU-intensive operations, we could use thread pool
1326
  # For now, keeping synchronous for simplicity
1327
  return self.reasoning_engine.analyze_situation(user_input, context)
1328
-
1329
  async def _create_plan_async(self, analysis: Dict[str, Any], user_input: str) -> Plan:
1330
  """Asynchronous plan creation with validation."""
1331
  return self.planning_engine.create_plan(analysis, user_input)
1332
-
1333
  async def _execute_plan_async(self, plan: Plan) -> Dict[str, Any]:
1334
  """Asynchronous plan execution with comprehensive error handling."""
1335
  return await self.execution_engine.execute_plan(plan)
1336
-
1337
- async def _compile_response_async(self, user_input: str, analysis: Dict[str, Any],
1338
- plan: Plan, execution_result: Dict[str, Any]) -> Dict[str, Any]:
1339
  """Compile comprehensive response with all information."""
1340
  intent = analysis.get("intent", {})
1341
  complexity = analysis.get("complexity", {})
1342
  success_rate = execution_result.get("success_rate", 0)
1343
-
1344
  # Generate detailed summary
1345
  summary_parts = [
1346
  f"🧠 **Reasoning**: Detected {intent.get('primary', 'general')} intent "
@@ -1352,17 +1336,17 @@ class RefactoredAutonomousAgent:
1352
  f"⚑ **Execution**: {execution_result.get('completed_tasks', 0)} tasks completed, "
1353
  f"{success_rate:.0%} success rate"
1354
  ]
1355
-
1356
  if execution_result.get("adaptations_made", 0) > 0:
1357
  summary_parts.append(
1358
  f"πŸ”„ **Adaptation**: Made {execution_result['adaptations_made']} autonomous adaptations"
1359
  )
1360
-
1361
  if execution_result.get("decisions_made", 0) > 0:
1362
  summary_parts.append(
1363
  f"πŸ’‘ **Decisions**: Made {execution_result['decisions_made']} autonomous decisions"
1364
  )
1365
-
1366
  # Compile comprehensive response
1367
  response = {
1368
  "agent_name": self.agent_name,
@@ -1383,24 +1367,24 @@ class RefactoredAutonomousAgent:
1383
  "analysis_version": "2.0"
1384
  }
1385
  }
1386
-
1387
  return response
1388
-
1389
  def _update_performance_metrics(self, response_time: float, success: bool) -> None:
1390
  """Update performance metrics with exponential moving average."""
1391
  if not hasattr(self, 'performance_metrics'):
1392
  return
1393
-
1394
  if success:
1395
  self.performance_metrics["successful_executions"] += 1
1396
-
1397
  # Update average response time using exponential moving average
1398
  alpha = 0.1 # Smoothing factor
1399
  current_avg = self.performance_metrics.get("average_response_time", 0.0)
1400
  self.performance_metrics["average_response_time"] = (
1401
  alpha * response_time + (1 - alpha) * current_avg
1402
  )
1403
-
1404
  def get_performance_report(self) -> Dict[str, Any]:
1405
  """Get detailed performance report."""
1406
  total_requests = self.performance_metrics["requests_processed"]
@@ -1408,7 +1392,7 @@ class RefactoredAutonomousAgent:
1408
  self.performance_metrics["successful_executions"] / total_requests
1409
  if total_requests > 0 else 0
1410
  )
1411
-
1412
  return {
1413
  "agent_name": self.agent_name,
1414
  "total_requests": total_requests,
@@ -1421,13 +1405,13 @@ class RefactoredAutonomousAgent:
1421
 
1422
 
1423
  # ============================================================================
1424
- # DEMO AND TESTING FUNCTIONS
1425
  # ============================================================================
1426
 
1427
  async def demo_refactored_autonomous_behavior():
1428
  """
1429
  Demonstrate the refactored autonomous agent behavior.
1430
-
1431
  This demo shows:
1432
  - Improved performance through caching
1433
  - Better error handling and recovery
@@ -1435,50 +1419,50 @@ async def demo_refactored_autonomous_behavior():
1435
  - Comprehensive logging and monitoring
1436
  """
1437
  agent = RefactoredAutonomousAgent("DemoAgent_v2")
1438
-
1439
  test_cases = [
1440
  "Create a comprehensive marketing campaign for our new product launch",
1441
  "Solve the customer service response time issues with detailed analysis",
1442
  "Plan a strategy to increase customer retention by 25% with implementation",
1443
  "Update our quarterly sales report with performance metrics"
1444
  ]
1445
-
1446
  print("πŸ€– REFACTORED AUTONOMOUS AGENT BEHAVIOR DEMONSTRATION")
1447
  print("=" * 70)
1448
  print("Features: Enhanced Performance | Better Security | Improved Error Handling")
1449
  print()
1450
-
1451
  for i, test_case in enumerate(test_cases, 1):
1452
  print(f"πŸ“ Test Case {i}: {test_case}")
1453
  print("-" * 50)
1454
-
1455
  try:
1456
  start_time = datetime.utcnow()
1457
  result = await agent.process_request(test_case)
1458
  end_time = datetime.utcnow()
1459
-
1460
  processing_time = (end_time - start_time).total_seconds()
1461
-
1462
  print(f"βœ… Overall Success: {result['overall_success']}")
1463
  print(f"πŸ“Š {result['summary']}")
1464
  print(f"🎯 Plan: {result['plan']['title']}")
1465
  print(f"⏱️ Processing Time: {processing_time:.2f}s")
1466
-
1467
  # Show performance metrics for complex requests
1468
  if 'performance' in result:
1469
  perf = result['performance']
1470
  print(f"πŸ“ˆ Performance: {perf['response_time_ms']:.0f}ms response time")
1471
  if perf.get('cache_hit'):
1472
  print("⚑ Cache hit - optimized performance!")
1473
-
1474
  if not result['overall_success']:
1475
  print(f"⚠️ Execution Issues: {result.get('error', 'Partial failure')}")
1476
-
1477
  except Exception as e:
1478
  print(f"❌ Error processing request: {e}")
1479
-
1480
  print()
1481
-
1482
  # Show performance report
1483
  print("πŸ“Š PERFORMANCE REPORT")
1484
  print("-" * 30)
@@ -1503,6 +1487,6 @@ if __name__ == "__main__":
1503
  level=logging.INFO,
1504
  format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
1505
  )
1506
-
1507
  # Run the demonstration
1508
  asyncio.run(demo_refactored_autonomous_behavior())
 
38
  async def wrapper(*args, **kwargs):
39
  if not args:
40
  return await func(*args, **kwargs)
41
+
42
  # Check if this is an instance method (first arg is likely self)
43
  # For instance methods, the user input is typically the second argument
44
  user_input_idx = 1 if len(args) > 1 and hasattr(args[0], func.__name__) else 0
45
+
46
  if user_input_idx >= len(args):
47
  return await func(*args, **kwargs)
48
+
49
  # Basic input validation
50
  if len(str(args[user_input_idx] if args else "")) > 10000: # 10KB limit
51
  raise ValidationError("Input too large")
52
+
53
  # Sanitize input (remove potentially dangerous patterns)
54
  sanitized_input = str(args[user_input_idx] if args else "").strip()
55
  dangerous_patterns = [
 
59
  r'eval\s*\(',
60
  r'exec\s*\('
61
  ]
62
+
63
  for pattern in dangerous_patterns:
64
  if re.search(pattern, sanitized_input, re.IGNORECASE):
65
  raise SecurityError(f"Dangerous content detected: {pattern}")
66
+
67
  # Replace the user input argument with sanitized version
68
  new_args = list(args)
69
  new_args[user_input_idx] = sanitized_input
70
+
71
  return await func(*new_args, **kwargs)
72
  return wrapper
73
 
 
75
  def rate_limit(calls_per_minute: int = 60):
76
  """Decorator to implement rate limiting."""
77
  calls = []
78
+
79
  def decorator(func):
80
  @wraps(func)
81
  async def wrapper(*args, **kwargs):
82
  now = datetime.utcnow()
83
  # Remove calls older than 1 minute
84
  calls[:] = [call for call in calls if (now - call).seconds < 60]
85
+
86
  if len(calls) >= calls_per_minute:
87
  raise SecurityError("Rate limit exceeded")
88
+
89
  calls.append(now)
90
  return await func(*args, **kwargs)
91
  return wrapper
 
131
  created_at: datetime = field(default_factory=datetime.utcnow)
132
  started_at: Optional[datetime] = None
133
  completed_at: Optional[datetime] = None
134
+
135
  def __post_init__(self):
136
  """Validate task data."""
137
  if not self.id or not isinstance(self.id, str):
 
140
  raise ValidationError("Estimated duration must be positive")
141
  if not self.title.strip():
142
  raise ValidationError("Task title cannot be empty")
143
+
144
  @property
145
  def can_execute(self) -> bool:
146
  """Check if task can be executed (all dependencies completed)."""
147
  return self.status == TaskStatus.PENDING
148
+
149
  def to_dict(self) -> Dict[str, Any]:
150
  """Convert task to dictionary for serialization."""
151
  return {
 
169
  created_at: datetime = field(default_factory=datetime.utcnow)
170
  estimated_completion: Optional[datetime] = None
171
  actual_completion: Optional[datetime] = None
172
+
173
  def __post_init__(self):
174
  """Validate plan data."""
175
  if not self.id or not isinstance(self.id, str):
 
178
  raise ValidationError("Plan title cannot be empty")
179
  if not self.tasks:
180
  raise ValidationError("Plan must contain at least one task")
181
+
182
  @property
183
  def task_count(self) -> int:
184
  """Get total number of tasks."""
185
  return len(self.tasks)
186
+
187
  @property
188
  def critical_path(self) -> List[str]:
189
  """Calculate critical path (longest dependency chain)."""
190
  # Build dependency graph
191
  graph = defaultdict(list)
192
  in_degree = defaultdict(int)
193
+
194
  for task in self.tasks:
195
  for dep in task.dependencies:
196
  graph[dep].append(task.id)
197
  in_degree[task.id] += 1
198
+ in_degree.setdefault(task.id, 0)
199
+
200
  # Find critical path using topological sort with duration tracking
201
  queue = deque([task_id for task_id, degree in in_degree.items() if degree == 0])
202
  durations = {task_id: 0 for task_id in in_degree}
203
+
204
  while queue:
205
  current = queue.popleft()
206
+
207
  # Get current task duration
208
  current_task = next(t for t in self.tasks if t.id == current)
209
  current_duration = durations[current]
210
+
211
  for neighbor in graph[current]:
212
  # Update duration if path through current is longer
213
  durations[neighbor] = max(
 
217
  in_degree[neighbor] -= 1
218
  if in_degree[neighbor] == 0:
219
  queue.append(neighbor)
220
+
221
  # Return path for longest duration
222
  max_duration_task = max(durations.items(), key=lambda x: x[1])[0]
223
  return [max_duration_task]
224
+
225
  def to_dict(self) -> Dict[str, Any]:
226
  """Convert plan to dictionary for serialization."""
227
  return {
 
239
 
240
  class TaskDependencyGraph:
241
  """Efficient task dependency management using adjacency lists."""
242
+
243
  def __init__(self, tasks: List[Task]):
244
  self.tasks = {task.id: task for task in tasks}
245
  self.graph = defaultdict(set)
246
  self.reverse_graph = defaultdict(set)
247
  self._build_graph()
248
+
249
  def _build_graph(self) -> None:
250
  """Build adjacency lists for efficient traversal."""
251
  for task in self.tasks.values():
 
253
  if dep in self.tasks:
254
  self.graph[dep].add(task.id)
255
  self.reverse_graph[task.id].add(dep)
256
+
257
  def can_execute(self, task_id: str, completed_tasks: Set[str]) -> bool:
258
  """Efficiently check if task can be executed."""
259
  return all(dep in completed_tasks for dep in self.reverse_graph.get(task_id, set()))
260
+
261
  def get_executable_tasks(self, completed_tasks: Set[str]) -> List[str]:
262
  """Get all tasks that can be executed given completed tasks."""
263
  return [
 
268
 
269
  class CachedReasoningEngine:
270
  """Reasoning engine with intelligent caching."""
271
+
272
  def __init__(self, agent_name: str):
273
  self.agent_name = agent_name
274
  self.logger = logging.getLogger(f"{__name__}.{agent_name}")
275
  self.knowledge_base = {}
276
  self.decision_history = deque(maxlen=1000) # Keep last 1000 decisions
277
+
278
  @lru_cache(maxsize=1000)
279
  def _analyze_input_hash(self, user_input_hash: str) -> Dict[str, Any]:
280
  """Cached analysis to avoid recomputing identical requests."""
 
283
  "analysis_id": user_input_hash,
284
  "timestamp": datetime.utcnow()
285
  }
286
+
287
  def analyze_situation(self, user_input: str, context: Dict[str, Any]) -> Dict[str, Any]:
288
  """Analyze situation with caching and optimization."""
289
  # Use hash for caching identical inputs
290
  input_hash = hashlib.md5(user_input.encode()).hexdigest()
291
+
292
  # Check cache first
293
  cached_result = self._analyze_input_hash(input_hash)
294
  if cached_result.get("cached"):
295
  self.logger.info(f"Using cached analysis for input hash: {input_hash[:8]}")
296
+
297
  analysis = {
298
  "intent": self._extract_intent_optimized(user_input),
299
  "entities": self._extract_entities_optimized(user_input),
 
305
  "cache_key": input_hash,
306
  "analysis_timestamp": datetime.utcnow().isoformat()
307
  }
308
+
309
  # Store in knowledge base
310
  self.knowledge_base[input_hash] = analysis
311
+
312
  return analysis
313
+
314
  def _extract_intent_optimized(self, user_input: str) -> Dict[str, Any]:
315
  """Optimized intent extraction using compiled regex patterns."""
316
  intent_patterns = {
 
320
  "problem_solving": re.compile(r'\b(fix|solve|resolve|troubleshoot|debug)\b', re.IGNORECASE),
321
  "creative_work": re.compile(r'\b(create|design|generate|write|build|develop)\b', re.IGNORECASE)
322
  }
323
+
324
  user_input_lower = user_input.lower()
325
  detected_intents = []
326
+
327
  # Use vectorized pattern matching
328
  for intent_type, pattern in intent_patterns.items():
329
  if pattern.search(user_input_lower):
330
  detected_intents.append(intent_type)
331
+
332
  return {
333
  "primary": detected_intents[0] if detected_intents else "general",
334
  "secondary": detected_intents[1:] if len(detected_intents) > 1 else [],
335
  "confidence": min(0.8 if detected_intents else 0.3, len(detected_intents) * 0.2 + 0.3),
336
  "pattern_matches": len(detected_intents)
337
  }
338
+
339
  def _extract_entities_optimized(self, user_input: str) -> List[Dict[str, Any]]:
340
  """Optimized entity extraction using pre-compiled patterns."""
341
  # Pre-compiled patterns for better performance
 
346
  "email": re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'),
347
  "url": re.compile(r'https?://[^\s]+')
348
  }
349
+
350
  entities = []
351
  for entity_type, pattern in patterns.items():
352
  matches = pattern.findall(user_input)
 
356
  "value": match[0] if isinstance(match, tuple) else match,
357
  "confidence": 0.9 if entity_type in ["email", "url"] else 0.7
358
  })
359
+
360
  return entities
361
+
362
  def _assess_complexity_optimized(self, user_input: str) -> Dict[str, Any]:
363
  """Optimized complexity assessment using word frequency analysis."""
364
  complexity_weights = {
365
  "high": 3, "medium": 2, "low": 1
366
  }
367
+
368
  complexity_keywords = {
369
  "high": ["plan", "strategy", "campaign", "project", "initiative", "comprehensive", "optimize"],
370
  "medium": ["create", "develop", "implement", "organize", "schedule", "improve"],
371
  "low": ["update", "check", "show", "find", "search", "simple"]
372
  }
373
+
374
  user_input_lower = user_input.lower()
375
  words = re.findall(r'\b\w+\b', user_input_lower)
376
+
377
  complexity_score = 0
378
  level_scores = defaultdict(int)
379
+
380
  for word in words:
381
  for level, keywords in complexity_keywords.items():
382
  if word in keywords:
383
  level_scores[level] += complexity_weights[level]
384
  complexity_score += complexity_weights[level]
385
+
386
  detected_level = max(level_scores.items(), key=lambda x: x[1])[0] if level_scores else "low"
387
+
388
  return {
389
  "level": detected_level,
390
  "score": min(complexity_score, 10),
 
393
  "word_count": len(words),
394
  "keyword_matches": sum(level_scores.values())
395
  }
396
+
397
  def _identify_constraints_optimized(self, user_input: str, context: Dict[str, Any]) -> List[Dict[str, Any]]:
398
  """Optimized constraint identification."""
399
  constraint_patterns = {
 
402
  "resources": {"keywords": ["limited", "small", "minimal", "basic", "few"], "severity": "medium"},
403
  "quality": {"keywords": ["high", "premium", "professional", "enterprise"], "severity": "high"}
404
  }
405
+
406
  constraints = []
407
  user_input_lower = user_input.lower()
408
+
409
  for constraint_type, config in constraint_patterns.items():
410
  if any(keyword in user_input_lower for keyword in config["keywords"]):
411
  constraints.append({
 
414
  "severity": config["severity"],
415
  "keyword_match": next(k for k in config["keywords"] if k in user_input_lower)
416
  })
417
+
418
  return constraints
419
+
420
  def _identify_opportunities_optimized(self, user_input: str, context: Dict[str, Any]) -> List[Dict[str, Any]]:
421
  """Optimized opportunity identification."""
422
  opportunity_patterns = {
 
425
  "efficiency": {"keywords": ["optimize", "streamline", "automate", "simplify"], "impact": "medium"},
426
  "competitive": {"keywords": ["advantage", "edge", "better", "superior", "leading"], "impact": "high"}
427
  }
428
+
429
  opportunities = []
430
  user_input_lower = user_input.lower()
431
+
432
  for opportunity_type, config in opportunity_patterns.items():
433
  if any(keyword in user_input_lower for keyword in config["keywords"]):
434
  opportunities.append({
 
437
  "potential_impact": config["impact"],
438
  "keyword_match": next(k for k in config["keywords"] if k in user_input_lower)
439
  })
440
+
441
  return opportunities
442
+
443
  def _assess_risks_optimized(self, user_input: str, context: Dict[str, Any]) -> List[Dict[str, Any]]:
444
  """Optimized risk assessment."""
445
  risk_patterns = {
 
448
  "timeline": {"keywords": ["urgent", "deadline", "quickly", "asap"], "probability": "high", "impact": "high"},
449
  "quality": {"keywords": ["basic", "simple", "minimal"], "probability": "medium", "impact": "medium"}
450
  }
451
+
452
  risks = []
453
  user_input_lower = user_input.lower()
454
+
455
  for risk_type, config in risk_patterns.items():
456
  if any(keyword in user_input_lower for keyword in config["keywords"]):
457
  risks.append({
 
461
  "impact": config["impact"],
462
  "keyword_match": next(k for k in config["keywords"] if k in user_input_lower)
463
  })
464
+
465
  return risks
466
+
467
  def _calculate_success_probability_optimized(self, user_input: str, context: Dict[str, Any]) -> float:
468
  """Optimized success probability calculation."""
469
  base_probability = 0.8
 
472
  "constraint_penalty": 0,
473
  "opportunity_bonus": 0
474
  }
475
+
476
  # Calculate complexity penalty
477
  complexity = self._assess_complexity_optimized(user_input)
478
  if complexity["level"] == "high":
479
  adjustments["complexity_penalty"] = 0.2
480
  elif complexity["level"] == "medium":
481
  adjustments["complexity_penalty"] = 0.1
482
+
483
  # Calculate constraint penalty
484
  constraints = self._identify_constraints_optimized(user_input, context)
485
  for constraint in constraints:
 
487
  adjustments["constraint_penalty"] += 0.15
488
  else:
489
  adjustments["constraint_penalty"] += 0.05
490
+
491
  # Calculate opportunity bonus
492
  opportunities = self._identify_opportunities_optimized(user_input, context)
493
  adjustments["opportunity_bonus"] = len(opportunities) * 0.05
494
+
495
  # Apply adjustments
496
  final_probability = base_probability - adjustments["complexity_penalty"] - adjustments["constraint_penalty"] + adjustments["opportunity_bonus"]
497
+
498
  return max(0.1, min(0.95, final_probability))
499
 
500
 
 
504
 
505
  class TaskFactory:
506
  """Factory class for creating standardized tasks."""
507
+
508
  TASK_TEMPLATES = {
509
  "complex_task": [
510
  {
 
518
  "title": "Strategy Development",
519
  "description": "Develop comprehensive strategy and approach",
520
  "priority": Priority.HIGH,
521
+ "dependencies": ["task_1"], # Depends on assessment
522
  "duration": 45
523
  },
524
  {
525
  "title": "Implementation Planning",
526
  "description": "Create detailed implementation roadmap",
527
  "priority": Priority.MEDIUM,
528
+ "dependencies": ["task_2"], # Depends on strategy
529
  "duration": 30
530
  },
531
  {
532
  "title": "Execution & Monitoring",
533
  "description": "Execute plan and monitor progress",
534
  "priority": Priority.HIGH,
535
+ "dependencies": ["task_3"], # Depends on planning
536
  "duration": 60
537
  },
538
  {
539
  "title": "Review & Optimization",
540
  "description": "Review results and optimize for better outcomes",
541
  "priority": Priority.MEDIUM,
542
+ "dependencies": ["task_4"], # Depends on execution
543
  "duration": 20
544
  }
545
  ],
 
555
  "title": "Solution Generation",
556
  "description": "Generate multiple solution options",
557
  "priority": Priority.HIGH,
558
+ "dependencies": ["task_1"], # Depends on analysis
559
  "duration": 25
560
  },
561
  {
562
  "title": "Solution Evaluation",
563
  "description": "Evaluate solutions and select the best approach",
564
  "priority": Priority.HIGH,
565
+ "dependencies": ["task_2"], # Depends on generation
566
  "duration": 15
567
  },
568
  {
569
  "title": "Implementation",
570
  "description": "Implement the chosen solution",
571
  "priority": Priority.HIGH,
572
+ "dependencies": ["task_3"], # Depends on evaluation
573
  "duration": 30
574
  }
575
  ],
 
583
  }
584
  ]
585
  }
586
+
587
  @classmethod
588
  def create_task(cls, template: Dict[str, Any], task_id: str, agent_name: str) -> Task:
589
  """Create a task from template with validation."""
 
597
  assigned_agent=agent_name,
598
  estimated_duration=template["duration"]
599
  )
600
+
601
  @classmethod
602
  def create_tasks_from_analysis(cls, analysis: Dict[str, Any], user_input: str, agent_name: str) -> List[Task]:
603
  """Create tasks based on analysis results."""
604
  intent = analysis.get("intent", {})
605
  primary_intent = intent.get("primary", "general")
606
+
607
  # Select appropriate template
608
  if primary_intent in cls.TASK_TEMPLATES:
609
  template_key = primary_intent
 
611
  template_key = "simple_request"
612
  else:
613
  template_key = "complex_task" # Default fallback
614
+
615
  # Generate unique task IDs
616
  tasks = []
617
+ template_list = cls.TASK_TEMPLATES[template_key]
618
 
619
+ # Create tasks with proper sequential dependencies
620
+ for i, template in enumerate(template_list):
621
+ task_id = f"task_{i + 1}"
622
+ task = cls.create_task(template, task_id, agent_name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
623
  tasks.append(task)
624
+
 
625
  return tasks
626
 
627
 
628
  class OptimizedPlanningEngine:
629
  """Planning engine with performance optimizations and validation."""
630
+
631
  def __init__(self, agent_name: str):
632
  self.agent_name = agent_name
633
  self.logger = logging.getLogger(f"{__name__}.{agent_name}")
634
  self.plans = {}
635
  self.execution_history = []
636
+
637
  def create_plan(self, analysis: Dict[str, Any], user_input: str) -> Plan:
638
  """Create a comprehensive execution plan with validation."""
639
  try:
640
  # Generate plan ID with timestamp
641
  plan_id = f"plan_{self.agent_name}_{datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')}"
642
+
643
  # Generate tasks using factory
644
  tasks = TaskFactory.create_tasks_from_analysis(analysis, user_input, self.agent_name)
645
+
646
  # Generate success criteria based on intent
647
  success_criteria = self._generate_success_criteria(analysis, user_input)
648
+
649
  # Generate fallback strategies based on risks
650
  fallback_strategies = self._generate_fallback_strategies(analysis)
651
+
652
  # Calculate estimated completion time
653
  estimated_completion = self._calculate_completion_time(tasks)
654
+
655
  # Create plan with validation
656
  plan = Plan(
657
  id=plan_id,
 
663
  fallback_strategies=tuple(fallback_strategies), # Immutable tuple
664
  estimated_completion=estimated_completion
665
  )
666
+
667
  # Store plan
668
  self.plans[plan_id] = plan
669
+
670
  self.logger.info(f"Created plan {plan_id} with {len(tasks)} tasks")
671
+
672
  return plan
673
+
674
  except Exception as e:
675
  self.logger.error(f"Failed to create plan: {e}")
676
  raise ValidationError(f"Plan creation failed: {e}")
677
+
678
  def _generate_success_criteria(self, analysis: Dict[str, Any], user_input: str) -> List[str]:
679
  """Generate success criteria based on analysis."""
680
  intent = analysis.get("intent", {})
681
  primary_intent = intent.get("primary", "general")
682
+
683
  criteria_templates = {
684
  "complex_task": [
685
  "All objectives clearly defined and measurable",
 
709
  "No errors or issues encountered"
710
  ]
711
  }
712
+
713
  return criteria_templates.get(primary_intent, criteria_templates["general"])
714
+
715
  def _generate_fallback_strategies(self, analysis: Dict[str, Any]) -> List[str]:
716
  """Generate fallback strategies based on identified risks."""
717
  risks = analysis.get("risks", [])
718
  strategies = []
719
+
720
  # Risk-specific fallbacks
721
  risk_fallbacks = {
722
  "technical": "If technical issues arise, simplify approach and focus on core functionality",
 
724
  "timeline": "If time constraints become critical, reduce scope and focus on essential deliverables",
725
  "quality": "If quality standards cannot be met, adjust expectations and deliver best possible outcome"
726
  }
727
+
728
  for risk in risks:
729
  risk_type = risk.get("type", "")
730
  if risk_type in risk_fallbacks:
731
  strategies.append(risk_fallbacks[risk_type])
732
+
733
  # General fallbacks
734
  strategies.extend([
735
  "If initial approach fails, pivot to alternative strategy",
 
737
  "If requirements change, adapt plan dynamically",
738
  "If user feedback indicates issues, implement immediate corrections"
739
  ])
740
+
741
  return strategies
742
+
743
  def _generate_plan_title(self, user_input: str) -> str:
744
  """Generate a descriptive plan title."""
745
  # Use first 50 characters of user input, cleaning it up
746
  clean_input = re.sub(r'[^\w\s]', '', user_input)[:50].strip()
747
+
748
  if not clean_input:
749
  return f"Execution Plan for {self.agent_name}"
750
+
751
  # Capitalize first letter of each word
752
  title = ' '.join(word.capitalize() for word in clean_input.split())
753
+
754
  # Add appropriate prefix based on content
755
  if any(word in user_input.lower() for word in ["plan", "strategy"]):
756
  return f"Strategic Plan: {title}..."
 
760
  return f"Creation Plan: {title}..."
761
  else:
762
  return f"Execution Plan: {title}..."
763
+
764
  def _calculate_completion_time(self, tasks: List[Task]) -> datetime:
765
  """Calculate realistic completion time with buffer."""
766
  total_minutes = sum(task.estimated_duration for task in tasks)
767
+
768
  # Add coordination and review buffer (20%)
769
  buffered_minutes = int(total_minutes * 1.2)
770
+
771
  # Add minimum buffer of 5 minutes
772
  final_minutes = max(buffered_minutes, 5)
773
+
774
  return datetime.utcnow() + timedelta(minutes=final_minutes)
775
 
776
 
 
785
 
786
  class ExecutionContext:
787
  """Context manager for execution tracking."""
788
+
789
  def __init__(self, execution_id: str, plan_id: str):
790
  self.execution_id = execution_id
791
  self.plan_id = plan_id
 
794
  self.adaptations_made = []
795
  self.metrics = {}
796
  self.task_results = {}
797
+
798
  def log_decision(self, decision_type: str, task_id: str, decision: str) -> None:
799
  """Log an execution decision with timestamp."""
800
  self.decisions_made.append({
 
803
  "task_id": task_id,
804
  "decision": decision
805
  })
806
+
807
  def log_adaptation(self, adaptation_type: str, task_id: str, adaptation: str) -> None:
808
  """Log an execution adaptation with timestamp."""
809
  self.adaptations_made.append({
 
812
  "task_id": task_id,
813
  "adaptation": adaptation
814
  })
815
+
816
  @property
817
  def execution_time_minutes(self) -> float:
818
  """Calculate execution time in minutes."""
 
821
 
822
  class OptimizedExecutionEngine:
823
  """Execution engine with improved error handling and efficiency."""
824
+
825
  def __init__(self, agent_name: str):
826
  self.agent_name = agent_name
827
  self.logger = logging.getLogger(f"{__name__}.{agent_name}")
 
829
  self.execution_metrics = {}
830
  self.max_retries = 3
831
  self.retry_delay = 1.0 # seconds
832
+
833
  @asynccontextmanager
834
  async def execution_context(self, plan: Plan):
835
  """Context manager for execution tracking."""
836
  execution_id = f"exec_{plan.id}_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
837
  context = ExecutionContext(execution_id, plan.id)
838
+
839
  self.active_executions[execution_id] = context
840
+
841
  try:
842
  yield context
843
  finally:
844
  del self.active_executions[execution_id]
845
+
846
  async def execute_plan(self, plan: Plan) -> Dict[str, Any]:
847
  """Execute plan with comprehensive error handling and retry logic."""
848
  async with self.execution_context(plan) as context:
849
  try:
850
  self.logger.info(f"Starting execution of plan {plan.id}")
851
+
852
  # Create efficient dependency graph
853
  dependency_graph = TaskDependencyGraph(plan.tasks)
854
  completed_tasks = set()
855
  failed_tasks = []
856
+
857
  # Execute tasks using efficient dependency checking
858
+ max_iterations = len(plan.tasks) * 2 # Prevent infinite loops
859
+ iteration_count = 0
860
+
861
+ while iteration_count < max_iterations:
862
+ iteration_count += 1
863
+
864
  # Get executable tasks
865
  executable_tasks = dependency_graph.get_executable_tasks(completed_tasks)
866
+
867
  if not executable_tasks:
868
  # No more tasks can be executed
869
  break
870
+
871
  # Execute tasks (can be parallelized in future)
872
+ for task_id in executable_tasks[:1]: # Process one task at a time to prevent loops
873
  task = next(t for t in plan.tasks if t.id == task_id)
874
+
875
  try:
876
  task_result = await self._execute_task_with_retry(
877
  task, context, max_retries=self.max_retries
878
  )
879
+
880
  if task_result["success"]:
881
  # Task completed successfully - track in completed set
882
  completed_tasks.add(task_id)
883
  context.task_results[task_id] = task_result
884
+
885
  self.logger.info(f"Task {task_id} completed successfully")
886
  else:
887
  # Task failed, try fallback
 
889
  fallback_result = await self._handle_task_failure(
890
  task, plan, context, task_result
891
  )
892
+
893
  if fallback_result["success"]:
894
  # Fallback succeeded - track in completed set
895
  completed_tasks.add(task_id)
896
  context.task_results[task_id] = fallback_result
897
+
898
  self.logger.info(f"Task {task_id} completed via fallback")
899
  else:
900
  # Critical failure - attempt plan adaptation
901
  self.logger.warning(f"Task {task_id} failed completely, attempting plan adaptation")
902
+
903
  # Attempt plan adaptation
904
  adaptation_result = await self._adapt_plan(
905
  plan, task, context
906
  )
907
+
908
  if not adaptation_result["success"]:
909
  self.logger.error(f"Critical failure in plan execution")
910
  break
911
+
912
  except Exception as e:
913
  self.logger.error(f"Unexpected error executing task {task_id}: {e}")
914
  failed_tasks.append(task_id)
915
+
916
  # Calculate final metrics
917
  success_rate = len(completed_tasks) / len(plan.tasks) if plan.tasks else 0
918
+
919
  execution_result = {
920
  "success": len(failed_tasks) == 0,
921
  "completed_tasks": len(completed_tasks),
 
928
  "execution_id": context.execution_id,
929
  "plan_id": plan.id
930
  }
931
+
932
  # Store metrics
933
  self.execution_metrics[context.execution_id] = execution_result
934
+
935
  self.logger.info(f"Execution completed: {success_rate:.1%} success rate")
936
+
937
  return execution_result
938
+
939
  except Exception as e:
940
  self.logger.error(f"Execution failed with error: {e}")
941
  return {
 
944
  "execution_time_minutes": context.execution_time_minutes,
945
  "execution_id": context.execution_id
946
  }
947
+
948
  async def _execute_task_with_retry(self, task: Task, context: ExecutionContext, max_retries: int = 3) -> Dict[str, Any]:
949
  """Execute task with retry logic and exponential backoff."""
950
  for attempt in range(max_retries + 1):
 
964
  delay = self.retry_delay * (2 ** attempt)
965
  self.logger.warning(f"Task {task.id} failed (attempt {attempt + 1}), retrying in {delay}s")
966
  await asyncio.sleep(delay)
967
+
968
  # Should not reach here
969
  return {"success": False, "error": "Max retries exceeded"}
970
+
971
  async def _execute_task(self, task: Task, context: ExecutionContext) -> Dict[str, Any]:
972
  """Execute a single task with improved error handling."""
973
  # Log execution decision
974
  context.log_decision("task_execution", task.id, f"Executing task: {task.title}")
975
+
976
  start_time = datetime.utcnow()
977
+
978
  try:
979
  # Simulate realistic task execution time
980
  await asyncio.sleep(min(task.estimated_duration / 60.0, 0.1)) # Max 0.1s for demo
981
+
982
  # Generate task-specific result based on title patterns
983
  result = await self._generate_task_result(task)
984
+
985
  self.logger.info(f"Task {task.id} executed successfully")
986
+
987
  return {
988
  "success": True,
989
  "result": result,
 
991
  "started_at": start_time.isoformat(),
992
  "completed_at": datetime.utcnow().isoformat()
993
  }
994
+
995
  except Exception as e:
996
  self.logger.error(f"Task {task.id} execution failed: {e}")
997
  return {
 
1000
  "duration": (datetime.utcnow() - start_time).total_seconds() / 60,
1001
  "started_at": start_time.isoformat()
1002
  }
1003
+
1004
  async def _generate_task_result(self, task: Task) -> str:
1005
  """Generate task-specific results using templates."""
1006
  title_lower = task.title.lower()
1007
+
1008
  result_templates = {
1009
  "assessment": """
1010
  Assessment Completed for {title}:
 
1020
  β€’ Multiple approaches evaluated
1021
  β€’ Resource requirements assessed
1022
  β€’ Timeline implications identified
1023
+ """,
1024
  "strategy": """
1025
  Strategic Planning Completed for {title}:
1026
 
 
1035
  β€’ Phased implementation approach
1036
  β€’ Contingency plans prepared
1037
  β€’ Performance tracking framework
1038
+ """,
1039
  "implementation": """
1040
  Implementation Completed for {title}:
1041
 
 
1050
  β€’ Quality standards maintained
1051
  β€’ Timeline adherence achieved
1052
  β€’ Stakeholder expectations fulfilled
1053
+ """,
1054
  "review": """
1055
  Review and Optimization Completed for {title}:
1056
 
 
1065
  β€’ Process refinements recommended
1066
  β€’ Best practices captured
1067
  β€’ Future enhancement opportunities noted
1068
+ """
1069
  }
1070
+
1071
  # Select template based on title
1072
  if "assessment" in title_lower or "analysis" in title_lower:
1073
  template = result_templates["assessment"]
 
1096
  β€’ Expected results delivered
1097
  β€’ No issues encountered
1098
  β€’ Ready for next phase
1099
+ """
1100
+
1101
+ async def _handle_task_failure(self, task: Task, plan: Plan, context: ExecutionContext,
1102
+ original_result: Dict[str, Any]) -> Dict[str, Any]:
1103
  """Handle task failures using intelligent fallback strategies."""
1104
+ context.log_adaptation("failure_handling", task.id,
1105
+ f"Applying fallback strategy for failed task: {task.title}")
1106
+
1107
  # Try fallback strategies in order
1108
  for strategy in plan.fallback_strategies:
1109
  try:
 
1120
  assigned_agent=task.assigned_agent,
1121
  estimated_duration=simplified_duration
1122
  )
1123
+
1124
  result = await self._execute_task(simplified_task, context)
1125
  if result["success"]:
1126
  return result
1127
+
1128
  elif "pivot" in strategy.lower():
1129
  # Alternative approach
1130
  return {
 
1132
  "result": f"Successfully pivoted to alternative approach for: {task.title}",
1133
  "duration": 5
1134
  }
1135
+
1136
  elif "adapt" in strategy.lower():
1137
  # Dynamic adaptation
1138
  return {
 
1140
  "result": f"Dynamically adapted approach for: {task.title}",
1141
  "duration": 10
1142
  }
1143
+
1144
  except Exception as e:
1145
  self.logger.warning(f"Fallback strategy failed for task {task.id}: {e}")
1146
  continue
1147
+
1148
  # All fallbacks failed
1149
  return {
1150
+ "success": False,
1151
  "error": "All fallback strategies exhausted",
1152
  "original_error": original_result.get("error")
1153
  }
1154
+
1155
  async def _adapt_plan(self, plan: Plan, failed_task: Task, context: ExecutionContext) -> Dict[str, Any]:
1156
  """Adapt plan when critical failures occur."""
1157
  context.log_adaptation("plan_adaptation", failed_task.id,
1158
+ "Plan adapted due to critical task failure")
1159
+
1160
  # Find dependent tasks
1161
  dependent_tasks = [
1162
  task for task in plan.tasks
1163
  if failed_task.id in task.dependencies
1164
  ]
1165
+
1166
  # Calculate impact
1167
  tasks_to_remove = [failed_task.id] + [task.id for task in dependent_tasks]
1168
+
1169
  # Create new plan with remaining tasks (immutable approach)
1170
  remaining_tasks = [
1171
  task for task in plan.tasks
1172
  if task.id not in tasks_to_remove
1173
  ]
1174
+
1175
  if not remaining_tasks:
1176
  self.logger.error("Plan cannot continue - all tasks failed")
1177
  return {
 
1190
  fallback_strategies=plan.fallback_strategies,
1191
  created_at=plan.created_at
1192
  )
1193
+
1194
  self.logger.info(f"Plan adapted - removed {len(tasks_to_remove)} tasks, {len(remaining_tasks)} remaining")
1195
+
1196
  return {
1197
  "success": True,
1198
  "message": f"Plan adapted - removed {len(tasks_to_remove)} failed tasks, {len(remaining_tasks)} tasks remaining",
 
1207
 
1208
  class RefactoredAutonomousAgent:
1209
  """Main autonomous agent class with enhanced security, performance, and documentation."""
1210
+
1211
  def __init__(self, agent_name: str):
1212
  """
1213
  Initialize the autonomous agent with optimized components.
1214
+
1215
  Args:
1216
  agent_name: Unique identifier for the agent instance
1217
  """
1218
  self.agent_name = agent_name
1219
  self.logger = logging.getLogger(f"{__name__}.{agent_name}")
1220
+
1221
  # Initialize optimized engines
1222
  self.reasoning_engine = CachedReasoningEngine(agent_name)
1223
  self.planning_engine = OptimizedPlanningEngine(agent_name)
1224
  self.execution_engine = OptimizedExecutionEngine(agent_name)
1225
+
1226
  # Performance tracking
1227
  self.performance_metrics = {
1228
  "requests_processed": 0,
 
1230
  "failed_executions": 0,
1231
  "average_response_time": 0.0
1232
  }
1233
+
1234
  self.logger.info(f"Autonomous agent {agent_name} initialized")
1235
+
1236
  @rate_limit(calls_per_minute=100) # Rate limit: 100 requests per minute
1237
  @validate_input # Validate and sanitize input
1238
  async def process_request(self, user_input: str, context: Dict[str, Any] = None) -> Dict[str, Any]:
1239
  """
1240
  Process user request with comprehensive autonomous behavior.
1241
+
1242
  This method orchestrates the complete autonomous workflow:
1243
  1. Analyze the situation and extract insights
1244
  2. Create a detailed execution plan
1245
  3. Execute the plan with error handling
1246
  4. Compile comprehensive results
1247
+
1248
  Args:
1249
  user_input: The user's request or command
1250
  context: Additional context information (optional)
1251
+
1252
  Returns:
1253
  Dict containing complete analysis, plan, execution results, and summary
1254
+
1255
  Raises:
1256
  ValidationError: If input validation fails
1257
  SecurityError: If security checks fail
 
1259
  """
1260
  if context is None:
1261
  context = {}
1262
+
1263
  start_time = datetime.utcnow()
1264
  self.performance_metrics["requests_processed"] += 1
1265
+
1266
  try:
1267
  self.logger.info(f"Processing request: {user_input[:100]}...")
1268
+
1269
  # Step 1: Reasoning and Analysis
1270
  self.logger.debug("Starting situation analysis")
1271
  analysis = await self._analyze_situation_async(user_input, context)
1272
+
1273
  # Step 2: Planning
1274
  self.logger.debug("Creating execution plan")
1275
  plan = await self._create_plan_async(analysis, user_input)
1276
+
1277
  # Step 3: Execution
1278
  self.logger.debug("Executing plan")
1279
  execution_result = await self._execute_plan_async(plan)
1280
+
1281
  # Step 4: Compile Response
1282
  response = await self._compile_response_async(
1283
  user_input, analysis, plan, execution_result
1284
  )
1285
+
1286
  # Update performance metrics
1287
  response_time = (datetime.utcnow() - start_time).total_seconds()
1288
  self._update_performance_metrics(response_time, execution_result["success"])
1289
+
1290
  self.logger.info(f"Request processed successfully in {response_time:.2f}s")
1291
+
1292
  return response
1293
+
1294
  except (ValidationError, SecurityError, ExecutionError) as e:
1295
  self.logger.error(f"Processing failed: {e}")
1296
  self.performance_metrics["failed_executions"] += 1
1297
+
1298
  return {
1299
  "agent_name": self.agent_name,
1300
  "user_input": user_input,
 
1303
  "success": False,
1304
  "processing_time": (datetime.utcnow() - start_time).total_seconds()
1305
  }
1306
+
1307
  async def _analyze_situation_async(self, user_input: str, context: Dict[str, Any]) -> Dict[str, Any]:
1308
  """Asynchronous situation analysis with performance optimization."""
1309
  # For CPU-intensive operations, we could use thread pool
1310
  # For now, keeping synchronous for simplicity
1311
  return self.reasoning_engine.analyze_situation(user_input, context)
1312
+
1313
  async def _create_plan_async(self, analysis: Dict[str, Any], user_input: str) -> Plan:
1314
  """Asynchronous plan creation with validation."""
1315
  return self.planning_engine.create_plan(analysis, user_input)
1316
+
1317
  async def _execute_plan_async(self, plan: Plan) -> Dict[str, Any]:
1318
  """Asynchronous plan execution with comprehensive error handling."""
1319
  return await self.execution_engine.execute_plan(plan)
1320
+
1321
+ async def _compile_response_async(self, user_input: str, analysis: Dict[str, Any],
1322
+ plan: Plan, execution_result: Dict[str, Any]) -> Dict[str, Any]:
1323
  """Compile comprehensive response with all information."""
1324
  intent = analysis.get("intent", {})
1325
  complexity = analysis.get("complexity", {})
1326
  success_rate = execution_result.get("success_rate", 0)
1327
+
1328
  # Generate detailed summary
1329
  summary_parts = [
1330
  f"🧠 **Reasoning**: Detected {intent.get('primary', 'general')} intent "
 
1336
  f"⚑ **Execution**: {execution_result.get('completed_tasks', 0)} tasks completed, "
1337
  f"{success_rate:.0%} success rate"
1338
  ]
1339
+
1340
  if execution_result.get("adaptations_made", 0) > 0:
1341
  summary_parts.append(
1342
  f"πŸ”„ **Adaptation**: Made {execution_result['adaptations_made']} autonomous adaptations"
1343
  )
1344
+
1345
  if execution_result.get("decisions_made", 0) > 0:
1346
  summary_parts.append(
1347
  f"πŸ’‘ **Decisions**: Made {execution_result['decisions_made']} autonomous decisions"
1348
  )
1349
+
1350
  # Compile comprehensive response
1351
  response = {
1352
  "agent_name": self.agent_name,
 
1367
  "analysis_version": "2.0"
1368
  }
1369
  }
1370
+
1371
  return response
1372
+
1373
  def _update_performance_metrics(self, response_time: float, success: bool) -> None:
1374
  """Update performance metrics with exponential moving average."""
1375
  if not hasattr(self, 'performance_metrics'):
1376
  return
1377
+
1378
  if success:
1379
  self.performance_metrics["successful_executions"] += 1
1380
+
1381
  # Update average response time using exponential moving average
1382
  alpha = 0.1 # Smoothing factor
1383
  current_avg = self.performance_metrics.get("average_response_time", 0.0)
1384
  self.performance_metrics["average_response_time"] = (
1385
  alpha * response_time + (1 - alpha) * current_avg
1386
  )
1387
+
1388
  def get_performance_report(self) -> Dict[str, Any]:
1389
  """Get detailed performance report."""
1390
  total_requests = self.performance_metrics["requests_processed"]
 
1392
  self.performance_metrics["successful_executions"] / total_requests
1393
  if total_requests > 0 else 0
1394
  )
1395
+
1396
  return {
1397
  "agent_name": self.agent_name,
1398
  "total_requests": total_requests,
 
1405
 
1406
 
1407
  # ============================================================================
1408
+ # DEMOS AND TESTING FUNCTIONS
1409
  # ============================================================================
1410
 
1411
  async def demo_refactored_autonomous_behavior():
1412
  """
1413
  Demonstrate the refactored autonomous agent behavior.
1414
+
1415
  This demo shows:
1416
  - Improved performance through caching
1417
  - Better error handling and recovery
 
1419
  - Comprehensive logging and monitoring
1420
  """
1421
  agent = RefactoredAutonomousAgent("DemoAgent_v2")
1422
+
1423
  test_cases = [
1424
  "Create a comprehensive marketing campaign for our new product launch",
1425
  "Solve the customer service response time issues with detailed analysis",
1426
  "Plan a strategy to increase customer retention by 25% with implementation",
1427
  "Update our quarterly sales report with performance metrics"
1428
  ]
1429
+
1430
  print("πŸ€– REFACTORED AUTONOMOUS AGENT BEHAVIOR DEMONSTRATION")
1431
  print("=" * 70)
1432
  print("Features: Enhanced Performance | Better Security | Improved Error Handling")
1433
  print()
1434
+
1435
  for i, test_case in enumerate(test_cases, 1):
1436
  print(f"πŸ“ Test Case {i}: {test_case}")
1437
  print("-" * 50)
1438
+
1439
  try:
1440
  start_time = datetime.utcnow()
1441
  result = await agent.process_request(test_case)
1442
  end_time = datetime.utcnow()
1443
+
1444
  processing_time = (end_time - start_time).total_seconds()
1445
+
1446
  print(f"βœ… Overall Success: {result['overall_success']}")
1447
  print(f"πŸ“Š {result['summary']}")
1448
  print(f"🎯 Plan: {result['plan']['title']}")
1449
  print(f"⏱️ Processing Time: {processing_time:.2f}s")
1450
+
1451
  # Show performance metrics for complex requests
1452
  if 'performance' in result:
1453
  perf = result['performance']
1454
  print(f"πŸ“ˆ Performance: {perf['response_time_ms']:.0f}ms response time")
1455
  if perf.get('cache_hit'):
1456
  print("⚑ Cache hit - optimized performance!")
1457
+
1458
  if not result['overall_success']:
1459
  print(f"⚠️ Execution Issues: {result.get('error', 'Partial failure')}")
1460
+
1461
  except Exception as e:
1462
  print(f"❌ Error processing request: {e}")
1463
+
1464
  print()
1465
+
1466
  # Show performance report
1467
  print("πŸ“Š PERFORMANCE REPORT")
1468
  print("-" * 30)
 
1487
  level=logging.INFO,
1488
  format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
1489
  )
1490
+
1491
  # Run the demonstration
1492
  asyncio.run(demo_refactored_autonomous_behavior())