Coverage for src/local_deep_research/advanced_search_system/constraint_checking/constraint_checker.py: 77% (60 statements)

1"""
2Main constraint checker that orchestrates constraint validation.
4This module provides the primary interface for checking candidates against constraints.
5"""
7from dataclasses import dataclass
8from typing import Dict, List, Optional
10from langchain_core.language_models import BaseChatModel
11from loguru import logger
13from ..candidates.base_candidate import Candidate
14from ..constraints.base_constraint import Constraint
15from .evidence_analyzer import EvidenceAnalyzer
16from .rejection_engine import RejectionEngine, RejectionResult
19@dataclass
20class ConstraintCheckResult:
21 """Result of checking a candidate against all constraints."""
23 candidate: Candidate
24 total_score: float
25 constraint_scores: Dict[str, Dict]
26 rejection_result: Optional[RejectionResult]
27 detailed_results: List[Dict]
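
# A populated result is expected to look roughly like the sketch below, based on
# how check_candidate() fills each field; the constraint text and the numbers are
# illustrative only, not values produced by this module.
#
#   ConstraintCheckResult(
#       candidate=candidate,
#       total_score=0.72,
#       constraint_scores={
#           "published before 1950": {
#               "total": 0.8,
#               "positive": 0.9,
#               "negative": 0.1,
#               "uncertainty": 0.2,
#               "weight": 1.0,
#           },
#       },
#       rejection_result=None,
#       detailed_results=[{"constraint": "published before 1950", ...}],
#   )
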
class ConstraintChecker:
    """
    Main constraint checker that validates candidates against constraints.

    This checker:
    1. Gathers evidence for each constraint
    2. Analyzes evidence using dual confidence scoring
    3. Makes rejection decisions based on evidence
    4. Provides detailed scoring breakdown
    """

    def __init__(
        self,
        model: BaseChatModel,
        evidence_gatherer=None,  # Will be passed in from strategy
        negative_threshold: float = 0.25,
        positive_threshold: float = 0.4,
        uncertainty_penalty: float = 0.2,
        negative_weight: float = 0.5,
    ):
        """
        Initialize the constraint checker.

        Args:
            model: Language model for evidence analysis
            evidence_gatherer: Function to gather evidence (from strategy)
            negative_threshold: Rejection threshold for negative evidence
            positive_threshold: Minimum positive evidence required
            uncertainty_penalty: Penalty for uncertain evidence
            negative_weight: Weight for negative evidence in scoring
        """
        self.model = model
        self.evidence_gatherer = evidence_gatherer

        # Initialize components
        self.evidence_analyzer = EvidenceAnalyzer(model)
        self.rejection_engine = RejectionEngine(
            negative_threshold, positive_threshold
        )

        # Scoring parameters
        self.uncertainty_penalty = uncertainty_penalty
        self.negative_weight = negative_weight
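
    # How the parameters interact (hedged sketch, using the defaults above):
    # a constraint with no evidence scores 0.5 - uncertainty_penalty = 0.3;
    # negative_threshold and positive_threshold are handed to RejectionEngine to
    # decide whether a candidate is rejected outright; uncertainty_penalty and
    # negative_weight are passed to EvidenceAnalyzer.evaluate_evidence_list()
    # when scoring constraints that do have evidence.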
    def check_candidate(
        self, candidate: Candidate, constraints: List[Constraint]
    ) -> ConstraintCheckResult:
        """
        Check a candidate against all constraints.

        Args:
            candidate: The candidate to check
            constraints: List of constraints to check against

        Returns:
            ConstraintCheckResult: Complete evaluation result
        """
        logger.info(f"Checking candidate: {candidate.name}")

        constraint_results = {}
        constraint_scores = {}
        detailed_results = []
        total_score = 0.0

        for constraint in constraints:
            # Gather evidence for this constraint
            evidence_list = self._gather_evidence_for_constraint(
                candidate, constraint
            )

            if evidence_list:
                # Analyze evidence with dual confidence
                dual_evidence = [
                    self.evidence_analyzer.analyze_evidence_dual_confidence(
                        e, constraint
                    )
                    for e in evidence_list
                ]

                constraint_results[constraint] = dual_evidence

                # Calculate average scores for this constraint
                avg_positive = sum(
                    e.positive_confidence for e in dual_evidence
                ) / len(dual_evidence)
                avg_negative = sum(
                    e.negative_confidence for e in dual_evidence
                ) / len(dual_evidence)
                avg_uncertainty = sum(
                    e.uncertainty for e in dual_evidence
                ) / len(dual_evidence)

                # Calculate constraint score
                score = self.evidence_analyzer.evaluate_evidence_list(
                    evidence_list,
                    constraint,
                    self.uncertainty_penalty,
                    self.negative_weight,
                )

                # Store results
                constraint_scores[constraint.value] = {
                    "total": score,
                    "positive": avg_positive,
                    "negative": avg_negative,
                    "uncertainty": avg_uncertainty,
                    "weight": constraint.weight,
                }

                detailed_results.append(
                    {
                        "constraint": constraint.value,
                        "score": score,
                        "positive": avg_positive,
                        "negative": avg_negative,
                        "uncertainty": avg_uncertainty,
                        "weight": constraint.weight,
                        "type": constraint.type.value,
                    }
                )

                # Log result
                symbol = "✓" if score >= 0.8 else "○" if score >= 0.5 else "✗"
                logger.info(
                    f"{symbol} {candidate.name} | {constraint.value}: {int(score * 100)}% "
                    f"(+{int(avg_positive * 100)}% -{int(avg_negative * 100)}% ?{int(avg_uncertainty * 100)}%)"
                )

            else:
                # No evidence found
                score = 0.5 - self.uncertainty_penalty

                constraint_scores[constraint.value] = {
                    "total": score,
                    "positive": 0.0,
                    "negative": 0.0,
                    "uncertainty": 1.0,
                    "weight": constraint.weight,
                }

                detailed_results.append(
                    {
                        "constraint": constraint.value,
                        "score": score,
                        "positive": 0.0,
                        "negative": 0.0,
                        "uncertainty": 1.0,
                        "weight": constraint.weight,
                        "type": constraint.type.value,
                    }
                )

                logger.info(
                    f"? {candidate.name} | {constraint.value}: No evidence found"
                )

        # Check for rejection
        rejection_result = self.rejection_engine.check_all_constraints(
            candidate, constraint_results
        )

        if rejection_result and rejection_result.should_reject:
            # Candidate should be rejected
            total_score = 0.0
        else:
            # Calculate weighted average score
            if detailed_results:
                weights = [r["weight"] for r in detailed_results]
                scores = [r["score"] for r in detailed_results]
                total_score = sum(
                    s * w for s, w in zip(scores, weights, strict=False)
                ) / sum(weights)

        logger.info(f"Final score for {candidate.name}: {total_score:.2%}")

        return ConstraintCheckResult(
            candidate=candidate,
            total_score=total_score,
            constraint_scores=constraint_scores,
            rejection_result=rejection_result,
            detailed_results=detailed_results,
        )
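
    # Worked example of the weighted average above (hedged, illustrative numbers):
    # with per-constraint scores [0.8, 0.3] and weights [2.0, 1.0], the final
    # score is (0.8 * 2.0 + 0.3 * 1.0) / 3.0 ≈ 0.63, unless the rejection engine
    # has already forced the total to 0.0.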
    def _gather_evidence_for_constraint(
        self, candidate: Candidate, constraint: Constraint
    ) -> List[Dict]:
        """Gather evidence for a constraint using the provided evidence gatherer."""
        if self.evidence_gatherer:
            return self.evidence_gatherer(candidate, constraint)
        else:
            logger.warning(
                "No evidence gatherer provided - cannot gather evidence"
            )
            return []
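
# Usage sketch (hedged): how a strategy might wire this checker up. The gatherer
# signature mirrors how _gather_evidence_for_constraint() calls it; `my_model`,
# `gather_evidence`, `candidate`, and `constraints` are illustrative names, not
# part of this module.
#
#   def gather_evidence(candidate, constraint):
#       ...  # return a List[Dict] of raw evidence items for this pair
#
#   checker = ConstraintChecker(model=my_model, evidence_gatherer=gather_evidence)
#   result = checker.check_candidate(candidate, constraints)
#   if result.rejection_result and result.rejection_result.should_reject:
#       ...  # drop the candidate
#   else:
#       ...  # rank the candidate by result.total_score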