Coverage for src / local_deep_research / error_handling / report_generator.py: 97%
106 statements
« prev ^ index » next coverage.py v7.12.0, created at 2026-01-11 00:51 +0000
« prev ^ index » next coverage.py v7.12.0, created at 2026-01-11 00:51 +0000
1"""
2ErrorReportGenerator - Create user-friendly error reports
3"""
import re
from typing import Any, Dict, Optional

from loguru import logger

from .error_reporter import ErrorReporter
class ErrorReportGenerator:
    """Build comprehensive, user-friendly Markdown error reports."""

    def __init__(self, llm=None):
        """
        Initialize the error report generator.

        Args:
            llm: Optional LLM instance (unused, retained for backward
                compatibility with older call sites)
        """
        # Delegates error classification to the shared ErrorReporter.
        self.error_reporter = ErrorReporter()
26 def generate_error_report(
27 self,
28 error_message: str,
29 query: str,
30 partial_results: Optional[Dict[str, Any]] = None,
31 search_iterations: int = 0,
32 research_id: Optional[str] = None,
33 ) -> str:
34 """
35 Generate a comprehensive error report
37 Args:
38 error_message: The error that occurred
39 query: The research query
40 partial_results: Any partial results that were collected
41 search_iterations: Number of search iterations completed
42 research_id: Research ID for reference
44 Returns:
45 str: Formatted error report in Markdown
46 """
47 try:
48 # Analyze the error
49 context = {
50 "query": query,
51 "search_iterations": search_iterations,
52 "research_id": research_id,
53 "partial_results": partial_results,
54 }
56 if partial_results:
57 context.update(partial_results)
59 error_analysis = self.error_reporter.analyze_error(
60 error_message, context
61 )
63 # Build the simplified report
64 report_parts = []
66 # Header with user-friendly error message and logs reference
67 user_friendly_message = self._make_error_user_friendly(
68 error_message
69 )
70 category_title = error_analysis.get("title", "Error")
72 report_parts.append("# ⚠️ Research Failed")
73 report_parts.append(f"\n**Error Type:** {category_title}")
74 report_parts.append(f"\n**What happened:** {user_friendly_message}")
75 report_parts.append(
76 '\n*For detailed error information, scroll down to the research logs and select "Errors" from the filter.*'
77 )
79 # Support links - moved up for better visibility
80 report_parts.append("\n## 💬 Get Help")
81 report_parts.append("We're here to help you get this working:")
82 report_parts.append(
83 "- 📖 **Documentation & guides:** [Wiki](https://github.com/LearningCircuit/local-deep-research/wiki)"
84 )
85 report_parts.append(
86 "- 💬 **Chat with the community:** [Discord #help-and-support](https://discord.gg/ttcqQeFcJ3)"
87 )
88 report_parts.append(
89 "- 🐛 **Report bugs or get help:** [GitHub Issues](https://github.com/LearningCircuit/local-deep-research/issues) *(don't hesitate to ask if you're stuck!)*"
90 )
91 report_parts.append(
92 "- 💭 **Join discussions:** [Reddit r/LocalDeepResearch](https://www.reddit.com/r/LocalDeepResearch/) *(checked less frequently)*"
93 )
95 # Show partial results if available (in expandable section)
96 if error_analysis.get("has_partial_results"):
97 partial_content = self._format_partial_results(partial_results)
98 if partial_content: 98 ↛ 103line 98 didn't jump to line 103 because the condition on line 98 was always true
99 report_parts.append(
100 f"\n<details>\n<summary>📊 Partial Results Available</summary>\n\n{partial_content}\n</details>"
101 )
103 return "\n".join(report_parts)
105 except Exception:
106 # Fallback: always return something, even if error report generation fails
107 logger.exception("Failed to generate error report")
108 return f"""# ⚠️ Research Failed
110**What happened:** {error_message}
112## 💬 Get Help
113We're here to help you get this working:
114- 📖 **Documentation & guides:** [Wiki](https://github.com/LearningCircuit/local-deep-research/wiki)
115- 💬 **Chat with the community:** [Discord #help-and-support](https://discord.gg/ttcqQeFcJ3)
116- 🐛 **Report bugs or get help:** [GitHub Issues](https://github.com/LearningCircuit/local-deep-research/issues) *(don't hesitate to ask if you're stuck!)*
118*Note: Error report generation failed - showing basic error information.*"""
120 def _format_partial_results(
121 self, partial_results: Optional[Dict[str, Any]]
122 ) -> str:
123 """
124 Format partial results for display
126 Args:
127 partial_results: Partial results data
129 Returns:
130 str: Formatted partial results
131 """
132 if not partial_results:
133 return ""
135 formatted_parts = []
137 # Current knowledge summary
138 if "current_knowledge" in partial_results:
139 knowledge = partial_results["current_knowledge"]
140 if knowledge and len(knowledge.strip()) > 50:
141 formatted_parts.append("### Research Summary\n")
142 formatted_parts.append(
143 knowledge[:1000] + "..."
144 if len(knowledge) > 1000
145 else knowledge
146 )
147 formatted_parts.append("")
149 # Search results
150 if "search_results" in partial_results:
151 results = partial_results["search_results"]
152 if results: 152 ↛ 163line 152 didn't jump to line 163 because the condition on line 152 was always true
153 formatted_parts.append("### Search Results Found\n")
154 for i, result in enumerate(results[:5], 1): # Show top 5
155 title = result.get("title", "Untitled")
156 url = result.get("url", "")
157 formatted_parts.append(f"{i}. **{title}**")
158 if url: 158 ↛ 154line 158 didn't jump to line 154 because the condition on line 158 was always true
159 formatted_parts.append(f" - URL: {url}")
160 formatted_parts.append("")
162 # Findings
163 if "findings" in partial_results:
164 findings = partial_results["findings"]
165 if findings: 165 ↛ 179line 165 didn't jump to line 179 because the condition on line 165 was always true
166 formatted_parts.append("### Research Findings\n")
167 for i, finding in enumerate(findings[:3], 1): # Show top 3
168 content = finding.get("content", "")
169 if content and not content.startswith("Error:"):
170 phase = finding.get("phase", f"Finding {i}")
171 formatted_parts.append(f"**{phase}:**")
172 formatted_parts.append(
173 content[:500] + "..."
174 if len(content) > 500
175 else content
176 )
177 formatted_parts.append("")
179 if formatted_parts:
180 formatted_parts.append(
181 "*Note: The above results were successfully collected before the error occurred.*"
182 )
184 return "\n".join(formatted_parts) if formatted_parts else ""
186 def _get_technical_context(
187 self,
188 error_analysis: Dict[str, Any],
189 partial_results: Optional[Dict[str, Any]],
190 ) -> str:
191 """
192 Get additional technical context for the error
194 Args:
195 error_analysis: Error analysis results
196 partial_results: Partial results if available
198 Returns:
199 str: Technical context information
200 """
201 context_parts = []
203 # Add timing information if available
204 if partial_results:
205 if "start_time" in partial_results:
206 context_parts.append(
207 f"- **Start Time:** {partial_results['start_time']}"
208 )
210 if "last_activity" in partial_results:
211 context_parts.append(
212 f"- **Last Activity:** {partial_results['last_activity']}"
213 )
215 # Add model information
216 if "model_config" in partial_results:
217 config = partial_results["model_config"]
218 context_parts.append(
219 f"- **Model:** {config.get('model_name', 'Unknown')}"
220 )
221 context_parts.append(
222 f"- **Provider:** {config.get('provider', 'Unknown')}"
223 )
225 # Add search information
226 if "search_config" in partial_results:
227 search_config = partial_results["search_config"]
228 context_parts.append(
229 f"- **Search Engine:** {search_config.get('engine', 'Unknown')}"
230 )
231 context_parts.append(
232 f"- **Max Results:** {search_config.get('max_results', 'Unknown')}"
233 )
235 # Add any error codes or HTTP status
236 if "status_code" in partial_results:
237 context_parts.append(
238 f"- **Status Code:** {partial_results['status_code']}"
239 )
241 if "error_code" in partial_results:
242 context_parts.append(
243 f"- **Error Code:** {partial_results['error_code']}"
244 )
246 # Add error-specific context based on category
247 category = error_analysis.get("category")
248 if category: 248 ↛ 264line 248 didn't jump to line 264 because the condition on line 248 was always true
249 if "connection" in category.value.lower():
250 context_parts.append(
251 "- **Network Error:** Connection-related issue detected"
252 )
253 context_parts.append(
254 "- **Retry Recommended:** Check service status and try again"
255 )
256 elif "model" in category.value.lower():
257 context_parts.append(
258 "- **Model Error:** Issue with AI model or configuration"
259 )
260 context_parts.append(
261 "- **Check:** Model service availability and parameters"
262 )
264 return "\n".join(context_parts) if context_parts else ""
266 def generate_quick_error_summary(
267 self, error_message: str
268 ) -> Dict[str, str]:
269 """
270 Generate a quick error summary for API responses
272 Args:
273 error_message: The error message
275 Returns:
276 dict: Quick error summary
277 """
278 error_analysis = self.error_reporter.analyze_error(error_message)
280 return {
281 "title": error_analysis["title"],
282 "category": error_analysis["category"].value,
283 "severity": error_analysis["severity"],
284 "recoverable": error_analysis["recoverable"],
285 }
287 def _make_error_user_friendly(self, error_message: str) -> str:
288 """
289 Replace cryptic technical error messages with user-friendly versions
291 Args:
292 error_message: The original technical error message
294 Returns:
295 str: User-friendly error message, or original if no replacement found
296 """
297 # Dictionary of technical errors to user-friendly messages
298 error_replacements = {
299 "max_workers must be greater than 0": (
300 "The LLM failed to generate search questions. This usually means the LLM service isn't responding properly.\n\n"
301 "**Try this:**\n"
302 "- Check if your LLM service (Ollama/LM Studio) is running\n"
303 "- Restart the LLM service\n"
304 "- Try a different model"
305 ),
306 "POST predict.*EOF": (
307 "Lost connection to Ollama. This usually means Ollama stopped responding or there's a network issue.\n\n"
308 "**Try this:**\n"
309 "- Restart Ollama: `ollama serve`\n"
310 "- Check if Ollama is still running: `ps aux | grep ollama`\n"
311 "- Try a different port if 11434 is in use"
312 ),
313 "HTTP error 404.*research results": (
314 "The research completed but the results can't be displayed. The files were likely generated successfully.\n\n"
315 "**Try this:**\n"
316 "- Check the `research_outputs` folder for your report\n"
317 "- Ensure the folder has proper read/write permissions\n"
318 "- Restart the LDR web interface"
319 ),
320 "Connection refused|\\[Errno 111\\]": (
321 "Cannot connect to the LLM service. The service might not be running or is using a different address.\n\n"
322 "**Try this:**\n"
323 "- Start your LLM service (Ollama: `ollama serve`, LM Studio: launch the app)\n"
324 "- **Docker on Mac/Windows:** Change URL from `http://localhost:1234` to `http://host.docker.internal:1234`\n"
325 "- **Docker on Linux:** Use your host IP instead of localhost (find with `hostname -I`)\n"
326 "- Check the service URL in settings matches where your LLM is running\n"
327 "- Verify the port number is correct (Ollama: 11434, LM Studio: 1234)"
328 ),
329 "The search is longer than 256 characters": (
330 "Your search query is too long for GitHub's API (max 256 characters).\n\n"
331 "**Try this:**\n"
332 "- Shorten your research query\n"
333 "- Use a different search engine (DuckDuckGo, Searx, etc.)\n"
334 "- Break your research into smaller, focused queries"
335 ),
336 "No module named.*local_deep_research": (
337 "Installation issue detected. The package isn't properly installed.\n\n"
338 "**Try this:**\n"
339 "- Reinstall: `pip install -e .` from the project directory\n"
340 "- Check you're using the right Python environment\n"
341 "- For Docker users: rebuild the container"
342 ),
343 "Failed to create search engine|could not be found": (
344 "Search engine configuration problem.\n\n"
345 "**Try this:**\n"
346 "- Use the default search engine (auto)\n"
347 "- Check search engine settings in Advanced Options\n"
348 "- Ensure required API keys are set for external search engines"
349 ),
350 "No search results found|All search engines.*blocked.*rate.*limited": (
351 "No search results were found for your query. This could mean all search engines are unavailable.\n\n"
352 "**Try this:**\n"
353 "- **If using SearXNG:** Check if your SearXNG Docker container is running: `docker ps`\n"
354 "- **Start SearXNG:** `docker run -d -p 8080:8080 searxng/searxng` then set URL to `http://localhost:8080`\n"
355 "- **Try different search terms:** Use broader, more general keywords\n"
356 "- **Check network connection:** Ensure you can access the internet\n"
357 "- **Switch search engines:** Try DuckDuckGo, Brave, or Google (if API key configured)\n"
358 "- **Check for typos** in your research query"
359 ),
360 "TypeError.*Context.*Size|'<' not supported between": (
361 "Model configuration issue. The context size setting might not be compatible with your model.\n\n"
362 "**Try this:**\n"
363 "- Check your model's maximum context size\n"
364 "- Leave context size settings at default\n"
365 "- Try a different model"
366 ),
367 "Model.*not found in Ollama": (
368 "The specified model isn't available in Ollama.\n\n"
369 "**Try this:**\n"
370 "- Check available models: `ollama list`\n"
371 "- Pull the model: `ollama pull <model-name>`\n"
372 "- Use the exact model name shown in `ollama list` (e.g., 'gemma2:9b' not 'gemma:latest')"
373 ),
374 "No auth credentials found|401.*API key": (
375 "API key is missing or incorrectly configured.\n\n"
376 "**Try this:**\n"
377 "- Set API key in the web UI settings (not in .env files)\n"
378 "- Go to Settings → Advanced → enter your API key\n"
379 "- For custom endpoints, ensure the key format matches what your provider expects"
380 ),
381 "Attempt to write readonly database": (
382 "Permission issue with the database file.\n\n"
383 "**Try this:**\n"
384 "- On Windows: Run as Administrator\n"
385 "- On Linux/Mac: Check folder permissions\n"
386 "- Delete and recreate the database file if corrupted"
387 ),
388 "Invalid value.*SearXNG|database.*locked": (
389 "SearXNG configuration or rate limiting issue.\n\n"
390 "**Try this:**\n"
391 "- Keep 'Search snippets only' enabled (don't turn it off)\n"
392 "- Restart SearXNG: `docker restart searxng`\n"
393 "- If rate limited, wait a few minutes or use a VPN"
394 ),
395 "host.*localhost.*Docker|127\\.0\\.0\\.1.*Docker|localhost.*1234.*Docker|LM.*Studio.*Docker.*Mac": (
396 "Docker networking issue - can't connect to services on host.\n\n"
397 "**Try this:**\n"
398 "- **On Mac/Windows Docker:** Replace 'localhost' or '127.0.0.1' with 'host.docker.internal'\n"
399 "- **On Linux Docker:** Use your host's actual IP address (find with `hostname -I`)\n"
400 "- **Example:** Change `http://localhost:1234` to `http://host.docker.internal:1234`\n"
401 "- Ensure the service port isn't blocked by firewall\n"
402 "- Alternative: Use host networking mode (see wiki for setup)"
403 ),
404 }
406 # Check each pattern and replace if found
407 for pattern, replacement in error_replacements.items():
408 import re
410 if re.search(pattern, error_message, re.IGNORECASE):
411 return f"{replacement}\n\nTechnical error: {error_message}"
413 # If no specific replacement found, return original message
414 return error_message