Coverage for src / local_deep_research / mcp / server.py: 94%
284 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-14 23:55 +0000
1"""
2MCP Server for Local Deep Research.
4This module provides an MCP (Model Context Protocol) server that exposes
5LDR's research capabilities to AI agents like Claude.
7Security Notice:
8 This server is designed for LOCAL USE ONLY via STDIO transport
9 (e.g., Claude Desktop). It has no built-in authentication or rate
10 limiting. Do NOT expose this server over a network without implementing
11 proper security controls (OAuth, rate limiting, input validation).
13 When running locally via STDIO, security is provided by your operating
14 system's user permissions.
16Tools:
17 - quick_research: Fast research summary (1-5 min)
18 - detailed_research: Comprehensive analysis (5-15 min)
19 - generate_report: Full markdown report (10-30 min)
20 - analyze_documents: Search local document collection (30s-2 min)
21 - search: Raw search results without LLM processing (5-30s)
22 - list_search_engines: List available search engines
23 - list_strategies: List available research strategies
24 - get_configuration: Get current server configuration
26Usage:
27 python -m local_deep_research.mcp
28 # or
29 ldr-mcp
30"""
32import re
33import sys
34from typing import Any, Dict, Optional
36from loguru import logger
37from mcp.server.fastmcp import FastMCP
39from local_deep_research.api.research_functions import (
40 analyze_documents as ldr_analyze_documents,
41 detailed_research as ldr_detailed_research,
42 generate_report as ldr_generate_report,
43 quick_summary as ldr_quick_summary,
44)
45from local_deep_research.api.settings_utils import create_settings_snapshot
46from local_deep_research.search_system_factory import (
47 get_available_strategies,
48)
# Create FastMCP server instance.
# Functions decorated with @mcp.tool() below are auto-registered and exposed
# over the MCP protocol; the server is started via run_server() (STDIO only).
mcp = FastMCP(
    "local-deep-research",
    instructions="AI-powered deep research assistant with iterative analysis using LLMs and web searches",
)
57def _classify_error(error_msg: str) -> str:
58 """Classify error for client handling."""
59 error_lower = error_msg.lower()
60 if "503" in error_msg or "unavailable" in error_lower:
61 return "service_unavailable"
62 if "404" in error_msg or "not found" in error_lower:
63 return "model_not_found"
64 if (
65 "api key" in error_lower
66 or "authentication" in error_lower
67 or "unauthorized" in error_lower
68 or "401" in error_msg
69 ):
70 return "auth_error"
71 if "timeout" in error_lower or "timed out" in error_lower:
72 return "timeout"
73 if "rate limit" in error_lower or "429" in error_msg:
74 return "rate_limit"
75 if "connection" in error_lower:
76 return "connection_error"
77 if "validation" in error_lower or "invalid" in error_lower:
78 return "validation_error"
79 return "unknown"
class ValidationError(Exception):
    """Signals that a tool parameter failed validation.

    Tool handlers catch this and return a structured error response with
    error_type "validation_error" instead of letting it propagate to the
    MCP transport.
    """
# Allow-list for collection names: letters, digits, spaces, hyphens and
# underscores, 1-100 characters. Rejects path separators and other
# special characters in user-supplied collection identifiers.
_COLLECTION_NAME_RE = re.compile(r"^[A-Za-z0-9 _-]{1,100}$")
91def _validate_query(query: str) -> str:
92 """Validate and sanitize query parameter."""
93 if not query or not query.strip():
94 raise ValidationError("Query cannot be empty")
95 query = query.strip()
96 if len(query) > 10000:
97 raise ValidationError(
98 "Query exceeds maximum length of 10000 characters"
99 )
100 return query
103def _validate_iterations(
104 iterations: Optional[int], max_val: int = 20
105) -> Optional[int]:
106 """Validate iterations parameter."""
107 if iterations is None:
108 return None
109 if not isinstance(iterations, int) or iterations < 1:
110 raise ValidationError("Iterations must be a positive integer")
111 if iterations > max_val:
112 raise ValidationError(f"Iterations cannot exceed {max_val}")
113 return iterations
116def _validate_questions_per_iteration(qpi: Optional[int]) -> Optional[int]:
117 """Validate questions_per_iteration parameter."""
118 if qpi is None:
119 return None
120 if not isinstance(qpi, int) or qpi < 1:
121 raise ValidationError(
122 "Questions per iteration must be a positive integer"
123 )
124 if qpi > 10:
125 raise ValidationError("Questions per iteration cannot exceed 10")
126 return qpi
129def _validate_max_results(max_results: int) -> int:
130 """Validate max_results parameter."""
131 if not isinstance(max_results, int) or max_results < 1:
132 raise ValidationError("Max results must be a positive integer")
133 if max_results > 100:
134 raise ValidationError("Max results cannot exceed 100")
135 return max_results
138def _validate_temperature(temperature: Optional[float]) -> Optional[float]:
139 """Validate temperature parameter."""
140 if temperature is None:
141 return None
142 if not isinstance(temperature, (int, float)):
143 raise ValidationError("Temperature must be a number")
144 if temperature < 0.0 or temperature > 2.0:
145 raise ValidationError("Temperature must be between 0.0 and 2.0")
146 return float(temperature)
149def _validate_search_engine(engine: Optional[str]) -> Optional[str]:
150 """Validate search engine name against available engines."""
151 if engine is None:
152 return None
153 engine = engine.strip()
154 if not engine:
155 return None
156 try:
157 from local_deep_research.web_search_engines.search_engines_config import (
158 search_config,
159 )
161 settings = create_settings_snapshot()
162 available = search_config(settings_snapshot=settings)
163 if engine not in available:
164 available_names = sorted(available.keys())
165 raise ValidationError( # noqa: TRY301
166 f"Unknown search engine '{engine}'. Available: {', '.join(available_names)}"
167 )
168 except ValidationError:
169 raise
170 except Exception:
171 logger.exception("Could not load engine config to validate engine")
172 raise ValidationError(
173 f"Cannot validate search engine '{engine}': engine configuration unavailable"
174 )
175 return engine
178def _validate_strategy(strategy: Optional[str]) -> Optional[str]:
179 """Validate strategy name against available strategies."""
180 if strategy is None: 180 ↛ 181line 180 didn't jump to line 181 because the condition on line 180 was never true
181 return None
182 strategy = strategy.strip()
183 if not strategy:
184 return None
185 available = get_available_strategies(show_all=True)
186 available_names = [s["name"] for s in available]
187 if strategy not in available_names:
188 raise ValidationError(
189 f"Unknown strategy '{strategy}'. Available: {', '.join(available_names)}"
190 )
191 return strategy
194def _build_settings_overrides(
195 search_engine: Optional[str] = None,
196 strategy: Optional[str] = None,
197 iterations: Optional[int] = None,
198 questions_per_iteration: Optional[int] = None,
199 temperature: Optional[float] = None,
200) -> Dict[str, Any]:
201 """Build settings overrides dict from tool parameters."""
202 overrides: dict[str, Any] = {}
203 if search_engine is not None:
204 search_engine = _validate_search_engine(search_engine)
205 if search_engine:
206 overrides["search.tool"] = search_engine
207 if strategy is not None:
208 strategy = _validate_strategy(strategy)
209 if strategy:
210 overrides["search.search_strategy"] = strategy
211 if iterations is not None:
212 overrides["search.iterations"] = iterations
213 if questions_per_iteration is not None:
214 overrides["search.questions_per_iteration"] = questions_per_iteration
215 if temperature is not None:
216 overrides["llm.temperature"] = temperature
217 return overrides
220# =============================================================================
221# Research Tools
222# =============================================================================
@mcp.tool()
def quick_research(
    query: str,
    search_engine: Optional[str] = None,
    strategy: Optional[str] = None,
    iterations: Optional[int] = None,
    questions_per_iteration: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Perform quick research on a topic.

    This tool performs a fast research summary on the given query. It searches
    the web, analyzes sources, and generates a concise summary with findings.

    IMPORTANT: This is a synchronous operation that typically takes 1-5 minutes
    to complete depending on the complexity and configuration.

    Args:
        query: The research question or topic to investigate.
        search_engine: Search engine to use (e.g., "wikipedia", "arxiv", "searxng", "auto").
            Use list_search_engines() to see available options.
        strategy: Research strategy to use (e.g., "source-based", "rapid", "iterative").
            Use list_strategies() to see available options.
        iterations: Number of search iterations (1-10). More iterations = deeper research.
        questions_per_iteration: Questions to generate per iteration (1-10).

    Returns:
        Dictionary containing:
            - status: "success" or "error"
            - summary: The research summary text
            - findings: List of detailed findings from each search
            - sources: List of source URLs discovered
            - iterations: Number of iterations performed
            - error: Error message (only if status is "error")
            - error_type: Error classification (only if status is "error")
    """
    try:
        # Validate parameters
        query = _validate_query(query)
        iterations = _validate_iterations(iterations, max_val=10)
        questions_per_iteration = _validate_questions_per_iteration(
            questions_per_iteration
        )

        # Query is truncated in log lines to keep them bounded.
        logger.info(f"Starting quick research for query: {query[:100]}...")

        # Fold per-call parameters into settings overrides (this also
        # validates the engine/strategy names).
        overrides = _build_settings_overrides(
            search_engine=search_engine,
            strategy=strategy,
            iterations=iterations,
            questions_per_iteration=questions_per_iteration,
        )

        # Build the snapshot once; omit the overrides kwarg entirely when
        # nothing was overridden.
        settings = (
            create_settings_snapshot(overrides=overrides)
            if overrides
            else create_settings_snapshot()
        )

        # Synchronous call — typically 1-5 minutes (see docstring).
        result = ldr_quick_summary(query, settings_snapshot=settings)

        # Normalize the library result into the documented response shape,
        # with safe defaults for any missing keys.
        return {
            "status": "success",
            "summary": result.get("summary", ""),
            "findings": result.get("findings", []),
            "sources": result.get("sources", []),
            "iterations": result.get("iterations", 0),
            "formatted_findings": result.get("formatted_findings", ""),
        }

    except ValidationError as e:
        # Validation failures are client errors; the message is safe to
        # return verbatim.
        logger.warning("Validation failed for quick research")
        return {
            "status": "error",
            "error": str(e),
            "error_type": "validation_error",
        }
    except Exception as e:
        # Unexpected failures: log the full traceback server-side but
        # return only a coarse classification to the client.
        logger.exception(
            f"Quick research failed for query: {query[:100] if query else 'empty'}"
        )
        error_type = _classify_error(str(e))
        return {
            "status": "error",
            "error": f"Quick research failed ({error_type}). Check server logs for details.",
            "error_type": error_type,
        }
@mcp.tool()
def detailed_research(
    query: str,
    search_engine: Optional[str] = None,
    strategy: Optional[str] = None,
    iterations: Optional[int] = None,
    questions_per_iteration: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Perform detailed research with comprehensive analysis.

    This tool performs a thorough research analysis on the given query, returning
    structured data with detailed findings, sources, and metadata.

    IMPORTANT: This is a synchronous operation that typically takes 5-15 minutes
    to complete depending on the complexity and configuration.

    Args:
        query: The research question or topic to investigate.
        search_engine: Search engine to use (e.g., "wikipedia", "arxiv", "searxng", "auto").
        strategy: Research strategy to use (e.g., "source-based", "iterative", "evidence").
        iterations: Number of search iterations (1-20). More iterations = deeper research.
        questions_per_iteration: Questions to generate per iteration (1-10).

    Returns:
        Dictionary containing:
            - status: "success" or "error"
            - query: The original query
            - research_id: Unique identifier for this research
            - summary: The research summary text
            - findings: List of detailed findings
            - sources: List of source URLs
            - iterations: Number of iterations performed
            - metadata: Additional metadata (timestamp, search_tool, strategy)
            - error/error_type: Error info (only if status is "error")
    """
    try:
        # Validate parameters (note the higher iteration cap than
        # quick_research: 20 vs 10).
        query = _validate_query(query)
        iterations = _validate_iterations(iterations, max_val=20)
        questions_per_iteration = _validate_questions_per_iteration(
            questions_per_iteration
        )

        # Query is truncated in log lines to keep them bounded.
        logger.info(f"Starting detailed research for query: {query[:100]}...")

        # Fold per-call parameters into settings overrides (this also
        # validates the engine/strategy names).
        overrides = _build_settings_overrides(
            search_engine=search_engine,
            strategy=strategy,
            iterations=iterations,
            questions_per_iteration=questions_per_iteration,
        )

        # Build the snapshot once; omit the overrides kwarg entirely when
        # nothing was overridden.
        settings = (
            create_settings_snapshot(overrides=overrides)
            if overrides
            else create_settings_snapshot()
        )

        # Synchronous call — typically 5-15 minutes (see docstring).
        result = ldr_detailed_research(query, settings_snapshot=settings)

        # Normalize the library result into the documented response shape,
        # with safe defaults for any missing keys.
        return {
            "status": "success",
            "query": result.get("query", query),
            "research_id": result.get("research_id", ""),
            "summary": result.get("summary", ""),
            "findings": result.get("findings", []),
            "sources": result.get("sources", []),
            "iterations": result.get("iterations", 0),
            "formatted_findings": result.get("formatted_findings", ""),
            "metadata": result.get("metadata", {}),
        }

    except ValidationError as e:
        # Validation failures are client errors; the message is safe to
        # return verbatim.
        logger.warning("Validation failed for detailed research")
        return {
            "status": "error",
            "error": str(e),
            "error_type": "validation_error",
        }
    except Exception as e:
        # Unexpected failures: log the full traceback server-side but
        # return only a coarse classification to the client.
        logger.exception(
            f"Detailed research failed for query: {query[:100] if query else 'empty'}"
        )
        error_type = _classify_error(str(e))
        return {
            "status": "error",
            "error": f"Detailed research failed ({error_type}). Check server logs for details.",
            "error_type": error_type,
        }
@mcp.tool()
def generate_report(
    query: str,
    search_engine: Optional[str] = None,
    searches_per_section: int = 2,
) -> Dict[str, Any]:
    """
    Generate a comprehensive markdown research report.

    This tool generates a full structured research report with sections,
    citations, and comprehensive analysis. The output is formatted as markdown.

    IMPORTANT: This is a synchronous operation that typically takes 10-30 minutes
    to complete due to the comprehensive nature of the report.

    Args:
        query: The research question or topic for the report.
        search_engine: Search engine to use (e.g., "wikipedia", "arxiv", "searxng", "auto").
        searches_per_section: Number of searches per report section (1-10). Default is 2.

    Returns:
        Dictionary containing:
            - status: "success" or "error"
            - content: The full report content in markdown format
            - metadata: Report metadata (timestamp, query)
            - error/error_type: Error info (only if status is "error")
    """
    try:
        # Validate everything up front so bad input never reaches the
        # (very slow) report pipeline.
        query = _validate_query(query)
        count_ok = (
            isinstance(searches_per_section, int) and searches_per_section >= 1
        )
        if not count_ok:
            raise ValidationError(  # noqa: TRY301
                "Searches per section must be a positive integer"
            )
        if searches_per_section > 10:
            raise ValidationError("Searches per section cannot exceed 10")  # noqa: TRY301

        logger.info(f"Starting report generation for query: {query[:100]}...")

        # Only one override is supported here: the search engine.
        overrides = {}
        if search_engine:
            validated_engine = _validate_search_engine(search_engine)
            if validated_engine:
                overrides["search.tool"] = validated_engine

        if overrides:
            settings = create_settings_snapshot(overrides=overrides)
        else:
            settings = create_settings_snapshot()

        report = ldr_generate_report(
            query,
            settings_snapshot=settings,
            searches_per_section=searches_per_section,
        )

        return {
            "status": "success",
            "content": report.get("content", ""),
            "metadata": report.get("metadata", {}),
        }

    except ValidationError as e:
        logger.warning("Validation failed for report generation")
        return {
            "status": "error",
            "error": str(e),
            "error_type": "validation_error",
        }
    except Exception as e:
        logger.exception(
            f"Report generation failed for query: {query[:100] if query else 'empty'}"
        )
        error_type = _classify_error(str(e))
        return {
            "status": "error",
            "error": f"Report generation failed ({error_type}). Check server logs for details.",
            "error_type": error_type,
        }
@mcp.tool()
def analyze_documents(
    query: str,
    collection_name: str,
    max_results: int = 10,
) -> Dict[str, Any]:
    """
    Search and analyze documents in a local collection.

    This tool performs RAG (Retrieval Augmented Generation) search on a
    local document collection and generates a summary of relevant findings.

    Args:
        query: The search query for the documents.
        collection_name: Name of the local document collection to search.
        max_results: Maximum number of documents to retrieve (1-100). Default is 10.

    Returns:
        Dictionary containing:
            - status: "success" or "error"
            - summary: Summary of findings from the documents
            - documents: List of matching documents with content and metadata
            - collection: Name of the collection searched
            - document_count: Number of documents found
            - error/error_type: Error info (only if status is "error")
    """
    try:
        # Validate inputs before touching the collection.
        query = _validate_query(query)
        if not collection_name or not collection_name.strip():
            raise ValidationError("Collection name cannot be empty")  # noqa: TRY301
        collection_name = collection_name.strip()
        if _COLLECTION_NAME_RE.match(collection_name) is None:
            raise ValidationError(  # noqa: TRY301
                "Collection name may only contain letters, digits, spaces, hyphens, and underscores (max 100 chars)"
            )
        max_results = _validate_max_results(max_results)

        logger.info(
            f"Analyzing documents in '{collection_name}' for query: {query[:100]}..."
        )

        analysis = ldr_analyze_documents(
            query=query,
            collection_name=collection_name,
            max_results=max_results,
        )

        # Project the library result onto the documented response shape.
        response: Dict[str, Any] = {"status": "success"}
        response["summary"] = analysis.get("summary", "")
        response["documents"] = analysis.get("documents", [])
        response["collection"] = analysis.get("collection", collection_name)
        response["document_count"] = analysis.get("document_count", 0)
        return response

    except ValidationError as e:
        logger.warning("Validation failed for document analysis")
        return {
            "status": "error",
            "error": str(e),
            "error_type": "validation_error",
        }
    except Exception as e:
        logger.exception(
            f"Document analysis failed for collection: {collection_name if collection_name else 'empty'}"
        )
        error_type = _classify_error(str(e))
        return {
            "status": "error",
            "error": f"Document analysis failed ({error_type}). Check server logs for details.",
            "error_type": error_type,
        }
@mcp.tool()
def search(
    query: str,
    engine: str,
    max_results: int = 10,
) -> Dict[str, Any]:
    """
    Search using a specific engine and return raw results without LLM processing.

    This tool performs a direct search query against the specified engine and
    returns raw results (title, link, snippet). No LLM is involved, making it
    fast and free of LLM costs.

    IMPORTANT: This is a fast operation, typically completing in 5-30 seconds.

    Args:
        query: The search query string.
        engine: Search engine to use (e.g., "arxiv", "wikipedia", "searxng", "brave").
            This is required — use list_search_engines() to see available options.
        max_results: Maximum number of results to return (1-100). Default is 10.

    Returns:
        Dictionary containing:
            - status: "success" or "error"
            - query: The original query
            - engine: The engine used
            - result_count: Number of results returned
            - results: List of results, each with title, link, and snippet
            - error/error_type: Error info (only if status is "error")
    """
    try:
        # Cheap validation first.
        query = _validate_query(query)
        max_results = _validate_max_results(max_results)

        # Unlike the research tools, engine is required here.
        if not engine or not engine.strip():
            raise ValidationError(  # noqa: TRY301
                "Engine name cannot be empty. Use list_search_engines() to see available options."
            )
        engine = engine.strip()

        # One settings snapshot serves validation, API-key lookup, and the
        # search itself.
        settings = create_settings_snapshot()

        from local_deep_research.web_search_engines.search_engines_config import (
            search_config,
        )

        engines_config = search_config(settings_snapshot=settings)
        if engine not in engines_config:
            available_names = sorted(engines_config.keys())
            raise ValidationError(  # noqa: TRY301
                f"Unknown search engine '{engine}'. Available: {', '.join(available_names)}"
            )

        # Fail early with a helpful message when the engine needs an API
        # key that is not configured.
        engine_config = engines_config[engine]
        if engine_config.get("requires_api_key", False):
            raw_setting = settings.get(f"search.engine.web.{engine}.api_key")
            api_key = None
            if raw_setting:
                if isinstance(raw_setting, dict):
                    api_key = raw_setting.get("value")
                else:
                    api_key = raw_setting
            if not api_key:
                raise ValidationError(  # noqa: TRY301
                    f"Engine '{engine}' requires an API key. "
                    f"Set the LDR_SEARCH_ENGINE_WEB_{engine.upper()}_API_KEY environment variable "
                    f"or configure it in the UI at search.engine.web.{engine}.api_key"
                )

        logger.info(
            f"Starting search on '{engine}' for query: {query[:100]}..."
        )

        # Set thread-local settings context so that engine constructors
        # which internally call get_llm() or get_setting_from_snapshot()
        # (e.g., arxiv's JournalReputationFilter) can resolve settings.
        from local_deep_research.config.thread_settings import (
            clear_settings_context,
            set_settings_context,
        )
        from local_deep_research.settings.manager import SnapshotSettingsContext

        set_settings_context(SnapshotSettingsContext(settings))
        try:
            return _execute_search(query, engine, max_results, settings)
        finally:
            # Always clear the context, even if the search raised.
            clear_settings_context()

    except ValidationError as e:
        logger.warning("Validation failed for search")
        return {
            "status": "error",
            "error": str(e),
            "error_type": "validation_error",
        }
    except Exception as e:
        logger.exception(
            f"Search failed for query: {query[:100] if query else 'empty'}"
        )
        error_type = _classify_error(str(e))
        return {
            "status": "error",
            "error": f"Search failed ({error_type}). Check server logs for details.",
            "error_type": error_type,
        }
def _execute_search(
    query: str, engine: str, max_results: int, settings: Dict[str, Any]
) -> Dict[str, Any]:
    """Execute the search after settings context is established.

    Args:
        query: Validated, non-empty search query.
        engine: Validated engine name (already checked against the config).
        max_results: Validated result cap (1-100).
        settings: Settings snapshot used to construct the engine.

    Returns:
        MCP response dict: a success payload with raw results, or an error
        payload if the engine could not be constructed.
    """
    from local_deep_research.web_search_engines.search_engine_factory import (
        create_search_engine,
    )

    # llm=None + programmatic_mode: no LLM involved, this is a raw engine
    # query. search_snippets_only keeps it fast (no full-page fetches).
    search_engine = create_search_engine(
        engine_name=engine,
        llm=None,
        settings_snapshot=settings,
        programmatic_mode=True,
        max_results=max_results,
        search_snippets_only=True,
    )

    # The factory signals failure by returning None rather than raising,
    # so translate that into a structured configuration error.
    if search_engine is None:
        return {
            "status": "error",
            "error": f"Failed to create search engine '{engine}'. "
            f"This engine may require an LLM or have other prerequisites. "
            f"Check server logs for details.",
            "error_type": "configuration_error",
        }

    try:
        # Execute search
        results = search_engine.run(query)

        # Normalize results: ensure consistent 'snippet' key
        # (some engines return 'body' instead).
        for result in results:
            if "snippet" not in result and "body" in result:
                result["snippet"] = result["body"]

        return {
            "status": "success",
            "query": query,
            "engine": engine,
            "result_count": len(results),
            "results": results,
        }
    finally:
        # Always release engine resources (HTTP sessions etc.), even when
        # run() raised — the caller's except block handles the exception.
        from local_deep_research.utilities.resource_utils import safe_close

        safe_close(search_engine, "MCP search engine")
729# =============================================================================
730# Discovery Tools
731# =============================================================================
@mcp.tool()
def list_search_engines() -> Dict[str, Any]:
    """
    List available search engines.

    Returns a list of search engines that can be used with the research tools.
    Each engine has different strengths - some are better for academic research,
    others for current events, etc.

    Returns:
        Dictionary containing:
            - status: "success" or "error"
            - engines: List of available search engine configurations
            - error/error_type: Error info (only if status is "error")
    """
    try:
        from local_deep_research.api.settings_utils import (
            create_settings_snapshot,
        )
        from local_deep_research.web_search_engines.search_engines_config import (
            search_config,
        )

        snapshot = create_settings_snapshot()
        configured = search_config(settings_snapshot=snapshot)

        # Project each engine config onto the public fields we expose.
        engines = [
            {
                "name": name,
                "description": cfg.get("description", ""),
                "strengths": cfg.get("strengths", []),
                "weaknesses": cfg.get("weaknesses", []),
                "requires_api_key": cfg.get("requires_api_key", False),
                "is_local": cfg.get("is_local", False),
            }
            for name, cfg in configured.items()
        ]
        engines.sort(key=lambda entry: entry["name"])

        return {
            "status": "success",
            "engines": engines,
        }

    except Exception as e:
        logger.exception("Failed to list search engines")
        error_type = _classify_error(str(e))
        return {
            "status": "error",
            "error": f"Failed to list search engines ({error_type}). Check server logs for details.",
            "error_type": error_type,
        }
@mcp.tool()
def list_strategies() -> Dict[str, Any]:
    """
    List available research strategies.

    Returns a list of research strategies that can be used with the research tools.
    Each strategy has different characteristics suited for different types of queries.

    Returns:
        Dictionary containing:
            - status: "success" or "error"
            - strategies: List of available strategies with names and descriptions
            - error/error_type: Error info (only if status is "error")
    """
    try:
        strategies = get_available_strategies(show_all=True)
    except Exception as e:
        logger.exception("Failed to list strategies")
        error_type = _classify_error(str(e))
        return {
            "status": "error",
            "error": f"Failed to list strategies ({error_type}). Check server logs for details.",
            "error_type": error_type,
        }
    return {
        "status": "success",
        "strategies": strategies,
    }
@mcp.tool()
def get_configuration() -> Dict[str, Any]:
    """
    Get current server configuration.

    Returns the current configuration settings being used by the MCP server,
    including LLM provider, default search engine, and other settings.

    Returns:
        Dictionary containing:
            - status: "success" or "error"
            - config: Current configuration settings
            - error/error_type: Error info (only if status is "error")
    """
    try:
        from local_deep_research.api.settings_utils import (
            create_settings_snapshot,
            extract_setting_value,
        )

        settings = create_settings_snapshot()

        # Expose only a curated subset of the snapshot; extract_setting_value
        # supplies the fallback when a key is missing from the snapshot.
        config = {
            "llm": {
                "provider": extract_setting_value(
                    settings, "llm.provider", "unknown"
                ),
                "model": extract_setting_value(
                    settings, "llm.model", "unknown"
                ),
                "temperature": extract_setting_value(
                    settings, "llm.temperature", 0.7
                ),
            },
            "search": {
                "default_engine": extract_setting_value(
                    settings, "search.tool", "auto"
                ),
                "default_strategy": extract_setting_value(
                    settings, "search.search_strategy", "source-based"
                ),
                "iterations": extract_setting_value(
                    settings, "search.iterations", 2
                ),
                "questions_per_iteration": extract_setting_value(
                    settings, "search.questions_per_iteration", 3
                ),
                "max_results": extract_setting_value(
                    settings, "search.max_results", 10
                ),
            },
        }

        return {
            "status": "success",
            "config": config,
        }

    except Exception as e:
        logger.exception("Failed to get configuration")
        error_type = _classify_error(str(e))
        return {
            "status": "error",
            "error": f"Failed to get configuration ({error_type}). Check server logs for details.",
            "error_type": error_type,
        }
885# =============================================================================
886# Server Entry Point
887# =============================================================================
def run_server():
    """Run the MCP server using STDIO transport.

    Blocks until the client disconnects or the process is terminated.
    """
    # MCP uses stdout for JSON-RPC, so redirect all logging to stderr.
    # This runs in a separate subprocess (ldr-mcp) — logger.remove() only
    # affects this MCP process, not the main LDR application.
    logger.remove()
    logger.add(
        sys.stderr,
        level="INFO",
        format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} | {message}",
    )
    logger.info("Starting Local Deep Research MCP server...")
    mcp.run(transport="stdio")
# Allow running directly via `python -m local_deep_research.mcp` in
# addition to the ldr-mcp console script.
if __name__ == "__main__":
    run_server()