Coverage for src / local_deep_research / web / routes / api_routes.py: 77%
211 statements
« prev ^ index » next — coverage.py v7.12.0, created at 2026-01-11 00:51 +0000
1import requests
2from flask import (
3 Blueprint,
4 current_app,
5 jsonify,
6 request,
7)
8from loguru import logger
9from datetime import datetime, UTC
11from ...database.models import ResearchHistory
12from ...database.session_context import get_user_db_session
13from ...utilities.url_utils import normalize_url
14from ..auth.decorators import login_required
15from ..routes.research_routes import active_research, termination_flags
16from ..services.research_service import (
17 cancel_research,
18 run_research_process,
19 start_research_process,
20)
21from ..services.resource_service import (
22 add_resource,
23 delete_resource,
24 get_resources_for_research,
25)
26from local_deep_research.settings import SettingsManager
27from ...security import safe_get
# Create the blueprint for the JSON API endpoints; registered by the app factory.
api_bp = Blueprint("api", __name__)
@api_bp.route("/settings/current-config", methods=["GET"])
@login_required
def get_current_config():
    """Return the current LLM/search configuration as JSON.

    Each value is read from the per-user settings database, falling back
    to a default when the key is missing.

    Returns:
        200 with ``{"success": True, "config": {...}}`` on success,
        500 with a generic error payload on any failure.
    """
    try:
        with get_user_db_session() as session:
            settings_manager = SettingsManager(session)

            config = {
                "provider": settings_manager.get_setting(
                    "llm.provider", "Not configured"
                ),
                "model": settings_manager.get_setting(
                    "llm.model", "Not configured"
                ),
                "search_tool": settings_manager.get_setting(
                    "search.tool", "searxng"
                ),
                "iterations": settings_manager.get_setting(
                    "search.iterations", 8
                ),
                "questions_per_iteration": settings_manager.get_setting(
                    "search.questions_per_iteration", 5
                ),
                "search_strategy": settings_manager.get_setting(
                    "search.search_strategy", "focused_iteration"
                ),
            }
        # FIX: the explicit session.close() that used to follow the config
        # dict was redundant — the context manager already closes the session.

        return jsonify({"success": True, "config": config})
    except Exception:
        logger.exception("Error getting current config")
        return jsonify(
            {"success": False, "error": "An internal error occurred"}
        ), 500
71# API Routes
@api_bp.route("/start", methods=["POST"])
@login_required
def api_start_research():
    """Start a new research process.

    Expects a JSON body with a required ``query`` and an optional
    ``mode`` (defaults to ``"quick"``).

    Returns:
        200 with the new ``research_id`` on success,
        400 when ``query`` is missing,
        500 when the research could not be started.
    """
    data = request.json
    query = data.get("query", "")
    mode = data.get("mode", "quick")

    if not query:
        return jsonify({"status": "error", "message": "Query is required"}), 400

    try:
        # Create a record in the database with explicit UTC timestamp
        created_at = datetime.now(UTC).isoformat()

        # Save basic research settings for API route
        research_settings = {
            "model_provider": "OLLAMA",  # Default
            "model": "llama2",  # Default
            "search_engine": "searxng",  # Default
        }

        with get_user_db_session() as db_session:
            research = ResearchHistory(
                query=query,
                mode=mode,
                status="in_progress",
                created_at=created_at,
                progress_log=[{"time": created_at, "progress": 0}],
                research_meta=research_settings,
            )
            db_session.add(research)
            db_session.commit()
            research_id = research.id

        # Start the research process
        research_thread = start_research_process(
            research_id,
            query,
            mode,
            active_research,
            termination_flags,
            run_research_process,
        )

        # Store the thread reference
        active_research[research_id]["thread"] = research_thread

        return jsonify(
            {
                "status": "success",
                "message": "Research started successfully",
                "research_id": research_id,
            }
        )
    except Exception:
        logger.exception("Error starting research")
        # BUG FIX: previously `jsonify({...}, 500)` serialized 500 into the
        # response body and returned HTTP 200. The status code must be the
        # second element of the returned tuple.
        return jsonify(
            {"status": "error", "message": "Failed to start research"}
        ), 500
@api_bp.route("/status/<string:research_id>", methods=["GET"])
@login_required
def api_research_status(research_id):
    """Return the status, progress and metadata of a research process.

    Returns:
        200 with status/progress fields, 404 when the research id is
        unknown, 500 on unexpected errors.
    """
    try:
        # Get a fresh session to avoid conflicts with the research process
        with get_user_db_session() as db_session:
            research = (
                db_session.query(ResearchHistory)
                .filter_by(id=research_id)
                .first()
            )

            if research is None:
                return jsonify({"error": "Research not found"}), 404

            # Get metadata
            metadata = research.research_meta or {}

            return jsonify(
                {
                    "status": research.status,
                    "progress": research.progress,
                    "completed_at": research.completed_at,
                    "report_path": research.report_path,
                    "metadata": metadata,
                }
            )
    except Exception:
        # Consistency: logger.exception already records the message and
        # traceback, so the redundant f-string interpolation used by the
        # old handler was dropped to match the module's other handlers.
        logger.exception("Error getting research status")
        return jsonify(
            {"status": "error", "message": "Failed to get research status"}
        ), 500
@api_bp.route("/terminate/<string:research_id>", methods=["POST"])
@login_required
def api_terminate_research(research_id):
    """Request cancellation of a running research process.

    Always replies with ``status: success`` when the cancel call itself
    succeeds; the message distinguishes whether anything was cancelled.
    """
    try:
        from flask import session

        username = session.get("username")
        result = cancel_research(research_id, username)

        # A truthy result means an active process was cancelled; falsy
        # means it had already finished or never existed.
        message = (
            "Research terminated"
            if result
            else "Research not found or already completed"
        )
        return jsonify(
            {
                "status": "success",
                "message": message,
                "result": result,
            }
        )
    except Exception:
        logger.exception("Error terminating research")
        return (
            jsonify({"status": "error", "message": "Failed to stop research."}),
            500,
        )
@api_bp.route("/resources/<string:research_id>", methods=["GET"])
@login_required
def api_get_resources(research_id):
    """Return all resources attached to a research run.

    Returns:
        200 with ``{"status": "success", "resources": [...]}``,
        500 on unexpected errors.
    """
    try:
        resources = get_resources_for_research(research_id)
        return jsonify({"status": "success", "resources": resources})
    except Exception:
        logger.exception("Error getting resources for research")
        # BUG FIX: previously `jsonify({...}, 500)` put 500 into the JSON
        # body and returned HTTP 200; the status code belongs in the tuple.
        return jsonify(
            {"status": "error", "message": "Failed to get resources"}
        ), 500
@api_bp.route("/resources/<string:research_id>", methods=["POST"])
@login_required
def api_add_resource(research_id):
    """Add a new resource to a research project.

    JSON body: required ``title`` and ``url``; optional
    ``content_preview``, ``source_type`` (default ``"web"``) and
    ``metadata`` (default ``{}``).

    Returns:
        200 with the new ``resource_id``, 400 on a missing body or missing
        required fields, 404 when the research does not exist, 500 on
        unexpected errors.
    """
    try:
        # ROBUSTNESS FIX: a missing or malformed JSON body used to raise
        # inside `request.json` and surface as a 500; treating it as an
        # empty payload lets the validation below answer with a 400.
        data = request.get_json(silent=True) or {}

        # Required fields
        title = data.get("title")
        url = data.get("url")

        # Optional fields
        content_preview = data.get("content_preview")
        source_type = data.get("source_type", "web")
        metadata = data.get("metadata", {})

        # Validate required fields
        if not title or not url:
            return (
                jsonify(
                    {"status": "error", "message": "Title and URL are required"}
                ),
                400,
            )

        # Check if the research exists before attaching anything to it
        with get_user_db_session() as db_session:
            research = (
                db_session.query(ResearchHistory)
                .filter_by(id=research_id)
                .first()
            )

            if not research:
                return jsonify(
                    {"status": "error", "message": "Research not found"}
                ), 404

            # Add the resource
            resource_id = add_resource(
                research_id=research_id,
                title=title,
                url=url,
                content_preview=content_preview,
                source_type=source_type,
                metadata=metadata,
            )

        return jsonify(
            {
                "status": "success",
                "message": "Resource added successfully",
                "resource_id": resource_id,
            }
        )
    except Exception:
        logger.exception("Error adding resource")
        return jsonify(
            {"status": "error", "message": "Failed to add resource"}
        ), 500
@api_bp.route(
    "/resources/<string:research_id>/delete/<int:resource_id>",
    methods=["DELETE"],
)
@login_required
def api_delete_resource(research_id, resource_id):
    """Delete a single resource from a research project.

    ``research_id`` is part of the URL for symmetry with the other
    resource routes; the deletion itself is keyed on ``resource_id``.
    """
    try:
        if delete_resource(resource_id):
            return jsonify(
                {
                    "status": "success",
                    "message": "Resource deleted successfully",
                }
            )
        return jsonify(
            {"status": "error", "message": "Resource not found"}
        ), 404
    except Exception as e:
        logger.exception(f"Error deleting resource: {e!s}")
        return jsonify(
            {
                "status": "error",
                "message": "An internal error occurred while deleting the resource.",
            }
        ), 500
@api_bp.route("/check/ollama_status", methods=["GET"])
@login_required
def check_ollama_status():
    """Report whether the configured Ollama service is reachable.

    Probes ``GET /api/tags`` on the configured base URL and summarizes
    the outcome as a ``{"running": bool, "message": str, ...}`` payload.
    """
    try:
        cfg = current_app.config.get("LLM_CONFIG", {})
        provider = cfg.get("provider", "ollama")

        # A non-Ollama provider needs no health check.
        if provider.lower() != "ollama":
            return jsonify(
                {
                    "running": True,
                    "message": f"Using provider: {provider}, not Ollama",
                }
            )

        raw_url = cfg.get("ollama_base_url", "http://localhost:11434")
        base_url = (
            normalize_url(raw_url) if raw_url else "http://localhost:11434"
        )

        logger.info(f"Checking Ollama status at: {base_url}")

        try:
            resp = safe_get(
                f"{base_url}/api/tags",
                timeout=5,
                allow_localhost=True,
                allow_private_ips=True,
            )

            # Response details for debugging
            logger.debug(
                f"Ollama status check response code: {resp.status_code}"
            )

            if resp.status_code != 200:
                logger.warning(
                    f"Ollama returned non-200 status code: {resp.status_code}"
                )
                return jsonify(
                    {
                        "running": False,
                        "message": f"Ollama service returned status code: {resp.status_code}",
                        "status_code": resp.status_code,
                    }
                )

            try:
                payload = resp.json()
            except ValueError as json_err:
                logger.warning(f"Ollama returned invalid JSON: {json_err}")
                # The service is up but replied with something unparseable.
                return jsonify(
                    {
                        "running": True,
                        "message": "Ollama service is running but returned invalid data format",
                        "error_details": "Invalid response format from the service.",
                    }
                )

            if "models" in payload:
                # Newer API: {"models": [...]}
                model_count = len(payload.get("models", []))
                logger.info(
                    f"Ollama service is running with {model_count} models (new API format)"
                )
            else:
                # Older API: bare array of models
                model_count = len(payload)
                logger.info(
                    f"Ollama service is running with {model_count} models (old API format)"
                )

            return jsonify(
                {
                    "running": True,
                    "message": f"Ollama service is running with {model_count} models",
                    "model_count": model_count,
                }
            )
        except requests.exceptions.ConnectionError as conn_err:
            logger.warning(f"Ollama connection error: {conn_err}")
            return jsonify(
                {
                    "running": False,
                    "message": "Ollama service is not running or not accessible",
                    "error_type": "connection_error",
                    "error_details": "Unable to connect to the service. Please check if the service is running.",
                }
            )
        except requests.exceptions.Timeout as timeout_err:
            logger.warning(f"Ollama request timed out: {timeout_err}")
            return jsonify(
                {
                    "running": False,
                    "message": "Ollama service request timed out after 5 seconds",
                    "error_type": "timeout",
                }
            )
    except Exception as e:
        logger.exception(f"Error checking Ollama status: {e!s}")
        return jsonify(
            {
                "running": False,
                "message": "An internal error occurred while checking Ollama status.",
                "error_type": "exception",
                "error_details": "An internal error occurred.",
            }
        )
@api_bp.route("/check/ollama_model", methods=["GET"])
@login_required
def check_ollama_model():
    """Check whether the configured Ollama model is available.

    The model is named by the ``model`` query parameter, falling back to
    the configured default. Availability is determined by a
    case-insensitive match against Ollama's ``/api/tags`` listing.
    """
    try:
        # Get Ollama configuration
        llm_config = current_app.config.get("LLM_CONFIG", {})
        provider = llm_config.get("provider", "ollama")

        if provider.lower() != "ollama":
            return jsonify(
                {
                    "available": True,
                    "message": f"Using provider: {provider}, not Ollama",
                    "provider": provider,
                }
            )

        # Get model name from request or use config default
        model_name = request.args.get("model")
        if not model_name:
            model_name = llm_config.get("model", "gemma3:12b")

        # Log which model we're checking for debugging
        logger.info(f"Checking availability of Ollama model: {model_name}")

        # Get Ollama API URL from LLM config
        raw_ollama_base_url = llm_config.get(
            "ollama_base_url", "http://localhost:11434"
        )
        ollama_base_url = (
            normalize_url(raw_ollama_base_url)
            if raw_ollama_base_url
            else "http://localhost:11434"
        )

        # Check if the model is available
        try:
            response = safe_get(
                f"{ollama_base_url}/api/tags",
                timeout=5,
                allow_localhost=True,
                allow_private_ips=True,
            )

            # Log response details for debugging
            logger.debug(f"Ollama API response status: {response.status_code}")

            if response.status_code != 200:
                logger.warning(
                    f"Ollama API returned non-200 status: {response.status_code}"
                )
                return jsonify(
                    {
                        "available": False,
                        "model": model_name,
                        "message": f"Could not access Ollama service - status code: {response.status_code}",
                        "status_code": response.status_code,
                    }
                )

            # Try to parse the response
            try:
                data = response.json()

                # Debug log the first bit of the response
                response_preview = (
                    str(data)[:500] + "..."
                    if len(str(data)) > 500
                    else str(data)
                )
                logger.debug(f"Ollama API response data: {response_preview}")

                # Get models based on API format
                if "models" in data:
                    # Newer Ollama API
                    logger.debug("Using new Ollama API format (models key)")
                    models = data.get("models", [])
                else:
                    # Older Ollama API format
                    logger.debug("Using old Ollama API format (array)")
                    models = data

                # Log available models for debugging
                model_names = [m.get("name", "") for m in models]
                logger.debug(
                    f"Available Ollama models: {', '.join(model_names[:10])}"
                    + (
                        f" and {len(model_names) - 10} more"
                        if len(model_names) > 10
                        else ""
                    )
                )

                # Case-insensitive model name comparison
                model_exists = any(
                    m.get("name", "").lower() == model_name.lower()
                    for m in models
                )

                if model_exists:
                    logger.info(f"Ollama model {model_name} is available")
                    return jsonify(
                        {
                            "available": True,
                            "model": model_name,
                            "message": f"Model {model_name} is available",
                            "all_models": model_names,
                        }
                    )

                # Check if models were found at all
                if not models:
                    logger.warning("No models found in Ollama")
                    message = "No models found in Ollama. Please pull models first."
                else:
                    logger.warning(
                        f"Model {model_name} not found among {len(models)} available models"
                    )
                    # Don't expose available models for security reasons
                    message = f"Model {model_name} is not available"

                return jsonify(
                    {
                        "available": False,
                        "model": model_name,
                        "message": message,
                        # all_models intentionally omitted to prevent
                        # information disclosure
                    }
                )
            except ValueError as json_err:
                # JSON parsing error
                logger.exception(
                    f"Failed to parse Ollama API response: {json_err}"
                )
                return jsonify(
                    {
                        "available": False,
                        "model": model_name,
                        "message": "Invalid response from Ollama API",
                        "error_type": "json_parse_error",
                    }
                )
        except requests.exceptions.ConnectionError as conn_err:
            # Connection error
            logger.warning(f"Connection error to Ollama API: {conn_err}")
            return jsonify(
                {
                    "available": False,
                    "model": model_name,
                    "message": "Could not connect to Ollama service",
                    "error_type": "connection_error",
                    "error_details": "Unable to connect to the service. Please check if the service is running.",
                }
            )
        except requests.exceptions.Timeout:
            # Timeout error
            logger.warning("Timeout connecting to Ollama API")
            return jsonify(
                {
                    "available": False,
                    "model": model_name,
                    "message": "Connection to Ollama service timed out",
                    "error_type": "timeout",
                }
            )
    except Exception:
        # General exception
        logger.exception("Error checking Ollama model")

        # BUG FIX: the old fallback read `llm_config` unguarded, so a
        # failure before `llm_config` was bound made this handler itself
        # raise NameError. Guard both locals before using them.
        fallback_model = locals().get("model_name")
        if fallback_model is None:
            cfg = locals().get("llm_config") or {}
            fallback_model = cfg.get("model", "gemma3:12b")

        return jsonify(
            {
                "available": False,
                "model": fallback_model,
                "message": "An internal error occurred while checking the model.",
                "error_type": "exception",
                "error_details": "An internal error occurred.",
            }
        )
638# Helper route to get system configuration
@api_bp.route("/config", methods=["GET"])
@login_required
def api_get_config():
    """Expose the public subset of the system configuration."""
    llm_cfg = current_app.config.get("LLM_CONFIG", {})
    search_cfg = current_app.config.get("SEARCH_CONFIG", {})

    # Only return public configuration — nothing sensitive leaves here.
    public_config = {
        "version": current_app.config.get("VERSION", "0.1.0"),
        "llm_provider": llm_cfg.get("provider", "ollama"),
        "search_tool": search_cfg.get("search_tool", "auto"),
        "features": {
            "notifications": current_app.config.get(
                "ENABLE_NOTIFICATIONS", False
            )
        },
    }

    return jsonify(public_config)