Coverage for src / local_deep_research / web / routes / api_routes.py: 97%
205 statements
« prev ^ index » next — coverage.py v7.13.5, created at 2026-04-14 23:55 +0000
1import requests
2from flask import (
3 Blueprint,
4 current_app,
5 jsonify,
6 request,
7)
8from loguru import logger
10from ...database.models import QueuedResearch, ResearchHistory
11from ...database.session_context import get_user_db_session
12from ...config.constants import DEFAULT_OLLAMA_URL
13from ...constants import ResearchStatus
14from ...utilities.url_utils import normalize_url
15from ...security.decorators import require_json_body
16from ..auth.decorators import login_required
17from ..services.research_service import (
18 cancel_research,
19)
20from ..services.resource_service import (
21 add_resource,
22 delete_resource,
23 get_resources_for_research,
24)
25from local_deep_research.settings import SettingsManager
26from ...llm.providers.base import normalize_provider
27from ...security import safe_get, strip_settings_snapshot
# Create blueprint for the JSON API routes in this module.
# Registered by the web app factory — presumably under an /api URL
# prefix (TODO confirm against the app setup).
api_bp = Blueprint("api", __name__)

# NOTE: Routes use session["username"] (not .get()) intentionally.
# @login_required guarantees the key exists; direct access fails fast
# if the decorator is ever removed.
@api_bp.route("/settings/current-config", methods=["GET"])
@login_required
def get_current_config():
    """Return the active research configuration from the user's settings DB.

    Response: {"success": True, "config": {...}} on success, or a generic
    500 payload if settings lookup fails (details are logged, not leaked).
    """
    # Response field -> (settings key, fallback when unset).
    _FIELDS = {
        "provider": ("llm.provider", "Not configured"),
        "model": ("llm.model", "Not configured"),
        "search_tool": ("search.tool", "searxng"),
        "iterations": ("search.iterations", 8),
        "questions_per_iteration": ("search.questions_per_iteration", 5),
        "search_strategy": ("search.search_strategy", "focused_iteration"),
    }
    try:
        with get_user_db_session() as session:
            manager = SettingsManager(session)
            config = {
                field: manager.get_setting(key, fallback)
                for field, (key, fallback) in _FIELDS.items()
            }

        return jsonify({"success": True, "config": config})

    except Exception:
        logger.exception("Error getting current config")
        return jsonify(
            {"success": False, "error": "An internal error occurred"}
        ), 500
# API Routes
@api_bp.route("/start", methods=["POST"])
@login_required
def api_start_research():
    """Kick off a new research run.

    Thin alias for research_routes.start_research(), which reads settings
    from the database, handles queueing, and launches the research thread.
    The import is local to avoid a circular import at module load time.
    """
    from ..routes.research_routes import start_research

    return start_research()
@api_bp.route("/status/<string:research_id>", methods=["GET"])
@login_required
def api_research_status(research_id):
    """Report status, progress, and metadata for one research record."""
    try:
        # Fresh session so we don't contend with the running research job.
        with get_user_db_session() as db_session:
            record = (
                db_session.query(ResearchHistory)
                .filter_by(id=research_id)
                .first()
            )

            if record is None:
                return jsonify({"error": "Research not found"}), 404

            # Read every attribute while the session is still open;
            # touching them after the with-block exits would raise
            # DetachedInstanceError.
            payload = {
                "status": record.status,
                "progress": record.progress,
                "completed_at": record.completed_at,
                "report_path": record.report_path,
                "metadata": strip_settings_snapshot(record.research_meta),
            }

            # Queued research additionally reports its place in line.
            if record.status == ResearchStatus.QUEUED:
                queue_entry = (
                    db_session.query(QueuedResearch)
                    .filter_by(research_id=research_id)
                    .first()
                )
                if queue_entry:
                    payload["queue_position"] = queue_entry.position

            return jsonify(payload)
    except Exception:
        logger.exception("Error getting research status")
        return jsonify(
            {"status": "error", "message": "Failed to get research status"}
        ), 500
@api_bp.route("/terminate/<string:research_id>", methods=["POST"])
@login_required
def api_terminate_research(research_id):
    """Request cancellation of a research process.

    Always returns HTTP 200 with "status": "success"; the "message" and
    "result" fields distinguish an actual termination from a no-op
    (unknown id or already-finished research).
    """
    try:
        from flask import session

        # Direct key access: @login_required guarantees "username" exists.
        outcome = cancel_research(research_id, session["username"])
        message = (
            "Research terminated"
            if outcome
            else "Research not found or already completed"
        )
        return jsonify(
            {
                "status": "success",
                "message": message,
                "result": outcome,
            }
        )
    except Exception:
        logger.exception("Error terminating research")
        return (
            jsonify({"status": "error", "message": "Failed to stop research."}),
            500,
        )
@api_bp.route("/resources/<string:research_id>", methods=["GET"])
@login_required
def api_get_resources(research_id):
    """List the stored resources attached to one research record."""
    try:
        found = get_resources_for_research(research_id)
        return jsonify({"status": "success", "resources": found})
    except Exception:
        logger.exception("Error getting resources for research")
        return jsonify(
            {"status": "error", "message": "Failed to get resources"}
        ), 500
@api_bp.route("/resources/<string:research_id>", methods=["POST"])
@login_required
@require_json_body(error_format="status")
def api_add_resource(research_id):
    """Attach a new resource to a research project.

    JSON body: "title" and "url" are required; "content_preview",
    "source_type" (default "web"), and "metadata" (default {}) are
    optional. URLs are vetted against SSRF before being stored.
    """
    try:
        payload = request.json

        title = payload.get("title")
        url = payload.get("url")
        content_preview = payload.get("content_preview")
        source_type = payload.get("source_type", "web")
        metadata = payload.get("metadata", {})

        # Reject requests missing either mandatory field.
        if not (title and url):
            return (
                jsonify(
                    {"status": "error", "message": "Title and URL are required"}
                ),
                400,
            )

        # Security: Validate URL to prevent SSRF attacks
        from ...security.ssrf_validator import validate_url

        if not validate_url(url):
            logger.warning(f"SSRF protection: Rejected URL {url}")
            return (
                jsonify({"status": "error", "message": "Invalid URL"}),
                400,
            )

        # The target research record must exist before we attach anything.
        with get_user_db_session() as db_session:
            known = (
                db_session.query(ResearchHistory)
                .filter_by(id=research_id)
                .first()
            )

            if not known:
                return jsonify(
                    {"status": "error", "message": "Research not found"}
                ), 404

            resource_id = add_resource(
                research_id=research_id,
                title=title,
                url=url,
                content_preview=content_preview,
                source_type=source_type,
                metadata=metadata,
            )

        return jsonify(
            {
                "status": "success",
                "message": "Resource added successfully",
                "resource_id": resource_id,
            }
        )
    except Exception:
        logger.exception("Error adding resource")
        return jsonify(
            {"status": "error", "message": "Failed to add resource"}
        ), 500
@api_bp.route(
    "/resources/<string:research_id>/delete/<int:resource_id>",
    methods=["DELETE"],
)
@login_required
def api_delete_resource(research_id, resource_id):
    """Delete a single resource from a research project.

    NOTE(review): research_id appears only in the URL; deletion is keyed
    solely on resource_id — confirm delete_resource() enforces ownership.
    """
    try:
        if delete_resource(resource_id):
            return jsonify(
                {
                    "status": "success",
                    "message": "Resource deleted successfully",
                }
            )
        return jsonify(
            {"status": "error", "message": "Resource not found"}
        ), 404
    except Exception:
        logger.exception("Error deleting resource")
        return jsonify(
            {
                "status": "error",
                "message": "An internal error occurred while deleting the resource.",
            }
        ), 500
@api_bp.route("/check/ollama_status", methods=["GET"])
@login_required
def check_ollama_status():
    """Probe the configured Ollama endpoint and report whether it is up.

    Returns a JSON object with a "running" flag plus a human-readable
    "message"; error cases add "error_type"/"error_details" fields.
    Non-Ollama providers short-circuit to "running": True.
    """
    try:
        llm_config = current_app.config.get("LLM_CONFIG", {})
        provider = normalize_provider(llm_config.get("provider", "ollama"))

        # Nothing to probe when another provider is configured.
        if provider != "ollama":
            return jsonify(
                {
                    "running": True,
                    "message": f"Using provider: {provider}, not Ollama",
                }
            )

        # Resolve and normalize the Ollama base URL from LLM config.
        raw_url = llm_config.get("ollama_base_url", DEFAULT_OLLAMA_URL)
        base_url = normalize_url(raw_url) if raw_url else DEFAULT_OLLAMA_URL

        logger.info(f"Checking Ollama status at: {base_url}")

        try:
            # Ollama commonly runs on localhost / private IPs, so the
            # SSRF-safe getter must explicitly allow them here.
            response = safe_get(
                f"{base_url}/api/tags",
                timeout=5,
                allow_localhost=True,
                allow_private_ips=True,
            )

            logger.debug(
                f"Ollama status check response code: {response.status_code}"
            )

            # Guard clause: any non-200 means the service is unhealthy.
            if response.status_code != 200:
                logger.warning(
                    f"Ollama returned non-200 status code: {response.status_code}"
                )
                return jsonify(
                    {
                        "running": False,
                        "message": f"Ollama service returned status code: {response.status_code}",
                        "status_code": response.status_code,
                    }
                )

            try:
                data = response.json()
            except ValueError:
                logger.warning("Ollama returned invalid JSON")
                # Service answered, so it is running — just malformed.
                return jsonify(
                    {
                        "running": True,
                        "message": "Ollama service is running but returned invalid data format",
                        "error_details": "Invalid response format from the service.",
                    }
                )

            # Newer API nests models under a "models" key; older versions
            # return a bare list.
            if "models" in data:
                model_count = len(data.get("models", []))
                logger.info(
                    f"Ollama service is running with {model_count} models (new API format)"
                )
            else:
                model_count = len(data)
                logger.info(
                    f"Ollama service is running with {model_count} models (old API format)"
                )

            return jsonify(
                {
                    "running": True,
                    "message": f"Ollama service is running with {model_count} models",
                    "model_count": model_count,
                }
            )

        except requests.exceptions.ConnectionError:
            logger.warning("Ollama connection error")
            return jsonify(
                {
                    "running": False,
                    "message": "Ollama service is not running or not accessible",
                    "error_type": "connection_error",
                    "error_details": "Unable to connect to the service. Please check if the service is running.",
                }
            )
        except requests.exceptions.Timeout:
            logger.warning("Ollama request timed out")
            return jsonify(
                {
                    "running": False,
                    "message": "Ollama service request timed out after 5 seconds",
                    "error_type": "timeout",
                    "error_details": "Request timed out. The service may be overloaded.",
                }
            )

    except Exception:
        logger.exception("Error checking Ollama status")
        return jsonify(
            {
                "running": False,
                "message": "An internal error occurred while checking Ollama status.",
                "error_type": "exception",
                "error_details": "An internal error occurred.",
            }
        )
@api_bp.route("/check/ollama_model", methods=["GET"])
@login_required
def check_ollama_model():
    """
    Check if the configured Ollama model is available.

    Query params:
        model: optional model name to check; falls back to the configured
            LLM model (default "gemma3:12b").

    Returns a JSON object with an "available" flag, the "model" checked,
    and a "message"; error cases add "error_type" (and sometimes
    "error_details" / "status_code"). Non-Ollama providers short-circuit
    to "available": True.
    """
    try:
        # Get Ollama configuration
        llm_config = current_app.config.get("LLM_CONFIG", {})
        provider = normalize_provider(llm_config.get("provider", "ollama"))

        # Nothing to check when another provider is configured.
        if provider != "ollama":
            return jsonify(
                {
                    "available": True,
                    "message": f"Using provider: {provider}, not Ollama",
                    "provider": provider,
                }
            )

        # Get model name from request or use config default
        model_name = request.args.get("model")
        if not model_name:
            model_name = llm_config.get("model", "gemma3:12b")

        # Log which model we're checking for debugging
        logger.info(f"Checking availability of Ollama model: {model_name}")

        # Get Ollama API URL from LLM config
        raw_ollama_base_url = llm_config.get(
            "ollama_base_url", DEFAULT_OLLAMA_URL
        )
        ollama_base_url = (
            normalize_url(raw_ollama_base_url)
            if raw_ollama_base_url
            else DEFAULT_OLLAMA_URL
        )

        # Check if the model is available
        try:
            # Ollama typically listens on localhost / a private IP, so the
            # SSRF-safe getter must allow those destinations here.
            response = safe_get(
                f"{ollama_base_url}/api/tags",
                timeout=5,
                allow_localhost=True,
                allow_private_ips=True,
            )

            # Log response details for debugging
            logger.debug(f"Ollama API response status: {response.status_code}")

            if response.status_code != 200:
                logger.warning(
                    f"Ollama API returned non-200 status: {response.status_code}"
                )
                return jsonify(
                    {
                        "available": False,
                        "model": model_name,
                        "message": f"Could not access Ollama service - status code: {response.status_code}",
                        "status_code": response.status_code,
                    }
                )

            # Try to parse the response
            try:
                data = response.json()

                # Debug log the first bit of the response (truncated to
                # keep log lines bounded).
                response_preview = (
                    str(data)[:500] + "..."
                    if len(str(data)) > 500
                    else str(data)
                )
                logger.debug(f"Ollama API response data: {response_preview}")

                # Get models based on API format: newer Ollama nests the
                # list under a "models" key, older versions return a bare
                # array.
                models = []
                if "models" in data:
                    # Newer Ollama API
                    logger.debug("Using new Ollama API format (models key)")
                    models = data.get("models", [])
                else:
                    # Older Ollama API format
                    logger.debug("Using old Ollama API format (array)")
                    models = data

                # Log available models for debugging (first 10 only).
                model_names = [m.get("name", "") for m in models]
                logger.debug(
                    f"Available Ollama models: {', '.join(model_names[:10])}"
                    + (
                        f" and {len(model_names) - 10} more"
                        if len(model_names) > 10
                        else ""
                    )
                )

                # Case-insensitive model name comparison
                model_exists = any(
                    m.get("name", "").lower() == model_name.lower()
                    for m in models
                )

                if model_exists:
                    logger.info(f"Ollama model {model_name} is available")
                    return jsonify(
                        {
                            "available": True,
                            "model": model_name,
                            "message": f"Model {model_name} is available",
                            "all_models": model_names,
                        }
                    )
                # Check if models were found at all
                if not models:
                    logger.warning("No models found in Ollama")
                    message = (
                        "No models found in Ollama. Please pull models first."
                    )
                else:
                    logger.warning(
                        f"Model {model_name} not found among {len(models)} available models"
                    )
                    # Don't expose available models for security reasons
                    message = f"Model {model_name} is not available"

                return jsonify(
                    {
                        "available": False,
                        "model": model_name,
                        "message": message,
                        # Remove all_models to prevent information disclosure
                    }
                )
            except ValueError as json_err:
                # JSON parsing error
                logger.exception(
                    f"Failed to parse Ollama API response: {json_err}"
                )
                return jsonify(
                    {
                        "available": False,
                        "model": model_name,
                        "message": "Invalid response from Ollama API",
                        "error_type": "json_parse_error",
                    }
                )

        except requests.exceptions.ConnectionError:
            # Connection error
            logger.warning("Connection error to Ollama API")
            return jsonify(
                {
                    "available": False,
                    "model": model_name,
                    "message": "Could not connect to Ollama service",
                    "error_type": "connection_error",
                    "error_details": "Unable to connect to the service. Please check if the service is running.",
                }
            )
        except requests.exceptions.Timeout:
            # Timeout error
            logger.warning("Timeout connecting to Ollama API")
            return jsonify(
                {
                    "available": False,
                    "model": model_name,
                    "message": "Connection to Ollama service timed out",
                    "error_type": "timeout",
                }
            )

    except Exception:
        # General exception
        logger.exception("Error checking Ollama model")

        return jsonify(
            {
                "available": False,
                # model_name may be unbound if the failure happened before
                # it was assigned; fall back to the configured default.
                # NOTE(review): llm_config could likewise be unbound if the
                # very first config lookup raised — TODO confirm.
                "model": (
                    model_name
                    if "model_name" in locals()
                    else llm_config.get("model", "gemma3:12b")
                ),
                "message": "An internal error occurred while checking the model.",
                "error_type": "exception",
                "error_details": "An internal error occurred.",
            }
        )
# Helper route to get system configuration
@api_bp.route("/config", methods=["GET"])
@login_required
def api_get_config():
    """Expose only non-sensitive system configuration to clients."""
    llm_cfg = current_app.config.get("LLM_CONFIG", {})
    search_cfg = current_app.config.get("SEARCH_CONFIG", {})

    # Deliberately limited to public values; no secrets or credentials.
    return jsonify(
        {
            "version": current_app.config.get("VERSION", "0.1.0"),
            "llm_provider": llm_cfg.get("provider", "ollama"),
            "search_tool": search_cfg.get("search_tool", "auto"),
            "features": {
                "notifications": current_app.config.get(
                    "ENABLE_NOTIFICATIONS", False
                )
            },
        }
    )