Coverage for src / local_deep_research / llm / providers / implementations / ollama.py: 97%
93 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-14 23:55 +0000
1"""Ollama LLM provider for Local Deep Research."""
3import requests
4from langchain_ollama import ChatOllama
5from loguru import logger
7from ....config.thread_settings import get_setting_from_snapshot
8from ....utilities.url_utils import normalize_url
9from ....security import safe_get
10from ..base import BaseLLMProvider
class OllamaProvider(BaseLLMProvider):
    """Ollama provider for Local Deep Research.

    This is the Ollama local model provider.
    """

    provider_name = "Ollama"
    default_model = "gemma3:12b"
    # Optional API key for authenticated (e.g. reverse-proxied) Ollama instances
    api_key_setting = "llm.ollama.api_key"
    # Settings key holding the Ollama base URL; used for model listing and LLM creation
    url_setting = "llm.ollama.url"

    # Metadata for auto-discovery
    provider_key = "OLLAMA"
    company_name = "Ollama"
    is_cloud = False

    @classmethod
    def _get_auth_headers(cls, api_key=None, settings_snapshot=None):
        """Get authentication headers for Ollama API requests.

        Args:
            api_key: Optional API key to use (takes precedence)
            settings_snapshot: Optional settings snapshot to get API key from

        Returns:
            Dict of headers, empty if no API key configured
        """
        headers = {}

        # Use provided API key or fall back to the configured setting
        if api_key is None and settings_snapshot is not None:
            api_key = get_setting_from_snapshot(
                cls.api_key_setting,
                "",  # Empty string instead of None to avoid NoSettingsContextError
                settings_snapshot=settings_snapshot,
            )

        if api_key:
            # Support Bearer token authentication for proxied Ollama instances
            headers["Authorization"] = f"Bearer {api_key}"

        return headers

    @classmethod
    def list_models_for_api(cls, api_key=None, base_url=None):
        """Get available models from Ollama.

        Args:
            api_key: Optional API key for authentication
            base_url: Base URL for Ollama API (required)

        Returns:
            List of model dictionaries with 'value' and 'label' keys
        """
        from ....utilities.llm_utils import fetch_ollama_models

        if not base_url:
            logger.warning("Ollama URL not configured")
            return []

        base_url = normalize_url(base_url)

        # Get authentication headers (empty dict when no key is configured)
        headers = cls._get_auth_headers(api_key=api_key)

        # Fetch models using centralized function
        models = fetch_ollama_models(
            base_url, timeout=2.0, auth_headers=headers
        )

        # Add provider info and format for LLM API
        for model in models:
            # Clean up the model name for display, e.g. "llama3:latest" -> "llama3"
            model_name = model["value"]
            display_name = model_name.replace(":latest", "").replace(":", " ")
            model["label"] = f"{display_name} (Ollama)"
            model["provider"] = "OLLAMA"

        logger.info(f"Found {len(models)} Ollama models")
        return models

    @classmethod
    def create_llm(cls, model_name=None, temperature=0.7, **kwargs):
        """Factory function for Ollama LLMs.

        Args:
            model_name: Name of the model to use
            temperature: Model temperature (0.0-1.0)
            **kwargs: Additional arguments including settings_snapshot

        Returns:
            A configured ChatOllama instance

        Raises:
            ValueError: If Ollama is not available
        """
        settings_snapshot = kwargs.get("settings_snapshot")

        # Use default model if none specified
        if not model_name:
            model_name = cls.default_model

        # Use the configurable Ollama base URL (same key as cls.url_setting)
        raw_base_url = get_setting_from_snapshot(
            cls.url_setting,
            None,
            settings_snapshot=settings_snapshot,
        )
        if not raw_base_url:
            raise ValueError(
                "Ollama URL not configured. Please set llm.ollama.url in settings."
            )
        base_url = normalize_url(raw_base_url)

        logger.info(
            f"Creating ChatOllama with model={model_name}, base_url={base_url}"
        )

        # Build Ollama parameters
        ollama_params = {
            "model": model_name,
            "base_url": base_url,
            "temperature": temperature,
        }

        # Add authentication headers if configured
        headers = cls._get_auth_headers(settings_snapshot=settings_snapshot)
        if headers:
            # NOTE(review): langchain_ollama documents custom headers via
            # client_kwargs={"headers": ...}; confirm a top-level "headers"
            # parameter is honored by the installed ChatOllama version.
            ollama_params["headers"] = headers

        # Get context window size from settings for local providers
        context_window_size = get_setting_from_snapshot(
            "llm.local_context_window_size",
            4096,
            settings_snapshot=settings_snapshot,
        )
        if context_window_size is not None:
            ollama_params["num_ctx"] = int(context_window_size)

        # Add max_tokens if specified in settings and supported
        if get_setting_from_snapshot(
            "llm.supports_max_tokens", True, settings_snapshot=settings_snapshot
        ):
            # Use 80% of context window to leave room for prompts
            if context_window_size is not None:
                max_tokens = min(
                    int(
                        get_setting_from_snapshot(
                            "llm.max_tokens",
                            100000,
                            settings_snapshot=settings_snapshot,
                        )
                    ),
                    int(context_window_size * 0.8),
                )
                # NOTE(review): ChatOllama's documented token-limit field is
                # num_predict, not max_tokens — verify this key is not
                # silently ignored/rejected by the installed version.
                ollama_params["max_tokens"] = max_tokens

        llm = ChatOllama(**ollama_params)

        # Log the actual client configuration after creation
        logger.debug(
            f"ChatOllama created - base_url attribute: {getattr(llm, 'base_url', 'not found')}"
        )

        return llm

    @classmethod
    def is_available(cls, settings_snapshot=None):
        """Check if Ollama is running.

        Args:
            settings_snapshot: Optional settings snapshot to use

        Returns:
            True if Ollama is available, False otherwise
        """
        try:
            raw_base_url = get_setting_from_snapshot(
                cls.url_setting,
                None,
                settings_snapshot=settings_snapshot,
            )
            if not raw_base_url:
                logger.debug("Ollama URL not configured")
                return False
            base_url = normalize_url(raw_base_url)
            logger.info(f"Checking Ollama availability at {base_url}/api/tags")

            # Get authentication headers
            headers = cls._get_auth_headers(settings_snapshot=settings_snapshot)

            try:
                # safe_get enforces SSRF protections; localhost/private IPs are
                # explicitly allowed because Ollama is typically local
                response = safe_get(
                    f"{base_url}/api/tags",
                    timeout=3,
                    headers=headers,
                    allow_localhost=True,
                    allow_private_ips=True,
                )
                if response.status_code == 200:
                    logger.info(
                        f"Ollama is available. Status code: {response.status_code}"
                    )
                    # Log first 100 chars of response to debug
                    logger.info(f"Response preview: {str(response.text)[:100]}")
                    return True
                logger.warning(
                    f"Ollama API returned status code: {response.status_code}"
                )
                return False
            except requests.exceptions.RequestException:
                logger.warning("Request error when checking Ollama")
                return False
            except Exception:
                logger.warning("Unexpected error when checking Ollama")
                return False
        except Exception:
            # Availability probes must never raise; report unavailable instead
            logger.warning("Error in OllamaProvider.is_available")
            return False

    @classmethod
    def requires_auth_for_models(cls):
        """Ollama is local and does not need auth to list models."""
        return False