Coverage for src/local_deep_research/llm/providers/openai_base.py: 83% (121 statements)
coverage.py v7.12.0, created at 2026-01-11 00:51 +0000
1"""Base OpenAI-compatible endpoint provider for Local Deep Research."""
3from langchain_openai import ChatOpenAI
4from loguru import logger
6from ...config.thread_settings import (
7 get_setting_from_snapshot as _get_setting_from_snapshot,
8 NoSettingsContextError,
9)
10from ...utilities.url_utils import normalize_url
13def get_setting_from_snapshot(
14 key, default=None, username=None, settings_snapshot=None
15):
16 """Get setting from context only - no database access from threads.
18 This is a wrapper around the shared function that enables fallback LLM check.
19 """
20 return _get_setting_from_snapshot(
21 key, default, username, settings_snapshot, check_fallback_llm=True
22 )
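
# Usage sketch (illustrative): keys are dotted setting names such as the
# "llm.openai_endpoint.api_key" key used below; the snapshot shape itself is
# defined by config.thread_settings, not here.
#
#     api_key = get_setting_from_snapshot(
#         "llm.openai_endpoint.api_key", settings_snapshot=snapshot
#     )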

class OpenAICompatibleProvider:
    """Base class for OpenAI-compatible API providers.

    This class provides a common implementation for any service that offers
    an OpenAI-compatible API endpoint (Google, OpenRouter, Groq, Together,
    etc.).
    """

    # Override these in subclasses
    provider_name = "openai_endpoint"  # Name used in logs
    api_key_setting = "llm.openai_endpoint.api_key"  # Settings key for API key
    url_setting = None  # Settings key for URL (e.g., "llm.lmstudio.url")
    default_base_url = "https://api.openai.com/v1"  # Default endpoint URL
    default_model = "gpt-3.5-turbo"  # Default model if none specified
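
    # A hypothetical subclass typically overrides only the attributes above,
    # for example (class name, settings key, URL, and model here are
    # illustrative assumptions, not the project's actual subclasses):
    #
    #     class GroqProvider(OpenAICompatibleProvider):
    #         provider_name = "groq"
    #         api_key_setting = "llm.groq.api_key"
    #         default_base_url = "https://api.groq.com/openai/v1"
    #         default_model = "llama-3.1-8b-instant"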

    @classmethod
    def create_llm(cls, model_name=None, temperature=0.7, **kwargs):
        """Factory function for OpenAI-compatible LLMs.

        Args:
            model_name: Name of the model to use
            temperature: Model temperature (0.0-1.0)
            **kwargs: Additional arguments including settings_snapshot

        Returns:
            A configured ChatOpenAI instance

        Raises:
            ValueError: If API key is not configured
        """
        settings_snapshot = kwargs.get("settings_snapshot")

        # Get API key from settings (if provider requires one)
        if cls.api_key_setting:
            api_key = get_setting_from_snapshot(
                cls.api_key_setting,
                default=None,
                settings_snapshot=settings_snapshot,
            )

            if not api_key:
                logger.error(
                    f"{cls.provider_name} API key not found in settings"
                )
                raise ValueError(
                    f"{cls.provider_name} API key not configured. "
                    f"Please set {cls.api_key_setting} in settings."
                )
        else:
            # Provider doesn't require an API key (e.g., LM Studio)
            api_key = kwargs.get("api_key", "dummy-key")

        # Use default model if none specified
        if not model_name:
            model_name = cls.default_model

        # Get endpoint URL (can be overridden in kwargs for flexibility)
        base_url = kwargs.get("base_url", cls.default_base_url)
        base_url = normalize_url(base_url) if base_url else cls.default_base_url

        # Build parameters for the OpenAI client
        llm_params = {
            "model": model_name,
            "api_key": api_key,
            "base_url": base_url,
            "temperature": temperature,
        }

        # Add max_tokens if specified in settings
        try:
            max_tokens = get_setting_from_snapshot(
                "llm.max_tokens",
                default=None,
                settings_snapshot=settings_snapshot,
            )
            if max_tokens:
                llm_params["max_tokens"] = int(max_tokens)
        except NoSettingsContextError:
            pass  # Optional parameter

        # Add streaming if specified
        try:
            streaming = get_setting_from_snapshot(
                "llm.streaming",
                default=None,
                settings_snapshot=settings_snapshot,
            )
            if streaming is not None:
                llm_params["streaming"] = streaming
        except NoSettingsContextError:
            pass  # Optional parameter

        # Add max_retries if specified
        try:
            max_retries = get_setting_from_snapshot(
                "llm.max_retries",
                default=None,
                settings_snapshot=settings_snapshot,
            )
            if max_retries is not None:
                llm_params["max_retries"] = max_retries
        except NoSettingsContextError:
            pass  # Optional parameter

        # Add request_timeout if specified
        try:
            request_timeout = get_setting_from_snapshot(
                "llm.request_timeout",
                default=None,
                settings_snapshot=settings_snapshot,
            )
            if request_timeout is not None:
                llm_params["request_timeout"] = request_timeout
        except NoSettingsContextError:
            pass  # Optional parameter

        logger.info(
            f"Creating {cls.provider_name} LLM with model: {model_name}, "
            f"temperature: {temperature}, endpoint: {base_url}"
        )

        return ChatOpenAI(**llm_params)
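
    # Usage sketch (illustrative): assumes a settings snapshot that resolves
    # cls.api_key_setting; the model name below is an example, not a default
    # of this module.
    #
    #     llm = OpenAICompatibleProvider.create_llm(
    #         model_name="gpt-4o-mini",
    #         temperature=0.2,
    #         settings_snapshot=snapshot,
    #     )
    #     reply = llm.invoke("Hello")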

    @classmethod
    def _create_llm_instance(cls, model_name=None, temperature=0.7, **kwargs):
        """Internal method to create an LLM instance with the provided parameters.

        This bypasses API key checking for providers that handle auth
        differently.
        """
        settings_snapshot = kwargs.get("settings_snapshot")

        # Use default model if none specified
        if not model_name:
            model_name = cls.default_model

        # Get endpoint URL (can be overridden in kwargs for flexibility)
        base_url = kwargs.get("base_url", cls.default_base_url)
        base_url = normalize_url(base_url) if base_url else cls.default_base_url

        # Get API key from kwargs (caller is responsible for providing it)
        api_key = kwargs.get("api_key", "dummy-key")

        # Build parameters for the OpenAI client
        llm_params = {
            "model": model_name,
            "api_key": api_key,
            "base_url": base_url,
            "temperature": temperature,
        }

        # Add optional parameters (same as in create_llm)
        try:
            max_tokens = get_setting_from_snapshot(
                "llm.max_tokens",
                default=None,
                settings_snapshot=settings_snapshot,
            )
            if max_tokens:
                llm_params["max_tokens"] = int(max_tokens)
        except NoSettingsContextError:
            pass

        return ChatOpenAI(**llm_params)

    @classmethod
    def is_available(cls, settings_snapshot=None):
        """Check if this provider is available.

        Args:
            settings_snapshot: Optional settings snapshot to use

        Returns:
            True if the API key is configured (or not needed), False otherwise
        """
        try:
            # If the provider doesn't require an API key, it's available
            if not cls.api_key_setting:
                return True

            # Check if the API key is configured
            api_key = get_setting_from_snapshot(
                cls.api_key_setting,
                default=None,
                settings_snapshot=settings_snapshot,
            )
            return bool(api_key)
        except Exception:
            return False
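
    # Callers can gate provider selection on this check, e.g. (SomeProvider
    # is a placeholder subclass):
    #
    #     if SomeProvider.is_available(settings_snapshot=snapshot):
    #         llm = SomeProvider.create_llm(settings_snapshot=snapshot)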

    @classmethod
    def requires_auth_for_models(cls):
        """Check if this provider requires authentication for listing models.

        Override in subclasses that don't require auth.

        Returns:
            True if authentication is required, False otherwise
        """
        return True

    @classmethod
    def _get_base_url_for_models(cls, settings_snapshot=None):
        """Get the base URL to use for listing models.

        Reads from url_setting if defined, otherwise uses default_base_url.

        Args:
            settings_snapshot: Optional settings snapshot dict

        Returns:
            The base URL string to use for model listing
        """
        if cls.url_setting:
            # Use get_setting_from_snapshot, which handles both settings_snapshot
            # and thread-local context, with proper fallback
            url = get_setting_from_snapshot(
                cls.url_setting,
                default=None,
                settings_snapshot=settings_snapshot,
            )
            if url:
                return url.rstrip("/")

        return cls.default_base_url

    @classmethod
    def list_models_for_api(cls, api_key=None, base_url=None):
        """List available models for API endpoint use.

        This method is designed to be called from Flask routes.

        Args:
            api_key: Optional API key (if None and required, returns an empty list)
            base_url: Optional base URL to use (if None, uses cls.default_base_url)

        Returns:
            List of model dictionaries with 'value' and 'label' keys
        """
        try:
            # Check if auth is required
            if cls.requires_auth_for_models():
                if not api_key:
                    logger.debug(
                        f"{cls.provider_name} requires API key for model listing"
                    )
                    return []
            else:
                # Use a dummy key for providers that don't require auth
                api_key = api_key or "dummy-key-for-models-list"

            from openai import OpenAI

            # Use the provided base_url or fall back to the class default
            if not base_url:
                base_url = cls.default_base_url

            # Create the OpenAI client (uses library defaults for timeout)
            client = OpenAI(api_key=api_key, base_url=base_url)

            # Fetch models
            logger.debug(
                f"Fetching models from {cls.provider_name} at {base_url}"
            )
            models_response = client.models.list()

            models = []
            for model in models_response.data:
                if model.id:
                    models.append(
                        {
                            "value": model.id,
                            "label": model.id,
                        }
                    )

            logger.info(f"Found {len(models)} models from {cls.provider_name}")
            return models

        except Exception as e:
            # Use warning level since connection failures are expected
            # when the provider is not running (e.g., LM Studio not started)
            logger.warning(
                f"Could not list models from {cls.provider_name}: {e}"
            )
            return []
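
    # The returned list is shaped for UI dropdowns, e.g. (model id is
    # illustrative):
    #     [{"value": "gpt-4o", "label": "gpt-4o"}, ...]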

    @classmethod
    def list_models(cls, settings_snapshot=None):
        """List available models from this provider.

        Args:
            settings_snapshot: Optional settings snapshot to use

        Returns:
            List of model dictionaries with 'value' and 'label' keys
        """
        try:
            # Get the API key from settings if auth is required
            api_key = None
            if cls.requires_auth_for_models():
                api_key = get_setting_from_snapshot(
                    cls.api_key_setting,
                    default=None,
                    settings_snapshot=settings_snapshot,
                )

            # Get the base URL from settings if the provider has a configurable URL
            base_url = cls._get_base_url_for_models(settings_snapshot)

            return cls.list_models_for_api(api_key, base_url)

        except Exception as e:
            logger.exception(
                f"Error listing models from {cls.provider_name}: {e}"
            )
            return []
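

# --- Illustrative sketch (not part of the module above) ---------------------
# A minimal sketch of how a no-auth local provider could subclass the base.
# The class name, settings key, URL, and model below are assumptions for
# illustration, not the project's actual subclasses.
class ExampleLocalProvider(OpenAICompatibleProvider):
    provider_name = "example_local"
    api_key_setting = None  # No API key required; create_llm uses "dummy-key"
    url_setting = "llm.example_local.url"  # Hypothetical settings key
    default_base_url = "http://localhost:1234/v1"
    default_model = "local-model"

    @classmethod
    def requires_auth_for_models(cls):
        # Local servers typically list models without authentication,
        # so list_models_for_api falls back to its dummy key
        return False


# Then, e.g.:
#     models = ExampleLocalProvider.list_models(settings_snapshot=snapshot)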