Add YAML config support and Compose deployment example
This commit is contained in:
@@ -2,26 +2,66 @@ from __future__ import annotations
|
||||
|
||||
import os
|
||||
from functools import lru_cache
|
||||
from typing import Literal
|
||||
from pathlib import Path
|
||||
from typing import Any, Literal
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
import yaml
|
||||
from pydantic import BaseModel
|
||||
|
||||
# Allowed LLM backend identifiers; constrains LLMSettings.provider.
Provider = Literal["openai", "anthropic"]

# Config file locations probed in order by _load_yaml_config when no explicit
# path is supplied via EMAIL_CLASSIFIER_CONFIG / APP_CONFIG_FILE.
DEFAULT_CONFIG_PATHS = ["config.yml", "config.yaml", "/config/config.yml", "/config/config.yaml"]
|
||||
|
||||
|
||||
class LLMSettings(BaseModel):
    """Connection and sampling settings for the LLM backend.

    The values declared here are last-resort fallbacks; get_settings()
    overlays environment variables and YAML config on top of them.
    """

    # Which API dialect to speak; restricted to the Provider literal.
    provider: Provider = "openai"
    # Placeholder key for backends (e.g. a local OpenAI-compatible server)
    # that do not require authentication.
    api_key: str = "none"
    model: str = "qwen2.5-7b-instruct.q4_k_m"
    base_url: str = "http://ollama.internal.henryhosted.com:9292/v1"
    # Low temperature keeps generations close to deterministic.
    temperature: float = 0.1
    # 60.0, not 60: pydantic does not validate defaults by default, so an
    # int literal would surface as `int` despite the float annotation.
    timeout_seconds: float = 60.0
    max_retries: int = 3
|
||||
|
||||
|
||||
def _load_yaml_config() -> dict[str, Any]:
|
||||
explicit = os.getenv("EMAIL_CLASSIFIER_CONFIG") or os.getenv("APP_CONFIG_FILE")
|
||||
candidates = [explicit] if explicit else DEFAULT_CONFIG_PATHS
|
||||
for candidate in candidates:
|
||||
if not candidate:
|
||||
continue
|
||||
path = Path(candidate)
|
||||
if not path.exists() or not path.is_file():
|
||||
continue
|
||||
data = yaml.safe_load(path.read_text()) or {}
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"Config file must contain a mapping/object: {path}")
|
||||
llm = data.get("llm", data)
|
||||
if not isinstance(llm, dict):
|
||||
raise ValueError(f"LLM config must be a mapping/object: {path}")
|
||||
return llm
|
||||
return {}
|
||||
|
||||
|
||||
def _env_or_yaml(env_name: str, yaml_data: dict[str, Any], yaml_key: str, default: Any) -> Any:
|
||||
value = os.getenv(env_name)
|
||||
if value is not None:
|
||||
return value
|
||||
if yaml_key in yaml_data and yaml_data[yaml_key] is not None:
|
||||
return yaml_data[yaml_key]
|
||||
return default
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def get_settings() -> LLMSettings:
    """Build the process-wide LLM settings (cached after the first call).

    Per-field precedence: environment variable > YAML config > model default.
    Numeric fields are coerced explicitly because environment values arrive
    as strings.
    """
    yaml_data = _load_yaml_config()
    # Single source of truth for fallbacks: the model's own declared
    # defaults, instead of re-stating each literal here where the two
    # copies could drift apart.
    defaults = LLMSettings()
    return LLMSettings(
        provider=_env_or_yaml("LLM_PROVIDER", yaml_data, "provider", defaults.provider),
        api_key=_env_or_yaml("LLM_API_KEY", yaml_data, "api_key", defaults.api_key),
        model=_env_or_yaml("LLM_MODEL", yaml_data, "model", defaults.model),
        base_url=_env_or_yaml("LLM_BASE_URL", yaml_data, "base_url", defaults.base_url),
        temperature=float(_env_or_yaml("LLM_TEMPERATURE", yaml_data, "temperature", defaults.temperature)),
        timeout_seconds=float(_env_or_yaml("LLM_TIMEOUT_SECONDS", yaml_data, "timeout_seconds", defaults.timeout_seconds)),
        max_retries=int(_env_or_yaml("LLM_MAX_RETRIES", yaml_data, "max_retries", defaults.max_retries)),
    )
|
||||
|
||||
|
||||
def get_request_settings(
|
||||
|
||||
Reference in New Issue
Block a user