|
1 | | -""" Constants for llmvision component""" |
| 1 | +"""Constants for llmvision component""" |
2 | 2 |
|
3 | 3 | # Global constants |
4 | 4 | DOMAIN = "llmvision" |
5 | 5 |
|
6 | 6 | # CONFIGURABLE VARIABLES FOR SETUP |
7 | | -CONF_PROVIDER = 'provider' |
8 | | -CONF_API_KEY = 'api_key' |
9 | | -CONF_IP_ADDRESS= 'ip_address' |
10 | | -CONF_PORT = 'port' |
11 | | -CONF_HTTPS = 'https' |
12 | | -CONF_DEFAULT_MODEL = 'default_model' |
13 | | -CONF_TEMPERATURE = 'temperature' |
14 | | -CONF_TOP_P = 'top_p' |
15 | | -CONF_CONTEXT_WINDOW = 'context_window' #(ollama: num_ctx) |
16 | | -CONF_KEEP_ALIVE = 'keep_alive' |
| 7 | +CONF_PROVIDER = "provider" |
| 8 | +CONF_API_KEY = "api_key" |
| 9 | +CONF_IP_ADDRESS = "ip_address" |
| 10 | +CONF_PORT = "port" |
| 11 | +CONF_HTTPS = "https" |
| 12 | +CONF_DEFAULT_MODEL = "default_model" |
| 13 | +CONF_TEMPERATURE = "temperature" |
| 14 | +CONF_TOP_P = "top_p" |
| 15 | +CONF_CONTEXT_WINDOW = "context_window" # (ollama: num_ctx) |
| 16 | +CONF_KEEP_ALIVE = "keep_alive" |
17 | 17 |
|
18 | 18 | # Azure specific |
19 | | -CONF_AZURE_BASE_URL = 'azure_base_url' |
20 | | -CONF_AZURE_DEPLOYMENT = 'azure_deployment' |
21 | | -CONF_AZURE_VERSION = 'azure_version' |
| 19 | +CONF_AZURE_BASE_URL = "azure_base_url" |
| 20 | +CONF_AZURE_DEPLOYMENT = "azure_deployment" |
| 21 | +CONF_AZURE_VERSION = "azure_version" |
22 | 22 |
|
23 | 23 | # AWS specific |
24 | | -CONF_AWS_ACCESS_KEY_ID = 'aws_access_key_id' |
25 | | -CONF_AWS_SECRET_ACCESS_KEY = 'aws_secret_access_key' |
26 | | -CONF_AWS_REGION_NAME = 'aws_region_name' |
| 24 | +CONF_AWS_ACCESS_KEY_ID = "aws_access_key_id" |
| 25 | +CONF_AWS_SECRET_ACCESS_KEY = "aws_secret_access_key" |
| 26 | +CONF_AWS_REGION_NAME = "aws_region_name" |
27 | 27 |
|
28 | 28 | # Custom OpenAI specific |
29 | | -CONF_CUSTOM_OPENAI_ENDPOINT = 'custom_openai_endpoint' |
| 29 | +CONF_CUSTOM_OPENAI_ENDPOINT = "custom_openai_endpoint" |
30 | 30 |
|
31 | 31 | # Timeline |
32 | | -CONF_RETENTION_TIME = 'retention_time' |
| 32 | +CONF_RETENTION_TIME = "retention_time" |
33 | 33 |
|
 # Settings
-CONF_FALLBACK_PROVIDER = 'fallback_provider'
-CONF_TIMELINE_TODAY_SUMMARY = 'timeline_today_summary'
-CONF_TIMELINE_SUMMARY_PROMPT = 'timeline_summary_prompt'
-CONF_MEMORY_PATHS = 'memory_paths'
-CONG_MEMORY_IMAGES_ENCODED = 'memory_images_encoded'
-CONF_MEMORY_STRINGS = 'memory_strings'
-CONF_SYSTEM_PROMPT = 'system_prompt'
-CONF_TITLE_PROMPT = 'title_prompt'
-CONF_MEMORY_PATHS = 'memory_paths'
-CONF_MEMORY_IMAGES_ENCODED = 'memory_images_encoded'
-CONF_MEMORY_STRINGS = 'memory_strings'
+CONF_FALLBACK_PROVIDER = "fallback_provider"
+CONF_TIMELINE_TODAY_SUMMARY = "timeline_today_summary"
+CONF_TIMELINE_SUMMARY_PROMPT = "timeline_summary_prompt"
+CONF_MEMORY_PATHS = "memory_paths"
+CONF_MEMORY_IMAGES_ENCODED = "memory_images_encoded"
+CONF_MEMORY_STRINGS = "memory_strings"
+CONF_SYSTEM_PROMPT = "system_prompt"
+CONF_TITLE_PROMPT = "title_prompt"
 
 
 # SERVICE CALL CONSTANTS
-MESSAGE = 'message'
-REMEMBER = 'remember'
-USE_MEMORY = 'use_memory'
-PROVIDER = 'provider'
-MAXTOKENS = 'max_tokens'
-TARGET_WIDTH = 'target_width'
-MODEL = 'model'
-IMAGE_FILE = 'image_file'
-IMAGE_ENTITY = 'image_entity'
-VIDEO_FILE = 'video_file'
-EVENT_ID = 'event_id'
-INTERVAL = 'interval'
-DURATION = 'duration'
-FRIGATE_RETRY_ATTEMPTS = 'frigate_retry_attempts'
-FRIGATE_RETRY_SECONDS = 'frigate_retry_seconds'
-MAX_FRAMES = 'max_frames'
-INCLUDE_FILENAME = 'include_filename'
-EXPOSE_IMAGES = 'expose_images'
-GENERATE_TITLE = 'generate_title'
-SENSOR_ENTITY = 'sensor_entity'
+MESSAGE = "message"
+REMEMBER = "remember"
+USE_MEMORY = "use_memory"
+PROVIDER = "provider"
+MAXTOKENS = "max_tokens"
+TARGET_WIDTH = "target_width"
+MODEL = "model"
+IMAGE_FILE = "image_file"
+IMAGE_ENTITY = "image_entity"
+VIDEO_FILE = "video_file"
+EVENT_ID = "event_id"
+INTERVAL = "interval"
+DURATION = "duration"
+FRIGATE_RETRY_ATTEMPTS = "frigate_retry_attempts"
+FRIGATE_RETRY_SECONDS = "frigate_retry_seconds"
+MAX_FRAMES = "max_frames"
+INCLUDE_FILENAME = "include_filename"
+EXPOSE_IMAGES = "expose_images"
+GENERATE_TITLE = "generate_title"
+SENSOR_ENTITY = "sensor_entity"
 
 # Error messages
 ERROR_NOT_CONFIGURED = "{provider} is not configured"
@@ ... @@
 ERROR_HANDSHAKE_FAILED = "Connection could not be established"
 
 # Versions
-# https://docs.anthropic.com/en/api/versioning
-VERSION_ANTHROPIC = "2023-06-01"
+VERSION_ANTHROPIC = "2023-06-01" # https://docs.anthropic.com/en/api/versioning
+VERSION_AZURE = "2025-04-01-preview" # https://learn.microsoft.com/en-us/azure/ai-foundry/openai/api-version-lifecycle?tabs=key
 
 # Defaults
 DEFAULT_SYSTEM_PROMPT = "Your task is to analyze a series of images and provide a concise event description based on user instructions. Focus on identifying and describing the actions of people, pets, and dynamic objects (e.g., vehicles) rather than static background details. When multiple images are provided, track and summarize movements or changes over time (e.g., 'A person walks to the front door' or 'A car pulls out of the driveway'). Keep responses brief, objective, and aligned with the user's prompt. Avoid speculation and prioritize observable activity. The length of the summary must be less than 255 characters, so you must summarize it for the best readability within 255 characters."
@@ ... @@
 DEFAULT_ANTHROPIC_MODEL = "claude-3-5-sonnet-latest"
 DEFAULT_AZURE_MODEL = "gpt-4o-mini"
 DEFAULT_GOOGLE_MODEL = "gemini-2.0-flash"
-DEFAULT_GROQ_MODEL = "llama-3.2-11b-vision-preview"
+DEFAULT_GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
 DEFAULT_LOCALAI_MODEL = "llava"
 DEFAULT_OLLAMA_MODEL = "gemma3:4b"
 DEFAULT_CUSTOM_OPENAI_MODEL = "gpt-4o-mini"
 DEFAULT_AWS_MODEL = "us.amazon.nova-pro-v1:0"
 DEFAULT_OPENWEBUI_MODEL = "gemma3:4b"
+DEFAULT_OPENROUTER_MODEL = "openai/gpt-4o-mini"
 
 DEFAULT_SUMMARY_PROMPT = "Provide a brief summary of the following event descriptions. Focus on the key actions or changes that occurred over time and avoid unnecessary details or subjective interpretations. The summary should be concise, objective, and relevant to the described events. Keep the summary under 50 words and ensure it captures the main events or activities in the descriptions. Here are the descriptions:\n "
 
@@ ... @@
 ENDPOINT_OLLAMA = "{protocol}://{ip_address}:{port}/api/chat"
 ENDPOINT_OPENWEBUI = "{protocol}://{ip_address}:{port}/api/chat/completions"
 ENDPOINT_AZURE = "{base_url}openai/deployments/{deployment}/chat/completions?api-version={api_version}"
+ENDPOINT_OPENROUTER = "https://openrouter.ai/api/v1/chat/completions"
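
For context, a minimal sketch of how the endpoint templates and the CONF_* keys above plausibly fit together. It is not part of the commit: the build_ollama_endpoint helper and the example config dict are hypothetical, and the only assumption is that the integration fills these templates with str.format.

# Hypothetical sketch, not part of this commit. Assumes the integration
# fills the endpoint templates with str.format using values stored under
# the CONF_* keys in a config-entry-style dict.
ENDPOINT_OLLAMA = "{protocol}://{ip_address}:{port}/api/chat"
CONF_IP_ADDRESS = "ip_address"
CONF_PORT = "port"
CONF_HTTPS = "https"

def build_ollama_endpoint(config: dict) -> str:
    """Fill the Ollama chat endpoint template from a config dict."""
    protocol = "https" if config.get(CONF_HTTPS) else "http"
    return ENDPOINT_OLLAMA.format(
        protocol=protocol,
        ip_address=config[CONF_IP_ADDRESS],
        port=config[CONF_PORT],
    )

# Example: a local Ollama server without TLS resolves to
# http://192.168.1.10:11434/api/chat
assert build_ollama_endpoint(
    {"ip_address": "192.168.1.10", "port": 11434, "https": False}
) == "http://192.168.1.10:11434/api/chat"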
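The error messages are plain format strings as well; a hypothetical usage sketch, assuming callers supply the provider name:

# Hypothetical usage sketch; the template is defined in the file above.
ERROR_NOT_CONFIGURED = "{provider} is not configured"
assert ERROR_NOT_CONFIGURED.format(provider="Anthropic") == "Anthropic is not configured"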