docs: rewrite README with structured overview and quick start guide
Replaces the minimal project description with a comprehensive README including a component overview table, quick start instructions, common Ansible operations, and links to detailed documentation. Aligns with Red Panda Approval™ standards.
This commit is contained in:
ansible/anythingllm/env.j2 | 79 lines added (new file)
@@ -0,0 +1,79 @@
# AnythingLLM Server Environment Configuration
# Managed by Ansible - Red Panda Approved
# Generated for {{ inventory_hostname }}

# ============================================
# Server Configuration
# ============================================
SERVER_PORT={{ anythingllm_port }}
STORAGE_DIR={{ anythingllm_directory }}/storage

# ============================================
# Security
# ============================================
JWT_SECRET={{ anythingllm_jwt_secret }}
SIG_KEY={{ anythingllm_sig_key }}
SIG_SALT={{ anythingllm_sig_salt }}

# ============================================
# PostgreSQL + pgvector (Portia)
# ============================================
VECTOR_DB=pgvector
PGVECTOR_CONNECTION_STRING=postgresql://{{ anythingllm_db_user }}:{{ anythingllm_db_password }}@{{ anythingllm_db_host }}:{{ anythingllm_db_port }}/{{ anythingllm_db_name }}

# ============================================
# LLM Provider - AWS Bedrock
# ============================================
# LLM_PROVIDER='bedrock'
# AWS_BEDROCK_LLM_ACCESS_KEY_ID=
# AWS_BEDROCK_LLM_ACCESS_KEY=
# AWS_BEDROCK_LLM_REGION=us-west-2
# AWS_BEDROCK_LLM_MODEL_PREFERENCE=meta.llama3-1-8b-instruct-v1:0
# AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT=8191
# AWS_BEDROCK_LLM_CONNECTION_METHOD=iam
# AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS=4096
# AWS_BEDROCK_LLM_SESSION_TOKEN= # Only required if CONNECTION_METHOD is 'sessionToken'
# Alternatively, authenticate with short- or long-term API keys:
# AWS_BEDROCK_LLM_CONNECTION_METHOD="apiKey"
# AWS_BEDROCK_LLM_API_KEY=

# ============================================
# LLM Provider - Generic OpenAI (llama-cpp)
# ============================================
LLM_PROVIDER=generic-openai
GENERIC_OPEN_AI_BASE_PATH={{ anythingllm_llm_base_url }}
GENERIC_OPEN_AI_MODEL_PREF={{ anythingllm_llm_model }}
GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT={{ anythingllm_llm_token_limit }}
GENERIC_OPEN_AI_API_KEY={{ anythingllm_llm_api_key }}

# ============================================
# Embedding Configuration
# ============================================
EMBEDDING_ENGINE={{ anythingllm_embedding_engine }}
EMBEDDING_MODEL_PREF={{ anythingllm_embedding_model }}

# ============================================
# TTS Configuration (FastKokoro)
# ============================================
TTS_PROVIDER={{ anythingllm_tts_provider }}
{% if anythingllm_tts_provider == 'openai' %}
TTS_OPEN_AI_KEY={{ anythingllm_tts_api_key }}
TTS_OPEN_AI_ENDPOINT={{ anythingllm_tts_endpoint }}
TTS_OPEN_AI_MODEL={{ anythingllm_tts_model }}
TTS_OPEN_AI_VOICE={{ anythingllm_tts_voice }}
{% endif %}

# ============================================
# Whisper Configuration
# ============================================
WHISPER_PROVIDER=local

# To use the OpenAI-hosted Whisper model instead:
# WHISPER_PROVIDER="openai"
# OPEN_AI_KEY=sk-xxxxxxxx

# ============================================
# Telemetry & Environment
# ============================================
DISABLE_TELEMETRY=true
NODE_ENV=production
Reference in New Issue
Block a user