# Honcho — self-hosted with configurable LLM providers
#
# Data sovereignty: all memory/state stays on this VM (PostgreSQL + Redis).
#
# PROVIDER STRATEGY (see the example .env wiring in the LLM providers section below):
#   Primary: "vllm" slot   (any OpenAI-compatible API, default: OpenRouter)
#   Backup:  "custom" slot (optional, kicks in on the final retry)
#
# MODEL TIERS (chosen for function-calling reliability):
#   Light, Medium, and Heavy all currently pin the same model, deepseek-v4-flash:
#   purpose-built for tool use, 2M context, fast and cheap, with strong agentic
#   benchmark scores (79.5% tau-bench, 89.7% tau2-bench). Point the per-section
#   MODEL keys below at different models if you later want distinct tiers.
#
#   THINKING_BUDGET_TOKENS = 1 (these providers don't support Anthropic thinking)

[app]
LOG_LEVEL = "INFO"
SESSION_OBSERVERS_LIMIT = 10
GET_CONTEXT_MAX_TOKENS = 100000
MAX_FILE_SIZE = 5242880  # bytes (5 MiB)
MAX_MESSAGE_SIZE = 25000
EMBED_MESSAGES = true
MAX_EMBEDDING_TOKENS = 8192
NAMESPACE = "honcho"

[db]
CONNECTION_URI = "postgresql+psycopg://honcho:honcho@database:5432/honcho"
SCHEMA = "public"
POOL_SIZE = 10
MAX_OVERFLOW = 20
POOL_TIMEOUT = 30
POOL_RECYCLE = 300
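# Pool math (assuming SQLAlchemy pool semantics, implied by the psycopg URI above):
# this service opens at most POOL_SIZE + MAX_OVERFLOW = 30 connections, waiting up
# to POOL_TIMEOUT = 30s for a free slot; size Postgres max_connections to match.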

[auth]
USE_AUTH = false

[cache]
ENABLED = true
URL = "redis://redis:6379/0?suppress=true"
DEFAULT_TTL_SECONDS = 300

# ---------------------------------------------------------------------------
# LLM providers
#   "vllm"   = primary — set in .env: LLM_VLLM_API_KEY + LLM_VLLM_BASE_URL
#   "custom" = backup  — set in .env: LLM_OPENAI_COMPATIBLE_API_KEY
# ---------------------------------------------------------------------------
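# Example .env wiring (hypothetical placeholder keys; substitute your own.
# https://openrouter.ai/api/v1 is OpenRouter's standard base URL, matching the
# "default: OpenRouter" note in the header):
#   LLM_VLLM_API_KEY=sk-or-v1-xxxxxxxx
#   LLM_VLLM_BASE_URL=https://openrouter.ai/api/v1
#   LLM_OPENAI_COMPATIBLE_API_KEY=sk-xxxxxxxx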
[llm]
DEFAULT_MAX_TOKENS = 2500
EMBEDDING_PROVIDER = "openrouter"

# Embedding key, URL, and model are set via env vars (LLM_EMBEDDING_*)
# so they can differ per deployment without editing this file.
# If LLM_EMBEDDING_API_KEY is empty, falls back to LLM_OPENAI_COMPATIBLE_API_KEY.
# If LLM_EMBEDDING_BASE_URL is empty, falls back to OPENAI_COMPATIBLE_BASE_URL below.
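# Example .env values (hypothetical; LLM_EMBEDDING_MODEL is an assumed variable
# name following the LLM_EMBEDDING_* convention above):
#   LLM_EMBEDDING_API_KEY=sk-or-v1-xxxxxxxx
#   LLM_EMBEDDING_BASE_URL=https://openrouter.ai/api/v1
#   LLM_EMBEDDING_MODEL=text-embedding-3-small   # 1536-dim, matches [vector_store]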

# Backup provider base URL (set in .env or overridden by the setup script)
OPENAI_COMPATIBLE_BASE_URL = "https://api.deepseek.com/v1"

# ---------------------------------------------------------------------------
# Deriver — observation extraction (runs on every message, needs speed + tools)
# ---------------------------------------------------------------------------
[deriver]
ENABLED = true
WORKERS = 1
POLLING_SLEEP_INTERVAL_SECONDS = 1.0
STALE_SESSION_TIMEOUT_MINUTES = 5
PROVIDER = "vllm"
MODEL = "deepseek-v4-flash"
DEDUPLICATE = true
MAX_OUTPUT_TOKENS = 4096
THINKING_BUDGET_TOKENS = 1
MAX_INPUT_TOKENS = 23000
WORKING_REPRESENTATION_MAX_OBSERVATIONS = 100
REPRESENTATION_BATCH_MAX_TOKENS = 1024
FLUSH_ENABLED = false

# ---------------------------------------------------------------------------
# Dialectic — multi-level reasoning for peer.chat
# ---------------------------------------------------------------------------
[dialectic]
MAX_OUTPUT_TOKENS = 8192
MAX_INPUT_TOKENS = 100000
HISTORY_TOKEN_LIMIT = 8192
SESSION_HISTORY_MAX_TOKENS = 4096

[dialectic.levels.minimal]
PROVIDER = "vllm"
MODEL = "deepseek-v4-flash"
THINKING_BUDGET_TOKENS = 1
MAX_TOOL_ITERATIONS = 1
MAX_OUTPUT_TOKENS = 250

[dialectic.levels.low]
PROVIDER = "vllm"
MODEL = "deepseek-v4-flash"
THINKING_BUDGET_TOKENS = 1
MAX_TOOL_ITERATIONS = 5

[dialectic.levels.medium]
PROVIDER = "vllm"
MODEL = "deepseek-v4-flash"
THINKING_BUDGET_TOKENS = 1
MAX_TOOL_ITERATIONS = 2

[dialectic.levels.high]
PROVIDER = "vllm"
MODEL = "deepseek-v4-flash"
THINKING_BUDGET_TOKENS = 1
MAX_TOOL_ITERATIONS = 4

[dialectic.levels.max]
PROVIDER = "vllm"
MODEL = "deepseek-v4-flash"
THINKING_BUDGET_TOKENS = 1
MAX_TOOL_ITERATIONS = 10

# ---------------------------------------------------------------------------
# Summary — session summarization
# ---------------------------------------------------------------------------
[summary]
ENABLED = true
MESSAGES_PER_SHORT_SUMMARY = 20
MESSAGES_PER_LONG_SUMMARY = 60
PROVIDER = "vllm"
MODEL = "deepseek-v4-flash"
MAX_TOKENS_SHORT = 1000
MAX_TOKENS_LONG = 4000
THINKING_BUDGET_TOKENS = 1
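# Cadence note (arithmetic only; assumes each long summary rolls up the
# preceding short windows): one long summary spans 60 / 20 = 3 short windows.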

# ---------------------------------------------------------------------------
# Dream — memory consolidation (runs rarely, needs best quality)
# ---------------------------------------------------------------------------
[dream]
ENABLED = true
DOCUMENT_THRESHOLD = 50
IDLE_TIMEOUT_MINUTES = 60
MIN_HOURS_BETWEEN_DREAMS = 8
ENABLED_TYPES = ["omni"]
PROVIDER = "vllm"
MODEL = "deepseek-v4-flash"
MAX_OUTPUT_TOKENS = 16384
THINKING_BUDGET_TOKENS = 1
MAX_TOOL_ITERATIONS = 20
HISTORY_TOKEN_LIMIT = 16384
DEDUCTION_MODEL = "deepseek-v4-flash"
INDUCTION_MODEL = "deepseek-v4-flash"
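# Trigger sketch (gating assumed to be conjunctive; verify against your Honcho
# version): a dream runs only after >= 50 documents accumulate, the session has
# been idle >= 60 minutes, and >= 8 hours have passed since the previous dream.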

[peer_card]
ENABLED = true

[vector_store]
TYPE = "pgvector"
DIMENSIONS = 1536
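# DIMENSIONS must equal the embedding model's vector width (e.g. OpenAI's
# text-embedding-3-small emits 1536-dim vectors); pgvector rejects mismatched inserts.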

[metrics]
ENABLED = false

[telemetry]
ENABLED = false

[sentry]
ENABLED = false
