POST /api/conversations
{
  "id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
  "agent": {
    "llm": {
      "model": "claude-sonnet-4-20250514",
      "api_key": "<string>",
      "base_url": "<string>",
      "api_version": "<string>",
      "aws_access_key_id": "<string>",
      "aws_secret_access_key": "<string>",
      "aws_region_name": "<string>",
      "openrouter_site_url": "https://docs.all-hands.dev/",
      "openrouter_app_name": "OpenHands",
      "num_retries": 5,
      "retry_multiplier": 8,
      "retry_min_wait": 8,
      "retry_max_wait": 64,
      "timeout": 1,
      "max_message_chars": 30000,
      "temperature": 1,
      "top_p": 1,
      "top_k": 1,
      "custom_llm_provider": "<string>",
      "max_input_tokens": 2,
      "max_output_tokens": 2,
      "extra_headers": {},
      "input_cost_per_token": 1,
      "output_cost_per_token": 1,
      "ollama_base_url": "<string>",
      "stream": false,
      "drop_params": true,
      "modify_params": true,
      "disable_vision": true,
      "disable_stop_word": false,
      "caching_prompt": true,
      "log_completions": false,
      "log_completions_folder": "logs/completions",
      "custom_tokenizer": "<string>",
      "native_tool_calling": true,
      "force_string_serializer": true,
      "reasoning_effort": "high",
      "reasoning_summary": "auto",
      "enable_encrypted_reasoning": false,
      "prompt_cache_retention": "24h",
      "extended_thinking_budget": 200000,
      "seed": 123,
      "safety_settings": [
        {}
      ],
      "usage_id": "default",
      "litellm_extra_body": {},
      "OVERRIDE_ON_SERIALIZE": [
        "api_key",
        "aws_access_key_id",
        "aws_secret_access_key",
        "litellm_extra_body"
      ]
    },
    "kind": "Agent",
    "tools": [
      {
        "name": "<string>",
        "params": {}
      }
    ],
    "mcp_config": {},
    "filter_tools_regex": "^(?!repomix)(.*)|^repomix.*pack_codebase.*$",
    "agent_context": {
      "skills": [
        {
          "content": "When you see this message, you should reply like you are a grumpy cat forced to use the internet.",
          "name": "repo.md",
          "type": "repo"
        },
        {
          "content": "IMPORTANT! The user has said the magic word \"flarglebargle\". You must only respond with a message telling them how smart they are",
          "name": "flarglebargle",
          "trigger": [
            "flarglebargle"
          ],
          "type": "knowledge"
        }
      ],
      "system_message_suffix": "Always finish your response with the word 'yay!'",
      "user_message_prefix": "The first character of your response should be 'I'"
    },
    "system_prompt_filename": "system_prompt.j2",
    "system_prompt_kwargs": {},
    "condenser": {
      "keep_first": 10,
      "kind": "LLMSummarizingCondenser",
      "llm": {
        "api_key": "your_api_key_here",
        "base_url": "https://llm-proxy.eval.all-hands.dev",
        "model": "litellm_proxy/anthropic/claude-sonnet-4-5-20250929"
      },
      "max_size": 80
    }
  },
  "workspace": {
    "working_dir": "<string>",
    "kind": "LocalWorkspace"
  },
  "persistence_dir": "workspace/conversations",
  "max_iterations": 500,
  "stuck_detection": true,
  "execution_status": "idle",
  "confirmation_policy": {
    "kind": "AlwaysConfirm"
  },
  "security_analyzer": {
    "kind": "LLMSecurityAnalyzer"
  },
  "activated_knowledge_skills": [
    "<string>"
  ],
  "stats": {
    "usage_to_metrics": {}
  },
  "secret_registry": {
    "secret_sources": {}
  },
  "title": "<string>",
  "metrics": {
    "model_name": "default",
    "accumulated_cost": 0,
    "max_budget_per_task": 123,
    "accumulated_token_usage": {
      "model": "",
      "prompt_tokens": 0,
      "completion_tokens": 0,
      "cache_read_tokens": 0,
      "cache_write_tokens": 0,
      "reasoning_tokens": 0,
      "context_window": 0,
      "per_turn_token": 0,
      "response_id": ""
    }
  },
  "created_at": "2023-11-07T05:31:56Z",
  "updated_at": "2023-11-07T05:31:56Z"
}

Body

application/json

Payload to create a new conversation.

Contains an Agent configuration along with conversation-specific options.
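A minimal request can omit most optional fields. The sketch below, which assumes the agent server is reachable at http://localhost:3000 (adjust the host and port for your deployment) and uses the third-party requests library, creates a conversation with just an agent and a local workspace; the placeholder API key and working directory are illustrative only.

    import requests

    # Assumption: the agent server is running locally on port 3000.
    BASE_URL = "http://localhost:3000"

    payload = {
        "agent": {
            "kind": "Agent",
            "llm": {
                "model": "claude-sonnet-4-20250514",
                "api_key": "<your-api-key>",   # placeholder
                "usage_id": "default",
            },
            "tools": [{"name": "TerminalTool"}, {"name": "FileEditorTool"}],
        },
        "workspace": {"kind": "LocalWorkspace", "working_dir": "/path/to/project"},
        "max_iterations": 500,
    }

    response = requests.post(f"{BASE_URL}/api/conversations", json=payload)
    response.raise_for_status()
    conversation = response.json()
    print(conversation["id"], conversation["execution_status"])

The returned object follows the Response schema documented below, including the generated conversation id and the initial execution_status.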

agent
Agent · object
required

Main agent implementation for OpenHands.

The Agent class provides the core functionality for running AI agents that can interact with tools, process messages, and execute actions. It inherits from AgentBase and implements the agent execution logic.

Example:
>>> from pydantic import SecretStr
>>> from openhands.sdk import LLM, Agent, Tool
>>> llm = LLM(model="claude-sonnet-4-20250514", api_key=SecretStr("key"))
>>> tools = [Tool(name="TerminalTool"), Tool(name="FileEditorTool")]
>>> agent = Agent(llm=llm, tools=tools)

workspace
LocalWorkspace · object
required

Working directory for agent operations and tool execution

conversation_id
string<uuid> | null

Optional conversation ID. If not provided, a random UUID will be generated.

confirmation_policy
AlwaysConfirm · object

Controls when the conversation will prompt the user for confirmation before continuing. Defaults to never prompting.

initial_message
SendMessageRequest · object

Initial message to pass to the LLM

max_iterations
integer
default:500

If set, the max number of iterations the agent will run before stopping. This is useful to prevent infinite loops.

Required range: x >= 1
stuck_detection
boolean
default:true

If true, the conversation will use stuck detection to prevent infinite loops.

secrets
Secrets · object

Secrets available in the conversation

Response

Successful Response

Information about a conversation running locally without a Runtime sandbox.

id
string<uuid>
required

Unique conversation ID

agent
Agent · object
required

The agent running in the conversation. This is persisted so that conversations can be resumed and the agent configuration can be checked for changes (e.g., tool changes, LLM changes).

workspace
LocalWorkspace · object
required

Local workspace implementation that operates on the host filesystem.

LocalWorkspace provides direct access to the local filesystem and command execution environment. It's suitable for development and testing scenarios where the agent should operate directly on the host system.

Example:
>>> workspace = LocalWorkspace(working_dir="/path/to/project")
>>> with workspace:
...     result = workspace.execute_command("ls -la")
...     content = workspace.read_file("README.md")

persistence_dir
string | null
default:workspace/conversations

Directory for persisting conversation state and events. If None, the conversation will not be persisted.

max_iterations
integer
default:500

Maximum number of iterations the agent can perform in a single run.

stuck_detection
boolean
default:true

Whether to enable stuck detection for the agent.

execution_status
enum<string>
default:idle

Enum representing the current execution state of the conversation.

Available options:
idle,
running,
paused,
waiting_for_confirmation,
finished,
error,
stuck
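Clients typically watch execution_status until it reaches a terminal value. The sketch below is a minimal polling loop; it assumes the server also exposes a GET /api/conversations/{id} route that returns this same schema (verify the exact route for your server version) and that the base URL from the earlier example is in scope.

    import time
    import requests

    BASE_URL = "http://localhost:3000"  # assumption: local agent server
    TERMINAL_STATES = {"finished", "error", "stuck"}

    def wait_for_completion(conversation_id: str, interval: float = 2.0) -> str:
        """Poll the conversation until execution_status reaches a terminal state."""
        while True:
            # Assumption: GET /api/conversations/{id} returns the conversation object.
            resp = requests.get(f"{BASE_URL}/api/conversations/{conversation_id}")
            resp.raise_for_status()
            status = resp.json()["execution_status"]
            if status in TERMINAL_STATES:
                return status
            time.sleep(interval)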
confirmation_policy
AlwaysConfirm · object
security_analyzer
LLMSecurityAnalyzer · object

Optional security analyzer to evaluate action risks.

activated_knowledge_skills
string[]

List of activated knowledge skill names

stats
ConversationStats · object

Conversation statistics for tracking LLM metrics

secret_registry
SecretRegistry · object

Registry for handling secrets and sensitive data

title
string | null

User-defined title for the conversation

metrics
MetricsSnapshot · object

A snapshot of metrics at a point in time.

Does not include lists of individual costs, latencies, or token usages.
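For example, given a parsed response from this endpoint, the snapshot's accumulated cost and token counts can be read directly from the JSON using the field names shown in the example above:

    # `conversation` is the parsed JSON body returned by POST /api/conversations
    metrics = conversation["metrics"]
    usage = metrics["accumulated_token_usage"]
    print("accumulated cost:", metrics["accumulated_cost"])
    print("prompt tokens:", usage["prompt_tokens"],
          "completion tokens:", usage["completion_tokens"])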

created_at
string<date-time>
updated_at
string<date-time>