Secure Agent Skills Execution

Acontext Sandbox provides secure, isolated environments for code and file management.

Open Source Alternative to Claude Computer Use

Simple & Open Source

Model Agnostic

Composable

How Sandbox Works with LLM

Give your AI agent secure code execution capabilities through an agentic tool loop

setup.py
# Create isolated sandbox & persistent disk
sandbox = client.sandboxes.create()
disk = client.disks.create()
# Upload agent skill (pptx generator, etc.)
skill = client.skills.create(file=zip_file)
📦Sandbox Ready
sandbox_id:"sbx_7k2m9x"
env:Python 3, bash, ripgrep...
💾Disk Created
disk_id:"dsk_3f8n2p"
storage:"persistent"
🧩Skill Uploaded
skill_id:"skl_pptx_v1"
mount_path:"/skills/pptx/"
context.py
# Build context with tools & mounted skills
ctx = SANDBOX_TOOLS.format_context(
  client, sandbox_id, disk_id,
  mount_skills=[skill.id]
)
# Get OpenAI-compatible tool schema
tools = SANDBOX_TOOLS.to_openai_tool_schema()
📄System Prompt Includes
🔧Available tools: bash, editor, export
📁Mounted skills at /skills/
📋Skill instructions & examples
🔧Tools → LLM
bash_execution_sandbox
Run shell commands
text_editor_sandbox
View/create/edit files
export_file_sandbox
Export with public URL
👤
User Request
"Create a sales report presentation"
🧠
LLM
Decides tool call
Tool Call
bash_execution_sandbox
{cmd: "python3 /skills/pptx/main.py"}
📦
Sandbox
Executes securely
Tool Result
stdout: "Presentation created at /output/report.pptx"
Loop until done
export.py
# LLM calls export tool to share results
if tool_name == "export_file_sandbox":
  result = SANDBOX_TOOLS.execute_tool(
    ctx, tool_name, {"sandbox_path": path}
  )
🔗Public URL Generated
{
  "public_url": "https://cdn.acontext.io/f/report.pptx",
  "expires_in": "24h",
  "file_size": "2.4 MB"
}
🤖
Assistant
Done! Here's your presentation: cdn.acontext.io/f/report.pptx
Sandbox

Upload, Manage, and Execute Skills in One API

Acontext
Acontext
User-Scoped · Direct Control · Self-Hostable
Upload skills per user, mount them to a sandbox, and execute them directly.
from acontext import AcontextClient
from acontext.agent import SANDBOX_TOOLS
from openai import OpenAI
 
client = AcontextClient()
llm = OpenAI(base_url="https://openrouter.ai/api/v1")
 
# 1. Upload skill with user identifier
skill = client.skills.create(
file=("my_skill.zip", open("my_skill.zip", "rb")),
user="alice@example.com"
)
 
# 2. Composable: Sandbox + Disk + Skills
sandbox = client.sandboxes.create()
disk = client.disks.create()
ctx = SANDBOX_TOOLS.format_context(client, sandbox.sandbox_id, disk.id, mount_skills=[skill.id])
 
# 3. Use ANY LLM via OpenRouter or any OpenAI-compatible API
response = llm.chat.completions.create(
model="openai/gpt-4o", # Or anthropic/claude-3.5-sonnet, deepseek/deepseek-r1...
messages=[{"role": "system", "content": ctx.get_context_prompt()},
{"role": "user", "content": "Run"}],
tools=SANDBOX_TOOLS.to_openai_tool_schema()
)
Claude
Claude API
Workspace-Scoped · No Direct Control
Upload skills to a workspace; they can only be used via the container parameter.
import anthropic
from anthropic.lib import files_from_dir
 
client = anthropic.Anthropic()
 
# 1. Upload skill (workspace-scoped only)
skill = client.beta.skills.create(
display_title="My Skill",
files=files_from_dir("/path/to/skill"),
betas=["skills-2025-10-02"]
)
 
# 2. Use skill in container (no sandbox control)
response = client.beta.messages.create(
model="claude-sonnet-4-5-20250929",
max_tokens=4096,
betas=["code-execution-2025-08-25", "skills-2025-10-02"],
container={"skills": [{"type": "custom", "skill_id": skill.id, "version": "latest"}]},
messages=[{"role": "user", "content": "Run"}],
tools=[{"type": "code_execution_20250825", "name": "code_execution"}]
)
Model Support

Any LLM + Skills + SANDBOX TOOLS

Acontext
Acontext
OpenAI · Anthropic · Gemini · OpenRouter · any compatible API
Mount skills, get the context prompt, and use any LLM with the tool schemas.
from acontext import AcontextClient
from acontext.agent import SANDBOX_TOOLS
from openai import OpenAI
 
client = AcontextClient()
sandbox = client.sandboxes.create()
disk = client.disks.create()
 
# Mount skills via SANDBOX_TOOLS
ctx = SANDBOX_TOOLS.format_context(client, sandbox.sandbox_id, disk.id, mount_skills=[skill.id])
 
# Use ANY LLM via OpenRouter or any OpenAI-compatible API
llm = OpenAI(base_url="https://openrouter.ai/api/v1")
response = llm.chat.completions.create(
model="deepseek/deepseek-r1",
messages=[{"role": "system", "content": ctx.get_context_prompt()},
{"role": "user", "content": "..."}],
tools=SANDBOX_TOOLS.to_openai_tool_schema()
)
Claude
Claude API
Claude Models Only
Locked to Claude models, with no custom tool schemas.
import anthropic
 
client = anthropic.Anthropic()
 
# Locked to Claude models only
response = client.beta.messages.create(
model="claude-sonnet-4-5-20250929",
max_tokens=4096,
betas=["code-execution-2025-08-25", "skills-2025-10-02"],
container={"skills": [{"type": "custom", "skill_id": skill.id, "version": "latest"}]},
messages=[{"role": "user", "content": "..."}],
tools=[{"type": "code_execution_20250825", "name": "code_execution"}]
)
 
# Cannot use GPT-4o, Gemini, DeepSeek...
# Cannot customize tool schemas

Acontext Skills Execution vs Claude Computer Use

Compare the complexity of traditional approaches with Acontext's unified SDK

Code Execution & Skills

Execute code in sandboxes and use agent skills without complex beta APIs

# Acontext SDK - Simple & unified
from acontext import AcontextClient
from acontext.agent.sandbox import SANDBOX_TOOLS
from openai import OpenAI

client = AcontextClient()
openai = OpenAI()

# Create sandbox with skill mounted
sandbox = client.sandboxes.create()
disk = client.disks.create()

ctx = SANDBOX_TOOLS.format_context(
    client,
    sandbox_id=sandbox.sandbox_id,
    disk_id=disk.id,
    mount_skills=["excel-skill-uuid"]  # Mount any skill
)

# Use with any LLM - OpenAI, Anthropic, etc.
response = openai.chat.completions.create(
    model="gpt-4.1",
    messages=[
        {"role": "system", "content": ctx.get_context_prompt()},
        {"role": "user", "content": "Create an Excel budget spreadsheet"}
    ],
    tools=SANDBOX_TOOLS.to_openai_tool_schema()
)

# Execute tool and get result with download URL
for tc in response.choices[0].message.tool_calls:
    result = SANDBOX_TOOLS.execute_tool(
        ctx, 
        tc.function.name, 
        json.loads(tc.function.arguments)
    )
    # Result includes public_url for downloads