How to build a secure, local-first agent runtime with OpenClaw Gateway, Capabilities, and Managed Tooling

In this tutorial, we set up and exercise a fully local, schema-valid OpenClaw runtime. We configure the OpenClaw gateway with a strict loopback binding, supply authenticated model access through environment variables, and constrain execution through the built-in exec tool's settings. We then create a structured custom skill that the OpenClaw agent can discover and invoke on its own. Instead of running Python scripts manually, we let OpenClaw orchestrate the model's reasoning, skill selection, and tool-mediated execution at agent runtime. Throughout this process, we focus on OpenClaw's design — the gateway control plane, agent automation, model routing, and skill registration — to understand how OpenClaw grounds autonomous behavior in a secure, local-first setup.
import os, json, textwrap, subprocess, time, re, pathlib, shlex
from getpass import getpass
def sh(cmd, check=True, capture=False, env=None):
    """Run a shell command through ``bash -lc`` and optionally capture stdout.

    Args:
        cmd: Shell command string to execute.
        check: When True, raise ``subprocess.CalledProcessError`` on a
            non-zero exit status.
        capture: When True, return the command's stdout as text; otherwise
            return None and let output flow to the parent's streams.
        env: Optional environment mapping; defaults to a copy of
            ``os.environ`` so callers cannot mutate the live environment.

    Returns:
        The command's stdout when ``capture`` is True, else None.
    """
    p = subprocess.run(
        ["bash", "-lc", cmd],
        check=check,
        text=True,
        capture_output=capture,
        env=env or os.environ.copy(),
    )
    return p.stdout if capture else None
def require_secret_env(var="OPENAI_API_KEY"):
    """Ensure the environment variable *var* holds a non-empty value.

    If the variable is already set (non-blank), return immediately.
    Otherwise prompt interactively with hidden input and store the result.

    Raises:
        RuntimeError: if the user supplies an empty value at the prompt.
    """
    if os.environ.get(var, "").strip():
        return
    key = getpass(f"Enter {var} (hidden): ").strip()
    if not key:
        raise RuntimeError(f"{var} is required.")
    os.environ[var] = key
def install_node_22_and_openclaw():
    """Install Node.js 22 from the NodeSource repo, then the OpenClaw CLI.

    Requires passwordless sudo (e.g. a Colab VM). The final version check is
    best-effort (``check=False``) so a non-zero exit does not abort setup.
    """
    sh("sudo apt-get update -y")
    sh("sudo apt-get install -y ca-certificates curl gnupg")
    # NOTE(review): the original line was missing the URL after `-fsSL`;
    # the NodeSource Node 22 setup script is the conventional target for
    # this pattern — confirm against the original tutorial.
    sh("curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash -")
    sh("sudo apt-get install -y nodejs")
    sh("node -v && npm -v")
    sh("npm install -g openclaw@latest")
    sh("openclaw --version", check=False)
We define core utility functions that let us execute shell commands, securely capture secrets, and install OpenClaw on the required Node.js runtime. We build a control interface that combines Python with the OpenClaw CLI. Here, we prepare the environment so that OpenClaw can act as a central agent runtime within Colab.
def write_openclaw_config_valid():
    """Write a schema-valid ``~/.openclaw/openclaw.json`` and create the workspace.

    Creates the workspace skills directory, then writes a config with a
    loopback-only gateway, a default agent model, and exec-tool limits.

    Returns:
        The path of the written config file, as a string.
    """
    home = pathlib.Path.home()
    base = home / ".openclaw"
    workspace = base / "workspace"
    (workspace / "skills").mkdir(parents=True, exist_ok=True)
    cfg = {
        "gateway": {
            "mode": "local",
            "port": 18789,
            # Loopback bind + auth "none" is only acceptable because nothing
            # outside this host can reach the port.
            "bind": "loopback",
            "auth": {"mode": "none"},
            "controlUi": {
                "enabled": True,
                "basePath": "/openclaw",
                "dangerouslyDisableDeviceAuth": True
            }
        },
        "agents": {
            "defaults": {
                "workspace": str(workspace),
                "model": {"primary": "openai/gpt-4o-mini"}
            }
        },
        "tools": {
            "exec": {
                "backgroundMs": 10000,
                "timeoutSec": 1800,
                "cleanupMs": 1800000,
                "notifyOnExit": True,
                "notifyOnExitEmptySuccess": False,
                "applyPatch": {"enabled": False, "allowModels": ["openai/gpt-5.2"]}
            }
        }
    }
    base.mkdir(parents=True, exist_ok=True)
    (base / "openclaw.json").write_text(json.dumps(cfg, indent=2))
    return str(base / "openclaw.json")
def start_gateway_background():
    """Launch the OpenClaw gateway on loopback port 18789 in the background.

    Removes any stale log/pid files, starts the gateway via ``nohup``, then
    polls the log for up to 60 seconds for a "listening"-style message.

    Returns:
        True once the gateway log indicates it is up.

    Raises:
        RuntimeError: if no ready message appears within the timeout.
    """
    sh("rm -f /tmp/openclaw_gateway.log /tmp/openclaw_gateway.pid", check=False)
    sh("nohup openclaw gateway --port 18789 --bind loopback --verbose > /tmp/openclaw_gateway.log 2>&1 & echo $! > /tmp/openclaw_gateway.pid")
    for _ in range(60):
        time.sleep(1)
        log = sh("tail -n 120 /tmp/openclaw_gateway.log || true", capture=True, check=False) or ""
        if re.search(r"(listening|ready|ws|http).*18789|18789.*listening", log, re.IGNORECASE):
            return True
    # Fixed stripped escape: the original printed "tail:n" (lost backslash).
    print("Gateway log tail:\n", sh("tail -n 220 /tmp/openclaw_gateway.log || true", capture=True, check=False))
    raise RuntimeError("OpenClaw gateway did not start cleanly.")
We write a schema-valid OpenClaw configuration file and initialize the local gateway settings. We declare the workspace, model routing, and tool behavior according to the formal structure of the OpenClaw configuration. Then we start the OpenClaw gateway in loopback mode to confirm that the agent runtime comes up correctly and securely.
def pick_model_from_openclaw():
    """Query ``openclaw models list --json`` and choose an OpenAI model ref.

    Parses the CLI's JSON output defensively (the list may be wrapped under
    "models"/"items"/"list", and entries may be strings or dicts), keeps only
    ``openai/``-prefixed refs, and prefers a fixed priority order.

    Returns:
        A model ref string; falls back to "openai/gpt-4o-mini" when the CLI
        output is missing or unparseable.
    """
    out = sh("openclaw models list --json", capture=True, check=False) or ""
    refs = []
    try:
        data = json.loads(out)
        if isinstance(data, dict):
            # Unwrap whichever container key the CLI happened to use.
            for k in ["models", "items", "list"]:
                if isinstance(data.get(k), list):
                    data = data[k]
                    break
        if isinstance(data, list):
            for it in data:
                if isinstance(it, str) and "/" in it:
                    refs.append(it)
                elif isinstance(it, dict):
                    for key in ["ref", "id", "model", "name"]:
                        v = it.get(key)
                        if isinstance(v, str) and "/" in v:
                            refs.append(v)
                            break
    except Exception:
        pass  # best effort: fall through to the hard-coded default below
    refs = [r for r in refs if r.startswith("openai/")]
    preferred = ["openai/gpt-4o-mini", "openai/gpt-4.1-mini", "openai/gpt-4o", "openai/gpt-5.2-mini", "openai/gpt-5.2"]
    for p in preferred:
        if p in refs:
            return p
    return refs[0] if refs else "openai/gpt-4o-mini"
def set_default_model(model_ref):
    """Set ``agents.defaults.model.primary`` to *model_ref* via the CLI (best effort)."""
    sh(f'openclaw config set agents.defaults.model.primary "{model_ref}"', check=False)
We dynamically query OpenClaw for available models and select an appropriate OpenAI-provider model. We set the default agent model so that OpenClaw routes all reasoning requests to the selected model. Here, we let OpenClaw handle model resolution and provider validation seamlessly.
def create_custom_skill_rag():
    """Create the ``colab_rag_lab`` skill: a deterministic local RAG tool
    plus the SKILL.md manifest OpenClaw uses to discover and invoke it.

    Writes ``rag_tool.py`` (a standalone script that self-installs its
    dependencies, embeds a tiny corpus with sentence-transformers, and
    answers a CLI question via FAISS inner-product search) and a SKILL.md
    that pins the exact exec command the agent must run.
    """
    home = pathlib.Path.home()
    skill_dir = home / ".openclaw" / "workspace" / "skills" / "colab_rag_lab"
    skill_dir.mkdir(parents=True, exist_ok=True)
    tool_py = skill_dir / "rag_tool.py"
    # Raw string: the \s and \n escapes below belong to the generated script,
    # not to this file. (The extraction had stripped these backslashes.)
    tool_py.write_text(textwrap.dedent(r"""
        import sys, re, subprocess
        def pip(*args): subprocess.check_call([sys.executable, "-m", "pip", "-q", "install", *args])
        q = " ".join(sys.argv[1:]).strip()
        if not q:
            print("Usage: python3 rag_tool.py <question>", file=sys.stderr)
            raise SystemExit(2)
        try:
            import numpy as np
        except Exception:
            pip("numpy"); import numpy as np
        try:
            import faiss
        except Exception:
            pip("faiss-cpu"); import faiss
        try:
            from sentence_transformers import SentenceTransformer
        except Exception:
            pip("sentence-transformers"); from sentence_transformers import SentenceTransformer
        CORPUS = [
            ("OpenClaw basics", "OpenClaw runs an agent runtime behind a local gateway and can execute tools and skills in a controlled way."),
            ("Strict config schema", "OpenClaw gateway refuses to start if openclaw.json has unknown keys; use openclaw doctor to diagnose issues."),
            ("Exec tool config", "tools.exec config sets timeouts and behavior; it does not use an enabled flag in the config schema."),
            ("Gateway auth", "Even on localhost, gateway auth exists; auth.mode can be none for trusted loopback-only setups."),
            ("Skills", "Skills define repeatable tool-use patterns; agents can select a skill and then call exec with a fixed command template.")
        ]
        docs = []
        for title, body in CORPUS:
            sents = re.split(r'(?<=[.!?])\s+', body.strip())
            for i, s in enumerate(sents):
                s = s.strip()
                if s:
                    docs.append((f"{title}#{i+1}", s))
        model = SentenceTransformer("all-MiniLM-L6-v2")
        emb = model.encode([d[1] for d in docs], normalize_embeddings=True).astype("float32")
        index = faiss.IndexFlatIP(emb.shape[1])
        index.add(emb)
        q_emb = model.encode([q], normalize_embeddings=True).astype("float32")
        D, I = index.search(q_emb, 4)
        hits = []
        for score, idx in zip(D[0].tolist(), I[0].tolist()):
            if idx >= 0:
                ref, txt = docs[idx]
                hits.append((score, ref, txt))
        print("Answer (grounded to retrieved snippets):\n")
        print("Question:", q, "\n")
        print("Key points:")
        for score, ref, txt in hits:
            print(f"- ({score:.3f}) {txt} [{ref}]")
        print("\nCitations:")
        for _, ref, _ in hits:
            print(f"- {ref}")
    """).strip() + "\n")
    sh(f"chmod +x {shlex.quote(str(tool_py))}")
    skill_md = skill_dir / "SKILL.md"
    # NOTE(review): the command template's placeholder was lost in extraction
    # (original showed an empty ""); "<question>" is the evident intent.
    skill_md.write_text(textwrap.dedent(f"""
        ---
        name: colab_rag_lab
        description: Deterministic local RAG invoked via a fixed exec command.
        ---
        # Colab RAG Lab
        ## Tooling rule (strict)
        Always run exactly:
        `python3 {tool_py} "<question>"`
        ## Output rule
        Return the tool output verbatim.
    """).strip() + "\n")
We create a custom OpenClaw skill inside the configured workspace's skills directory. We define a deterministic usage pattern in SKILL.md and pair it with a structured RAG tool script that the agent can invoke. We rely on OpenClaw's skill-loading mechanism to automatically register this tool and make it available at agent runtime.
def refresh_skills():
    """Prompt the agent to rescan its skills directory so new skills register (best effort)."""
    sh('openclaw agent --message "refresh skills" --thinking low', check=False)
def run_openclaw_agent_demo():
    """Run one agent turn that should select the ``colab_rag_lab`` skill.

    The prompt names the skill explicitly and asks a question the skill's
    corpus can answer; the agent's captured output is printed verbatim.
    """
    prompt = (
        'Use the skill `colab_rag_lab` to answer: '
        'Why did my gateway refuse to start when I used agents.defaults.thinking and tools.exec.enabled, '
        'and what are the correct config knobs instead?'
    )
    out = sh(f'openclaw agent --message {shlex.quote(prompt)} --thinking high', capture=True, check=False)
    print(out)
# --- Orchestration: configure, launch, and exercise the OpenClaw runtime ---
require_secret_env("OPENAI_API_KEY")
install_node_22_and_openclaw()
cfg_path = write_openclaw_config_valid()
print("Wrote schema-valid config:", cfg_path)
print("\n--- openclaw doctor ---\n")
print(sh("openclaw doctor", capture=True, check=False))
start_gateway_background()
model = pick_model_from_openclaw()
set_default_model(model)
print("Selected model:", model)
create_custom_skill_rag()
refresh_skills()
print("\n--- OpenClaw agent run (skill-driven) ---\n")
run_openclaw_agent_demo()
print("\n--- Gateway log tail ---\n")
print(sh("tail -n 180 /tmp/openclaw_gateway.log || true", capture=True, check=False))
We refresh OpenClaw's skill registration and then prompt an OpenClaw agent with a skill-directed request. We let OpenClaw perform the inference, select the skill, execute the exec tool, and return the grounded output. Here, we demonstrate the complete OpenClaw orchestration cycle, from configuration to autonomous agent execution.
In conclusion, we have built and exercised a complete OpenClaw workflow in a managed Colab environment. We verified the configuration schema, initialized the gateway, dynamically selected the model provider, registered the skill, and invoked it through the OpenClaw agent interface. Rather than treat OpenClaw as a thin wrapper, we use it as a central orchestration layer that manages authentication, skill loading, tooling, and runtime management. We have shown how OpenClaw combines controlled execution with automated reasoning, demonstrating how it can serve as a solid foundation for building secure, scalable agent systems in production-oriented environments.
Check out the full code here. Also, feel free to follow us on Twitter, and don't forget to join our 120k+ ML SubReddit and subscribe to our newsletter. Are you on Telegram? You can now join us on Telegram as well.
Want to work with us on promoting your GitHub repo, Hugging Face page, product release, webinar, etc.? Contact us.


