Implement Tools and Automation for OpenClaw

This commit is contained in:
2026-02-14 09:13:00 -03:00
parent 93bb416da6
commit 4712055cc6
3 changed files with 493 additions and 216 deletions

BIN
.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -2,152 +2,333 @@ import os
import time
import json
import uuid
from typing import List, Optional, Dict, Any, Iterable
from typing import Optional, List, Dict, Any
import re
import subprocess
import requests
import oci
from fastapi import FastAPI, HTTPException
from fastapi import Request
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, ConfigDict
import requests
import os
import requests
def get_weather_from_api(city: str) -> str:
    """Look up the current weather for *city* using the free Open-Meteo APIs.

    Performs two HTTP calls: a geocoding lookup (city name -> lat/lon)
    followed by a current-weather query. Every failure mode is reported as
    a plain string so the agent loop can feed it straight back to the model.
    """
    print("LOG: EXECUTE TOOL WEATHER")
    try:
        # Step 1: resolve the city name to coordinates (no API key needed).
        geo_response = requests.get(
            "https://geocoding-api.open-meteo.com/v1/search",
            params={"name": city, "count": 1, "language": "pt", "format": "json"},
            timeout=10,
        )
        if geo_response.status_code != 200:
            return f"Erro geocoding: {geo_response.text}"
        results = geo_response.json().get("results") or []
        if not results:
            return f"Cidade '{city}' não encontrada."
        location = results[0]
        # Step 2: fetch the current conditions for those coordinates.
        weather_response = requests.get(
            "https://api.open-meteo.com/v1/forecast",
            params={
                "latitude": location["latitude"],
                "longitude": location["longitude"],
                "current_weather": True,
                "timezone": "auto",
            },
            timeout=10,
        )
        if weather_response.status_code != 200:
            return f"Erro clima: {weather_response.text}"
        current = weather_response.json().get("current_weather")
        if not current:
            return "Dados de clima indisponíveis."
        resolved_name = location["name"]
        country = location.get("country", "")
        temperature = current["temperature"]
        windspeed = current["windspeed"]
        return (
            f"Temperatura atual em {resolved_name}, {country}: {temperature}°C.\n"
            f"Velocidade do vento: {windspeed} km/h."
        )
    except Exception as e:
        return f"Weather tool error: {str(e)}"
# ============================================================
# CONFIG
# ============================================================
OCI_CONFIG_FILE = os.getenv("OCI_CONFIG_FILE", os.path.expanduser("~/.oci/config"))
OCI_PROFILE = os.getenv("OCI_PROFILE", "DEFAULT")
OCI_COMPARTMENT_ID = os.getenv("OCI_COMPARTMENT_ID", "<YOUR_COMPARTMENT_ID>")
OCI_COMPARTMENT_ID = os.getenv("OCI_COMPARTMENT_ID", "ocid1.compartment.oc1..aaaaaaaaexpiw4a7dio64mkfv2t273s2hgdl6mgfvvyv7tycalnjlvpvfl3q")
OCI_GENAI_ENDPOINT = os.getenv(
"OCI_GENAI_ENDPOINT",
"https://inference.generativeai.<region>.oci.oraclecloud.com"
"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
)
OPENCLAW_TOOLS_ACTIVE = True
SYSTEM_AGENT_PROMPT = """
You are an enterprise AI agent.
You MUST respond ONLY in valid JSON.
Available tools:
- weather(city: string)
Response format:
If you need to call a tool:
{
"action": "call_tool",
"tool": "<tool_name>",
"arguments": { ... }
}
If you are returning a final answer:
{
"action": "final_answer",
"content": "<final user answer>"
}
Never include explanations outside JSON.
"""
TOOLS = {
"weather": lambda city: get_weather_from_api(city)
}
if not OCI_COMPARTMENT_ID:
raise RuntimeError("OCI_COMPARTMENT_ID not defined")
# Mapeamento OpenAI → OCI
MODEL_MAP = {
"gpt-4o-mini": "openai.gpt-4.1",
"text-embedding-3-small": "cohere.embed-multilingual-v3.0",
"gpt-5": "openai.gpt-4.1",
"openai/gpt-5": "openai.gpt-4.1",
"openai-compatible/gpt-5": "openai.gpt-4.1",
}
app = FastAPI(title="OCI OpenAI-Compatible Gateway")
# ============================================================
# Pydantic Models (OpenAI-compatible)
# ============================================================
class Message(BaseModel):
role: str
content: str
class EmbeddingRequest(BaseModel):
model: str
input: List[str] | str
class ChatRequest(BaseModel):
model: str
messages: List[Message]
temperature: Optional[float] = None
max_tokens: Optional[int] = None
max_completion_tokens: Optional[int] = None
top_p: Optional[float] = None
stream: Optional[bool] = False
tools: Optional[Any] = None
tool_choice: Optional[Any] = None
model_config = ConfigDict(extra="allow")
# ============================================================
# OCI SIGNER
# ============================================================
def get_signer():
config = oci.config.from_file(OCI_CONFIG_FILE, OCI_PROFILE)
signer = oci.signer.Signer(
return oci.signer.Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config["key_file"],
pass_phrase=config.get("pass_phrase"),
)
return signer
# ============================================================
# CONVERSION HELPERS
# OCI CHAT CALL (OPENAI FORMAT)
# ============================================================
def openai_to_oci_messages(messages: list, model_id: str) -> list:
oci_messages = []
for m in messages:
role = m.get("role", "").upper()
def _openai_messages_to_generic(messages: list) -> list:
"""
OpenAI: {"role":"user","content":"..."}
Generic: {"role":"USER","content":[{"type":"TEXT","text":"..."}]}
"""
out = []
for m in messages or []:
role = (m.get("role") or "user").upper()
# OCI GENERIC geralmente espera USER/ASSISTANT
if role == "SYSTEM":
role = "SYSTEM"
elif role == "ASSISTANT":
role = "ASSISTANT"
else:
role = "USER"
elif role == "TOOL":
role = "USER"
oci_messages.append({
content = m.get("content", "")
# Se vier lista (OpenAI multimodal), extrai texto
if isinstance(content, list):
parts = []
for item in content:
if isinstance(item, dict) and item.get("type") in ("text", "TEXT"):
parts.append(item.get("text", ""))
content = "\n".join(parts)
out.append({
"role": role,
"content": [
{
"type": "TEXT",
"text": m.get("content", "")
}
]
"content": [{"type": "TEXT", "text": str(content)}]
})
return out
return oci_messages
def build_openai_response(model: str, text: str) -> Dict[str, Any]:
return {
"id": f"chatcmpl-{uuid.uuid4().hex}",
"object": "chat.completion",
"created": int(time.time()),
"model": model,
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": text,
},
"finish_reason": "stop",
}
],
"usage": {
"prompt_tokens": None,
"completion_tokens": None,
"total_tokens": None,
def call_oci_chat(body: dict):
signer = get_signer()
model = body.get("model")
oci_model = MODEL_MAP.get(model, model)
url = f"{OCI_GENAI_ENDPOINT}/20231130/actions/chat"
generic_messages = _openai_messages_to_generic(body.get("messages", []))
payload = {
"compartmentId": OCI_COMPARTMENT_ID,
"servingMode": {
"servingType": "ON_DEMAND",
"modelId": oci_model
},
"chatRequest": {
"apiFormat": "GENERIC",
"messages": generic_messages,
"maxTokens": int(body.get("max_tokens", 1024)),
"temperature": float(body.get("temperature", 0.0)),
"topP": float(body.get("top_p", 1.0)),
}
}
def normalize_messages(messages: list) -> list:
normalized = []
# ⚠️ IMPORTANTÍSSIMO:
# Em GENERIC, NÃO envie tools/tool_choice/stream (você orquestra tools no proxy)
# Se você mandar, pode dar 400 "correct format of request".
for m in messages:
content = m.get("content")
print("\n=== PAYLOAD FINAL (GENERIC) ===")
print(json.dumps(payload, indent=2, ensure_ascii=False))
# Caso OpenClaw envie array [{type:"text", text:"..."}]
if isinstance(content, list):
text_parts = []
for item in content:
if isinstance(item, dict) and item.get("type") == "text":
text_parts.append(item.get("text", ""))
content = "\n".join(text_parts)
r = requests.post(url, json=payload, auth=signer)
if r.status_code != 200:
print("OCI ERROR:", r.text)
raise HTTPException(status_code=r.status_code, detail=r.text)
normalized.append({
"role": m.get("role"),
"content": content
})
return r.json()["chatResponse"]
return normalized
def detect_tool_call(text: str):
    """Scan *text* for an inline ``exec(<command> <args>)`` invocation.

    Returns a dict with the tool name and the raw argument string, or
    ``None`` when no exec-style call is present.
    """
    found = re.search(r"exec\s*\(\s*([^\s]+)\s*(.*?)\s*\)", text)
    if found is None:
        return None
    command, args = found.group(1), found.group(2)
    return {
        "tool": "exec",
        "args_raw": f"{command} {args}".strip(),
    }
def execute_exec_command(command: str):
    """Run *command* through the system shell and return its combined output.

    stderr is merged into stdout; on a non-zero exit status the (partial)
    output is returned rather than raising, mirroring the tool-result
    contract used by the rest of the gateway.

    SECURITY NOTE: this deliberately executes model-supplied text with
    ``shell=True`` — only expose it in trusted environments.
    """
    print(f"LOG: EXEC COMMAND: {command}")
    completed = subprocess.run(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return completed.stdout.decode()
def execute_real_tool(name, args):
    """Dispatch a tool invocation by *name* with its keyword-argument dict."""
    if name != "weather":
        return "Tool not implemented"
    return get_weather_from_api(args.get("city"))
def _extract_generic_text(oci_message: dict) -> str:
content = oci_message.get("content")
if isinstance(content, list):
return "".join([i.get("text", "") for i in content if isinstance(i, dict) and i.get("type") == "TEXT"])
if isinstance(content, str):
return content
return str(content)
def agent_loop(body: dict, max_iterations=5):
    """Run a small agent loop: query the model, execute any requested tool,
    feed the result back, and repeat until a final answer is produced.

    The history is kept in OpenAI message format; ``call_oci_chat`` converts
    it to OCI GENERIC on every round-trip.

    Args:
        body: OpenAI-style chat request (``model``, ``messages``, ...).
        max_iterations: safety cap on model round-trips.

    Returns:
        The last raw OCI ``chatResponse`` dict — the final answer, a
        non-JSON reply (contract broken by the model), or whatever the
        loop ended on when the iteration cap was reached.
    """
    messages = [{"role": "system", "content": SYSTEM_AGENT_PROMPT}]
    messages.extend(body.get("messages", []))
    response = None  # defined even if max_iterations <= 0
    for _ in range(max_iterations):
        response = call_oci_chat({**body, "messages": messages})
        text = _extract_generic_text(response["choices"][0]["message"])
        try:
            agent_output = json.loads(text)
        except json.JSONDecodeError:
            # Bug fix: was a bare `except:` (also swallowed SystemExit /
            # KeyboardInterrupt). Model broke the JSON-only contract —
            # surface its reply as-is.
            return response
        if agent_output.get("action") == "call_tool":
            tool_name = agent_output.get("tool")
            args = agent_output.get("arguments", {})
            # Keep the history: (1) the agent's decision, (2) the tool outcome.
            messages.append({"role": "assistant", "content": text})
            if tool_name not in TOOLS:
                # Report the unknown tool back to the model as an error.
                messages.append({"role": "user", "content": json.dumps({
                    "tool_error": f"Tool '{tool_name}' not implemented"
                })})
                continue
            try:
                tool_result = TOOLS[tool_name](**args)
            except Exception as e:
                # Robustness fix: malformed arguments from the model used to
                # crash the whole request; report the failure back instead.
                tool_result = f"Tool execution error: {e}"
            messages.append({"role": "user", "content": json.dumps({
                "tool_result": {
                    "tool": tool_name,
                    "arguments": args,
                    "result": tool_result
                }
            }, ensure_ascii=False)})
            continue
        if agent_output.get("action") == "final_answer":
            return response
    return response
# ============================================================
# STREAMING ADAPTER
# ============================================================
def stream_openai_format(chat_response: dict, model: str):
def fake_stream(text: str, model: str):
completion_id = f"chatcmpl-{uuid.uuid4().hex}"
created = int(time.time())
content = chat_response["choices"][0]["message"]["content"]
yield f"data: {json.dumps({
'id': completion_id,
'object': 'chat.completion.chunk',
@@ -160,8 +341,8 @@ def fake_stream(text: str, model: str):
}]
})}\n\n"
for i in range(0, len(text), 40):
chunk = text[i:i+40]
for i in range(0, len(content), 60):
chunk = content[i:i+60]
yield f"data: {json.dumps({
'id': completion_id,
'object': 'chat.completion.chunk',
@@ -176,75 +357,14 @@ def fake_stream(text: str, model: str):
yield "data: [DONE]\n\n"
# ============================================================
# OCI CHAT CALL (REST 20231130)
# ============================================================
def call_oci_chat(request: dict) -> str:
    """Call the OCI Generative AI chat action and return the reply text.

    Converts OpenAI-style messages into the OCI GENERIC chat format, signs
    the request with the tenancy API key, and extracts the first choice's
    text from the response.

    Args:
        request: OpenAI-style body with "model", "messages", and optional
            "max_tokens" / "temperature" / "top_p".

    Returns:
        The assistant reply text (first TEXT part of the first choice).

    Raises:
        HTTPException: 500 with the raw OCI error body on any non-200 reply.
    """
    signer = get_signer()
    model = request.get("model")
    # Translate an OpenAI model alias to its OCI model id; unknown names
    # pass through unchanged.
    oci_model = MODEL_MAP.get(model, model)
    url = f"{OCI_GENAI_ENDPOINT}/20231130/actions/chat"
    # OpenAI {"role","content"} -> GENERIC {"role","content":[{"type":"TEXT",...}]}
    oci_messages = []
    for m in request.get("messages", []):
        oci_messages.append({
            "role": m["role"].upper(),
            "content": [
                {
                    "type": "TEXT",
                    "text": m["content"]
                }
            ]
        })
    payload = {
        "compartmentId": OCI_COMPARTMENT_ID,
        "servingMode": {
            "servingType": "ON_DEMAND",
            "modelId": oci_model
        },
        "chatRequest": {
            "apiFormat": "GENERIC",
            "messages": oci_messages,
            "maxTokens": request.get("max_tokens", 512),
            "temperature": request.get("temperature", 0.7),
            "topP": request.get("top_p", 0.9)
        }
    }
    print("\n================ OCI PAYLOAD ================")
    print(json.dumps(payload, indent=2, ensure_ascii=False))
    print("=============================================\n")
    response = requests.post(
        url,
        json=payload,
        auth=signer,
        headers={"Content-Type": "application/json"},
    )
    if response.status_code != 200:
        print("\n================ OCI ERROR =================")
        print(response.text)
        print("===========================================\n")
        raise HTTPException(status_code=500, detail=response.text)
    data = response.json()
    # Response path for the GENERIC format:
    # chatResponse.choices[0].message.content[0].text
    choices = data["chatResponse"]["choices"]
    message = choices[0]["message"]
    content = message["content"]
    return content[0]["text"]
# ============================================================
# ENDPOINTS
# ============================================================
@app.get("/health")
def health():
    """Liveness probe: always reports the gateway process as up."""
    return {"status": "ok"}
@app.get("/v1/models")
def list_models():
return {
@@ -255,72 +375,206 @@ def list_models():
],
}
# ------------------------------------------------------------
# CHAT COMPLETIONS
# ------------------------------------------------------------
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
body = await request.json()
# chat_response = call_oci_chat(body)
# chat_response = agent_loop(body)
print("\n=== OPENCLAW BODY ===")
print(json.dumps(body, indent=2))
print("=====================\n")
if OPENCLAW_TOOLS_ACTIVE:
chat_response = call_oci_chat(body)
body["messages"] = normalize_messages(body["messages"])
text = call_oci_chat(body)
oci_choice = chat_response["choices"][0]
oci_message = oci_choice["message"]
content_text = _extract_generic_text(oci_message)
# 🔥 DETECT EXEC
exec_match = re.search(r"\(exec\s+(.*?)\)", content_text)
if exec_match:
command = exec_match.group(1)
result = execute_exec_command(command)
# Injeta resultado e chama novamente
new_messages = body["messages"] + [
{"role": "assistant", "content": content_text},
{"role": "user", "content": f"Tool result:\n{result}"}
]
chat_response = call_oci_chat({
**body,
"messages": new_messages
})
else:
# 🔥 Modo enterprise → seu agent_loop controla tools
chat_response = agent_loop(body)
print("FINAL RESPONSE:", json.dumps(chat_response, indent=2))
oci_choice = chat_response["choices"][0]
oci_message = oci_choice["message"]
# 🔥 SE É TOOL CALL → RETORNA DIRETO
if oci_message.get("tool_calls"):
return chat_response
content_text = ""
content = oci_message.get("content")
if isinstance(content, list):
for item in content:
if isinstance(item, dict) and item.get("type") == "TEXT":
content_text += item.get("text", "")
elif isinstance(content, str):
content_text = content
else:
content_text = str(content)
finish_reason = oci_choice.get("finishReason", "stop")
# 🔥 SE STREAMING
if body.get("stream"):
async def event_stream():
completion_id = f"chatcmpl-{uuid.uuid4().hex}"
created = int(time.time())
# role chunk
yield f"data: {json.dumps({
'id': completion_id,
'object': 'chat.completion.chunk',
'created': created,
'model': body['model'],
'choices': [{
'index': 0,
'delta': {'role': 'assistant'},
'finish_reason': None
}]
})}\n\n"
# content chunks
for i in range(0, len(content_text), 50):
chunk = content_text[i:i+50]
yield f"data: {json.dumps({
'id': completion_id,
'object': 'chat.completion.chunk',
'created': created,
'model': body['model'],
'choices': [{
'index': 0,
'delta': {'content': chunk},
'finish_reason': None
}]
})}\n\n"
# final chunk
yield f"data: {json.dumps({
'id': completion_id,
'object': 'chat.completion.chunk',
'created': created,
'model': body['model'],
'choices': [{
'index': 0,
'delta': {},
'finish_reason': finish_reason
}]
})}\n\n"
yield "data: [DONE]\n\n"
return StreamingResponse(
fake_stream(text, body["model"]),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no"
}
event_stream(),
media_type="text/event-stream"
)
return build_openai_response(body["model"], text)
# 🔥 SE NÃO FOR STREAM
return {
"id": f"chatcmpl-{uuid.uuid4().hex}",
"object": "chat.completion",
"created": int(time.time()),
"model": body["model"],
"choices": [{
"index": 0,
"message": {
"role": "assistant",
"content": content_text
},
"finish_reason": finish_reason
}],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
}
}
# ------------------------------------------------------------
# RESPONSES (OpenAI 2024 format)
# ------------------------------------------------------------
# ============================================================
# HEALTHCHECK
# ============================================================
@app.post("/v1/responses")
async def responses(request: Request):
@app.get("/health")
def health():
return {"status": "ok"}
body = await request.json()
# chat_response = call_oci_chat(body)
chat_response = agent_loop(body)
oci_choice = chat_response["choices"][0]
oci_message = oci_choice["message"]
content_text = ""
content = oci_message.get("content")
if isinstance(content, list):
for item in content:
if item.get("type") == "TEXT":
content_text += item.get("text", "")
elif isinstance(content, str):
content_text = content
return {
"id": f"resp_{uuid.uuid4().hex}",
"object": "response",
"created": int(time.time()),
"model": body.get("model"),
"output": [
{
"type": "message",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": content_text
}
]
}
],
"usage": {
"input_tokens": 0,
"output_tokens": 0,
"total_tokens": 0
}
}
@app.middleware("http")
async def log_requests(request: Request, call_next):
body = await request.body()
print("\n>>> ENDPOINT:", request.method, request.url.path)
body = await request.body()
try:
body_json = json.loads(body.decode())
print(">>> BODY:", json.dumps(body_json, indent=2))
except:
body_json = body.decode()
print(">>> BODY RAW:", body.decode())
print("\n>>> HIT:", request.method, request.url.path)
print(">>> BODY:", json.dumps(body_json, indent=2, ensure_ascii=False))
# NÃO mexe no request._receive
response = await call_next(request)
return response
@app.post("/v1/responses")
async def responses_passthrough(request: Request):
body = await request.json()
body["messages"] = normalize_messages(body.get("messages", []))
text = call_oci_chat(body)
if body.get("stream"):
return StreamingResponse(
fake_stream(text, body["model"]),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no"
}
)
return build_openai_response(body["model"], text)
print(">>> STATUS:", response.status_code)
return response

View File

@@ -1,11 +1,11 @@
{
"meta": {
"lastTouchedVersion": "2026.2.6-3",
"lastTouchedAt": "2026-02-12T10:50:18.766Z"
"lastTouchedVersion": "2026.2.1",
"lastTouchedAt": "2026-02-14T03:24:55.922Z"
},
"wizard": {
"lastRunAt": "2026-02-12T10:50:18.763Z",
"lastRunVersion": "2026.2.6-3",
"lastRunAt": "2026-02-14T03:24:55.917Z",
"lastRunVersion": "2026.2.1",
"lastRunCommand": "onboard",
"lastRunMode": "local"
},
@@ -17,8 +17,8 @@
"api": "openai-completions",
"models": [
{
"id": "gpt-4o-mini",
"name": "gpt-4o-mini",
"id": "gpt-5",
"name": "gpt-5",
"reasoning": false,
"input": ["text"],
"cost": { "input": 0, "output": 0, "cacheRead": 0, "cacheWrite": 0 },
@@ -32,7 +32,18 @@
"agents": {
"defaults": {
"model": {
"primary": "openai-compatible/gpt-4o-mini"
"primary": "openai-compatible/gpt-5"
},
"models": {
"openai-compatible/gpt-5": {}
},
"workspace": "/home/hoshikawa2/.openclaw/workspace",
"compaction": {
"mode": "safeguard"
},
"maxConcurrent": 4,
"subagents": {
"maxConcurrent": 8
}
}
},
@@ -40,8 +51,20 @@
"ackReactionScope": "group-mentions"
},
"commands": {
"native": false,
"nativeSkills": false
"native": "auto",
"nativeSkills": "auto"
},
"channels": {
"whatsapp": {
"dmPolicy": "allowlist",
"selfChatMode": true,
"allowFrom": [
"+5511999961711"
],
"groupPolicy": "allowlist",
"mediaMaxMb": 50,
"debounceMs": 0
}
},
"gateway": {
"port": 18789,
@@ -49,7 +72,7 @@
"bind": "loopback",
"auth": {
"mode": "token",
"token": "<Openclaw TOKEN - maintain your current config json>"
"token": "<YOUR_GATEWAY_TOKEN - generate a fresh secret; never commit it>"
},
"tailscale": {
"mode": "off",