Writing Your First Plugin: Channel, Tool and Provider Type Walkthroughs
Chapter 24 — Writing Your First Plugin: Channel / Tool / Provider in Practice
24.1 Plugin Directory Structure and Manifest Format
Before writing Plugin code, establish the correct directory structure. Whether building a Channel, Tool, or Provider Plugin, the directory structure follows the same convention.
Standard Plugin Directory Structure
my-plugin/
├── plugin.manifest.yaml   # Control plane config (required)
├── package.json           # npm package descriptor
├── tsconfig.json          # TypeScript configuration
├── src/
│   └── index.ts           # Plugin entry (development)
├── dist/
│   └── index.js           # Compiled output (production)
└── README.md              # Plugin documentation
package.json Base Configuration
{
"name": "@myorg/openclaw-plugin-example",
"version": "1.0.0",
"description": "Example OpenClaw Plugin",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"dev": "tsc --watch",
"prepublish": "npm run build"
},
"peerDependencies": {
"@openclaw/plugin-api": "^2.0.0"
},
"devDependencies": {
"@openclaw/plugin-api": "^2.0.0",
"typescript": "^5.0.0"
}
}
Recommended tsconfig.json
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "bundler",
"strict": true,
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"outDir": "dist",
"rootDir": "src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}
Complete Plugin Manifest Format
# plugin.manifest.yaml — full field documentation
id: my-example-plugin # Required: globally unique ID (lowercase, numbers, hyphens)
version: "1.0.0" # Required: SemVer version
displayName: Example Plugin # Required: user-visible name
description: > # Required: functional description
An example plugin demonstrating all manifest fields.
author: MyOrg # Recommended: publisher name or org
homepage: https://github.com/myorg/my-example-plugin
# Entry points
entryPoint: dist/index.js # Required: production entry (pre-built JS)
devEntryPoint: src/index.ts # Recommended: dev entry (TypeScript source)
# API compatibility
apiVersion: "2.0" # Plugin API version requirement
# Capability declarations (affects Registry behavior)
capabilities:
- channel # Registers a message channel
- tool # Registers a tool
- provider # Registers an LLM Provider
- command # Registers a CLI command
- httpRoute # Registers an HTTP endpoint
# Permission declarations (Safety Gates will check these)
permissions:
network:
- "*.slack.com"
- "slack.com"
fileSystem:
- read: "~/.config/my-plugin/**"
# Configuration schema (JSON Schema format)
configuration:
schema:
type: object
properties:
apiKey:
type: string
secret: true # secret: true triggers encrypted storage
description: API key for authentication
enabled:
type: boolean
default: true
required: [apiKey]
24.2 Channel Plugin in Practice: Adding a New Messaging Platform
Requirements
Build a Discord Channel Plugin that enables the OpenClaw Agent to receive and respond to Discord messages.
Complete Implementation
// src/index.ts
import type { PluginApi, ChannelMessage, ChannelContext } from '@openclaw/plugin-api'
import { Client, GatewayIntentBits, Message } from 'discord.js'
/**
 * Plugin entry point: wires a Discord bot up as an OpenClaw message channel.
 *
 * Reads botToken / guildId / channelAllowlist / commandPrefix from the plugin
 * configuration, connects a discord.js client, and registers a channel whose
 * lifecycle hooks translate between Discord messages and ChannelMessage.
 */
export async function setup(api: PluginApi) {
  const config = api.getConfig<{
    botToken: string
    guildId: string
    channelAllowlist: string[]
    commandPrefix: string
  }>()

  // Initialize the Discord client with the minimal set of gateway intents.
  // NOTE(review): MessageContent is a privileged intent — presumably it must
  // also be enabled in the Discord Developer Portal; verify for your bot.
  const discordClient = new Client({
    intents: [
      GatewayIntentBits.Guilds,
      GatewayIntentBits.GuildMessages,
      GatewayIntentBits.MessageContent,
    ],
  })

  // Register the Discord message channel
  api.registerChannel({
    id: 'discord',
    displayName: 'Discord',

    // Channel initialization: connect to Discord and start listening
    onInit: async (channelHandler) => {
      discordClient.on('messageCreate', async (message: Message) => {
        // Filter: ignore bot messages (including our own) to avoid reply loops
        if (message.author.bot) return

        // Filter: only process channels in the allowlist (empty = all channels)
        if (config.channelAllowlist.length > 0 &&
            !config.channelAllowlist.includes(message.channelId)) {
          return
        }

        // Filter: check command prefix (e.g., "!ai "); empty prefix matches all
        if (config.commandPrefix && !message.content.startsWith(config.commandPrefix)) {
          return
        }

        // Strip the prefix so the Agent only sees the actual request text
        const content = config.commandPrefix
          ? message.content.slice(config.commandPrefix.length).trim()
          : message.content

        // Build a standard ChannelMessage and submit it to the Agent for processing
        const channelMessage: ChannelMessage = {
          id: message.id,
          content,
          author: {
            id: message.author.id,
            displayName: message.author.displayName,
          },
          // The Discord channel id doubles as the conversation thread id
          threadId: message.channelId,
          timestamp: message.createdAt,
          attachments: message.attachments.map(att => ({
            type: att.contentType?.startsWith('image/') ? 'image' : 'file',
            url: att.url,
            name: att.name ?? 'attachment',
          })),
        }

        // Trigger Agent inference
        await channelHandler.onMessage(channelMessage)
      })

      await discordClient.login(config.botToken)
      console.log(`[discord-plugin] Logged in as ${discordClient.user?.tag}`)
    },

    // Send the Agent's response back to the originating Discord channel
    sendMessage: async (response, originalMessage) => {
      const channel = await discordClient.channels.fetch(originalMessage.threadId)
      if (!channel?.isTextBased()) {
        throw new Error(`Channel ${originalMessage.threadId} is not text-based`)
      }
      // Discord rejects messages over 2000 characters; split with headroom
      const chunks = splitIntoChunks(response.content, 1900)
      for (const chunk of chunks) {
        await channel.send(chunk)
      }
    },

    // Support typing indicator (user sees "is typing...")
    sendTypingIndicator: async (threadId) => {
      const channel = await discordClient.channels.fetch(threadId)
      if (channel?.isTextBased()) {
        await channel.sendTyping()
      }
    },

    // Clean up when the channel is destroyed. destroy() is asynchronous in
    // discord.js v14, so await it to ensure the gateway connection is fully
    // closed before the plugin is torn down.
    onDestroy: async () => {
      await discordClient.destroy()
      console.log('[discord-plugin] Discord client disconnected')
    },
  })
}
// Helper function: split long text into multiple chunks
/**
 * Split long text into chunks of at most maxLength characters, preferring to
 * break on newlines so messages stay readable.
 *
 * Fixes an edge case in line-based splitting: a single line longer than
 * maxLength is now hard-split, so no returned chunk can ever exceed the limit
 * (an oversized line would otherwise be emitted as one oversized chunk and be
 * rejected by Discord's 2000-character cap).
 *
 * @param text      The full message text to split.
 * @param maxLength Maximum characters allowed per chunk (must be > 0).
 * @returns Ordered, trimmed chunks; empty input yields an empty array.
 */
function splitIntoChunks(text: string, maxLength: number): string[] {
  const chunks: string[] = []
  let current = ''

  // Flush the accumulator into the output (trimmed), skipping empty runs.
  const flush = () => {
    if (current) chunks.push(current.trim())
    current = ''
  }

  for (const rawLine of text.split('\n')) {
    let line = rawLine

    // Hard-split any single line that exceeds the limit on its own.
    while (line.length > maxLength) {
      flush()
      chunks.push(line.slice(0, maxLength))
      line = line.slice(maxLength)
    }

    if (current.length + line.length + 1 > maxLength) {
      // Adding this line would overflow the current chunk: start a new one.
      flush()
      current = line
    } else {
      current += (current ? '\n' : '') + line
    }
  }
  flush()
  return chunks
}
Corresponding Manifest
# plugin.manifest.yaml (Discord Channel Plugin)
id: discord-channel-plugin
version: "1.0.0"
displayName: Discord Channel
description: Enables OpenClaw Agent to receive and respond to Discord messages.
author: MyOrg
entryPoint: dist/index.js
devEntryPoint: src/index.ts
apiVersion: "2.0"
capabilities:
- channel
permissions:
network:
- "*.discord.com"
- "discord.com"
- "gateway.discord.gg"
configuration:
schema:
type: object
properties:
botToken:
type: string
secret: true
description: Discord bot token from Discord Developer Portal
guildId:
type: string
description: Discord server (guild) ID to operate in
channelAllowlist:
type: array
items:
type: string
default: []
description: Only respond in these channel IDs (empty = all channels)
commandPrefix:
type: string
default: "!ai "
description: Message prefix that triggers the Agent (empty = all messages)
required: [botToken, guildId]
Key Interface Summary
| Interface | Required | Description |
|---|---|---|
| `onInit(channelHandler)` | Yes | Connect to platform and start listening |
| `sendMessage(response, original)` | Yes | Send Agent response back to platform |
| `sendTypingIndicator(threadId)` | No | Send "is typing" indicator |
| `onDestroy()` | Recommended | Clean up connections to prevent resource leaks |
24.3 Tool Plugin in Practice: Exposing a New Tool
Requirements
Build a "Database Query Tool" that allows the Agent to execute read-only queries against a PostgreSQL database.
Complete Implementation
// src/index.ts
import type { PluginApi } from '@openclaw/plugin-api'
import { Pool } from 'pg'
import { z } from 'zod'
/**
 * Plugin entry point: registers read-only PostgreSQL tools for the Agent.
 *
 * Registers two tools against a shared connection pool:
 *  - database.query      — guarded, read-only SELECT execution
 *  - database.listTables — table listing for an allowed schema
 */
export async function setup(api: PluginApi) {
  const config = api.getConfig<{
    connectionString: string
    maxConnections: number
    queryTimeoutMs: number
    allowedSchemas: string[]
  }>()

  // Initialize connection pool
  const pool = new Pool({
    connectionString: config.connectionString,
    max: config.maxConnections ?? 5,
    idleTimeoutMillis: 30000,
  })

  // Test the connection eagerly so a bad connection string fails at plugin
  // load time instead of on the first tool call.
  const client = await pool.connect()
  client.release()
  console.log('[db-query-plugin] Connected to PostgreSQL')

  // Register the database query tool
  api.registerTool({
    id: 'database.query',
    displayName: 'Database Query',
    description: `Execute a read-only SQL query against the database.
Only SELECT statements are allowed.
Available schemas: ${config.allowedSchemas.join(', ')}.`,

    // Tool input schema (JSON Schema format)
    inputSchema: {
      type: 'object',
      properties: {
        query: {
          type: 'string',
          description: 'The SQL SELECT query to execute',
        },
        limit: {
          type: 'integer',
          description: 'Maximum number of rows to return (default: 100, max: 1000)',
          default: 100,
          minimum: 1,
          maximum: 1000,
        },
        parameters: {
          type: 'array',
          items: {},
          description: 'Query parameters for parameterized queries ($1, $2, ...)',
          default: [],
        },
      },
      required: ['query'],
    },

    // Tool execution handler
    handler: async (input, ctx) => {
      // Runtime Zod validation (provides better error messages)
      const params = z.object({
        query: z.string().min(1),
        limit: z.number().int().min(1).max(1000).default(100),
        parameters: z.array(z.unknown()).default([]),
      }).parse(input)

      // Safety check 1: Only allow SELECT statements
      const normalizedQuery = params.query.trim().toLowerCase()
      if (!normalizedQuery.startsWith('select')) {
        throw new ToolError('Only SELECT queries are allowed for safety reasons.')
      }

      // Safety check 2: disallow write/DDL keywords anywhere in the
      // statement (e.g. smuggled in via a CTE). Match on word boundaries so
      // legitimate identifiers like "created_at" or "updated_at" — which
      // merely *contain* a keyword — are not rejected.
      const dangerousKeywords = ['insert', 'update', 'delete', 'drop', 'truncate', 'create', 'alter']
      for (const keyword of dangerousKeywords) {
        if (new RegExp(`\\b${keyword}\\b`).test(normalizedQuery)) {
          throw new ToolError(`Query contains forbidden keyword: ${keyword}`)
        }
      }

      // Add LIMIT protection. Detect an existing LIMIT clause
      // case-insensitively and on a word boundary: a plain substring check
      // for "LIMIT" misses lowercase "limit" and would append a second
      // LIMIT clause, yielding a syntax error.
      const finalQuery = /\blimit\b/i.test(params.query)
        ? params.query
        : `${params.query} LIMIT ${params.limit}`

      // Execute query (with a per-session statement timeout)
      const client = await pool.connect()
      try {
        await client.query(`SET statement_timeout = ${config.queryTimeoutMs ?? 5000}`)
        const result = await client.query(finalQuery, params.parameters)
        return {
          rowCount: result.rowCount,
          columns: result.fields.map(f => ({
            name: f.name,
            type: getPostgresTypeName(f.dataTypeID),
          })),
          rows: result.rows,
          // Provide a human-readable summary
          summary: `Query returned ${result.rowCount} row(s) with columns: ${
            result.fields.map(f => f.name).join(', ')
          }`,
        }
      } finally {
        client.release()
      }
    },
  })

  // Register a second tool: list database tables
  api.registerTool({
    id: 'database.listTables',
    displayName: 'List Database Tables',
    description: 'List all available tables in the allowed schemas.',
    inputSchema: {
      type: 'object',
      properties: {
        schema: {
          type: 'string',
          description: 'Schema to list tables from (default: public)',
          default: 'public',
        },
      },
    },
    handler: async (input) => {
      const schema = (input.schema as string) ?? 'public'
      // Enforce the schema allowlist before touching the database.
      if (!config.allowedSchemas.includes(schema)) {
        throw new ToolError(`Schema '${schema}' is not in the allowed list.`)
      }
      const client = await pool.connect()
      try {
        // The schema name is passed as a bound parameter — never
        // interpolated into the SQL text.
        const result = await client.query(
          `SELECT table_name, table_type
           FROM information_schema.tables
           WHERE table_schema = $1
           ORDER BY table_name`,
          [schema]
        )
        return {
          schema,
          tables: result.rows,
          count: result.rowCount,
        }
      } finally {
        client.release()
      }
    },
  })

  // Cleanup: close the pool before the process exits. 'beforeExit' is used
  // instead of 'exit' because 'exit' handlers must be synchronous — the
  // asynchronous pool.end() would never actually run there.
  process.once('beforeExit', () => { void pool.end() })
}
/**
 * Map a PostgreSQL type OID to a human-readable type name.
 * Unknown OIDs fall back to the literal form "type_<oid>".
 */
function getPostgresTypeName(oid: number): string {
  switch (oid) {
    case 16: return 'boolean'
    case 20: return 'bigint'
    case 21: return 'smallint'
    case 23: return 'integer'
    case 25: return 'text'
    case 700: return 'float4'
    case 701: return 'float8'
    case 1082: return 'date'
    case 1114: return 'timestamp'
    case 1184: return 'timestamptz'
    case 1700: return 'numeric'
    default: return `type_${oid}`
  }
}
/**
 * Error type surfaced to the Agent when a tool rejects or fails a request.
 * The name is set explicitly so logs and serialized errors identify the
 * failure as a tool-level problem rather than a generic Error.
 */
class ToolError extends Error {
  constructor(message: string) {
    super(message)
    this.name = ToolError.name
  }
}
Key Principles for Tool inputSchema Design
inputSchema design principles:
✅ Every field has a clear description (the Agent uses this to decide how to call the tool)
✅ Required fields declared in the required array
✅ Sensible default values (reduces ambiguity when Agent calls the tool)
✅ Appropriate constraints (minimum/maximum/enum etc.)
✅ Don't make the Agent guess field formats — the description must be precise
24.4 Provider Plugin in Practice: Integrating a New LLM
Requirements
Integrate a hypothetical "Nebula AI" LLM service. The service offers an OpenAI-compatible REST API, but uses its own authentication scheme (HMAC signing) and has special model capability configuration.
Complete Implementation
// src/index.ts
import type { PluginApi, LLMRequest, StreamChunk } from '@openclaw/plugin-api'
import crypto from 'crypto'
/**
 * Plugin entry point: registers the "Nebula AI" LLM provider.
 *
 * Nebula exposes an OpenAI-compatible REST API but authenticates with an
 * API key plus an HMAC-SHA256 request signature, so this provider supplies
 * its own streaming implementation instead of reusing a stock transport.
 */
export async function setup(api: PluginApi) {
  const config = api.getConfig<{
    baseUrl: string
    apiKey: string
    secretKey: string // Used for HMAC signing
    organizationId?: string
  }>()

  api.registerProvider({
    id: 'nebula-ai',
    label: 'Nebula AI',

    // ==========================================
    // Phase 1: Config Materialization
    // ==========================================

    // Static model catalog: advertised capabilities and pricing used by the
    // UI and model-selection logic.
    catalog: async () => ({
      models: [
        {
          id: 'nebula-ai/nebula-fast',
          displayName: 'Nebula Fast',
          description: 'Optimized for speed, ideal for interactive use',
          contextWindow: 64000,
          capabilities: {
            streaming: true,
            functionCalling: true,
            vision: false,
          },
          pricing: {
            inputPerMToken: 0.30,
            outputPerMToken: 0.80,
          }
        },
        {
          id: 'nebula-ai/nebula-pro',
          displayName: 'Nebula Pro',
          description: 'Maximum capability for complex reasoning tasks',
          contextWindow: 256000,
          capabilities: {
            streaming: true,
            functionCalling: true,
            vision: true,
          },
          pricing: {
            inputPerMToken: 2.00,
            outputPerMToken: 6.00,
          }
        },
      ]
    }),

    // Fill in defaults the user did not configure explicitly.
    applyConfigDefaults: (config) => ({
      ...config,
      defaultModel: config.defaultModel ?? 'nebula-ai/nebula-fast',
      maxRetries: config.maxRetries ?? 3,
    }),

    // ==========================================
    // Phase 2: Model Resolution
    // ==========================================

    // Map short-form aliases to canonical model IDs. Returning null lets the
    // request pass through to other providers.
    normalizeModelId: (modelId) => {
      const aliases: Record<string, string> = {
        'nebula-fast': 'nebula-ai/nebula-fast',
        'fast': 'nebula-ai/nebula-fast',
        'nebula-pro': 'nebula-ai/nebula-pro',
        'pro': 'nebula-ai/nebula-pro',
      }
      return aliases[modelId] ?? null
    },

    normalizeTransport: (modelId) => ({
      type: 'openai-compat',
      baseUrl: config.baseUrl,
      // nebula-pro handles long reasoning runs, so it gets a longer timeout
      timeoutMs: modelId === 'nebula-ai/nebula-pro' ? 120000 : 30000,
    }),

    normalizeConfig: (rawConfig, modelId) => ({
      ...rawConfig,
      // Nebula API uses max_completion_tokens instead of max_tokens
      max_completion_tokens: rawConfig.maxOutputTokens ?? 4096,
      // Only nebula-pro supports extended_thinking
      ...(modelId === 'nebula-ai/nebula-pro' && {
        extended_thinking: rawConfig.thinkingBudget ? {
          type: 'enabled',
          budget_tokens: rawConfig.thinkingBudget,
        } : undefined,
      }),
    }),

    // ==========================================
    // Phase 3: Auth Resolution
    // ==========================================
    resolveConfigApiKey: () => config.apiKey,

    // ==========================================
    // Phase 4: Model Preparation
    // ==========================================
    capabilities: (modelId) => ({
      streaming: true,
      functionCalling: true,
      vision: modelId === 'nebula-ai/nebula-pro',
      contextWindow: modelId === 'nebula-ai/nebula-pro' ? 256000 : 64000,
      outputFormats: ['text', 'json'],
      // nebula-fast doesn't support parallel tool calls
      parallelToolCalls: modelId !== 'nebula-ai/nebula-fast',
    }),

    contributeResolvedModelCompat: (modelId) => {
      if (modelId === 'nebula-ai/nebula-fast') {
        return {
          // nebula-fast tool schema doesn't support additionalProperties
          toolSchemaStrict: false,
        }
      }
      return null
    },

    // ==========================================
    // Phase 6: Streaming (Core Implementation)
    // ==========================================
    createStreamFn: (cfg, auth) => {
      return async function* nebulaStream(request: LLMRequest): AsyncIterable<StreamChunk> {
        // Build the OpenAI-compatible request body
        const body = {
          model: request.modelId.replace('nebula-ai/', ''), // Strip prefix
          messages: request.messages,
          stream: true,
          max_completion_tokens: request.maxOutputTokens ?? 4096,
          temperature: request.temperature ?? 0.7,
          tools: request.tools?.map(t => ({
            type: 'function',
            function: {
              name: t.name,
              description: t.description,
              parameters: t.inputSchema,
            },
          })),
        }
        const bodyStr = JSON.stringify(body)
        const timestamp = Date.now().toString()

        // Nebula API uses HMAC-SHA256 signature authentication: the exact
        // serialized body is signed together with a timestamp, so bodyStr
        // must be the same bytes that are sent on the wire.
        const signature = computeHmacSignature(
          bodyStr,
          timestamp,
          config.secretKey
        )

        const response = await fetch(`${config.baseUrl}/v1/chat/completions`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'Authorization': `ApiKey ${auth.apiKey}`,
            'X-Nebula-Timestamp': timestamp,
            'X-Nebula-Signature': signature,
            ...(config.organizationId && {
              'X-Nebula-Org': config.organizationId,
            }),
          },
          body: bodyStr,
        })

        if (!response.ok) {
          const errorBody = await response.json().catch(() => ({}))
          throw new ProviderError(
            `Nebula API error ${response.status}: ${errorBody.error?.message ?? 'Unknown error'}`,
            response.status,
            errorBody
          )
        }

        // Parse the SSE stream line by line
        const reader = response.body!.getReader()
        const decoder = new TextDecoder()
        let buffer = ''
        while (true) {
          const { done, value } = await reader.read()
          if (done) break
          buffer += decoder.decode(value, { stream: true })
          const lines = buffer.split('\n')
          buffer = lines.pop() ?? '' // Keep potentially incomplete last line
          for (const line of lines) {
            if (!line.startsWith('data: ')) continue
            const data = line.slice(6).trim()
            if (data === '[DONE]') return
            let parsed: any
            try {
              parsed = JSON.parse(data)
            } catch {
              continue // Skip malformed SSE payloads rather than aborting the stream
            }
            const choice = parsed.choices?.[0]
            if (!choice) continue
            const delta = choice.delta
            // Text content
            if (delta?.content) {
              yield { type: 'text', content: delta.content }
            }
            // Tool calls (arguments arrive incrementally as deltas)
            if (delta?.tool_calls) {
              for (const toolCall of delta.tool_calls) {
                yield {
                  type: 'tool_call',
                  toolCallId: toolCall.id,
                  toolName: toolCall.function?.name,
                  argumentsDelta: toolCall.function?.arguments,
                }
              }
            }
            // Usage statistics (at stream end)
            if (choice.finish_reason && parsed.usage) {
              yield {
                type: 'usage',
                inputTokens: parsed.usage.prompt_tokens,
                outputTokens: parsed.usage.completion_tokens,
              }
            }
          }
        }
      }
    },

    // ==========================================
    // Phase 7: Runtime
    // ==========================================

    // Decide whether a failed request should be replayed.
    buildReplayPolicy: (error, request) => {
      const status = (error as any).status
      if (status === 429) {
        // Nebula provides Retry-After (in seconds) in response headers.
        // Parse with an explicit radix and fall back to 5s when the header
        // is missing or malformed (an unguarded parseInt would otherwise
        // yield NaN and produce a NaN delay).
        const retryAfter = parseInt((error as any).headers?.['retry-after'] ?? '5', 10)
        return {
          shouldRetry: true,
          delayMs: (Number.isFinite(retryAfter) ? retryAfter : 5) * 1000,
          maxAttempts: 3,
        }
      }
      if (status >= 500 && status < 600) {
        return { shouldRetry: true, delayMs: 1000, maxAttempts: 2 }
      }
      return { shouldRetry: false }
    },
  })
}
// HMAC-SHA256 signature computation
/**
 * Compute the Nebula request signature: HMAC-SHA256 over
 * "<timestamp>.<body>", hex-encoded.
 */
function computeHmacSignature(body: string, timestamp: string, secret: string): string {
  const payload = [timestamp, body].join('.')
  const hmac = crypto.createHmac('sha256', secret)
  return hmac.update(payload).digest('hex')
}
/**
 * Error raised when the Nebula API returns a non-OK HTTP response.
 * Carries the HTTP status code and the decoded error body so that the
 * retry policy can inspect them.
 */
class ProviderError extends Error {
  constructor(
    message: string,
    public status: number,
    public body: unknown,
  ) {
    super(message)
    this.name = 'ProviderError'
  }
}
24.5 jiti Development Mode vs. Build-and-Release Mode
Development Phase Workflow
# 1. Install dependencies
npm install
# 2. Register Plugin in openclaw.config.yaml (development path)
cat >> ~/.openclaw/config.yaml << EOF
plugins:
- path: /path/to/my-plugin
dev: true # Enable devEntryPoint + jiti
EOF
# 3. Start OpenClaw in development mode
openclaw --dev
# 4. Hot-reload after modifying src/index.ts
openclaw plugin reload my-plugin-id
# 5. Check Plugin status
openclaw plugin status my-plugin-id
Production Build Process
# 1. Run TypeScript compilation
npm run build
# Generates:
# dist/index.js     → Compiled JS
# dist/index.d.ts   → Type declaration file
# dist/index.js.map → Source Map (for debugging)
# 2. Test the production build locally
openclaw plugin install /path/to/my-plugin
# 3. Verify installation
openclaw plugin list
openclaw plugin verify my-plugin-id
Development vs. Production Comparison
| Feature | Development mode (jiti) | Production mode (native loader) |
|---|---|---|
| Entry file | `src/index.ts` | `dist/index.js` |
| Requires build step | No | Yes |
| Hot reload | Supported | Requires restart |
| Startup speed | Slower (jiti compilation) | Fast |
| Type checking | Runtime | Compile time |
| Best for | Plugin development | Production deployment |
24.6 Publishing a Plugin to npm
# 1. Confirm package.json is correctly configured
# - name: @yourscope/openclaw-plugin-xxx
# - main: dist/index.js
# - files: ["dist", "plugin.manifest.yaml", "README.md"]
# 2. Build
npm run build
# 3. Local verification
openclaw plugin install .
openclaw plugin verify @yourscope/openclaw-plugin-xxx
# 4. Login to npm
npm login
# 5. Publish
npm publish --access public
# 6. How users install
openclaw plugin install @yourscope/openclaw-plugin-xxx
package.json Publish Configuration
{
"name": "@myorg/openclaw-plugin-nebula",
"version": "1.0.0",
"files": [
"dist",
"plugin.manifest.yaml",
"README.md"
],
"keywords": [
"openclaw",
"openclaw-plugin",
"llm-provider",
"nebula-ai"
],
"openclaw": {
"pluginType": "provider",
"compatibleApiVersions": ["2.0"]
}
}
Including openclaw-plugin in keywords is a convention that lets users discover your Plugin via openclaw plugin search which searches npm.
24.7 Common Pitfalls
Pitfall 1: Directly Modifying Global State
// ❌ Wrong: modifying global variables inside a Plugin
import { globalRegistry } from '@openclaw/core' // This import shouldn't exist at all
globalRegistry.providers.set('my-provider', ...) // Bypasses the Plugin loading pipeline
// ✅ Correct: register through api
api.registerProvider({
id: 'my-provider',
// ...
})
Pitfall 2: Storing Config Reference Outside setup
// ❌ Wrong: storing config at module level (config doesn't exist before setup is called)
let globalConfig: any // Dangerous module-level variable
export function setup(api: PluginApi) {
globalConfig = api.getConfig() // Race condition if setup is called multiple times (e.g., hot reload)
}
// ✅ Correct: pass config to where it's needed via closure
export function setup(api: PluginApi) {
const config = api.getConfig() // Local variable, passed through closure
api.registerProvider({
createStreamFn: (cfg, auth) => {
// config is safely accessible through closure
return async function* (request) { /* ... */ }
}
})
}
Pitfall 3: Ignoring Passthrough RulesโnormalizeModelId Intercepts All Requests
// ❌ Wrong: a non-Provider Plugin's normalizeModelId intercepts all models
api.registerProvider({
id: 'my-logging-plugin',
normalizeModelId: (modelId) => {
console.log('Model requested:', modelId)
return modelId // ❌ Wrong! Returning non-null blocks passthrough
}
})
// ✅ Correct: non-Provider Plugins should not implement normalizeModelId
// If you need logging, use wrapStreamFn
api.registerProvider({
id: 'my-logging-plugin',
wrapStreamFn: (originalStream) => {
return async function* (request) {
console.log('Request for model:', request.modelId)
yield* originalStream(request)
}
}
})
Pitfall 4: Leaking Promises in Handlers (Forgetting await)
// ❌ Wrong: unawaited async operation
handler: async (input, ctx) => {
pool.query(input.query) // Forgot await!
return { status: 'done' } // Returns immediately; query runs in background
}
// ✅ Correct:
handler: async (input, ctx) => {
const result = await pool.query(input.query) // Wait for completion
return { rows: result.rows }
}
24.8 Chapter Summary
Core differences between the three Plugin types:
| Type | Core API | Required Hooks | Primary Role |
|---|---|---|---|
| Channel | `registerChannel` | `onInit`, `sendMessage` | Add a new messaging platform entry |
| Tool | `registerTool` | `handler` | Provide new tools to the Agent |
| Provider | `registerProvider` | `catalog`, `createStreamFn` | Integrate a new LLM Provider |
jiti development mode dramatically reduces iteration cost — see changes immediately without a compilation step. Before publishing to production, always switch to native loader mode to verify the build artifact's correctness.
The next chapter takes a higher-level view, exploring the decision framework for choosing between Skills and Plugins: given a requirement, how do you determine which approach to use?