Get Tool UI working in your app in minutes. This guide uses assistant-ui (recommended) for the best experience.
Set up the assistant-ui runtime provider that manages the connection between your frontend and backend. This handles message streaming, tool execution, and state management.
What this demonstrates:
- `useChatRuntime` creates a runtime that connects to your backend API
- `AssistantChatTransport` handles the communication protocol with your `/api/chat` endpoint
- `AssistantRuntimeProvider` makes the runtime available to all child components

```tsx
"use client";
import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { useChatRuntime, AssistantChatTransport } from "@assistant-ui/react-ai-sdk";

function App() {
  const runtime = useChatRuntime({
    transport: new AssistantChatTransport({ api: "/api/chat" }),
  });
  return (
    <AssistantRuntimeProvider runtime={runtime}>
      {/* Your chat UI */}
    </AssistantRuntimeProvider>
  );
}
```

Create a tool on your server that the LLM can call. The tool returns structured JSON that matches a Tool UI component schema. Here we're creating a link preview tool that returns data for the MediaCard component.
What this demonstrates:
- `serializableMediaCardSchema` ensures your tool output matches the MediaCard component's expected format
- `outputSchema` validates that returned data is type-safe and serializable

```ts
import { streamText, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";
// Import the schema from the Tool UI package
import { serializableMediaCardSchema } from "@tool-ui/media-card";

export async function POST(req: Request) {
  const { messages } = await req.json();
  const result = streamText({
    model: openai("gpt-4o"),
    messages,
    tools: {
      previewLink: tool({
        description: "Show a preview card for a URL",
        inputSchema: z.object({ url: z.string().url() }),
        outputSchema: serializableMediaCardSchema,
        async execute({ url }) {
          // In production, you'd fetch real metadata here
          return {
            id: "link-1",
            kind: "link",
            href: url,
            title: "Example Site",
            description: "A description of the linked content",
            thumb: "https://example.com/image.jpg",
          };
        },
      }),
    },
  });
  return result.toUIMessageStreamResponse();
}
```

Connect your backend tool to a frontend component. When the LLM calls the previewLink tool, assistant-ui will automatically render the MediaCard component with the returned data.
What this demonstrates:
- `makeAssistantToolUI` creates a component that listens for tool calls by name
- `toolName` must exactly match your backend tool name
- `result` contains the validated data from your backend tool's execute function
- `<PreviewLinkUI />` in your app registers the UI for that tool

```tsx
import { makeAssistantToolUI } from "@assistant-ui/react";
import { MediaCard, type SerializableMediaCard } from "@tool-ui/media-card";

export const PreviewLinkUI = makeAssistantToolUI<
  { url: string },
  SerializableMediaCard
>({
  toolName: "previewLink", // Must match backend tool name
  render: ({ result }) => (
    <MediaCard {...result} maxWidth="420px" />
  ),
});

// Register in your app
function App() {
  return (
    <AssistantRuntimeProvider runtime={runtime}>
      <PreviewLinkUI />
      <Thread />
    </AssistantRuntimeProvider>
  );
}
```

That's it! When a user asks the assistant to preview a link, it will call your tool and render a beautiful MediaCard component.
The quickest way to get started is using the assistant-ui CLI, which will set up your project with all necessary dependencies:
```bash
npx assistant-ui@latest init
```

Or install packages manually if you prefer to configure yourself:
```bash
pnpm add @assistant-ui/react @assistant-ui/react-ai-sdk ai @ai-sdk/openai zod
```

Add your OpenAI API key to your environment variables:
```bash
OPENAI_API_KEY=sk-...
```

Tool UI components work with any React app. Without assistant-ui, you'll need to manually parse tool outputs and render components. For the best experience, we recommend using assistant-ui.
assistant-ui supports multiple runtimes: AI SDK, LangGraph, LangServe, Mastra, or custom backends. The examples above use AI SDK v5.
"use client";
import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { useChatRuntime, AssistantChatTransport } from "@assistant-ui/react-ai-sdk";

/**
 * Root component wiring the assistant-ui runtime to the /api/chat backend.
 * The provider makes the runtime available to every descendant component.
 */
function App() {
  // The transport speaks the AI SDK streaming protocol against our chat route.
  const transport = new AssistantChatTransport({ api: "/api/chat" });
  const runtime = useChatRuntime({ transport });

  return (
    <AssistantRuntimeProvider runtime={runtime}>
      {/* Your chat UI */}
    </AssistantRuntimeProvider>
  );
}
import { streamText, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";
// Import the schema from the Tool UI package
import { serializableMediaCardSchema } from "@tool-ui/media-card";

/**
 * Chat route handler.
 *
 * Streams an LLM response and exposes a single `previewLink` tool whose
 * output is validated against the MediaCard component's serializable schema,
 * so the frontend Tool UI can render it directly.
 */
export async function POST(req: Request) {
  const { messages } = await req.json();

  const stream = streamText({
    model: openai("gpt-4o"),
    messages,
    tools: {
      previewLink: tool({
        description: "Show a preview card for a URL",
        // Input must be a well-formed URL; the model's argument is validated.
        inputSchema: z.object({ url: z.string().url() }),
        // Output is checked for type-safety and serializability.
        outputSchema: serializableMediaCardSchema,
        async execute({ url }) {
          // In production, you'd fetch real metadata here
          return {
            id: "link-1",
            kind: "link",
            href: url,
            title: "Example Site",
            description: "A description of the linked content",
            thumb: "https://example.com/image.jpg",
          };
        },
      }),
    },
  });

  return stream.toUIMessageStreamResponse();
}
import { makeAssistantToolUI } from "@assistant-ui/react";
import { MediaCard, type SerializableMediaCard } from "@tool-ui/media-card";
export const PreviewLinkUI = makeAssistantToolUI<
{ url: string },
SerializableMediaCard
>({
toolName: "previewLink", // Must match backend tool name
render: ({ result }) => (
<MediaCard {...result} maxWidth="420px" />
),
});
// Register in your app
function App() {
return (
<AssistantRuntimeProvider runtime={runtime}>
<PreviewLinkUI />
<Thread />
</AssistantRuntimeProvider>
);
}npx assistant-ui@latest initpnpm add @assistant-ui/react @assistant-ui/react-ai-sdk ai @ai-sdk/openai zodOPENAI_API_KEY=sk-...