'use client';
import * as React from 'react';
import { Plate, usePlateEditor } from 'platejs/react';
import { EditorKit } from '@/components/editor/editor-kit';
import { Editor, EditorContainer } from '@/components/ui/editor';
import { DEMO_VALUES } from './values/demo-values';
/** Demo editor: loads the demo value keyed by `id` and renders a full Plate editor. */
export default function Demo({ id }: { id: string }) {
  const editor = usePlateEditor({
    plugins: EditorKit,
    // Initial document content for this demo.
    value: DEMO_VALUES[id],
  });
  return (
    <Plate editor={editor}>
      <EditorContainer variant="demo">
        <Editor />
      </EditorContainer>
    </Plate>
  );
}
Features
- Intelligent Command Menu: Combobox interface with predefined AI commands for generation and editing
- Multiple Trigger Modes:
- Cursor Mode: Trigger at block end with space
- Selection Mode: Trigger with selected text
- Block Selection Mode: Trigger with selected blocks
- Response Modes:
- Chat Mode: Preview responses with accept/reject options
- Insert Mode: Direct content insertion with markdown streaming
- Smart Content Processing: Optimized chunking for tables, code blocks, and complex structures
- Streaming Responses: Real-time AI content generation
- Markdown Integration: Full support for Markdown syntax in AI responses
- Customizable Prompts: Template system for user and system prompts
- Built-in Vercel AI SDK Support: Ready-to-use chat API integration
Kit Usage
Installation
The fastest way to add AI functionality is with the `AIKit`, which includes the pre-configured `AIPlugin` and `AIChatPlugin`, along with cursor overlay and markdown support, plus their Plate UI components.
'use client';
import type { AIChatPluginConfig } from '@platejs/ai/react';
import type { UseChatOptions } from 'ai/react';
import { streamInsertChunk, withAIBatch } from '@platejs/ai';
import { AIChatPlugin, AIPlugin, useChatChunk } from '@platejs/ai/react';
import { KEYS, PathApi } from 'platejs';
import { usePluginOption } from 'platejs/react';
import { AILoadingBar, AIMenu } from '@/components/ui/ai-menu';
import { AIAnchorElement, AILeaf } from '@/components/ui/ai-node';
import { CursorOverlayKit } from './cursor-overlay-kit';
import { MarkdownKit } from './markdown-kit';
/**
 * Extended AI chat plugin: wires the chat API route, prompt templates,
 * UI components, the mod+j shortcut, and streaming chunk insertion.
 */
export const aiChatPlugin = AIChatPlugin.extend({
  options: {
    // Vercel AI SDK useChat configuration; requests go to this route.
    chatOptions: {
      api: '/api/ai/command',
      body: {},
    } as UseChatOptions,
    // Pick the user prompt template for the current selection state.
    promptTemplate: ({ isBlockSelecting, isSelecting }) => {
      return isBlockSelecting
        ? PROMPT_TEMPLATES.userBlockSelecting
        : isSelecting
          ? PROMPT_TEMPLATES.userSelecting
          : PROMPT_TEMPLATES.userDefault;
    },
    // Pick the system prompt template for the current selection state.
    systemTemplate: ({ isBlockSelecting, isSelecting }) => {
      return isBlockSelecting
        ? PROMPT_TEMPLATES.systemBlockSelecting
        : isSelecting
          ? PROMPT_TEMPLATES.systemSelecting
          : PROMPT_TEMPLATES.systemDefault;
    },
  },
  render: {
    afterContainer: AILoadingBar,
    afterEditable: AIMenu,
    node: AIAnchorElement,
  },
  shortcuts: { show: { keys: 'mod+j' } },
  useHooks: ({ editor, getOption }) => {
    const mode = usePluginOption(
      { key: KEYS.aiChat } as AIChatPluginConfig,
      'mode'
    );
    useChatChunk({
      onChunk: ({ chunk, isFirst, nodes }) => {
        // On the first chunk in insert mode, create the AI anchor block
        // right after the current top-level block, outside undo history.
        // (Fixed: use strict equality, consistent with the check below.)
        if (isFirst && mode === 'insert') {
          editor.tf.withoutSaving(() => {
            editor.tf.insertNodes(
              {
                children: [{ text: '' }],
                type: KEYS.aiChat,
              },
              {
                at: PathApi.next(editor.selection!.focus.path.slice(0, 1)),
              }
            );
          });
          editor.setOption(AIChatPlugin, 'streaming', true);
        }
        if (mode === 'insert' && nodes.length > 0) {
          withAIBatch(
            editor,
            () => {
              // Bail out if streaming was cancelled mid-response.
              if (!getOption('streaming')) return;
              editor.tf.withScrolling(() => {
                streamInsertChunk(editor, chunk, {
                  textProps: {
                    ai: true,
                  },
                });
              });
            },
            { split: isFirst }
          );
        }
      },
      onFinish: () => {
        // Reset all streaming state once the response completes.
        editor.setOption(AIChatPlugin, 'streaming', false);
        editor.setOption(AIChatPlugin, '_blockChunks', '');
        editor.setOption(AIChatPlugin, '_blockPath', null);
      },
    });
  },
});
// Complete AI kit: cursor overlay + markdown prerequisites, the core AI
// plugin (rendering AI leaves), and the extended chat plugin above.
export const AIKit = [
  ...CursorOverlayKit,
  ...MarkdownKit,
  AIPlugin.withComponent(AILeaf),
  aiChatPlugin,
];
// Shared rules prepended to every system prompt variant.
const systemCommon = `\
You are an advanced AI-powered note-taking assistant, designed to enhance productivity and creativity in note management.
Respond directly to user prompts with clear, concise, and relevant content. Maintain a neutral, helpful tone.
Rules:
- <Document> is the entire note the user is working on.
- <Reminder> is a reminder of how you should reply to INSTRUCTIONS. It does not apply to questions.
- Anything else is the user prompt.
- Your response should be tailored to the user's prompt, providing precise assistance to optimize note management.
- For INSTRUCTIONS: Follow the <Reminder> exactly. Provide ONLY the content to be inserted or replaced. No explanations or comments.
- For QUESTIONS: Provide a helpful and concise answer. You may include brief explanations if necessary.
- CRITICAL: DO NOT remove or modify the following custom MDX tags: <u>, <callout>, <kbd>, <toc>, <sub>, <sup>, <mark>, <del>, <date>, <span>, <column>, <column_group>, <file>, <audio>, <video> in <Selection> unless the user explicitly requests this change.
- CRITICAL: Distinguish between INSTRUCTIONS and QUESTIONS. Instructions typically ask you to modify or add content. Questions ask for information or clarification.
- CRITICAL: when asked to write in markdown, do not start with \`\`\`markdown.
`;
// System prompt when the cursor sits in a block with no selection.
const systemDefault = `\
${systemCommon}
- <Block> is the current block of text the user is working on.
- Ensure your output can seamlessly fit into the existing <Block> structure.
<Block>
{block}
</Block>
`;
// System prompt when a text range is selected inside a block.
const systemSelecting = `\
${systemCommon}
- <Block> is the block of text containing the user's selection, providing context.
- Ensure your output can seamlessly fit into the existing <Block> structure.
- <Selection> is the specific text the user has selected in the block and wants to modify or ask about.
- Consider the context provided by <Block>, but only modify <Selection>. Your response should be a direct replacement for <Selection>.
<Block>
{block}
</Block>
<Selection>
{selection}
</Selection>
`;
// System prompt when whole blocks are selected (block selection mode).
const systemBlockSelecting = `\
${systemCommon}
- <Selection> represents the full blocks of text the user has selected and wants to modify or ask about.
- Your response should be a direct replacement for the entire <Selection>.
- Maintain the overall structure and formatting of the selected blocks, unless explicitly instructed otherwise.
- CRITICAL: Provide only the content to replace <Selection>. Do not add additional blocks or change the block structure unless specifically requested.
<Selection>
{block}
</Selection>
`;
// User prompt wrappers; {prompt} is replaced with the user's input.
const userDefault = `<Reminder>
CRITICAL: NEVER write <Block>.
</Reminder>
{prompt}`;
const userSelecting = `<Reminder>
If this is a question, provide a helpful and concise answer about <Selection>.
If this is an instruction, provide ONLY the text to replace <Selection>. No explanations.
Ensure it fits seamlessly within <Block>. If <Block> is empty, write ONE random sentence.
NEVER write <Block> or <Selection>.
</Reminder>
{prompt} about <Selection>`;
const userBlockSelecting = `<Reminder>
If this is a question, provide a helpful and concise answer about <Selection>.
If this is an instruction, provide ONLY the content to replace the entire <Selection>. No explanations.
Maintain the overall structure unless instructed otherwise.
NEVER write <Block> or <Selection>.
</Reminder>
{prompt} about <Selection>`;
// All six templates, consumed by promptTemplate/systemTemplate above.
export const PROMPT_TEMPLATES = {
  systemBlockSelecting,
  systemDefault,
  systemSelecting,
  userBlockSelecting,
  userDefault,
  userSelecting,
};
- `AIMenu`: Renders the AI command interface
- `AILoadingBar`: Shows AI processing status
- `AIAnchorElement`: Anchor element for the AI menu
- `AILeaf`: Renders AI-generated content with visual distinction
Add Kit
import { createPlateEditor } from 'platejs/react';
import { AIKit } from '@/components/editor/plugins/ai-kit';

// Register the AI kit alongside the rest of the editor plugins.
const editor = createPlateEditor({
  plugins: [
    // ...otherPlugins,
    ...AIKit,
  ],
});
Add API Route
AI functionality requires a server-side API endpoint. Add the pre-configured AI command route:
import type { TextStreamPart, ToolSet } from 'ai';
import type { NextRequest } from 'next/server';
import { createOpenAI } from '@ai-sdk/openai';
import { InvalidArgumentError } from '@ai-sdk/provider';
import { delay as originalDelay } from '@ai-sdk/provider-utils';
import { convertToCoreMessages, streamText } from 'ai';
import { NextResponse } from 'next/server';
/**
 * Detects the first chunk in a buffer.
 *
 * @param buffer - The buffer to detect the first chunk in.
 * @returns The first detected chunk, or `undefined` if no chunk was detected.
 */
export type ChunkDetector = (buffer: string) => string | null | undefined;

/** Maps the remaining buffer to a per-chunk delay in milliseconds. */
type delayer = (buffer: string) => number;
/**
 * Smooths text streaming output.
 *
 * @param delayInMs - The delay in milliseconds between each chunk. Defaults to
 *   10ms. Can be set to `null` to skip the delay.
 * @param chunking - Controls how the text is chunked for streaming. Use "word"
 *   to stream word by word (default), "line" to stream line by line, or provide
 *   a custom RegExp pattern or detector function for custom chunking.
 * @returns A transform stream that smooths text streaming output.
 */
function smoothStream<TOOLS extends ToolSet>({
  _internal: { delay = originalDelay } = {},
  chunking = 'word',
  delayInMs = 10,
}: {
  /** Internal. For test use only. May change without notice. */
  _internal?: {
    delay?: (delayInMs: number | null) => Promise<void>;
  };
  chunking?: ChunkDetector | RegExp | 'line' | 'word';
  delayInMs?: delayer | number | null;
} = {}): (options: {
  tools: TOOLS;
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>> {
  let detectChunk: ChunkDetector;
  if (typeof chunking === 'function') {
    // Wrap the user-supplied detector with sanity checks: it must return a
    // non-empty prefix of the buffer, or null when no chunk is ready yet.
    detectChunk = (buffer) => {
      const match = chunking(buffer);
      if (match == null) {
        return null;
      }
      if (match.length === 0) {
        throw new Error(`Chunking function must return a non-empty string.`);
      }
      if (!buffer.startsWith(match)) {
        throw new Error(
          `Chunking function must return a match that is a prefix of the buffer. Received: "${match}" expected to start with "${buffer}"`
        );
      }
      return match;
    };
  } else {
    const chunkingRegex =
      typeof chunking === 'string' ? CHUNKING_REGEXPS[chunking] : chunking;
    if (chunkingRegex == null) {
      throw new InvalidArgumentError({
        argument: 'chunking',
        message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`,
      });
    }
    // Emit everything up to and including the first regex match.
    detectChunk = (buffer) => {
      const match = chunkingRegex.exec(buffer);
      if (!match) {
        return null;
      }
      // match is non-null here, so no optional chaining is needed.
      return buffer.slice(0, match.index) + match[0];
    };
  }
  return () => {
    let buffer = '';
    return new TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>({
      async transform(chunk, controller) {
        if (chunk.type !== 'text-delta') {
          // Non-text parts flush any buffered text first to preserve order.
          // (Removed a stray debug console.info that leaked into this copy.)
          if (buffer.length > 0) {
            controller.enqueue({ textDelta: buffer, type: 'text-delta' });
            buffer = '';
          }
          controller.enqueue(chunk);
          return;
        }
        buffer += chunk.textDelta;
        let match;
        // Drain the buffer one detected chunk at a time, pacing each emit.
        while ((match = detectChunk(buffer)) != null) {
          controller.enqueue({ textDelta: match, type: 'text-delta' });
          buffer = buffer.slice(match.length);
          const _delayInMs =
            typeof delayInMs === 'number'
              ? delayInMs
              : (delayInMs?.(buffer) ?? 10);
          await delay(_delayInMs);
        }
      },
    });
  };
}
// Chunk boundary patterns: `line` emits whole lines, `list` emits fixed
// 8-character slices, `word` emits one word plus its trailing whitespace.
const CHUNKING_REGEXPS = {
  line: /\n+/m,
  list: /.{8}/m,
  word: /\S+\s+/m,
};
/**
 * AI command endpoint: streams a chat completion from OpenAI using
 * content-aware chunking (line-based for code blocks/tables/links,
 * fixed-size for lists, word-based for regular text).
 *
 * Expects JSON body: { apiKey?, messages, system? }. Returns 401 when no
 * API key is available, 500 on provider failure.
 */
export async function POST(req: NextRequest) {
  const { apiKey: key, messages, system } = await req.json();
  const apiKey = key || process.env.OPENAI_API_KEY;
  if (!apiKey) {
    return NextResponse.json(
      { error: 'Missing OpenAI API key.' },
      { status: 401 }
    );
  }
  const openai = createOpenAI({ apiKey });
  // Streaming state flags, mutated by the chunking callback below.
  let isInCodeBlock = false;
  let isInTable = false;
  let isInList = false;
  let isInLink = false;
  try {
    const result = streamText({
      experimental_transform: smoothStream({
        chunking: (buffer) => {
          // Enter a code block on ```lang; leave once a closing ``` arrives.
          if (/```[^\s]+/.test(buffer)) {
            isInCodeBlock = true;
          } else if (isInCodeBlock && buffer.includes('```')) {
            isInCodeBlock = false;
          }
          // Link detection (prevents deserializing a partial markdown link).
          // 'https' always contains 'http', so a single check suffices —
          // the former duplicate `https` branch was unreachable.
          if (buffer.includes('http')) {
            isInLink = true;
          } else if (buffer.includes('\n') && isInLink) {
            isInLink = false;
          }
          if (buffer.includes('*') || buffer.includes('-')) {
            isInList = true;
          } else if (buffer.includes('\n') && isInList) {
            isInList = false;
          }
          // Simple table detection: enter on |, exit on double newline.
          if (!isInTable && buffer.includes('|')) {
            isInTable = true;
          } else if (isInTable && buffer.includes('\n\n')) {
            isInTable = false;
          }
          // Choose the chunking strategy for the current content type.
          let match;
          if (isInCodeBlock || isInTable || isInLink) {
            // Line chunking keeps code blocks, tables, and links intact.
            match = CHUNKING_REGEXPS.line.exec(buffer);
          } else if (isInList) {
            match = CHUNKING_REGEXPS.list.exec(buffer);
          } else {
            match = CHUNKING_REGEXPS.word.exec(buffer);
          }
          if (!match) {
            return null;
          }
          return buffer.slice(0, match.index) + match[0];
        },
        // Slow down in code blocks/tables so the client renderer keeps up.
        delayInMs: () => (isInCodeBlock || isInTable ? 100 : 30),
      }),
      maxTokens: 2048,
      messages: convertToCoreMessages(messages),
      model: openai('gpt-4o'),
      system: system,
    });
    return result.toDataStreamResponse();
  } catch {
    return NextResponse.json(
      { error: 'Failed to process AI request' },
      { status: 500 }
    );
  }
}
Configure Environment
Ensure your OpenAI API key is set in your environment variables:
OPENAI_API_KEY="your-api-key"
Manual Usage
Installation
pnpm add @platejs/ai @platejs/selection @platejs/markdown @platejs/basic-nodes
Add Plugins
import { AIPlugin, AIChatPlugin } from '@platejs/ai/react';
import { createPlateEditor } from 'platejs/react';
import { MarkdownKit } from '@/components/editor/plugins/markdown-kit';

// Minimal manual setup: MarkdownKit must precede the AI plugins.
const editor = createPlateEditor({
  plugins: [
    // ...otherPlugins,
    ...MarkdownKit, // Required for AI content processing
    AIPlugin,
    AIChatPlugin,
  ],
});
- `MarkdownKit`: Required for processing AI responses with Markdown syntax and MDX support.
- `AIPlugin`: Core plugin for AI content management and transforms.
- `AIChatPlugin`: Handles the AI chat interface, streaming, and user interactions.
Configure Plugins
Create the extended aiChatPlugin
with basic configuration:
import type { AIChatPluginConfig } from '@platejs/ai/react';
import type { UseChatOptions } from 'ai/react';
import { KEYS, PathApi } from 'platejs';
import { streamInsertChunk, withAIBatch } from '@platejs/ai';
import { AIChatPlugin, AIPlugin, useChatChunk } from '@platejs/ai/react';
import { usePluginOption } from 'platejs/react';
import { MarkdownKit } from '@/components/editor/plugins/markdown-kit';
import { AILoadingBar, AIMenu } from '@/components/ui/ai-menu';
import { AIAnchorElement, AILeaf } from '@/components/ui/ai-node';

// Basic configuration: chat endpoint, UI components, and shortcut only.
export const aiChatPlugin = AIChatPlugin.extend({
  options: {
    // Vercel AI SDK useChat configuration; requests go to this route.
    chatOptions: {
      api: '/api/ai/command',
      body: {},
    } as UseChatOptions,
  },
  render: {
    afterContainer: AILoadingBar,
    afterEditable: AIMenu,
    node: AIAnchorElement,
  },
  shortcuts: { show: { keys: 'mod+j' } },
});

const plugins = [
  // ...otherPlugins,
  ...MarkdownKit,
  AIPlugin.withComponent(AILeaf),
  aiChatPlugin,
];
- `chatOptions`: Configuration for the Vercel AI SDK `useChat` hook.
- `render`: UI components for the AI interface.
- `shortcuts`: Keyboard shortcuts (`Cmd+J` to show the AI menu).
Add Streaming with useHooks
The useChatChunk
hook processes streaming AI responses in real-time, handling content insertion and chunk management. It monitors the chat state and processes incoming text chunks, inserting them into the editor as they arrive:
// Streaming hooks: insert AI response chunks into the editor in real time.
export const aiChatPlugin = AIChatPlugin.extend({
  // ... previous options
  useHooks: ({ editor, getOption }) => {
    const mode = usePluginOption(
      { key: KEYS.aiChat } as AIChatPluginConfig,
      'mode'
    );
    useChatChunk({
      onChunk: ({ chunk, isFirst, nodes }) => {
        // On the first chunk in insert mode, create the AI anchor block
        // right after the current top-level block, outside undo history.
        // (Fixed: strict equality, consistent with the check below.)
        if (isFirst && mode === 'insert') {
          editor.tf.withoutSaving(() => {
            editor.tf.insertNodes(
              {
                children: [{ text: '' }],
                type: KEYS.aiChat,
              },
              {
                at: PathApi.next(editor.selection!.focus.path.slice(0, 1)),
              }
            );
          });
          editor.setOption(AIChatPlugin, 'streaming', true);
        }
        if (mode === 'insert' && nodes.length > 0) {
          withAIBatch(
            editor,
            () => {
              // Bail out if streaming was cancelled mid-response.
              if (!getOption('streaming')) return;
              editor.tf.withScrolling(() => {
                streamInsertChunk(editor, chunk, {
                  textProps: {
                    ai: true,
                  },
                });
              });
            },
            { split: isFirst }
          );
        }
      },
      onFinish: () => {
        // Reset all streaming state once the response completes.
        editor.setOption(AIChatPlugin, 'streaming', false);
        editor.setOption(AIChatPlugin, '_blockChunks', '');
        editor.setOption(AIChatPlugin, '_blockPath', null);
      },
    });
  },
});
- `onChunk`: Handles each streaming chunk, creating AI nodes on the first chunk and inserting content in real time.
- `onFinish`: Cleans up streaming state when the response completes.
- Uses `withAIBatch` and `streamInsertChunk` for optimized content insertion.
System Prompt
The system prompt defines the AI's role and behavior. You can customize the systemTemplate
in your extended plugin:
// Custom system prompt tailored to technical documentation.
// (Fixed: the snippet previously ended with `}),`, a syntax error in a
// standalone const declaration; it now ends with `});`.)
export const customAIChatPlugin = AIChatPlugin.extend({
  options: {
    systemTemplate: ({ isBlockSelecting, isSelecting }) => {
      const customSystem = `You are a technical documentation assistant specialized in code and API documentation.
Rules:
- Provide accurate, well-structured technical content
- Use appropriate code formatting and syntax highlighting
- Include relevant examples and best practices
- Maintain consistent documentation style
- CRITICAL: DO NOT remove or modify custom MDX tags unless explicitly requested.
- CRITICAL: Distinguish between INSTRUCTIONS and QUESTIONS.`;
      // Append the selection-mode specific context to the shared rules.
      return isBlockSelecting
        ? `${customSystem}
- <Selection> represents the full blocks of text the user has selected and wants to modify or ask about.
- Your response should be a direct replacement for the entire <Selection>.
- Maintain the overall structure and formatting of the selected blocks, unless explicitly instructed otherwise.
<Selection>
{block}
</Selection>`
        : isSelecting
          ? `${customSystem}
- <Block> is the block of text containing the user's selection, providing context.
- <Selection> is the specific text the user has selected in the block and wants to modify or ask about.
- Consider the context provided by <Block>, but only modify <Selection>.
<Block>
{block}
</Block>
<Selection>
{selection}
</Selection>`
          : `${customSystem}
- <Block> is the current block of text the user is working on.
<Block>
{block}
</Block>`;
    },
    // ...other options
  },
});
User Prompt
Customize how user prompts are formatted and contextualized in your extended plugin:
// Custom user prompt wrappers for each selection mode.
// (Fixed: the snippet previously ended with `}),`, a syntax error in a
// standalone const declaration; it now ends with `});`.)
export const customAIChatPlugin = AIChatPlugin.extend({
  options: {
    promptTemplate: ({ isBlockSelecting, isSelecting }) => {
      return isBlockSelecting
        ? `<Reminder>
If this is a question, provide a helpful and concise answer about <Selection>.
If this is an instruction, provide ONLY the content to replace the entire <Selection>. No explanations.
Analyze and improve the following content blocks maintaining structure and clarity.
NEVER write <Block> or <Selection>.
</Reminder>
{prompt} about <Selection>`
        : isSelecting
          ? `<Reminder>
If this is a question, provide a helpful and concise answer about <Selection>.
If this is an instruction, provide ONLY the text to replace <Selection>. No explanations.
Ensure it fits seamlessly within <Block>. If <Block> is empty, write ONE random sentence.
NEVER write <Block> or <Selection>.
</Reminder>
{prompt} about <Selection>`
          : `<Reminder>
CRITICAL: NEVER write <Block>.
Continue or improve the content naturally.
</Reminder>
{prompt}`;
    },
    // ...other options
  },
});
Add API Route
Create an API route handler with optimized streaming for different content types:
import type { TextStreamPart, ToolSet } from 'ai';
import type { NextRequest } from 'next/server';
import { createOpenAI } from '@ai-sdk/openai';
import { InvalidArgumentError } from '@ai-sdk/provider';
import { delay as originalDelay } from '@ai-sdk/provider-utils';
import { convertToCoreMessages, streamText } from 'ai';
import { NextResponse } from 'next/server';
// Chunk boundary patterns: `line` emits whole lines, `list` emits fixed
// 8-character slices, `word` emits one word plus its trailing whitespace.
const CHUNKING_REGEXPS = {
  line: /\n+/m,
  list: /.{8}/m,
  word: /\S+\s+/m,
};
/**
 * AI command endpoint: streams a chat completion from OpenAI using
 * content-aware chunking (line-based for code blocks/tables/links,
 * fixed-size for lists, word-based for regular text).
 *
 * Expects JSON body: { apiKey?, messages, system? }. Returns 401 when no
 * API key is available, 500 on provider failure.
 */
export async function POST(req: NextRequest) {
  const { apiKey: key, messages, system } = await req.json();
  const apiKey = key || process.env.OPENAI_API_KEY;
  if (!apiKey) {
    return NextResponse.json(
      { error: 'Missing OpenAI API key.' },
      { status: 401 }
    );
  }
  const openai = createOpenAI({ apiKey });
  // Streaming state flags, mutated by the chunking callback below.
  let isInCodeBlock = false;
  let isInTable = false;
  let isInList = false;
  let isInLink = false;
  try {
    const result = streamText({
      experimental_transform: smoothStream({
        chunking: (buffer) => {
          // Enter a code block on ```lang; leave once a closing ``` arrives.
          if (/```[^\s]+/.test(buffer)) {
            isInCodeBlock = true;
          } else if (isInCodeBlock && buffer.includes('```')) {
            isInCodeBlock = false;
          }
          // 'https' always contains 'http', so a single check suffices —
          // the former duplicate `https` branch was unreachable.
          if (buffer.includes('http')) {
            isInLink = true;
          } else if (buffer.includes('\n') && isInLink) {
            isInLink = false;
          }
          if (buffer.includes('*') || buffer.includes('-')) {
            isInList = true;
          } else if (buffer.includes('\n') && isInList) {
            isInList = false;
          }
          // Simple table detection: enter on |, exit on double newline.
          if (!isInTable && buffer.includes('|')) {
            isInTable = true;
          } else if (isInTable && buffer.includes('\n\n')) {
            isInTable = false;
          }
          // Choose the chunking strategy for the current content type.
          let match;
          if (isInCodeBlock || isInTable || isInLink) {
            match = CHUNKING_REGEXPS.line.exec(buffer);
          } else if (isInList) {
            match = CHUNKING_REGEXPS.list.exec(buffer);
          } else {
            match = CHUNKING_REGEXPS.word.exec(buffer);
          }
          if (!match) return null;
          return buffer.slice(0, match.index) + match[0];
        },
        // Slow down in code blocks/tables so the client renderer keeps up.
        delayInMs: () => (isInCodeBlock || isInTable ? 100 : 30),
      }),
      maxTokens: 2048,
      messages: convertToCoreMessages(messages),
      model: openai('gpt-4o'),
      system: system,
    });
    return result.toDataStreamResponse();
  } catch {
    return NextResponse.json(
      { error: 'Failed to process AI request' },
      { status: 500 }
    );
  }
}
// Smooth streaming implementation for optimized chunking.
/**
 * Smooths text streaming output by buffering deltas and re-emitting them
 * at chunk boundaries, pacing each emit with a configurable delay.
 *
 * @param delayInMs - Delay between chunks in ms (default 10); `null` skips it.
 * @param chunking - "word" (default), "line", a RegExp, or a detector function.
 * @returns A transform-stream factory usable as `experimental_transform`.
 */
function smoothStream<TOOLS extends ToolSet>({
  _internal: { delay = originalDelay } = {},
  chunking = 'word',
  delayInMs = 10,
}: {
  // Internal. For test use only.
  _internal?: {
    delay?: (delayInMs: number | null) => Promise<void>;
  };
  chunking?: ChunkDetector | RegExp | 'line' | 'word';
  delayInMs?: delayer | number | null;
} = {}): (options: {
  tools: TOOLS;
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>> {
  let detectChunk: ChunkDetector;
  if (typeof chunking === 'function') {
    // Wrap a user-supplied detector with sanity checks: it must return a
    // non-empty prefix of the buffer, or null when no chunk is ready.
    detectChunk = (buffer) => {
      const match = chunking(buffer);
      if (match == null) return null;
      if (match.length === 0) {
        throw new Error(`Chunking function must return a non-empty string.`);
      }
      if (!buffer.startsWith(match)) {
        throw new Error(
          `Chunking function must return a match that is a prefix of the buffer.`
        );
      }
      return match;
    };
  } else {
    const chunkingRegex =
      typeof chunking === 'string' ? CHUNKING_REGEXPS[chunking] : chunking;
    if (chunkingRegex == null) {
      throw new InvalidArgumentError({
        argument: 'chunking',
        message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`,
      });
    }
    // Emit everything up to and including the first regex match.
    detectChunk = (buffer) => {
      const match = chunkingRegex.exec(buffer);
      if (!match) return null;
      return buffer.slice(0, match.index) + match?.[0];
    };
  }
  return () => {
    let buffer = '';
    return new TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>({
      async transform(chunk, controller) {
        if (chunk.type !== 'text-delta') {
          // Non-text parts flush buffered text first to preserve ordering.
          if (buffer.length > 0) {
            controller.enqueue({ textDelta: buffer, type: 'text-delta' });
            buffer = '';
          }
          controller.enqueue(chunk);
          return;
        }
        buffer += chunk.textDelta;
        let match;
        // Drain the buffer one detected chunk at a time, pacing each emit.
        while ((match = detectChunk(buffer)) != null) {
          controller.enqueue({ textDelta: match, type: 'text-delta' });
          buffer = buffer.slice(match.length);
          const _delayInMs =
            typeof delayInMs === 'number'
              ? delayInMs
              : (delayInMs?.(buffer) ?? 10);
          await delay(_delayInMs);
        }
      },
    });
  };
}
Then, set your `OPENAI_API_KEY` in `.env.local`.
Add Toolbar Button
You can add AIToolbarButton
to your Toolbar to open the AI menu.
Keyboard Shortcuts
| Key | Description |
| --- | --- |
| `Space` | Open AI menu in empty block (cursor mode) |
| `Cmd + J` | Open AI menu (cursor or selection mode) |
| `Escape` | Close AI menu |
Plate Plus
Combobox menu with free-form prompt input
- Additional trigger methods:
- Block menu button
- Slash command menu
- Beautifully crafted UI
Customization
Adding Custom AI Commands
'use client';
import * as React from 'react';
import {
AIChatPlugin,
AIPlugin,
useEditorChat,
useLastAssistantMessage,
} from '@platejs/ai/react';
import { BlockSelectionPlugin, useIsSelecting } from '@platejs/selection/react';
import { Command as CommandPrimitive } from 'cmdk';
import {
Album,
BadgeHelp,
BookOpenCheck,
Check,
CornerUpLeft,
FeatherIcon,
ListEnd,
ListMinus,
ListPlus,
Loader2Icon,
PauseIcon,
PenLine,
SmileIcon,
Wand,
X,
} from 'lucide-react';
import { type NodeEntry, type SlateEditor, isHotkey, NodeApi } from 'platejs';
import { useEditorPlugin, useHotkeys, usePluginOption } from 'platejs/react';
import { type PlateEditor, useEditorRef } from 'platejs/react';
import { Button } from '@/components/ui/button';
import {
Command,
CommandGroup,
CommandItem,
CommandList,
} from '@/components/ui/command';
import {
Popover,
PopoverAnchor,
PopoverContent,
} from '@/components/ui/popover';
import { cn } from '@/lib/utils';
import { useChat } from '@/components/editor/use-chat';
import { AIChatEditor } from './ai-chat-editor';
/**
 * Popover-based AI command menu. Anchors to the current block (or the AI
 * anchor node while streaming), shows a prompt input or a loading row, and
 * lists context-appropriate commands via `AIMenuItems`.
 */
export function AIMenu() {
  const { api, editor } = useEditorPlugin(AIChatPlugin);
  const open = usePluginOption(AIChatPlugin, 'open');
  const mode = usePluginOption(AIChatPlugin, 'mode');
  const streaming = usePluginOption(AIChatPlugin, 'streaming');
  const isSelecting = useIsSelecting();
  // Currently highlighted command value in the combobox.
  const [value, setValue] = React.useState('');
  const chat = useChat();
  const { input, messages, setInput, status } = chat;
  // DOM element the popover anchors to.
  const [anchorElement, setAnchorElement] = React.useState<HTMLElement | null>(
    null
  );
  const content = useLastAssistantMessage()?.content;
  React.useEffect(() => {
    // While streaming, re-anchor to the AI anchor node; the setTimeout defers
    // the DOM lookup until after the node has rendered.
    if (streaming) {
      const anchor = api.aiChat.node({ anchor: true });
      setTimeout(() => {
        const anchorDom = editor.api.toDOMNode(anchor![0])!;
        setAnchorElement(anchorDom);
      }, 0);
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [streaming]);
  const setOpen = (open: boolean) => {
    if (open) {
      api.aiChat.show();
    } else {
      api.aiChat.hide();
    }
  };
  const show = (anchorElement: HTMLElement) => {
    setAnchorElement(anchorElement);
    setOpen(true);
  };
  // Wire the chat state to the editor's open/cursor/selection events.
  useEditorChat({
    chat,
    onOpenBlockSelection: (blocks: NodeEntry[]) => {
      show(editor.api.toDOMNode(blocks.at(-1)![0])!);
    },
    onOpenChange: (open) => {
      if (!open) {
        setAnchorElement(null);
        setInput('');
      }
    },
    onOpenCursor: () => {
      const [ancestor] = editor.api.block({ highest: true })!;
      // Select the block when the cursor is mid-block in non-empty content.
      if (!editor.api.isAt({ end: true }) && !editor.api.isEmpty(ancestor)) {
        editor
          .getApi(BlockSelectionPlugin)
          .blockSelection.set(ancestor.id as string);
      }
      show(editor.api.toDOMNode(ancestor)!);
    },
    onOpenSelection: () => {
      show(editor.api.toDOMNode(editor.api.blocks().at(-1)![0])!);
    },
  });
  useHotkeys('esc', () => {
    api.aiChat.stop();
    // remove when you implement the route /api/ai/command
    chat._abortFakeStream();
  });
  const isLoading = status === 'streaming' || status === 'submitted';
  // Insert mode streams directly into the editor; no menu UI is needed.
  if (isLoading && mode === 'insert') {
    return null;
  }
  return (
    <Popover open={open} onOpenChange={setOpen} modal={false}>
      <PopoverAnchor virtualRef={{ current: anchorElement! }} />
      <PopoverContent
        className="border-none bg-transparent p-0 shadow-none"
        style={{
          width: anchorElement?.offsetWidth,
        }}
        onEscapeKeyDown={(e) => {
          e.preventDefault();
          api.aiChat.hide();
        }}
        align="center"
        side="bottom"
      >
        <Command
          className="w-full rounded-lg border shadow-md"
          value={value}
          onValueChange={setValue}
        >
          {mode === 'chat' && isSelecting && content && (
            <AIChatEditor content={content} />
          )}
          {isLoading ? (
            <div className="flex grow items-center gap-2 p-2 text-sm text-muted-foreground select-none">
              <Loader2Icon className="size-4 animate-spin" />
              {messages.length > 1 ? 'Editing...' : 'Thinking...'}
            </div>
          ) : (
            <CommandPrimitive.Input
              className={cn(
                'flex h-9 w-full min-w-0 border-input bg-transparent px-3 py-1 text-base transition-[color,box-shadow] outline-none placeholder:text-muted-foreground md:text-sm dark:bg-input/30',
                'aria-invalid:border-destructive aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40',
                'border-b focus-visible:ring-transparent'
              )}
              value={input}
              onKeyDown={(e) => {
                // Backspace on empty input dismisses the menu.
                if (isHotkey('backspace')(e) && input.length === 0) {
                  e.preventDefault();
                  api.aiChat.hide();
                }
                // Plain Enter submits only when no command is highlighted.
                if (isHotkey('enter')(e) && !e.shiftKey && !value) {
                  e.preventDefault();
                  void api.aiChat.submit();
                }
              }}
              onValueChange={setInput}
              placeholder="Ask AI anything..."
              data-plate-focus
              autoFocus
            />
          )}
          {!isLoading && (
            <CommandList>
              <AIMenuItems setValue={setValue} />
            </CommandList>
          )}
        </Command>
      </PopoverContent>
    </Popover>
  );
}
// Menu state: "command" = prompt entry, "suggestion" = acting on a response;
// crossed with whether the trigger was a cursor or a selection.
type EditorChatState =
  | 'cursorCommand'
  | 'cursorSuggestion'
  | 'selectionCommand'
  | 'selectionSuggestion';
// Registry of AI menu actions, keyed by command value. Each item either
// submits a prompt to the AI chat or applies a transform to the pending
// response (accept/discard/replace/insert).
const aiChatItems = {
  accept: {
    icon: <Check />,
    label: 'Accept',
    value: 'accept',
    onSelect: ({ editor }) => {
      editor.getTransforms(AIChatPlugin).aiChat.accept();
      editor.tf.focus({ edge: 'end' });
    },
  },
  continueWrite: {
    icon: <PenLine />,
    label: 'Continue writing',
    value: 'continueWrite',
    onSelect: ({ editor }) => {
      const ancestorNode = editor.api.block({ highest: true });
      if (!ancestorNode) return;
      // Empty block: seed from the whole document; otherwise continue inline.
      const isEmpty = NodeApi.string(ancestorNode[0]).trim().length === 0;
      void editor.getApi(AIChatPlugin).aiChat.submit({
        mode: 'insert',
        prompt: isEmpty
          ? `<Document>
{editor}
</Document>
Start writing a new paragraph AFTER <Document> ONLY ONE SENTENCE`
          : 'Continue writing AFTER <Block> ONLY ONE SENTENCE. DONT REPEAT THE TEXT.',
      });
    },
  },
  discard: {
    icon: <X />,
    label: 'Discard',
    shortcut: 'Escape',
    value: 'discard',
    onSelect: ({ editor }) => {
      // Undo any inserted AI content, then close the menu.
      editor.getTransforms(AIPlugin).ai.undo();
      editor.getApi(AIChatPlugin).aiChat.hide();
    },
  },
  emojify: {
    icon: <SmileIcon />,
    label: 'Emojify',
    value: 'emojify',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        prompt: 'Emojify',
      });
    },
  },
  explain: {
    icon: <BadgeHelp />,
    label: 'Explain',
    value: 'explain',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        // Prompt varies with selection state: whole document vs. selection.
        prompt: {
          default: 'Explain {editor}',
          selecting: 'Explain',
        },
      });
    },
  },
  fixSpelling: {
    icon: <Check />,
    label: 'Fix spelling & grammar',
    value: 'fixSpelling',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        prompt: 'Fix spelling and grammar',
      });
    },
  },
  generateMarkdownSample: {
    icon: <BookOpenCheck />,
    label: 'Generate Markdown sample',
    value: 'generateMarkdownSample',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        prompt: 'Generate a markdown sample',
      });
    },
  },
  generateMdxSample: {
    icon: <BookOpenCheck />,
    label: 'Generate MDX sample',
    value: 'generateMdxSample',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        prompt: 'Generate a mdx sample',
      });
    },
  },
  improveWriting: {
    icon: <Wand />,
    label: 'Improve writing',
    value: 'improveWriting',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        prompt: 'Improve the writing',
      });
    },
  },
  insertBelow: {
    icon: <ListEnd />,
    label: 'Insert below',
    value: 'insertBelow',
    onSelect: ({ aiEditor, editor }) => {
      void editor.getTransforms(AIChatPlugin).aiChat.insertBelow(aiEditor);
    },
  },
  makeLonger: {
    icon: <ListPlus />,
    label: 'Make longer',
    value: 'makeLonger',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        prompt: 'Make longer',
      });
    },
  },
  makeShorter: {
    icon: <ListMinus />,
    label: 'Make shorter',
    value: 'makeShorter',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        prompt: 'Make shorter',
      });
    },
  },
  replace: {
    icon: <Check />,
    label: 'Replace selection',
    value: 'replace',
    onSelect: ({ aiEditor, editor }) => {
      void editor.getTransforms(AIChatPlugin).aiChat.replaceSelection(aiEditor);
    },
  },
  simplifyLanguage: {
    icon: <FeatherIcon />,
    label: 'Simplify language',
    value: 'simplifyLanguage',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        prompt: 'Simplify the language',
      });
    },
  },
  summarize: {
    icon: <Album />,
    label: 'Add a summary',
    value: 'summarize',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit({
        mode: 'insert',
        prompt: {
          default: 'Summarize {editor}',
          selecting: 'Summarize',
        },
      });
    },
  },
  tryAgain: {
    icon: <CornerUpLeft />,
    label: 'Try again',
    value: 'tryAgain',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.reload();
    },
  },
} satisfies Record<
  string,
  {
    icon: React.ReactNode;
    label: string;
    value: string;
    component?: React.ComponentType<{ menuState: EditorChatState }>;
    filterItems?: boolean;
    items?: { label: string; value: string }[];
    shortcut?: string;
    onSelect?: ({
      aiEditor,
      editor,
    }: {
      aiEditor: SlateEditor;
      editor: PlateEditor;
    }) => void;
  }
>;
// Items shown by the AI menu for each chat state. Array order is the display
// order in the command menu, so reordering entries changes the UI.
const menuStateItems: Record<
  EditorChatState,
  {
    items: (typeof aiChatItems)[keyof typeof aiChatItems][];
    heading?: string;
  }[]
> = {
  // Cursor placed, no AI response yet: generation commands.
  cursorCommand: [
    {
      items: [
        aiChatItems.generateMdxSample,
        aiChatItems.generateMarkdownSample,
        aiChatItems.continueWrite,
        aiChatItems.summarize,
        aiChatItems.explain,
      ],
    },
  ],
  // Cursor placed, AI has responded: accept/discard/retry actions.
  cursorSuggestion: [
    {
      items: [aiChatItems.accept, aiChatItems.discard, aiChatItems.tryAgain],
    },
  ],
  // Text selected, no AI response yet: editing commands.
  selectionCommand: [
    {
      items: [
        aiChatItems.improveWriting,
        aiChatItems.emojify,
        aiChatItems.makeLonger,
        aiChatItems.makeShorter,
        aiChatItems.fixSpelling,
        aiChatItems.simplifyLanguage,
      ],
    },
  ],
  // Text selected, AI has responded: replacement/insertion actions.
  selectionSuggestion: [
    {
      items: [
        aiChatItems.replace,
        aiChatItems.insertBelow,
        aiChatItems.discard,
        aiChatItems.tryAgain,
      ],
    },
  ],
};
/**
 * Renders the grouped AI command items for the current chat state.
 *
 * The state is derived from two signals: whether any chat messages exist
 * (AI has responded) and whether the user has a selection. Whenever the
 * group list changes, the combobox highlight is reset to the first item
 * via `setValue`.
 */
export const AIMenuItems = ({
  setValue,
}: {
  setValue: (value: string) => void;
}) => {
  const editor = useEditorRef();
  const { messages } = usePluginOption(AIChatPlugin, 'chat');
  // NOTE(review): non-null assertion assumes the AI editor exists by the time
  // this menu renders — confirm against the AIChatPlugin lifecycle.
  const aiEditor = usePluginOption(AIChatPlugin, 'aiEditor')!;
  const isSelecting = useIsSelecting();

  // Once the AI has responded, show suggestion actions instead of commands.
  const menuState = React.useMemo(() => {
    if (messages && messages.length > 0) {
      return isSelecting ? 'selectionSuggestion' : 'cursorSuggestion';
    }
    return isSelecting ? 'selectionCommand' : 'cursorCommand';
  }, [isSelecting, messages]);

  // Plain lookup into a module-level constant table; memoizing it bought
  // nothing, so the useMemo wrapper was dropped.
  const menuGroups = menuStateItems[menuState];

  // Keep the combobox highlight on the first available item per state.
  React.useEffect(() => {
    if (menuGroups.length > 0 && menuGroups[0].items.length > 0) {
      setValue(menuGroups[0].items[0].value);
    }
  }, [menuGroups, setValue]);

  return (
    <>
      {menuGroups.map((group, index) => (
        <CommandGroup key={index} heading={group.heading}>
          {group.items.map((menuItem) => (
            <CommandItem
              key={menuItem.value}
              className="[&_svg]:text-muted-foreground"
              value={menuItem.value}
              onSelect={() => {
                menuItem.onSelect?.({ aiEditor, editor });
              }}
            >
              {menuItem.icon}
              <span>{menuItem.label}</span>
            </CommandItem>
          ))}
        </CommandGroup>
      ))}
    </>
  );
};
/**
 * Floating status bar shown while the AI is generating in insert mode.
 * Displays a spinner, a phase label, and a Stop button wired to
 * `api.aiChat.stop()`. Renders nothing outside of an active insert run.
 */
export function AILoadingBar() {
  const chat = usePluginOption(AIChatPlugin, 'chat');
  const mode = usePluginOption(AIChatPlugin, 'mode');
  const { status } = chat;
  const { api } = useEditorPlugin(AIChatPlugin);

  // Only surface the bar while an insert-mode request is in flight.
  const isLoading = status === 'streaming' || status === 'submitted';
  if (!isLoading || mode !== 'insert') return null;

  const label = status === 'submitted' ? 'Thinking...' : 'Writing...';

  return (
    <div
      className={cn(
        'absolute bottom-4 left-1/2 z-10 flex -translate-x-1/2 items-center gap-3 rounded-md border border-border bg-muted px-3 py-1.5 text-sm text-muted-foreground shadow-md transition-all duration-300'
      )}
    >
      <span className="h-4 w-4 animate-spin rounded-full border-2 border-muted-foreground border-t-transparent" />
      <span>{label}</span>
      <Button
        size="sm"
        variant="ghost"
        className="flex items-center gap-1 text-xs"
        onClick={() => api.aiChat.stop()}
      >
        <PauseIcon className="h-4 w-4" />
        Stop
        <kbd className="ml-1 rounded bg-border px-1 font-mono text-[10px] text-muted-foreground shadow-sm">
          Esc
        </kbd>
      </Button>
    </div>
  );
}
You can extend the AI menu with custom commands by adding new items to the aiChatItems
object and updating the menu state items.
Simple Custom Command
Add a basic command that submits a custom prompt:
// Add to your ai-menu.tsx aiChatItems object
summarizeInBullets: {
  icon: <ListIcon />,
  label: 'Summarize in bullets',
  value: 'summarizeInBullets',
  onSelect: ({ editor }) => {
    // No `mode` given, so the default 'chat' mode is used: the response is
    // previewed with accept/reject options rather than inserted directly.
    void editor.getApi(AIChatPlugin).aiChat.submit({
      prompt: 'Summarize this content as bullet points',
    });
  },
},
Command with Complex Logic
Create commands with client-side logic before submission:
generateTOC: {
  icon: <BookIcon />,
  label: 'Generate table of contents',
  value: 'generateTOC',
  onSelect: ({ editor }) => {
    // editor.api.nodes() yields node entries lazily (a generator, not an
    // array), so it has no `length`; materialize it before counting.
    const headings = Array.from(
      editor.api.nodes({
        match: (n) => ['h1', 'h2', 'h3'].includes(n.type as string),
      })
    );

    // Pick the prompt based on whether the document already has headings.
    const prompt =
      headings.length === 0
        ? 'Create a table of contents with sample headings for this document'
        : 'Generate a table of contents based on the existing headings';

    void editor.getApi(AIChatPlugin).aiChat.submit({
      mode: 'insert',
      prompt,
    });
  },
},
Understanding Menu States
The AI menu adapts to different contexts based on user selection and AI response state:
const menuState = React.useMemo(() => {
// If AI has already responded, show suggestion actions
if (messages && messages.length > 0) {
return isSelecting ? 'selectionSuggestion' : 'cursorSuggestion';
}
// If no AI response yet, show command actions
return isSelecting ? 'selectionCommand' : 'cursorCommand';
}, [isSelecting, messages]);
Menu States:
- cursorCommand: No selection, no AI response → Show generation commands (Continue writing, Summarize, etc.)
- selectionCommand: Text selected, no AI response → Show editing commands (Improve writing, Fix spelling, etc.)
- cursorSuggestion: No selection, AI responded → Show suggestion actions (Accept, Discard, Try again)
- selectionSuggestion: Text selected, AI responded → Show replacement actions (Replace selection, Insert below, etc.)
Update Menu States
Add your custom commands to the appropriate menu states in menuStateItems
:
// Type the items against aiChatItems instead of `any[]` so a typo in an item
// reference is caught at compile time (matches the real definition above).
const menuStateItems: Record<
  EditorChatState,
  { items: (typeof aiChatItems)[keyof typeof aiChatItems][] }[]
> = {
  cursorCommand: [
    {
      items: [
        aiChatItems.generateTOC,
        aiChatItems.summarizeInBullets,
        // ... existing items
      ],
    },
  ],
  selectionCommand: [
    {
      items: [
        aiChatItems.summarizeInBullets, // Works for selected text too
        // ... existing items
      ],
    },
  ],
  // ... other states
};
Switching AI Models
Configure different AI models and providers in your API route:
import { createOpenAI } from '@ai-sdk/openai';
import { createAnthropic } from '@ai-sdk/anthropic';
// API route that picks an AI provider/model per request.
export async function POST(req: NextRequest) {
  // Defaults: OpenAI's gpt-4o when the client specifies nothing.
  const { model = 'gpt-4o', provider = 'openai', ...rest } = await req.json();
  let aiProvider;
  switch (provider) {
    case 'anthropic':
      aiProvider = createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
      break;
    // Unknown providers fall through to OpenAI.
    case 'openai':
    default:
      aiProvider = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
      break;
  }
  const result = streamText({
    model: aiProvider(model),
    // ... other options
  });
  // Stream back in the Vercel AI SDK data-stream format.
  return result.toDataStreamResponse();
}
Configure the model in your aiChatPlugin
:
// Point the plugin at the AI route; `body` fields are forwarded with every
// request so the route can select the provider and model.
export const aiChatPlugin = AIChatPlugin.extend({
  options: {
    chatOptions: {
      api: '/api/ai/command',
      body: {
        model: 'gpt-4o-mini', // or 'claude-4-sonnet'
        provider: 'openai', // or 'anthropic'
      },
    },
    // ... other options
  },
});
For more AI providers and models, see the Vercel AI SDK documentation.
Custom Streaming Optimization
Optimize streaming performance for specific content types with custom chunking strategies:
/**
 * Chunking strategy for smoothStream: flushes complete JSON objects whole,
 * fenced-code content line-by-line, and plain text word-by-word.
 * Returns the next chunk to emit from `buffer`, or null when no complete
 * chunk is available yet.
 */
const customChunking = (buffer: string) => {
  // Slice the buffer through the end of a regex match, or null on no match.
  const sliceThrough = (m: RegExpExecArray | null) =>
    m === null ? null : buffer.slice(0, m.index + m[0].length);

  // A complete {...} object is emitted as a single chunk.
  if (buffer.includes('{') && buffer.includes('}')) {
    const jsonChunk = sliceThrough(/\{[^}]*\}/g.exec(buffer));
    if (jsonChunk !== null) return jsonChunk;
  }

  // Inside fenced code, emit up to and including the next newline run.
  if (buffer.includes('```')) {
    return sliceThrough(/\n+/m.exec(buffer));
  }

  // Default: one whitespace-terminated word per chunk.
  return sliceThrough(/\S+\s+/m.exec(buffer));
};
// Use in your streamText configuration
const result = streamText({
  // smoothStream paces model output; `chunking` decides chunk boundaries and
  // `delayInMs` the pause between chunks.
  experimental_transform: smoothStream({
    chunking: customChunking,
    delayInMs: (buffer) => {
      // Slower for complex content, faster for simple text
      return buffer.includes('```') || buffer.includes('{') ? 80 : 20;
    },
  }),
  // ... other options
});
Security Considerations
Implement security best practices for AI functionality:
// AI endpoint with basic request validation before any model call.
export async function POST(req: NextRequest) {
  const { messages, system } = await req.json();

  // Validate request structure
  if (!messages || !Array.isArray(messages)) {
    return NextResponse.json({ error: 'Invalid messages' }, { status: 400 });
  }

  // Content length validation. Coerce missing/non-string `content` to '' so a
  // malformed message cannot skew the limit with the literal text "undefined".
  const totalContent = messages
    .map((m) => (typeof m?.content === 'string' ? m.content : ''))
    .join('');
  if (totalContent.length > 50000) {
    return NextResponse.json({ error: 'Content too long' }, { status: 413 });
  }

  // Rate limiting (implement with your preferred solution)
  // await rateLimit(req);

  // Content filtering (optional)
  // const filteredMessages = await filterContent(messages);

  // Process AI request...
}
Security Guidelines:
- Validate Input: Always validate and sanitize user prompts
- Rate Limiting: Implement rate limiting on AI endpoints
- Content Filtering: Consider content filtering for responses
- API Key Security: Never expose API keys client-side
- User Privacy: Be mindful of data sent to AI models
Plugins
AIPlugin
Core plugin that extends the editor with AI content management capabilities.
AIChatPlugin
Main plugin that enables AI chat operations, streaming, and user interface interactions.
AIChatPlugin options:
- chatOptions: Configuration options for the Vercel AI SDK useChat hook.
  - api: API endpoint for AI requests
  - body: Additional request body parameters
- mode: Specifies how assistant messages are handled:
  - 'chat': Shows preview with accept/reject options
  - 'insert': Directly inserts content into editor
  - Default: 'chat'
- open: Whether the AI chat interface is open. Default: false
- streaming: Whether AI response is currently streaming. Default: false
- promptTemplate: Template for generating user prompts. Supports placeholders:
  - {block}: Markdown of blocks in selection
  - {editor}: Markdown of entire editor content
  - {selection}: Markdown of current selection
  - {prompt}: Actual user prompt
  - Default: '{prompt}'
- systemTemplate: Template for system messages. Supports the same placeholders as promptTemplate. Default: null
- aiEditor: The editor instance used to generate AI responses.
- chat: Chat helpers returned by the useChat hook.
API
api.aiChat.accept()
Accepts the current AI suggestion:
- Removes AI marks from the content
- Hides the AI chat interface
- Focuses the editor
api.aiChat.insertBelow()
Inserts AI-generated content below the current block.
Handles both block selection and normal selection modes:
- In block selection: Inserts after the last selected block, applying formatting from the last block
- In normal selection: Inserts after the current block, applying formatting from the current block
api.aiChat.replaceSelection()
Replaces the current selection with AI-generated content.
Handles different selection modes:
- Single block selection: Replaces the selected block, applying its formatting to inserted content based on format option
- Multiple block selection: Replaces all selected blocks
- With format: 'none' or 'single': Preserves original formatting
- With format: 'all': Applies first block's formatting to all content
- Normal selection: Replaces the current selection while maintaining surrounding context
api.aiChat.reset()
Resets the chat state:
- Stops any ongoing generation
- Clears chat messages
- Removes all AI nodes from the editor
api.aiChat.node()
Gets the AI chat node entry.
api.aiChat.reload()
Reloads the current AI chat:
- In insert mode: Undoes previous AI changes
- Reloads the chat with the current system prompt
api.aiChat.show()
Shows the AI chat interface:
- Resets the chat state
- Clears messages
- Sets the open state to true
api.aiChat.hide()
Hides the AI chat interface:
- Resets the chat state
- Sets the open state to false
- Focuses the editor
- Removes the AI anchor
api.aiChat.stop()
Stops the current AI generation:
- Sets streaming state to false
- Calls the chat stop function
api.aiChat.submit()
Submits a prompt to generate AI content.
Transforms
tf.aiChat.removeAnchor()
Removes the AI chat anchor node from the editor.
tf.aiChat.accept()
Accepts the current AI suggestion and integrates it into the editor content.
tf.aiChat.insertBelow()
Transform that inserts AI content below the current block.
tf.aiChat.replaceSelection()
Transform that replaces the current selection with AI content.
tf.ai.insertNodes()
Inserts AI-generated nodes with the AI mark.
tf.ai.removeMarks()
Removes AI marks from nodes in the specified location.
tf.ai.removeNodes()
Removes nodes that have the AI mark.
tf.ai.undo()
Special undo operation for AI changes:
- Undoes the last operation if it was AI-generated
- Removes the redo stack entry to prevent redoing AI operations
Hooks
useAIChatEditor
A hook that registers an editor in the AI chat plugin and deserializes markdown content with block-level memoization.
// Read-only AI preview editor: creates a dedicated Plate editor and feeds it
// the markdown `content` via useAIChatEditor.
const AIChatEditor = ({ content }: { content: string }) => {
  const aiEditor = usePlateEditor({
    plugins: [
      // Your editor plugins
      MarkdownPlugin,
      AIPlugin,
      AIChatPlugin,
      // etc...
    ],
  });

  // Registers aiEditor in the AI chat plugin and deserializes `content`
  // with block-level memoization.
  useAIChatEditor(aiEditor, content, {
    // Optional markdown parser options
    parser: {
      exclude: ['space'],
    },
  });

  return <Editor editor={aiEditor} />;
};
On This Page
FeaturesKit UsageInstallationAdd KitAdd API RouteConfigure EnvironmentManual UsageInstallationAdd PluginsConfigure PluginsAdd Streaming with useHooksSystem PromptUser PromptAdd API RouteAdd Toolbar ButtonKeyboard ShortcutsPlate PlusCustomizationAdding Custom AI CommandsSimple Custom CommandCommand with Complex LogicUnderstanding Menu StatesUpdate Menu StatesSwitching AI ModelsCustom Streaming OptimizationSecurity ConsiderationsPluginsAIPluginAIChatPluginAPIapi.aiChat.accept()api.aiChat.insertBelow()api.aiChat.replaceSelection()api.aiChat.reset()api.aiChat.node()api.aiChat.reload()api.aiChat.show()api.aiChat.hide()api.aiChat.stop()api.aiChat.submit()Transformstf.aiChat.removeAnchor()tf.aiChat.accept()tf.aiChat.insertBelow()tf.aiChat.replaceSelection()tf.ai.insertNodes()tf.ai.removeMarks()tf.ai.removeNodes()tf.ai.undo()HooksuseAIChatEditor