Most AI chat tutorials use the Vercel AI SDK. But you don't need it. Here's how to build streaming AI chat from scratch — giving you full control.
Why skip the AI SDK?
- Full control over every byte in the stream
- Provider agnostic — swap OpenAI for Anthropic by changing one URL
- Smaller bundle — no extra dependencies
The API Route
// src/app/api/ai/chat/route.ts
import { NextRequest } from "next/server";
/**
 * Streams an OpenAI chat completion back to the client as plain text.
 * Accepts { messages } in the request body, forwards them to OpenAI with
 * stream: true, parses the SSE response, and re-emits only the text deltas.
 */
export async function POST(req: NextRequest) {
const { messages } = await req.json();
const response = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
},
body: JSON.stringify({ model: "gpt-4o-mini", messages, stream: true }),
});
// Surface upstream failures instead of handing the client an empty stream
// (or crashing on response.body! below when there is no body).
if (!response.ok || !response.body) {
return new Response(`Upstream error: ${response.status}`, { status: 502 });
}
const body = response.body;
const stream = new ReadableStream({
async start(controller) {
const reader = body.getReader();
const decoder = new TextDecoder();
const encoder = new TextEncoder(); // hoisted: one encoder, not one per delta
let buffer = ""; // holds a partial SSE line that spans two network chunks
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
// { stream: true } keeps multi-byte UTF-8 sequences that are split
// across chunks from being decoded into replacement characters.
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split("\n");
// The last element may be an incomplete line — keep it for next read.
buffer = lines.pop() ?? "";
for (const line of lines) {
if (!line.startsWith("data: ")) continue;
const data = line.slice("data: ".length);
if (data === "[DONE]") { controller.close(); return; }
try {
const content = JSON.parse(data).choices?.[0]?.delta?.content;
if (content) controller.enqueue(encoder.encode(content));
} catch { /* ignore malformed or keep-alive frames */ }
}
}
controller.close();
} catch (err) {
// Do NOT also call controller.close() here — closing an errored
// controller throws. (The original fell through to close() after error().)
controller.error(err);
}
},
});
return new Response(stream, {
headers: { "Content-Type": "text/plain; charset=utf-8" },
});
}
The Chat Component
The client reads the stream and appends chunks in real-time:
"use client";
import { useState } from "react";
interface Message { role: "user" | "assistant"; content: string; }
/**
 * Chat UI: posts the conversation to /api/ai/chat and appends streamed
 * text chunks to the last (assistant) message in real time.
 */
export default function ChatInterface() {
const [messages, setMessages] = useState<Message[]>([]);
const [input, setInput] = useState("");
const [isStreaming, setIsStreaming] = useState(false);
async function handleSubmit(e: React.FormEvent) {
e.preventDefault();
if (!input.trim() || isStreaming) return;
const userMsg: Message = { role: "user", content: input.trim() };
const updated = [...messages, userMsg];
// Append an empty assistant placeholder that the stream fills in.
setMessages([...updated, { role: "assistant", content: "" }]);
setInput("");
setIsStreaming(true);
// Functional updater: append text to the last message without racing
// against other state updates.
const append = (text: string) => {
if (!text) return;
setMessages(prev => {
const msgs = [...prev];
const last = msgs.length - 1;
msgs[last] = { ...msgs[last], content: msgs[last].content + text };
return msgs;
});
};
try {
const res = await fetch("/api/ai/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ messages: updated }),
});
// Fail fast on HTTP errors — otherwise an error page/body would be
// streamed into the chat as if it were assistant text, and a missing
// body would crash on the non-null assertion.
if (!res.ok || !res.body) {
throw new Error(`Chat request failed: ${res.status}`);
}
const reader = res.body.getReader();
const decoder = new TextDecoder();
while (true) {
const { done, value } = await reader.read();
if (done) break;
// { stream: true } prevents multi-byte UTF-8 characters split
// across chunks from rendering as replacement characters.
append(decoder.decode(value, { stream: true }));
}
append(decoder.decode()); // flush any buffered trailing bytes
} catch (err) {
console.error(err);
} finally {
setIsStreaming(false);
}
}
return (
<div className="flex flex-col h-[600px]">
<div className="flex-1 overflow-y-auto p-4 space-y-4">
{messages.map((msg, i) => (
<div key={i} className={`p-3 rounded-lg max-w-[80%] ${
msg.role === "user" ? "bg-blue-600 ml-auto" : "bg-zinc-800"
}`}>
<p className="text-sm whitespace-pre-wrap">{msg.content}</p>
</div>
))}
</div>
<form onSubmit={handleSubmit} className="p-4 border-t border-zinc-800 flex gap-2">
<input value={input} onChange={e => setInput(e.target.value)}
placeholder="Ask anything..." className="flex-1 bg-zinc-900 border border-zinc-700 rounded-lg px-4 py-2" />
<button type="submit" disabled={isStreaming}
className="bg-blue-600 text-white px-4 py-2 rounded-lg disabled:opacity-50">Send</button>
</form>
</div>
);
}
How it works
- Client sends messages via POST
- Server forwards the request to OpenAI with `stream: true`
- Server parses the SSE response and forwards clean text chunks
- Client appends chunks to the last message
No SDK. No magic. Just fetch() + ReadableStream + getReader().
Swap to Anthropic
Change the URL to https://api.anthropic.com/v1/messages, swap auth to x-api-key, adjust SSE parsing. Same pattern.
Want the full stack?
This AI chat (plus auth, billing, email, dashboard) comes pre-wired in LaunchKit — a production-ready SaaS starter. $49.
Top comments (0)