AI Streaming Chat
Token-by-token AI responses delivered via WebSocket. Each message gets a dedicated session channel — clients subscribe before sending, then receive tokens as they’re generated.
Demonstrates: ai:token / ai:done events, session-scoped channels, streaming UX with cursor animation
Source: examples/socket-hub/src/examples/AiStreamingChat/
Architecture
Client subscribes to channel('ai-chat/{sessionId}')
↓
Client: POST /api/ai/chat { message, sessionId }
↓
Server: for each token from LLM:
sdk.socket.emit('ai:token', { token }, 'ai-chat/{sessionId}')
↓
Client receives tokens, appends to message
↓
Server: sdk.socket.emit('ai:done', {}, 'ai-chat/{sessionId}')
↓
Client: isStreaming = false, show final message

Client code
import { useAerostack } from '@aerostack/react'
import { useEffect, useRef, useState } from 'react'
import { v4 as uuid } from 'uuid'
// One chat turn in the transcript. Assistant turns are built incrementally:
// `content` grows as each `ai:token` event appends to the last message.
interface Message {
  role: 'user' | 'assistant' // who authored this turn
  content: string // full text so far (streamed token-by-token for the assistant)
  id: string // uuid, used as the stable React list key
}
/**
 * Token-streaming AI chat UI.
 *
 * Subscribes to a per-session realtime channel (`ai-chat/{sessionId}`) before
 * sending, POSTs the user's message to `/api/ai/chat`, then appends each
 * `ai:token` payload to the in-progress assistant message until `ai:done`
 * arrives and clears the streaming flag.
 */
export function AiStreamingChat() {
  const { realtime } = useAerostack()
  const [messages, setMessages] = useState<Message[]>([])
  const [inputText, setInputText] = useState('')
  const [isStreaming, setIsStreaming] = useState(false)
  // Stable per-mount session id; scopes the realtime channel to this client.
  const sessionId = useRef(uuid())
  const bottomRef = useRef<HTMLDivElement>(null)

  useEffect(() => {
    const channel = realtime.channel(`ai-chat/${sessionId.current}`)
    channel
      .on('ai:token', ({ data }) => {
        setMessages(prev => {
          const last = prev[prev.length - 1]
          // Grow the in-flight assistant message, or start a new one when the
          // previous message in the list is the user's.
          if (last?.role === 'assistant') {
            return [
              ...prev.slice(0, -1),
              { ...last, content: last.content + data.token },
            ]
          }
          return [...prev, { role: 'assistant', content: data.token, id: uuid() }]
        })
      })
      .on('ai:done', () => {
        setIsStreaming(false)
      })
      .subscribe()
    return () => channel.unsubscribe()
  }, [realtime])

  // Keep the newest message in view as tokens stream in.
  useEffect(() => {
    bottomRef.current?.scrollIntoView({ behavior: 'smooth' })
  }, [messages])

  const sendMessage = async () => {
    if (!inputText.trim() || isStreaming) return
    const userMessage: Message = {
      role: 'user',
      content: inputText,
      id: uuid(),
    }
    setMessages(prev => [...prev, userMessage])
    setInputText('')
    setIsStreaming(true)
    try {
      const res = await fetch('/api/ai/chat', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          message: userMessage.content,
          sessionId: sessionId.current,
        }),
      })
      // BUGFIX: `isStreaming` is otherwise only reset by the server's
      // `ai:done` event. If the request fails (non-2xx), that event never
      // arrives and the input stays disabled forever — reset here instead.
      if (!res.ok) setIsStreaming(false)
    } catch {
      // Network error: same reasoning — unblock the input.
      setIsStreaming(false)
    }
  }

  return (
    <div className="flex flex-col h-screen max-w-2xl mx-auto p-4">
      <div className="flex items-center gap-3 mb-4 p-4 rounded-xl border border-gray-800 bg-gray-900/50">
        <div className="w-8 h-8 rounded-full bg-purple-500/20 flex items-center justify-center">
          <span className="text-purple-400 text-sm">✦</span>
        </div>
        <div>
          <div className="text-sm font-medium text-white">AI Assistant</div>
          <div className="text-xs text-gray-500">Powered by Aerostack AI + Realtime</div>
        </div>
      </div>
      <div className="flex-1 overflow-y-auto space-y-4 mb-4">
        {messages.length === 0 && (
          <div className="text-center text-gray-600 py-16">
            <p className="text-4xl mb-4">✦</p>
            <p>Send a message to start the conversation.</p>
          </div>
        )}
        {messages.map((msg, i) => {
          // The blinking cursor is only shown on the final assistant message
          // while tokens are still arriving.
          const isLastAssistant = msg.role === 'assistant' && i === messages.length - 1
          return (
            <div key={msg.id} className={`flex gap-3 ${msg.role === 'user' ? 'flex-row-reverse' : ''}`}>
              <div className={`w-7 h-7 rounded-full flex-shrink-0 flex items-center justify-center text-xs ${
                msg.role === 'user' ? 'bg-blue-600 text-white' : 'bg-purple-500/20 text-purple-400'
              }`}>
                {msg.role === 'user' ? 'U' : '✦'}
              </div>
              <div className={`max-w-md px-4 py-3 rounded-2xl text-sm leading-relaxed ${
                msg.role === 'user'
                  ? 'bg-blue-600 text-white'
                  : 'bg-gray-800 text-gray-100'
              }`}>
                {msg.content}
                {isLastAssistant && isStreaming && (
                  <span className="inline-block w-0.5 h-4 bg-purple-400 ml-0.5 animate-pulse" />
                )}
              </div>
            </div>
          )
        })}
        <div ref={bottomRef} />
      </div>
      <form
        onSubmit={e => { e.preventDefault(); sendMessage() }}
        className="flex gap-2"
      >
        <input
          value={inputText}
          onChange={e => setInputText(e.target.value)}
          placeholder={isStreaming ? 'AI is responding...' : 'Ask anything...'}
          disabled={isStreaming}
          className="flex-1 bg-gray-900 border border-gray-700 rounded-xl px-4 py-3 text-white disabled:opacity-50"
        />
        <button
          type="submit"
          disabled={isStreaming || !inputText.trim()}
          className="bg-purple-600 text-white px-6 py-3 rounded-xl font-medium disabled:opacity-50"
        >
          Send
        </button>
      </form>
    </div>
  )
}

Server code
// Server side: streams AI completion tokens over the realtime socket.
import { Hono } from 'hono'
import { sdk } from '@aerostack/sdk'
// Single Hono app; the chat route is registered below and the app is
// exported as the module default at the end of the file.
const app = new Hono()
/**
 * POST /api/ai/chat — stream an AI completion to the caller's session channel.
 *
 * Body: { message: string; sessionId: string }
 * Emits `ai:token` per token and always finishes with `ai:done` on the
 * session-scoped channel `ai-chat/{sessionId}`.
 */
app.post('/api/ai/chat', async (c) => {
  const { message, sessionId } = await c.req.json()
  // Reject malformed requests before touching the AI provider or building
  // a channel name from unvalidated input.
  if (typeof message !== 'string' || !message.trim() || typeof sessionId !== 'string' || !sessionId) {
    return c.json({ ok: false, error: 'message and sessionId are required' }, 400)
  }
  const channel = `ai-chat/${sessionId}`
  // Stream from your AI provider
  const stream = await sdk.ai.streamCompletion({
    prompt: message,
    model: 'gpt-4o-mini',
    maxTokens: 512,
  })
  try {
    // Emit each token as it arrives
    for await (const token of stream) {
      sdk.socket.emit('ai:token', { token }, channel)
    }
  } finally {
    // BUGFIX: always signal completion — if the provider stream throws
    // mid-iteration, subscribed clients would otherwise never receive
    // `ai:done` and stay stuck with `isStreaming = true`.
    sdk.socket.emit('ai:done', {}, channel)
  }
  return c.json({ ok: true })
})
export default app

Each user has their own sessionId, so AI responses are private by default. To let multiple users watch the same stream (e.g., for collaborative Q&A), share the session ID between them.