feat(cli): upgrade to ai sdk v5 (#487)

Author: Aman Varshney
Date: 2025-08-10 20:19:55 +05:30
Committed by: GitHub
Parent: ea908ddc86
Commit: f412d8f0c7
18 changed files with 439 additions and 277 deletions

View File (Express server template)

@@ -14,7 +14,7 @@ import { createContext } from "./lib/context";
import cors from "cors";
import express from "express";
{{#if (includes examples "ai")}}
-import { streamText } from "ai";
+import { streamText, type UIMessage, convertToModelMessages } from "ai";
import { google } from "@ai-sdk/google";
{{/if}}
{{#if auth}}
@@ -44,16 +44,16 @@ app.use(
"/trpc",
createExpressMiddleware({
router: appRouter,
-createContext
+createContext,
})
);
{{/if}}
{{#if (eq api "orpc")}}
const handler = new RPCHandler(appRouter);
-app.use('/rpc{*path}', async (req, res, next) => {
+app.use("/rpc{*path}", async (req, res, next) => {
const { matched } = await handler.handle(req, res, {
-prefix: '/rpc',
+prefix: "/rpc",
{{#if auth}}
context: await createContext({ req }),
{{else}}
@@ -65,16 +65,16 @@ app.use('/rpc{*path}', async (req, res, next) => {
});
{{/if}}
-app.use(express.json())
+app.use(express.json());
{{#if (includes examples "ai")}}
app.post("/ai", async (req, res) => {
-const { messages = [] } = req.body || {};
+const { messages = [] } = (req.body || {}) as { messages: UIMessage[] };
const result = streamText({
model: google("gemini-1.5-flash"),
-messages,
+messages: convertToModelMessages(messages),
});
-result.pipeDataStreamToResponse(res);
+result.pipeUIMessageStreamToResponse(res);
});
{{/if}}
@@ -85,4 +85,4 @@ app.get("/", (_req, res) => {
const port = process.env.PORT || 3000;
app.listen(port, () => {
console.log(`Server is running on port ${port}`);
-});
+});
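
Note on the /ai changes above: the following is a minimal sketch of the resulting Express route with the template conditionals resolved, assuming the v5 "ai" and "@ai-sdk/google" packages are installed and GOOGLE_GENERATIVE_AI_API_KEY is set. In v5 the client posts UIMessage[], the server converts them with convertToModelMessages before streaming, and pipeDataStreamToResponse is replaced by pipeUIMessageStreamToResponse.

    import express from "express";
    import { streamText, convertToModelMessages, type UIMessage } from "ai";
    import { google } from "@ai-sdk/google";

    const app = express();
    app.use(express.json());

    app.post("/ai", async (req, res) => {
      // v5 clients send UIMessage[]; convert before handing them to the model.
      const { messages = [] } = (req.body || {}) as { messages: UIMessage[] };
      const result = streamText({
        model: google("gemini-1.5-flash"),
        messages: convertToModelMessages(messages),
      });
      // v5 renames the Node piping helper to pipeUIMessageStreamToResponse.
      result.pipeUIMessageStreamToResponse(res);
    });

    app.listen(3000);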

View File (Fastify server template)

@@ -19,8 +19,7 @@ import { createContext } from "./lib/context";
{{/if}}
{{#if (includes examples "ai")}}
-import type { FastifyRequest, FastifyReply } from "fastify";
-import { streamText, type Message } from "ai";
+import { streamText, type UIMessage, convertToModelMessages } from "ai";
import { google } from "@ai-sdk/google";
{{/if}}
@@ -99,7 +98,7 @@ fastify.route({
response.headers.forEach((value, key) => reply.header(key, value));
reply.send(response.body ? await response.text() : null);
} catch (error) {
fastify.log.error("Authentication Error:", error);
fastify.log.error({ err: error }, "Authentication Error:");
reply.status(500).send({
error: "Internal authentication error",
code: "AUTH_FAILURE"
@@ -125,26 +124,24 @@ fastify.register(fastifyTRPCPlugin, {
{{#if (includes examples "ai")}}
interface AiRequestBody {
id?: string;
-messages: Message[];
+messages: UIMessage[];
}
fastify.post('/ai', async function (request, reply) {
// there are some issues with the ai sdk and fastify, docs: https://ai-sdk.dev/cookbook/api-servers/fastify
const { messages } = request.body as AiRequestBody;
const result = streamText({
model: google('gemini-1.5-flash'),
-messages,
+messages: convertToModelMessages(messages),
});
-reply.header('X-Vercel-AI-Data-Stream', 'v1');
-reply.header('Content-Type', 'text/plain; charset=utf-8');
-return reply.send(result.toDataStream());
+return result.pipeUIMessageStreamToResponse(reply.raw);
});
{{/if}}
fastify.get('/', async () => {
-return 'OK'
-})
+return 'OK';
+});
fastify.listen({ port: 3000 }, (err) => {
if (err) {
@@ -152,4 +149,4 @@ fastify.listen({ port: 3000 }, (err) => {
process.exit(1);
}
console.log("Server running on port 3000");
-});
+});
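
Note on the /ai changes above: a minimal sketch of the resulting Fastify route with the template conditionals resolved, assuming the v5 "ai" and "@ai-sdk/google" packages. pipeUIMessageStreamToResponse writes directly to the raw Node response, bypassing Fastify's own serialization, which is why the v4 X-Vercel-AI-Data-Stream and Content-Type headers are dropped.

    import Fastify from "fastify";
    import { streamText, convertToModelMessages, type UIMessage } from "ai";
    import { google } from "@ai-sdk/google";

    const fastify = Fastify({ logger: true });

    fastify.post("/ai", async (request, reply) => {
      const { messages } = request.body as { messages: UIMessage[] };
      const result = streamText({
        model: google("gemini-1.5-flash"),
        messages: convertToModelMessages(messages),
      });
      // Write the UI message stream directly to the underlying Node response.
      return result.pipeUIMessageStreamToResponse(reply.raw);
    });

    fastify.listen({ port: 3000 });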

View File (Hono server template)

@@ -21,32 +21,33 @@ import { Hono } from "hono";
import { cors } from "hono/cors";
import { logger } from "hono/logger";
{{#if (and (includes examples "ai") (or (eq runtime "bun") (eq runtime "node")))}}
-import { streamText } from "ai";
+import { streamText, convertToModelMessages } from "ai";
import { google } from "@ai-sdk/google";
-import { stream } from "hono/streaming";
{{/if}}
{{#if (and (includes examples "ai") (eq runtime "workers"))}}
-import { streamText } from "ai";
-import { stream } from "hono/streaming";
+import { streamText, convertToModelMessages } from "ai";
import { createGoogleGenerativeAI } from "@ai-sdk/google";
{{/if}}
const app = new Hono();
app.use(logger());
app.use("/*", cors({
{{#if (or (eq runtime "bun") (eq runtime "node"))}}
origin: process.env.CORS_ORIGIN || "",
{{/if}}
{{#if (eq runtime "workers")}}
origin: env.CORS_ORIGIN || "",
{{/if}}
allowMethods: ["GET", "POST", "OPTIONS"],
{{#if auth}}
allowHeaders: ["Content-Type", "Authorization"],
credentials: true,
{{/if}}
}));
app.use(
"/*",
cors({
{{#if (or (eq runtime "bun") (eq runtime "node"))}}
origin: process.env.CORS_ORIGIN || "",
{{/if}}
{{#if (eq runtime "workers")}}
origin: env.CORS_ORIGIN || "",
{{/if}}
allowMethods: ["GET", "POST", "OPTIONS"],
{{#if auth}}
allowHeaders: ["Content-Type", "Authorization"],
credentials: true,
{{/if}}
})
);
{{#if auth}}
app.on(["POST", "GET"], "/api/auth/**", (c) => auth.handler(c.req.raw));
@@ -69,44 +70,43 @@ app.use("/rpc/*", async (c, next) => {
{{/if}}
{{#if (eq api "trpc")}}
app.use("/trpc/*", trpcServer({
router: appRouter,
createContext: (_opts, context) => {
return createContext({ context });
},
}));
app.use(
"/trpc/*",
trpcServer({
router: appRouter,
createContext: (_opts, context) => {
return createContext({ context });
},
})
);
{{/if}}
{{#if (and (includes examples "ai") (or (eq runtime "bun") (eq runtime "node")))}}
app.post("/ai", async (c) => {
const body = await c.req.json();
-const messages = body.messages || [];
+const uiMessages = body.messages || [];
const result = streamText({
model: google("gemini-1.5-flash"),
-messages,
+messages: convertToModelMessages(uiMessages),
});
c.header("X-Vercel-AI-Data-Stream", "v1");
c.header("Content-Type", "text/plain; charset=utf-8");
return stream(c, (stream) => stream.pipe(result.toDataStream()));
return result.toUIMessageStreamResponse();
});
{{/if}}
{{#if (and (includes examples "ai") (eq runtime "workers"))}}
app.post("/ai", async (c) => {
const body = await c.req.json();
-const messages = body.messages || [];
+const uiMessages = body.messages || [];
const google = createGoogleGenerativeAI({
apiKey: env.GOOGLE_GENERATIVE_AI_API_KEY,
});
const result = streamText({
model: google("gemini-1.5-flash"),
-messages,
+messages: convertToModelMessages(uiMessages),
});
c.header("X-Vercel-AI-Data-Stream", "v1");
c.header("Content-Type", "text/plain; charset=utf-8");
return stream(c, (stream) => stream.pipe(result.toDataStream()));
return result.toUIMessageStreamResponse();
});
{{/if}}
@@ -117,17 +117,20 @@ app.get("/", (c) => {
{{#if (eq runtime "node")}}
import { serve } from "@hono/node-server";
-serve({
-fetch: app.fetch,
-port: 3000,
-}, (info) => {
-console.log(`Server is running on http://localhost:${info.port}`);
-});
+serve(
+{
+fetch: app.fetch,
+port: 3000,
+},
+(info) => {
+console.log(`Server is running on http://localhost:${info.port}`);
+}
+);
{{else}}
{{#if (eq runtime "bun")}}
{{#if (eq runtime "bun")}}
export default app;
{{/if}}
{{#if (eq runtime "workers")}}
export default app;
{{/if}}
{{/if}}
{{#if (eq runtime "workers")}}
export default app;
{{/if}}
{{/if}}
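
Note on the /ai changes above: a minimal sketch of the resulting Hono route (bun/node branch) with the template conditionals resolved, assuming the v5 "ai" and "@ai-sdk/google" packages. Since toUIMessageStreamResponse() returns a standard web Response, the hono/streaming helper and the manual v4 headers are no longer needed.

    import { Hono } from "hono";
    import { streamText, convertToModelMessages, type UIMessage } from "ai";
    import { google } from "@ai-sdk/google";

    const app = new Hono();

    app.post("/ai", async (c) => {
      const body = await c.req.json();
      const uiMessages: UIMessage[] = body.messages || [];
      const result = streamText({
        model: google("gemini-1.5-flash"),
        messages: convertToModelMessages(uiMessages),
      });
      // v5 hands back a web Response directly; no stream() wrapper or manual
      // X-Vercel-AI-Data-Stream header required.
      return result.toUIMessageStreamResponse();
    });

    export default app;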