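{{!--
  Hono server entry template. Handlebars conditionals select what gets emitted:
  env loading (dotenv or cloudflare:workers), CORS, the auth handler route,
  an oRPC or tRPC mount, an optional Google AI streaming route, and the
  runtime-specific bootstrap (Node serve vs. a default export for Bun/Workers).
--}}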
{{#if (or (eq runtime "bun") (eq runtime "node"))}}
import "dotenv/config";
{{/if}}
{{#if (eq runtime "workers")}}
import { env } from "cloudflare:workers";
{{/if}}
{{#if (eq api "orpc")}}
import { RPCHandler } from "@orpc/server/fetch";
import { createContext } from "./lib/context";
import { appRouter } from "./routers/index";
{{/if}}
{{#if (eq api "trpc")}}
import { trpcServer } from "@hono/trpc-server";
import { createContext } from "./lib/context";
import { appRouter } from "./routers/index";
{{/if}}
{{#if auth}}
import { auth } from "./lib/auth";
{{/if}}
import { Hono } from "hono";
import { cors } from "hono/cors";
import { logger } from "hono/logger";
{{#if (and (includes examples "ai") (or (eq runtime "bun") (eq runtime "node")))}}
import { streamText } from "ai";
import { google } from "@ai-sdk/google";
import { stream } from "hono/streaming";
{{/if}}
{{#if (and (includes examples "ai") (eq runtime "workers"))}}
import { streamText } from "ai";
import { stream } from "hono/streaming";
import { createGoogleGenerativeAI } from "@ai-sdk/google";
{{/if}}

const app = new Hono();

app.use(logger());
app.use("/*", cors({
  {{#if (or (eq runtime "bun") (eq runtime "node"))}}
  origin: process.env.CORS_ORIGIN || "",
  {{/if}}
  {{#if (eq runtime "workers")}}
  origin: env.CORS_ORIGIN || "",
  {{/if}}
  allowMethods: ["GET", "POST", "OPTIONS"],
  {{#if auth}}
  allowHeaders: ["Content-Type", "Authorization"],
  credentials: true,
  {{/if}}
}));

{{#if auth}}
app.on(["POST", "GET"], "/api/auth/**", (c) => auth.handler(c.req.raw));
{{/if}}

{{#if (eq api "orpc")}}
const handler = new RPCHandler(appRouter);
app.use("/rpc/*", async (c, next) => {
  const context = await createContext({ context: c });
  const { matched, response } = await handler.handle(c.req.raw, {
    prefix: "/rpc",
    context: context,
  });

  if (matched) {
    return c.newResponse(response.body, response);
  }
  await next();
});
{{/if}}

{{#if (eq api "trpc")}}
app.use("/trpc/*", trpcServer({
  router: appRouter,
  createContext: (_opts, context) => {
    return createContext({ context });
  },
}));
{{/if}}

{{#if (and (includes examples "ai") (or (eq runtime "bun") (eq runtime "node")))}}
app.post("/ai", async (c) => {
  const body = await c.req.json();
  const messages = body.messages || [];
  const result = streamText({
    model: google("gemini-1.5-flash"),
    messages,
  });

  c.header("X-Vercel-AI-Data-Stream", "v1");
  c.header("Content-Type", "text/plain; charset=utf-8");
  return stream(c, (stream) => stream.pipe(result.toDataStream()));
});
{{/if}}

{{#if (and (includes examples "ai") (eq runtime "workers"))}}
app.post("/ai", async (c) => {
  const body = await c.req.json();
  const messages = body.messages || [];
  const google = createGoogleGenerativeAI({
    apiKey: env.GOOGLE_GENERATIVE_AI_API_KEY,
  });
  const result = streamText({
    model: google("gemini-1.5-flash"),
    messages,
  });

  c.header("X-Vercel-AI-Data-Stream", "v1");
  c.header("Content-Type", "text/plain; charset=utf-8");
  return stream(c, (stream) => stream.pipe(result.toDataStream()));
});
{{/if}}

app.get("/", (c) => {
  return c.text("OK");
});

{{#if (eq runtime "node")}}
import { serve } from "@hono/node-server";

serve({
  fetch: app.fetch,
  port: 3000,
}, (info) => {
  console.log(`Server is running on http://localhost:${info.port}`);
});
{{else}}
{{#if (eq runtime "bun")}}
export default app;
{{/if}}
{{#if (eq runtime "workers")}}
export default app;
{{/if}}
{{/if}}