feat: allow user-configurable AI model selection with server-side validation

This commit is contained in:
2025-09-10 20:10:48 -03:00
parent 86f511cd8e
commit f4e996d753
10 changed files with 251 additions and 9 deletions

View File

@@ -7,6 +7,7 @@ import { env } from "hono/adapter";
import { cors } from "hono/cors";
import { logger as honoLogger } from "hono/logger";
import { createContext } from "./lib/context";
import { sanitizeModel } from "./lib/ai-models";
import { logger } from "./lib/logger";
import { appRouter } from "./routers/index";
@@ -30,7 +31,7 @@ app.use(
cors({
origin: process.env.CORS_ORIGIN || "",
allowMethods: ["GET", "POST", "OPTIONS"],
allowHeaders: ["Content-Type", "Authorization"],
allowHeaders: ["Content-Type", "Authorization", "x-ai-model"],
credentials: true,
})
);
@@ -91,8 +92,9 @@ app.post("/ai/ocr", async (c) => {
const base64 = arrayBufferToBase64(await blob.arrayBuffer());
const dataUrl = `data:${contentType};base64,${base64}`;
const model = sanitizeModel(c.req.header("x-ai-model"));
const body = {
model: "openrouter/sonoma-sky-alpha",
model,
messages: [
{
role: "system",
@@ -161,8 +163,9 @@ app.post("/ai/generate", async (c) => {
return c.json({ error: "Missing prompt" }, 400);
}
const modelGen = sanitizeModel(c.req.header("x-ai-model"));
const body = {
model: "openrouter/sonoma-sky-alpha",
model: modelGen,
temperature,
messages: [
{ role: "system", content: system },

View File

@@ -0,0 +1,19 @@
// Allow-list of AI models a client may select via the "x-ai-model" header.
// NOTE: the previous `readonly string[]` annotation widened the element types
// and made the trailing `as const` a no-op; letting `as const` infer keeps
// each model id as a literal type.
export const ALLOWED_MODELS = [
  "openrouter/sonoma-sky-alpha",
  "deepseek/deepseek-chat-v3.1:free",
  "openrouter/sonoma-dusk-alpha",
  "deepseek/deepseek-chat-v3-0324:free",
  "mistralai/mistral-small-3.2-24b-instruct:free",
  "meta-llama/llama-4-maverick:free",
  "qwen/qwen2.5-vl-72b-instruct:free",
] as const;

/** Union of every allow-listed model id. */
export type AllowedModel = (typeof ALLOWED_MODELS)[number];

/** Default model to fall back to when input is invalid or missing. */
export const DEFAULT_MODEL: AllowedModel = ALLOWED_MODELS[0];

/**
 * Validate a client-supplied model id against the allow-list.
 *
 * @param input - Raw header value; may be absent, null, or padded with whitespace.
 * @returns The trimmed id when it is allow-listed, otherwise DEFAULT_MODEL.
 */
export function sanitizeModel(input?: string | null): string {
  const candidate = (input ?? "").trim();
  // Widen back to readonly string[] so `includes` accepts an arbitrary string
  // (the tuple's `includes` parameter is otherwise the literal union).
  return (ALLOWED_MODELS as readonly string[]).includes(candidate)
    ? candidate
    : DEFAULT_MODEL;
}

View File

@@ -1,5 +1,6 @@
import type { Context as HonoContext } from "hono";
import { Account, Client } from "node-appwrite";
import { sanitizeModel } from "./ai-models";
// Hoisted regex for performance and linting
const BEARER_REGEX = /^Bearer\s+(.+)$/i;
@@ -16,12 +17,16 @@ export type CreateContextOptions = {
};
export async function createContext({ context }: CreateContextOptions) {
// Capture selected AI model from client header (optional)
const aiModelHeader = context.req.header("x-ai-model");
const aiModel = sanitizeModel(aiModelHeader);
const endpoint = process.env.APPWRITE_ENDPOINT;
const projectId = process.env.APPWRITE_PROJECT_ID;
if (!(endpoint && projectId)) {
// Appwrite not configured; treat as unauthenticated
return { user: null as AuthUser };
return { user: null as AuthUser, aiModel };
}
// Initialize client per request
@@ -68,9 +73,9 @@ export async function createContext({ context }: CreateContextOptions) {
name: user.name ?? null,
email: user.email ?? null,
};
return { user: minimal };
return { user: minimal, aiModel };
} catch {
return { user: null as AuthUser };
return { user: null as AuthUser, aiModel };
}
}

View File

@@ -1,13 +1,21 @@
import { protectedProcedure, publicProcedure, router } from "../lib/trpc";
import { ALLOWED_MODELS, DEFAULT_MODEL } from "../lib/ai-models";
export const appRouter = router({
  // Unauthenticated liveness probe.
  healthCheck: publicProcedure.query(() => "OK"),
  // Publish the model allow-list so clients can render a model picker
  // without hard-coding ids on the frontend.
  allowedModels: publicProcedure.query(
    () =>
      ({
        models: ALLOWED_MODELS,
        defaultModel: DEFAULT_MODEL,
      }) as const
  ),
  // Requires an authenticated session; echoes the caller's identity and
  // the AI model resolved from the request context.
  privateData: protectedProcedure.query(({ ctx }) => ({
    message: "This is private",
    user: ctx.user,
    aiModel: ctx.aiModel ?? null,
  })),
});