Mirror of https://github.com/FranP-code/Reflecto.git (synced 2025-10-13 00:43:31 +00:00)
feat: add AI text generation with prompt shape and temperature control
@@ -128,6 +128,64 @@ app.post("/ai/ocr", async (c) => {
  return c.json({ text });
});

// Text generation via OpenRouter (same model as OCR, text-only)
app.post("/ai/generate", async (c) => {
  const { OPENROUTER_API_KEY, OPENROUTER_SITE_URL, OPENROUTER_SITE_NAME } =
    env<{
      OPENROUTER_API_KEY?: string;
      OPENROUTER_SITE_URL?: string;
      OPENROUTER_SITE_NAME?: string;
    }>(c);
  if (!OPENROUTER_API_KEY) {
    return c.json({ error: "Missing OPENROUTER_API_KEY" }, 500);
  }

  type Req = { prompt?: string; system?: string; temperature?: number };
  let bodyJson: Req | null = null;
  try {
    bodyJson = await c.req.json<Req>();
  } catch {
    // ignore and treat as empty
  }
  const prompt = bodyJson?.prompt ?? "";
  const baseSystem =
    "You are a concise assistant. Respond with a brief, plain text answer. Do not use markdown, lists, headings, or code fences. Keep it under ~150 words and avoid extra commentary.";
  const system = bodyJson?.system
    ? `${bodyJson.system}\n\nRules: Respond concisely in plain text only. No markdown, lists, headings, or code fences. Keep it under ~150 words.`
    : baseSystem;
  const temperature = Number.isFinite(bodyJson?.temperature)
    ? (bodyJson?.temperature as number)
    : 0.7;

  if (!prompt.trim()) {
    return c.json({ error: "Missing prompt" }, 400);
  }

  const body = {
    model: "openrouter/sonoma-sky-alpha",
    temperature,
    messages: [
      { role: "system", content: system },
      { role: "user", content: prompt },
    ],
  };

  const res = await openRouterChat(OPENROUTER_API_KEY as string, body, {
    referer: OPENROUTER_SITE_URL,
    title: OPENROUTER_SITE_NAME,
  });
  if (!res.ok) {
    const err = await res.text();
    return c.json(
      { error: `OpenRouter error ${res.status}`, details: err },
      502
    );
  }
  const json = (await res.json()) as any;
  const text: string = json?.choices?.[0]?.message?.content ?? "";
  return c.json({ text });
});

app.get("/", (c) => {
  return c.text("OK");
});
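For reference, the new endpoint accepts a JSON body of { prompt, system?, temperature? } and responds with { text }. A minimal client-side sketch of calling it directly (the base URL is an assumption; the generateText helper added to processing.ts further down wraps the same request):

// Sketch only: calling POST /ai/generate directly. API_BASE is an assumed dev URL.
const API_BASE = "http://localhost:3000";

async function callGenerate(prompt: string, temperature = 0.7): Promise<string> {
  const res = await fetch(`${API_BASE}/ai/generate`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt, temperature }),
  });
  if (!res.ok) throw new Error(`Generate request failed: ${res.status}`);
  const { text } = (await res.json()) as { text: string };
  return text;
}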
@@ -23,12 +23,166 @@ export type AIImageShape = TLBaseShape<
    h: number;
  }
>;

// New: AI Prompt shape
export type AIPromptShape = TLBaseShape<
  "ai-prompt",
  {
    prompt: string;
    status: "idle" | "processing" | "completed" | "error";
    createdDate: number;
    w: number;
    h: number;
    temperature: number;
  }
>;

export class AIPromptShapeUtil extends ShapeUtil<AIPromptShape> {
  static override type = "ai-prompt" as const;

  static override props: RecordProps<AIPromptShape> = {
    prompt: T.string,
    status: T.literalEnum("idle", "processing", "completed", "error"),
    createdDate: T.number,
    w: T.number,
    h: T.number,
    temperature: T.number,
  };

  getDefaultProps(): AIPromptShape["props"] {
    return {
      prompt: "",
      status: "idle",
      createdDate: Date.now(),
      w: 320,
      h: 180,
      temperature: 0.7,
    };
  }

  override canEdit() {
    return true;
  }
  override canResize() {
    return true;
  }
  override onResize(shape: AIPromptShape, info: TLResizeInfo<AIPromptShape>) {
    return resizeBox(shape, info);
  }

  getGeometry(shape: AIPromptShape) {
    return new Rectangle2d({ width: shape.props.w, height: shape.props.h, isFilled: true });
  }

  component(shape: AIPromptShape) {
    const isEditing = this.editor.getEditingShapeId() === shape.id;
    const [temp, setTemp] = useState(shape.props.temperature);
    useEffect(() => setTemp(shape.props.temperature), [shape.props.temperature]);

    const triggerGenerate = () => {
      try {
        (window as any).__aiGenerate?.(shape.id);
      } catch {}
    };

    return (
      <HTMLContainer id={shape.id} style={{ pointerEvents: "all" }}>
        <div
          style={{
            width: "100%",
            height: "100%",
            border: "2px solid #6f42c1",
            borderRadius: 8,
            padding: 12,
            backgroundColor: "#fff",
            display: "flex",
            flexDirection: "column",
            gap: 8,
          }}
        >
          <div style={{ display: "flex", alignItems: "center", justifyContent: "space-between" }}>
            <span style={{ fontWeight: 700, color: "#6f42c1" }}>✨ AI Prompt</span>
            <span
              style={{
                fontSize: 12,
                fontWeight: 700,
                color:
                  shape.props.status === "processing"
                    ? "#b08500"
                    : shape.props.status === "completed"
                      ? "#0b6b3a"
                      : shape.props.status === "error"
                        ? "#a4000f"
                        : "#495057",
              }}
            >
              {shape.props.status}
            </span>
          </div>
          <textarea
            value={shape.props.prompt}
            onChange={(e) =>
              this.editor.updateShape({ id: shape.id, type: "ai-prompt", props: { prompt: e.target.value } })
            }
            placeholder="Write your prompt..."
            style={{
              flex: "1 1 0%",
              minHeight: 0,
              resize: "none",
              width: "100%",
              border: "1px solid #ced4da",
              borderRadius: 6,
              padding: 8,
              lineHeight: 1.4,
              color: "#212529",
              outline: "none",
            }}
          />
          <div style={{ display: "flex", alignItems: "center", gap: 8 }}>
            <label style={{ fontSize: 12, color: "#495057" }}>Temp</label>
            <input
              type="range"
              min={0}
              max={1}
              step={0.05}
              value={temp}
              onChange={(e) => setTemp(parseFloat(e.target.value))}
              onMouseUp={() =>
                this.editor.updateShape({ id: shape.id, type: "ai-prompt", props: { temperature: temp } })
              }
              style={{ flex: 1 }}
            />
            <button
              type="button"
              onClick={triggerGenerate}
              disabled={shape.props.status === "processing" || !shape.props.prompt.trim()}
              style={{
                padding: "6px 10px",
                backgroundColor: "#6f42c1",
                color: "#fff",
                border: 0,
                borderRadius: 6,
                cursor: shape.props.status === "processing" || !shape.props.prompt.trim() ? "not-allowed" : "pointer",
                fontWeight: 700,
              }}
            >
              Generate
            </button>
          </div>
        </div>
      </HTMLContainer>
    );
  }

  indicator(shape: AIPromptShape) {
    return <rect height={shape.props.h} width={shape.props.w} />;
  }
}
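The new AIPromptShapeUtil only takes effect once it is registered with the editor. A minimal sketch of how it might be passed to <Tldraw> alongside the existing utils (the wrapper component and the "tldraw" import specifier are assumptions; the actual registration site lives in the /space route):

// Sketch only: registering the custom shape utils with tldraw.
// The "tldraw" import specifier and the Canvas wrapper are assumptions.
import { Tldraw } from "tldraw";
import {
  AIImageShapeUtil,
  AIPromptShapeUtil,
  AITextResultShapeUtil,
} from "@/lib/tldraw/ai-shapes";

const customShapeUtils = [AIImageShapeUtil, AIPromptShapeUtil, AITextResultShapeUtil];

export function Canvas() {
  // Every custom shape type that can appear on the canvas needs its util listed here.
  return <Tldraw shapeUtils={customShapeUtils} />;
}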
export type AITextResultShape = TLBaseShape<
  "ai-text-result",
  {
    content: string;
    sourceType: "image" | "analysis";
    sourceType: "image" | "analysis" | "prompt";
    sourceShapeId: string;
    createdDate: number;
    w: number;

@@ -155,7 +309,7 @@ export class AITextResultShapeUtil extends ShapeUtil<AITextResultShape> {

  static override props: RecordProps<AITextResultShape> = {
    content: T.string,
    sourceType: T.literalEnum("image", "analysis"),
    sourceType: T.literalEnum("image", "analysis", "prompt"),
    sourceShapeId: T.string,
    createdDate: T.number,
    w: T.number,
@@ -199,54 +353,101 @@ export class AITextResultShapeUtil extends ShapeUtil<AITextResultShape> {
    const textareaRef = useRef<HTMLTextAreaElement | null>(null);
    const headerRef = useRef<HTMLDivElement | null>(null);
    const contentRef = useRef<HTMLDivElement | null>(null);
    const rootRef = useRef<HTMLDivElement | null>(null);

    useEffect(() => {
      if (isEditing && textareaRef.current) textareaRef.current.focus();
    }, [isEditing]);

    // Auto-size to content: measure and adjust w/h
    // Ensure textarea grows with content while editing
    useEffect(() => {
      const headerEl = headerRef.current;
      const contentEl = isEditing ? (textareaRef.current as HTMLElement | null) : contentRef.current;

      console.log({
        headerEl,
        contentEl
      })
      if (!headerEl || !contentEl) return;
      if (!isEditing) return;
      const ta = textareaRef.current;
      if (!ta) return;
      // Reset then set to scrollHeight for accurate sizing
      ta.style.height = "auto";
      ta.style.height = `${ta.scrollHeight}px`;
    }, [isEditing, shape.props.content]);

      // Measure desired sizes
      const headerH = Math.ceil(headerEl.getBoundingClientRect().height);
      const contentH = Math.ceil((contentEl as HTMLElement).scrollHeight);
      const contentW = Math.ceil((contentEl as HTMLElement).scrollWidth);
    // Auto-size to content: measure and adjust height once per content/editing change
    const autosizeAppliedRef = useRef<{ content: string; editing: boolean } | null>(null);
    const autosizePassRef = useRef(0);
    useEffect(() => {
      const prev = autosizeAppliedRef.current;
      if (prev && prev.content === shape.props.content && prev.editing === isEditing) return;

      if (!contentH || !contentW) return;
      const rootEl = rootRef.current;
      if (!rootEl) return;

      const padding = 24; // root padding 12 top + 12 bottom
      const marginBetween = 8; // header bottom margin
      // Defer measurement to ensure layout is final
      const measureAndUpdate = () => {
        const minH = 100;
        // If editing, size to textarea content + header/padding precisely
        if (isEditing && textareaRef.current && headerRef.current) {
          const ta = textareaRef.current as HTMLTextAreaElement;
          const headerH = Math.ceil(headerRef.current.getBoundingClientRect().height);
          // Ensure textarea is sized to its content before measuring
          const prevTaHeight = ta.style.height;
          ta.style.height = "auto";
          const taH = Math.ceil(ta.scrollHeight);
          ta.style.height = prevTaHeight || `${taH}px`;
          const padding = 24; // 12 top + 12 bottom
          const marginBetween = 8; // header bottom margin
          const headerDivider = 1; // 1px border-bottom
          const borderY = 4; // 2px top + 2px bottom on card
          const desiredH = Math.max(minH, headerH + marginBetween + taH + padding + headerDivider + borderY);
          const dh = Math.abs(desiredH - shape.props.h);
          if (dh > 2) {
            this.editor.updateShape({ id: shape.id, type: "ai-text-result", props: { h: desiredH } });
            return true;
          }
          return false;
        }

        const minW = 220;
        const minH = 120;
        // Non-editing: measure the entire card's natural height at the current width
        const prevWidth = rootEl.style.width;
        const prevHeight = rootEl.style.height;
        rootEl.style.width = `${Math.max(120, Math.floor(shape.props.w))}px`;
        rootEl.style.height = "auto"; // let it expand naturally for measurement
        const cardH = Math.ceil(rootEl.scrollHeight);
        // restore styles
        rootEl.style.width = prevWidth;
        rootEl.style.height = prevHeight;

        const desiredW = Math.max(minW, contentW + padding);
        const desiredH = Math.max(minH, headerH + marginBetween + contentH + padding);
        const finalH = Math.max(minH, cardH);
        const dh = Math.abs(finalH - shape.props.h);
        if (dh > 2) {
          this.editor.updateShape({ id: shape.id, type: "ai-text-result", props: { h: finalH } });
          return true;
        }
        return false;
      };

      // Avoid tight loops: only update if significant delta
      const dw = Math.abs(desiredW - shape.props.w);
      const dh = Math.abs(desiredH - shape.props.h);
      if (dw > 1 || dh > 1) {
        this.editor.updateShape({
          id: shape.id,
          type: "ai-text-result",
          props: { w: desiredW, h: desiredH },
        });
      }
    }, [isEditing, shape.id, shape.props.content, shape.props.w, shape.props.h]);
      const raf1 = requestAnimationFrame(() => {
        const updated = measureAndUpdate();
        if (updated && autosizePassRef.current < 1) {
          autosizePassRef.current += 1;
          requestAnimationFrame(() => {
            measureAndUpdate();
            autosizeAppliedRef.current = { content: shape.props.content, editing: isEditing };
            autosizePassRef.current = 0;
          });
        } else {
          autosizeAppliedRef.current = { content: shape.props.content, editing: isEditing };
          autosizePassRef.current = 0;
        }
      });
      return () => {
        cancelAnimationFrame(raf1);
      };
    }, [isEditing, shape.id, shape.props.content]);
    const header =
      shape.props.sourceType === "image"
        ? "📄 OCR Result"
        : "🧠 AI Analysis";
        : shape.props.sourceType === "prompt"
          ? "✨ AI Generation"
          : "🧠 AI Analysis";
    return (
      <HTMLContainer
@@ -255,6 +456,7 @@ contentEl
        style={{ pointerEvents: isEditing ? "all" : "none" }}
      >
        <div
          ref={rootRef}
          style={{
            width: "100%",
            height: "100%",
@@ -293,10 +495,16 @@ contentEl
                props: { content: e.target.value },
              })
            }
            onInput={(e) => {
              const ta = e.currentTarget;
              ta.style.height = "auto";
              ta.style.height = `${ta.scrollHeight}px`;
            }}
            aria-label="Edit extracted text"
            style={{
              flex: "1 1 0%",
              flex: "0 0 auto",
              minHeight: 0,
              height: "auto",
              resize: "none",
              width: "100%",
              border: "1px solid #ced4da",
@@ -313,7 +521,7 @@ contentEl
          <div
            ref={contentRef}
            style={{
              flex: "1 1 0%",
              flex: "0 0 auto",
              minHeight: 0,
              lineHeight: 1.4,
              color: "#495057",
@@ -338,8 +546,8 @@ contentEl
export function createAITextResult(
  editor: any,
  opts: {
    fromShapeId: string;
    sourceType: "image" | "analysis";
    fromShapeId: string | null;
    sourceType: "image" | "analysis" | "prompt";
    content: string;
    x: number;
    y: number;
@@ -347,10 +555,9 @@ export function createAITextResult(
) {
  const id = createShapeId();
  const baseW = 280;
  const baseH = 150;
  const extra = Math.min(600, Math.floor(opts.content.length / 6));
  const dynW = Math.min(640, baseW + Math.floor(extra * 0.6));
  const dynH = Math.min(480, baseH + Math.floor(extra * 0.4));
  const baseH = 100;
  const dynW = baseW;
  const dynH = baseH;
  editor.createShape({
    id,
    type: "ai-text-result",
@@ -359,7 +566,7 @@ export function createAITextResult(
    props: {
      content: opts.content,
      sourceType: opts.sourceType,
      sourceShapeId: opts.fromShapeId,
      sourceShapeId: opts.fromShapeId ?? "",
      createdDate: Date.now(),
      w: dynW,
      h: dynH,
@@ -16,6 +16,60 @@ async function postBinary(url: string, file: File): Promise<{ text: string }> {
  return res.json() as Promise<{ text: string }>;
}

async function postJson<TReq extends object, TRes>(url: string, body: TReq): Promise<TRes> {
  const res = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`Request failed: ${res.status}`);
  return res.json() as Promise<TRes>;
}

export async function generateText(prompt: string, temperature: number = 0.7): Promise<string> {
  const { text } = await postJson<{ prompt: string; temperature: number }, { text: string }>(
    `${API_BASE}/ai/generate`,
    { prompt, temperature }
  );
  return text;
}

export async function processPromptGeneration(
  editor: any,
  promptShapeId: string
): Promise<void> {
  const shape = editor.getShape(promptShapeId);
  if (!shape || shape.type !== "ai-prompt") return;
  const prompt: string = shape.props.prompt ?? "";
  const temperature: number = Number(shape.props.temperature ?? 0.7) || 0.7;
  editor.updateShape({ id: promptShapeId, type: "ai-prompt", props: { status: "processing" } });
  try {
    const { text } = await postJson<{ prompt: string; temperature: number }, { text: string }>(
      `${API_BASE}/ai/generate`,
      { prompt, temperature }
    );

    const pShape = editor.getShape(promptShapeId);
    const pW = Math.max(1, pShape?.props?.w ?? 320);
    const gap = Math.min(80, Math.max(24, Math.floor(pW * 0.12)));
    const textShapeId = createAITextResult(editor, {
      fromShapeId: promptShapeId,
      sourceType: "prompt",
      content: text,
      x: pShape.x + pW + gap,
      y: pShape.y,
    });

    const kgManager = new KnowledgeGraphManager(editor);
    kgManager.createConnection(promptShapeId, textShapeId, "generates");
    await kgManager.analyzeConnections(textShapeId);

    editor.updateShape({ id: promptShapeId, type: "ai-prompt", props: { status: "completed" } });
  } catch (e) {
    editor.updateShape({ id: promptShapeId, type: "ai-prompt", props: { status: "error" } });
  }
}

export async function processImageWithOCR(
  editor: any,
  shapeId: string,
@@ -139,3 +193,35 @@ export async function createImageShapeFromFile(
  });
  await processImageWithOCR(editor, shapeId, file);
}

export async function createPromptShape(
  editor: any,
  position: { x: number; y: number }
) {
  const shapeId = createShapeId();
  editor.createShape({
    id: shapeId,
    type: "ai-prompt",
    x: position.x,
    y: position.y,
    props: {
      prompt: "",
      status: "idle",
      createdDate: Date.now(),
      w: 320,
      h: 180,
      temperature: 0.7,
    },
  });
  return shapeId;
}

export function registerPromptGenerator(editor: any) {
  (window as any).__aiGenerate = async (shapeId: string) => {
    try {
      await processPromptGeneration(editor, shapeId);
    } catch {
      // status updates happen inside
    }
  };
}
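A minimal sketch of how these helpers could be wired together when the editor mounts (this wiring is an assumption; the /space route below triggers generation through a toolbar dialog instead):

// Sketch only: hook up the prompt helpers on editor mount.
import { createPromptShape, registerPromptGenerator } from "@/lib/tldraw/processing";

function onEditorMount(editor: any) {
  // Lets the AI Prompt shape's Generate button reach processPromptGeneration via window.__aiGenerate.
  registerPromptGenerator(editor);

  // Drop an empty prompt shape at the center of the current viewport.
  const { x, y, w, h } = editor.getViewportPageBounds();
  createPromptShape(editor, { x: x + w / 2, y: y + h / 2 });
}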
@@ -23,8 +23,11 @@ import {
} from "@/lib/appwrite-db";
import { authClient } from "@/lib/auth-client";
import { AIImageShapeUtil, AITextResultShapeUtil } from "@/lib/tldraw/ai-shapes";
import { createImageShapeFromFile, setupFileDropHandler } from "@/lib/tldraw/processing";
import { Camera } from "lucide-react";
import { createImageShapeFromFile, setupFileDropHandler, generateText } from "@/lib/tldraw/processing";
import { createAITextResult } from "@/lib/tldraw/ai-shapes";
import { KnowledgeGraphManager } from "@/lib/tldraw/knowledge-graph";
import { Camera, Sparkles } from "lucide-react";
import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle } from "@/components/ui/dialog";
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip";

export const Route = createFileRoute("/space")({
@@ -103,6 +106,11 @@ function SpaceRoute() {
  const components: TLComponents = {
    Toolbar: (props) => {
      const editor = useEditor();
      const [promptOpen, setPromptOpen] = useState(false);
      const [promptText, setPromptText] = useState("");
      const [temperature, setTemperature] = useState(0.7);
      const [isGenLoading, setIsGenLoading] = useState(false);
      const [genError, setGenError] = useState<string | null>(null);

      async function pickImageFile(): Promise<File | null> {
        try {
@@ -144,6 +152,42 @@ function SpaceRoute() {
        await createImageShapeFromFile(editor, file, { x: x + w / 2, y: y + h / 2 });
      };

      const handleOpenPrompt = () => {
        setPromptText("");
        setGenError(null);
        setTemperature(0.7);
        setPromptOpen(true);
      };

      const handleGenerateFromDialog = async () => {
        if (!promptText.trim()) return;
        setIsGenLoading(true);
        setGenError(null);
        try {
          let text = await generateText(promptText, temperature);
          text = text.trim();
          // Center position
          const { x, y, w, h } = editor.getViewportPageBounds();
          const cx = x + w / 2;
          const cy = y + h / 2;
          const textShapeId = createAITextResult(editor, {
            fromShapeId: null,
            sourceType: "prompt",
            content: text,
            x: cx,
            y: cy,
          });
          // Analyze connections
          const kg = new KnowledgeGraphManager(editor);
          await kg.analyzeConnections(textShapeId);
          setPromptOpen(false);
        } catch (e: any) {
          setGenError(e?.message ?? "Failed to generate text");
        } finally {
          setIsGenLoading(false);
        }
      };

      return (
        <DefaultToolbar {...props}>
          {/* Custom actions group at the start (left) of the toolbar */}
@@ -151,14 +195,63 @@ function SpaceRoute() {
            <Tooltip>
              <TooltipTrigger>
                <Button type="button" onClick={handlePickImage}>
                  {/* Upload Image (OCR) */}
                  <Camera />
                </Button>
              </TooltipTrigger >
              <TooltipContent >
                Upload Image (OCR)
              </TooltipContent >
              <TooltipContent>Upload Image (OCR)</TooltipContent>
            </Tooltip>

            <Dialog open={promptOpen} onOpenChange={setPromptOpen}>
              <Tooltip>
                <TooltipTrigger>
                  <Button type="button" onClick={handleOpenPrompt}>
                    <Sparkles />
                  </Button>
                </TooltipTrigger>
                <TooltipContent>Generate Text</TooltipContent>
              </Tooltip>
              <DialogContent>
                <DialogHeader>
                  <DialogTitle>Generate Text</DialogTitle>
                  <DialogDescription>Enter a prompt to generate AI text. The result will be inserted on the canvas.</DialogDescription>
                </DialogHeader>
                <div style={{ display: "flex", flexDirection: "column", gap: 8 }}>
                  <textarea
                    value={promptText}
                    onChange={(e) => setPromptText(e.target.value)}
                    placeholder="Write your prompt..."
                    style={{
                      minHeight: 120,
                      resize: "vertical",
                      width: "100%",
                      border: "1px solid #ced4da",
                      borderRadius: 6,
                      padding: 8,
                      lineHeight: 1.4,
                      outline: "none",
                    }}
                  />
                  <label style={{ fontSize: 12 }}>Temperature: {temperature.toFixed(2)}</label>
                  <input
                    type="range"
                    min={0}
                    max={1}
                    step={0.05}
                    value={temperature}
                    onChange={(e) => setTemperature(parseFloat(e.target.value))}
                  />
                  {genError ? (
                    <div style={{ color: "#a4000f", fontSize: 12 }}>{genError}</div>
                  ) : null}
                </div>
                <DialogFooter>
                  <Button type="button" variant="secondary" onClick={() => setPromptOpen(false)} disabled={isGenLoading}>Cancel</Button>
                  <Button type="button" onClick={handleGenerateFromDialog} disabled={isGenLoading || !promptText.trim()}>
                    {isGenLoading ? "Generating..." : "Generate"}
                  </Button>
                </DialogFooter>
              </DialogContent>
            </Dialog>
          </div>
          {/* Separator */}
          <div style={{ borderRight: "1px solid #888", height: "30px", margin: "0 8px" }} />
@@ -175,6 +268,7 @@ function SpaceRoute() {
    editor.user.updateUserPreferences({ colorScheme: "dark" });
    // Expose editor for helper UI
    (window as any).editor = editor;
    // Prompt generator handled via toolbar dialog

    // Debounced save on document changes (user-originated)
    const debounceMs = 1200;