feat: enhance AI image generation and prompt handling in canvas components

- Introduced shimmer animation for loading states in AI image nodes.
- Updated prompt node to handle image generation with improved error handling and user feedback.
- Refactored AI image node to manage generation status and display loading indicators.
- Enhanced data handling in canvas components to include canvasId for better context management.
- Improved status message handling in Convex mutations for clearer user feedback.
This commit is contained in:
Matthias
2026-03-25 18:18:55 +01:00
parent 2f4d8a7172
commit 8d6ce275f8
10 changed files with 615 additions and 104 deletions

View File

@@ -128,3 +128,18 @@
@apply font-sans;
}
}
/* Loading-placeholder sweep: slides an element (typically a translucent
   gradient overlay) from fully off-screen left to fully off-screen right.
   Used by AI image nodes while a generation is in flight. */
@keyframes shimmer {
0% {
transform: translateX(-100%);
}
100% {
transform: translateX(100%);
}
}
/* Tailwind utility wrapper so markup can opt in via `animate-shimmer`. */
@layer utilities {
.animate-shimmer {
animation: shimmer 1.5s ease-in-out infinite;
}
}

View File

@@ -26,7 +26,7 @@ const nodeTemplates = [
label: "Prompt",
width: 320,
height: 140,
defaultData: { content: "", model: "" },
defaultData: { prompt: "", model: "" },
},
{
type: "note",
@@ -67,7 +67,7 @@ export default function CanvasToolbar({ canvasId }: CanvasToolbarProps) {
positionY: 100 + offset,
width,
height,
data,
data: { ...data, canvasId },
});
};

View File

@@ -192,7 +192,7 @@ function CanvasInner({ canvasId }: CanvasInnerProps) {
positionY: position.y,
width: defaults.width,
height: defaults.height,
data: defaults.data,
data: { ...defaults.data, canvasId },
});
},
[screenToFlowPosition, createNode, canvasId],

View File

@@ -1,78 +1,209 @@
"use client";
import { Handle, Position, type NodeProps, type Node } from "@xyflow/react";
import { useCallback, useState } from "react";
import { Handle, Position, useReactFlow, type NodeProps, type Node } from "@xyflow/react";
import { useAction } from "convex/react";
import { api } from "@/convex/_generated/api";
import type { Id } from "@/convex/_generated/dataModel";
import BaseNodeWrapper from "./base-node-wrapper";
import { DEFAULT_MODEL_ID, getModel } from "@/lib/ai-models";
import {
Loader2,
AlertCircle,
RefreshCw,
ImageIcon,
} from "lucide-react";
type AiImageNodeData = {
storageId?: string;
url?: string;
prompt?: string;
model?: string;
modelTier?: string;
generatedAt?: number;
canvasId?: string;
_status?: string;
_statusMessage?: string;
};
export type AiImageNode = Node<AiImageNodeData, "ai-image">;
type NodeStatus =
| "idle"
| "analyzing"
| "clarifying"
| "executing"
| "done"
| "error";
export default function AiImageNode({
id,
data,
selected,
}: NodeProps<AiImageNode>) {
const status = data._status ?? "idle";
const nodeData = data as AiImageNodeData;
const { getEdges, getNode } = useReactFlow();
const [isGenerating, setIsGenerating] = useState(false);
const [localError, setLocalError] = useState<string | null>(null);
const generateImage = useAction(api.ai.generateImage);
const status = (nodeData._status ?? "idle") as NodeStatus;
const errorMessage = nodeData._statusMessage;
const isLoading =
status === "executing" ||
status === "analyzing" ||
status === "clarifying" ||
isGenerating;
const handleRegenerate = useCallback(async () => {
if (isLoading) return;
setLocalError(null);
setIsGenerating(true);
try {
const canvasId = nodeData.canvasId as Id<"canvases">;
if (!canvasId) throw new Error("Missing canvasId");
const prompt = nodeData.prompt;
if (!prompt) throw new Error("No prompt — use Generate from a Prompt node");
const edges = getEdges();
const incomingEdges = edges.filter((e) => e.target === id);
let referenceStorageId: Id<"_storage"> | undefined;
for (const edge of incomingEdges) {
const src = getNode(edge.source);
if (src?.type === "image") {
const srcData = src.data as { storageId?: string };
if (srcData.storageId) {
referenceStorageId = srcData.storageId as Id<"_storage">;
break;
}
}
}
await generateImage({
canvasId,
nodeId: id as Id<"nodes">,
prompt,
referenceStorageId,
model: nodeData.model ?? DEFAULT_MODEL_ID,
});
} catch (err) {
setLocalError(err instanceof Error ? err.message : "Generation failed");
} finally {
setIsGenerating(false);
}
}, [isLoading, nodeData, id, getEdges, getNode, generateImage]);
const modelName =
getModel(nodeData.model ?? DEFAULT_MODEL_ID)?.name ?? "AI";
return (
<BaseNodeWrapper
selected={selected}
status={status}
statusMessage={data._statusMessage}
>
<div className="p-2">
<div className="text-xs font-medium text-emerald-500 mb-1">
🤖 KI-Bild
<BaseNodeWrapper selected={selected} className="w-[320px] overflow-hidden">
<Handle
type="target"
position={Position.Left}
id="prompt-in"
className="!h-3 !w-3 !bg-violet-500 !border-2 !border-background"
/>
<div className="border-b border-border px-3 py-2">
<div className="text-xs font-medium text-emerald-600 dark:text-emerald-400">
🖼 AI Image
</div>
</div>
{status === "executing" && (
<div className="flex h-36 w-56 items-center justify-center rounded-lg bg-muted">
<div className="h-6 w-6 animate-spin rounded-full border-2 border-primary border-t-transparent" />
<div className="relative h-[320px] overflow-hidden bg-muted">
{status === "idle" && !nodeData.url && (
<div className="absolute inset-0 flex flex-col items-center justify-center gap-3 text-muted-foreground">
<ImageIcon className="h-10 w-10 opacity-30" />
<p className="px-6 text-center text-xs opacity-60">
Connect a Prompt node and click Generate
</p>
</div>
)}
{status === "done" && data.url && (
{isLoading && (
<div className="absolute inset-0 z-10 flex flex-col items-center justify-center gap-3 bg-muted">
<div className="absolute inset-0 overflow-hidden">
<div className="animate-shimmer absolute inset-0 bg-gradient-to-r from-transparent via-white/10 to-transparent" />
</div>
<Loader2 className="relative z-10 h-8 w-8 animate-spin text-violet-500" />
<p className="relative z-10 text-xs text-muted-foreground">
{status === "analyzing" && "Analyzing…"}
{status === "clarifying" && "Clarifying…"}
{(status === "executing" || isGenerating) && "Generating…"}
</p>
<p className="relative z-10 text-[10px] text-muted-foreground/60">
{modelName}
</p>
</div>
)}
{status === "error" && !isLoading && (
<div className="absolute inset-0 z-10 flex flex-col items-center justify-center gap-3 bg-muted">
<AlertCircle className="h-8 w-8 text-destructive" />
<p className="px-4 text-center text-xs font-medium text-destructive">
Generation failed
</p>
<p className="px-6 text-center text-[10px] text-muted-foreground">
{errorMessage ?? localError ?? "Unknown error"} Credits not
charged
</p>
<button
type="button"
onClick={() => void handleRegenerate()}
className="nodrag mt-1 flex items-center gap-1.5 rounded-md border border-border bg-background px-3 py-1.5 text-xs font-medium transition-colors hover:bg-accent"
>
<RefreshCw className="h-3 w-3" />
Try again
</button>
</div>
)}
{nodeData.url && !isLoading && (
// eslint-disable-next-line @next/next/no-img-element
<img
src={data.url}
alt={data.prompt ?? "KI-generiertes Bild"}
className="rounded-lg object-cover max-w-[260px]"
src={nodeData.url}
alt={nodeData.prompt ?? "AI generated image"}
className="absolute inset-0 h-full w-full object-contain"
draggable={false}
/>
)}
{status === "error" && (
<div className="flex h-36 w-56 items-center justify-center rounded-lg bg-red-50 dark:bg-red-950/20 text-sm text-red-600">
{data._statusMessage ?? "Fehler bei der Generierung"}
{status === "done" && nodeData.url && !isLoading && (
<div className="absolute inset-0 z-20 flex items-end justify-end p-2 opacity-0 transition-opacity hover:opacity-100">
<button
type="button"
onClick={() => void handleRegenerate()}
className="nodrag flex items-center gap-1.5 rounded-md border border-border bg-background/90 px-2.5 py-1.5 text-xs font-medium backdrop-blur-sm transition-colors hover:bg-background"
>
<RefreshCw className="h-3 w-3" />
Regenerate
</button>
</div>
)}
{status === "idle" && (
<div className="flex h-36 w-56 items-center justify-center rounded-lg border-2 border-dashed text-sm text-muted-foreground">
Prompt verbinden
</div>
)}
{data.prompt && status === "done" && (
<p className="mt-1 text-xs text-muted-foreground truncate max-w-[260px]">
{data.prompt}
{nodeData.prompt && (
<div className="border-t border-border px-3 py-2">
<p className="line-clamp-2 text-[10px] text-muted-foreground">
{nodeData.prompt}
</p>
<p className="mt-0.5 text-[10px] text-muted-foreground/60">
{modelName}
</p>
)}
</div>
)}
<Handle
type="target"
position={Position.Left}
className="!h-3 !w-3 !bg-emerald-500 !border-2 !border-background"
/>
<Handle
type="source"
position={Position.Right}
className="!h-3 !w-3 !bg-primary !border-2 !border-background"
id="image-out"
className="!h-3 !w-3 !bg-violet-500 !border-2 !border-background"
/>
</BaseNodeWrapper>
);

View File

@@ -1,16 +1,19 @@
"use client";
import { useState, useCallback, useEffect } from "react";
import { Handle, Position, type NodeProps, type Node } from "@xyflow/react";
import { useMutation } from "convex/react";
import { useCallback, useEffect, useRef, useState } from "react";
import { Handle, Position, useReactFlow, type NodeProps, type Node } from "@xyflow/react";
import { useMutation, useAction } from "convex/react";
import { api } from "@/convex/_generated/api";
import type { Id } from "@/convex/_generated/dataModel";
import { useDebouncedCallback } from "@/hooks/use-debounced-callback";
import BaseNodeWrapper from "./base-node-wrapper";
import { useDebouncedCallback } from "@/hooks/use-debounced-callback";
import { DEFAULT_MODEL_ID } from "@/lib/ai-models";
import { Sparkles, Loader2 } from "lucide-react";
type PromptNodeData = {
prompt?: string;
model?: string;
canvasId?: string;
_status?: string;
_statusMessage?: string;
};
@@ -22,82 +25,166 @@ export default function PromptNode({
data,
selected,
}: NodeProps<PromptNode>) {
const updateData = useMutation(api.nodes.updateData);
const [prompt, setPrompt] = useState(data.prompt ?? "");
const [isEditing, setIsEditing] = useState(false);
const nodeData = data as PromptNodeData;
const { getEdges, getNode } = useReactFlow();
const [prompt, setPrompt] = useState(nodeData.prompt ?? "");
const [isGenerating, setIsGenerating] = useState(false);
const [error, setError] = useState<string | null>(null);
useEffect(() => {
if (!isEditing) {
setPrompt(data.prompt ?? "");
}
}, [data.prompt, isEditing]);
setPrompt(nodeData.prompt ?? "");
}, [nodeData.prompt]);
const savePrompt = useDebouncedCallback(
(newPrompt: string) => {
const dataRef = useRef(data);
dataRef.current = data;
const updateData = useMutation(api.nodes.updateData);
const createNode = useMutation(api.nodes.create);
const generateImage = useAction(api.ai.generateImage);
const debouncedSave = useDebouncedCallback((value: string) => {
const raw = dataRef.current as Record<string, unknown>;
const { _status, _statusMessage, ...rest } = raw;
void _status;
void _statusMessage;
updateData({
nodeId: id as Id<"nodes">,
data: {
...data,
prompt: newPrompt,
_status: undefined,
_statusMessage: undefined,
},
data: { ...rest, prompt: value },
});
}, 500);
const handlePromptChange = useCallback(
(e: React.ChangeEvent<HTMLTextAreaElement>) => {
const value = e.target.value;
setPrompt(value);
debouncedSave(value);
},
500,
[debouncedSave]
);
const handleChange = useCallback(
(e: React.ChangeEvent<HTMLTextAreaElement>) => {
const newPrompt = e.target.value;
setPrompt(newPrompt);
savePrompt(newPrompt);
const handleGenerate = useCallback(async () => {
if (!prompt.trim() || isGenerating) return;
setError(null);
setIsGenerating(true);
try {
const canvasId = nodeData.canvasId as Id<"canvases">;
if (!canvasId) throw new Error("Missing canvasId on node");
const edges = getEdges();
const incomingEdges = edges.filter((e) => e.target === id);
let referenceStorageId: Id<"_storage"> | undefined;
for (const edge of incomingEdges) {
const sourceNode = getNode(edge.source);
if (sourceNode?.type === "image") {
const srcData = sourceNode.data as { storageId?: string };
if (srcData.storageId) {
referenceStorageId = srcData.storageId as Id<"_storage">;
break;
}
}
}
const currentNode = getNode(id);
const offsetX = (currentNode?.measured?.width ?? 280) + 32;
const posX = (currentNode?.position?.x ?? 0) + offsetX;
const posY = currentNode?.position?.y ?? 0;
const aiNodeId = await createNode({
canvasId,
type: "ai-image",
positionX: posX,
positionY: posY,
width: 320,
height: 320,
data: {
prompt,
model: DEFAULT_MODEL_ID,
modelTier: "standard",
canvasId,
},
[savePrompt],
);
});
await generateImage({
canvasId,
nodeId: aiNodeId,
prompt,
referenceStorageId,
model: DEFAULT_MODEL_ID,
});
} catch (err) {
setError(err instanceof Error ? err.message : "Generation failed");
} finally {
setIsGenerating(false);
}
}, [
prompt,
isGenerating,
nodeData.canvasId,
id,
getEdges,
getNode,
createNode,
generateImage,
]);
return (
<BaseNodeWrapper
selected={selected}
status={data._status}
className="border-purple-500/30"
status={nodeData._status}
statusMessage={nodeData._statusMessage}
className="min-w-[240px] border-violet-500/30"
>
<div className="w-72 p-3">
<div className="text-xs font-medium text-purple-500 mb-1">
<Handle
type="target"
position={Position.Left}
id="image-in"
className="!h-3 !w-3 !bg-violet-500 !border-2 !border-background"
/>
<div className="flex flex-col gap-2 p-3">
<div className="text-xs font-medium text-violet-600 dark:text-violet-400">
Prompt
</div>
{isEditing ? (
<textarea
value={prompt}
onChange={handleChange}
onBlur={() => setIsEditing(false)}
autoFocus
className="nodrag nowheel w-full resize-none rounded-md border-0 bg-transparent p-0 text-sm outline-none focus:ring-0 min-h-[3rem]"
placeholder="Prompt eingeben…"
onChange={handlePromptChange}
placeholder="Describe what you want to generate…"
rows={4}
className="nodrag nowheel w-full resize-none rounded-md border border-border bg-background px-3 py-2 text-sm placeholder:text-muted-foreground focus:outline-none focus:ring-1 focus:ring-violet-500"
/>
) : (
<div
onDoubleClick={() => setIsEditing(true)}
className="min-h-[2rem] cursor-text text-sm whitespace-pre-wrap"
{error && (
<p className="text-xs text-destructive">{error}</p>
)}
<button
type="button"
onClick={() => void handleGenerate()}
disabled={!prompt.trim() || isGenerating}
className="nodrag flex items-center justify-center gap-2 rounded-md bg-violet-600 px-3 py-2 text-sm font-medium text-white transition-colors hover:bg-violet-700 disabled:cursor-not-allowed disabled:opacity-50"
>
{prompt || (
<span className="text-muted-foreground">
Doppelklick zum Bearbeiten
</span>
)}
</div>
)}
{data.model && (
<div className="mt-2 text-xs text-muted-foreground">
Modell: {data.model}
</div>
{isGenerating ? (
<>
<Loader2 className="h-4 w-4 animate-spin" />
Generating
</>
) : (
<>
<Sparkles className="h-4 w-4" />
Generate Image
</>
)}
</button>
</div>
<Handle
type="source"
position={Position.Right}
className="!h-3 !w-3 !bg-purple-500 !border-2 !border-background"
id="prompt-out"
className="!h-3 !w-3 !bg-violet-500 !border-2 !border-background"
/>
</BaseNodeWrapper>
);

View File

@@ -8,6 +8,7 @@
* @module
*/
import type * as ai from "../ai.js";
import type * as auth from "../auth.js";
import type * as canvases from "../canvases.js";
import type * as credits from "../credits.js";
@@ -15,6 +16,7 @@ import type * as edges from "../edges.js";
import type * as helpers from "../helpers.js";
import type * as http from "../http.js";
import type * as nodes from "../nodes.js";
import type * as openrouter from "../openrouter.js";
import type * as storage from "../storage.js";
import type {
@@ -24,6 +26,7 @@ import type {
} from "convex/server";
declare const fullApi: ApiFromModules<{
ai: typeof ai;
auth: typeof auth;
canvases: typeof canvases;
credits: typeof credits;
@@ -31,6 +34,7 @@ declare const fullApi: ApiFromModules<{
helpers: typeof helpers;
http: typeof http;
nodes: typeof nodes;
openrouter: typeof openrouter;
storage: typeof storage;
}>;

109
convex/ai.ts Normal file
View File

@@ -0,0 +1,109 @@
import { v } from "convex/values";
import { action } from "./_generated/server";
import { api } from "./_generated/api";
import {
generateImageViaOpenRouter,
DEFAULT_IMAGE_MODEL,
IMAGE_MODELS,
} from "./openrouter";
/**
 * Convex action: generates an image via OpenRouter and stores the result
 * on the target node.
 *
 * Flow: validate env/model/user -> reserve credits -> mark node "executing"
 * -> call OpenRouter -> store decoded image in Convex storage -> merge node
 * data -> mark "done" -> commit credits. On ANY failure after the
 * reservation, the reservation is released and the node is marked "error"
 * before the error is rethrown to the caller.
 */
export const generateImage = action({
  args: {
    canvasId: v.id("canvases"),
    nodeId: v.id("nodes"),
    prompt: v.string(),
    referenceStorageId: v.optional(v.id("_storage")),
    model: v.optional(v.string()),
  },
  handler: async (ctx, args) => {
    const apiKey = process.env.OPENROUTER_API_KEY;
    if (!apiKey) {
      throw new Error("OPENROUTER_API_KEY is not set");
    }
    const modelId = args.model ?? DEFAULT_IMAGE_MODEL;
    const modelConfig = IMAGE_MODELS[modelId];
    if (!modelConfig) {
      throw new Error(`Unknown model: ${modelId}`);
    }
    // Actions have no implicit auth context — resolve the user explicitly.
    if (!(await ctx.runQuery(api.auth.getCurrentUser, {}))) {
      throw new Error("User not found");
    }
    // Reserve credits up front; committed on success, released on failure.
    const reservationId = await ctx.runMutation(api.credits.reserve, {
      estimatedCost: modelConfig.estimatedCostPerImage,
      description: `Bildgenerierung — ${modelConfig.name}`,
      model: modelId,
      nodeId: args.nodeId,
      canvasId: args.canvasId,
    });
    try {
      // Inside the try (previously it was outside): if this mutation throws,
      // the catch below still releases the credit reservation instead of
      // leaking it.
      await ctx.runMutation(api.nodes.updateStatus, {
        nodeId: args.nodeId,
        status: "executing",
      });
      // Optional image-to-image reference from a connected image node.
      let referenceImageUrl: string | undefined;
      if (args.referenceStorageId) {
        referenceImageUrl =
          (await ctx.storage.getUrl(args.referenceStorageId)) ?? undefined;
      }
      const result = await generateImageViaOpenRouter(apiKey, {
        prompt: args.prompt,
        referenceImageUrl,
        model: modelId,
      });
      // Decode the base64 payload into raw bytes for Convex storage.
      const binaryString = atob(result.imageBase64);
      const bytes = new Uint8Array(binaryString.length);
      for (let i = 0; i < binaryString.length; i++) {
        bytes[i] = binaryString.charCodeAt(i);
      }
      const blob = new Blob([bytes], { type: result.mimeType });
      const storageId = await ctx.storage.store(blob);
      // Merge into the node's existing data so unrelated keys
      // (e.g. canvasId) are preserved.
      const existing = await ctx.runQuery(api.nodes.get, {
        nodeId: args.nodeId,
      });
      if (!existing) throw new Error("Node not found");
      const prev = (existing.data ?? {}) as Record<string, unknown>;
      await ctx.runMutation(api.nodes.updateData, {
        nodeId: args.nodeId,
        data: {
          ...prev,
          storageId,
          prompt: args.prompt,
          model: modelId,
          modelTier: modelConfig.tier,
          generatedAt: Date.now(),
        },
      });
      await ctx.runMutation(api.nodes.updateStatus, {
        nodeId: args.nodeId,
        status: "done",
      });
      await ctx.runMutation(api.credits.commit, {
        transactionId: reservationId,
        actualCost: modelConfig.estimatedCostPerImage,
      });
    } catch (error) {
      // Release reserved credits first, then surface the error on the node
      // so the UI can offer a retry; finally rethrow for the caller.
      await ctx.runMutation(api.credits.release, {
        transactionId: reservationId,
      });
      await ctx.runMutation(api.nodes.updateStatus, {
        nodeId: args.nodeId,
        status: "error",
        statusMessage:
          error instanceof Error ? error.message : "Generation failed",
      });
      throw error;
    }
  },
});

View File

@@ -270,7 +270,15 @@ export const updateStatus = mutation({
if (!node) throw new Error("Node not found");
await getCanvasOrThrow(ctx, node.canvasId, user.userId);
await ctx.db.patch(nodeId, { status, statusMessage });
const patch: { status: typeof status; statusMessage?: string } = {
status,
};
if (statusMessage !== undefined) {
patch.statusMessage = statusMessage;
} else if (status === "done" || status === "executing" || status === "idle") {
patch.statusMessage = undefined;
}
await ctx.db.patch(nodeId, patch);
},
});

112
convex/openrouter.ts Normal file
View File

@@ -0,0 +1,112 @@
// Base URL for every OpenRouter REST request made in this module.
export const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
// One selectable image model, including the cost estimate used when
// reserving credits before a generation starts.
export interface OpenRouterModel {
id: string;
name: string;
tier: "budget" | "standard" | "premium";
estimatedCostPerImage: number; // in Euro-Cent (for credit reservation)
}
// Phase 1: Gemini 2.5 Flash Image only.
// Add more models here in Phase 2 when the model selector UI is built.
export const IMAGE_MODELS: Record<string, OpenRouterModel> = {
"google/gemini-2.5-flash-image": {
id: "google/gemini-2.5-flash-image",
name: "Gemini 2.5 Flash",
tier: "standard",
estimatedCostPerImage: 4, // ~€0.04 in Euro-Cent
},
};
// Fallback model id used when a caller does not pass an explicit model.
export const DEFAULT_IMAGE_MODEL = "google/gemini-2.5-flash-image";
// Input for generateImageViaOpenRouter.
export interface GenerateImageParams {
prompt: string;
referenceImageUrl?: string; // optional image-to-image input
model?: string;
}
// Decoded result of a successful generation call.
export interface OpenRouterImageResponse {
imageBase64: string; // base64-encoded PNG/JPEG
mimeType: string;
}
/**
 * Calls the OpenRouter API to generate an image.
 * Uses the chat/completions endpoint with a vision-capable model that returns
 * an inline image in the response (as a base64 data URI).
 *
 * Must be called from a Convex Action (has access to fetch + env vars).
 *
 * @param apiKey OpenRouter API key (Bearer token).
 * @param params Prompt, optional reference image URL, optional model id.
 * @returns The base64 image payload and its MIME type.
 * @throws Error on a non-2xx response, a response without an image, or a
 *         response whose image URL is not a base64 data URI.
 */
export async function generateImageViaOpenRouter(
  apiKey: string,
  params: GenerateImageParams
): Promise<OpenRouterImageResponse> {
  const modelId = params.model ?? DEFAULT_IMAGE_MODEL;
  // Build message content — optional reference image first, then the prompt.
  const userContent: object[] = [];
  if (params.referenceImageUrl) {
    userContent.push({
      type: "image_url",
      image_url: { url: params.referenceImageUrl },
    });
  }
  userContent.push({
    type: "text",
    text: params.prompt,
  });
  const body = {
    model: modelId,
    // Request image output alongside text.
    modalities: ["image", "text"],
    messages: [
      {
        role: "user",
        content: userContent,
      },
    ],
  };
  const response = await fetch(`${OPENROUTER_BASE_URL}/chat/completions`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${apiKey}`,
      "Content-Type": "application/json",
      "HTTP-Referer": "https://app.lemonspace.io",
      "X-Title": "LemonSpace",
    },
    body: JSON.stringify(body),
  });
  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`OpenRouter API error ${response.status}: ${errorText}`);
  }
  const data = await response.json();
  // OpenRouter returns generated images in message.images (separate from content)
  const images = data?.choices?.[0]?.message?.images;
  if (!images || images.length === 0) {
    throw new Error("No image found in OpenRouter response");
  }
  const imageUrl: string | undefined = images[0]?.image_url?.url;
  if (!imageUrl) {
    throw new Error("Image block missing image_url.url");
  }
  // Expect a data URI: "data:image/png;base64,<data>". Validate explicitly —
  // the previous blind split(",") could hand an undefined/garbage payload
  // downstream (e.g. if the provider returned a hosted https URL instead),
  // which would only fail later in atob() with a confusing error.
  const commaIndex = imageUrl.indexOf(",");
  if (!imageUrl.startsWith("data:") || commaIndex === -1) {
    throw new Error(
      `Expected base64 data URI in OpenRouter response, got: ${imageUrl.slice(0, 64)}`
    );
  }
  const meta = imageUrl.slice(0, commaIndex);
  const base64Data = imageUrl.slice(commaIndex + 1);
  const mimeType = meta.replace("data:", "").replace(";base64", "");
  return {
    imageBase64: base64Data,
    mimeType: mimeType || "image/png",
  };
}

45
lib/ai-models.ts Normal file
View File

@@ -0,0 +1,45 @@
// Client-side model definitions for the UI.
// Mirrors the backend config in convex/openrouter.ts — keep in sync.
// One image model as shown in the (future) model selector UI.
export interface AiModel {
id: string;
name: string;
tier: "budget" | "standard" | "premium";
description: string;
estimatedCost: string; // human-readable, e.g. "~€0.04"
minTier: "free" | "starter" | "pro" | "business"; // minimum subscription tier
}
// Models available to the client. Phase 1 ships a single model; the
// commented entries below are staged for the Phase 2 selector.
export const IMAGE_MODELS: AiModel[] = [
{
id: "google/gemini-2.5-flash-image",
name: "Gemini 2.5 Flash",
tier: "standard",
description: "Fast, high-quality generation",
estimatedCost: "~€0.04",
minTier: "free",
},
// Phase 2 — uncomment when model selector UI is ready:
// {
// id: "black-forest-labs/flux.2-klein-4b",
// name: "FLUX.2 Klein",
// tier: "budget",
// description: "Photorealism, fastest Flux",
// estimatedCost: "~€0.02",
// minTier: "free",
// },
// {
// id: "openai/gpt-5-image",
// name: "GPT-5 Image",
// tier: "premium",
// description: "Best instruction following, text in image",
// estimatedCost: "~€0.15",
// minTier: "starter",
// },
];
// Default model id; must match DEFAULT_IMAGE_MODEL in convex/openrouter.ts.
export const DEFAULT_MODEL_ID = "google/gemini-2.5-flash-image";
/** Looks up a client-side model definition by its OpenRouter id. */
export function getModel(id: string): AiModel | undefined {
  for (const model of IMAGE_MODELS) {
    if (model.id === id) {
      return model;
    }
  }
  return undefined;
}