feat(canvas): add video-prompt node and enhance video generation support

- Introduced a new node type "video-prompt" for AI video generation, including its integration into the canvas command palette and node template picker.
- Updated connection validation to allow connections from text nodes to video-prompt and from video-prompt to ai-video nodes.
- Enhanced error handling and messaging for video generation failures, including specific cases for provider issues.
- Added tests to validate new video-prompt functionality and connection policies.
- Updated localization files to include new labels and prompts for video-prompt and ai-video nodes.
This commit is contained in:
2026-04-07 08:50:59 +02:00
parent 456b910532
commit ed08b976f9
28 changed files with 2899 additions and 9 deletions

View File

@@ -432,6 +432,64 @@ describe("useCanvasConnections", () => {
expect(latestHandlersRef.current?.connectionDropMenu).toBeNull();
});
it("rejects text to ai-video body drops", async () => {
const runCreateEdgeMutation = vi.fn(async () => undefined);
const showConnectionRejectedToast = vi.fn();
container = document.createElement("div");
document.body.appendChild(container);
root = createRoot(container);
await act(async () => {
root?.render(
<HookHarness
helperResult={{
sourceNodeId: "node-source",
targetNodeId: "node-target",
sourceHandle: undefined,
targetHandle: undefined,
}}
nodes={[
{ id: "node-source", type: "text", position: { x: 0, y: 0 }, data: {} },
{ id: "node-target", type: "ai-video", position: { x: 300, y: 200 }, data: {} },
]}
runCreateEdgeMutation={runCreateEdgeMutation}
showConnectionRejectedToast={showConnectionRejectedToast}
/>,
);
});
await act(async () => {
latestHandlersRef.current?.onConnectStart?.(
{} as MouseEvent,
{
nodeId: "node-source",
handleId: null,
handleType: "source",
} as never,
);
latestHandlersRef.current?.onConnectEnd(
{ clientX: 400, clientY: 260 } as MouseEvent,
{
isValid: false,
from: { x: 0, y: 0 },
fromNode: { id: "node-source", type: "text" },
fromHandle: { id: null, type: "source" },
fromPosition: null,
to: { x: 400, y: 260 },
toHandle: null,
toNode: null,
toPosition: null,
pointer: null,
} as never,
);
});
expect(runCreateEdgeMutation).not.toHaveBeenCalled();
expect(showConnectionRejectedToast).toHaveBeenCalledWith("ai-video-source-invalid");
expect(latestHandlersRef.current?.connectionDropMenu).toBeNull();
});
it("ignores onConnectEnd when no connect drag is active", async () => {
const runCreateEdgeMutation = vi.fn(async () => undefined);
const showConnectionRejectedToast = vi.fn();

View File

@@ -740,4 +740,33 @@ describe("useCanvasEdgeInsertions", () => {
expect(templateTypes).not.toContain("text");
expect(templateTypes).not.toContain("ai-image");
});
it("offers video-prompt as valid split for text to ai-video", async () => {
container = document.createElement("div");
document.body.appendChild(container);
root = createRoot(container);
await act(async () => {
root?.render(
<HookHarness
nodes={[
createNode({ id: "source", type: "text", position: { x: 0, y: 0 } }),
createNode({ id: "target", type: "ai-video", position: { x: 360, y: 0 } }),
]}
edges={[createEdge({ id: "edge-1", source: "source", target: "target" })]}
/>,
);
});
await act(async () => {
latestHandlersRef.current?.openEdgeInsertMenu({ edgeId: "edge-1", screenX: 20, screenY: 20 });
});
const templateTypes = (latestHandlersRef.current?.edgeInsertTemplates ?? []).map(
(template) => template.type,
);
expect(templateTypes).toContain("video-prompt");
expect(templateTypes).not.toContain("prompt");
});
});

View File

@@ -55,6 +55,7 @@ const CATALOG_ICONS: Partial<Record<string, LucideIcon>> = {
image: Image,
text: Type,
prompt: Sparkles,
"video-prompt": Video,
color: Palette,
video: Video,
asset: Package,

View File

@@ -26,7 +26,7 @@ export function useGenerationFailureWarnings(
for (const node of convexNodes) {
nextNodeStatusMap.set(node._id, node.status);
if (node.type !== "ai-image") {
if (node.type !== "ai-image" && node.type !== "ai-video") {
continue;
}
@@ -61,7 +61,7 @@ export function useGenerationFailureWarnings(
}
if (recentFailures.length >= GENERATION_FAILURE_THRESHOLD) {
toast.warning(t('ai.openrouterIssuesTitle'), t('ai.openrouterIssuesDesc'));
toast.warning(t('ai.providerIssuesTitle'), t('ai.providerIssuesDesc'));
recentGenerationFailureTimestampsRef.current = [];
return;
}

View File

@@ -27,6 +27,7 @@ const NODE_ICONS: Record<CanvasNodeTemplate["type"], LucideIcon> = {
image: Image,
text: Type,
prompt: Sparkles,
"video-prompt": Video,
note: StickyNote,
frame: Frame,
compare: GitCompare,
@@ -46,6 +47,7 @@ const NODE_SEARCH_KEYWORDS: Partial<
image: ["image", "photo", "foto"],
text: ["text", "typo"],
prompt: ["prompt", "ai", "generate", "ki-bild", "ki", "bild"],
"video-prompt": ["video", "ai", "ki-video", "ki", "prompt"],
note: ["note", "sticky", "notiz"],
frame: ["frame", "artboard"],
compare: ["compare", "before", "after", "vergleich"],

View File

@@ -48,6 +48,7 @@ const CATALOG_ICONS: Partial<Record<string, LucideIcon>> = {
image: Image,
text: Type,
prompt: Sparkles,
"video-prompt": Video,
color: Palette,
video: Video,
asset: Package,

View File

@@ -1,7 +1,9 @@
import ImageNode from "./nodes/image-node";
import TextNode from "./nodes/text-node";
import PromptNode from "./nodes/prompt-node";
import VideoPromptNode from "./nodes/video-prompt-node";
import AiImageNode from "./nodes/ai-image-node";
import AiVideoNode from "./nodes/ai-video-node";
import GroupNode from "./nodes/group-node";
import FrameNode from "./nodes/frame-node";
import NoteNode from "./nodes/note-node";
@@ -25,7 +27,9 @@ export const nodeTypes = {
image: ImageNode,
text: TextNode,
prompt: PromptNode,
"video-prompt": VideoPromptNode,
"ai-image": AiImageNode,
"ai-video": AiVideoNode,
group: GroupNode,
frame: FrameNode,
note: NoteNode,

View File

@@ -0,0 +1,251 @@
"use client";
import { useCallback, useState } from "react";
import { useAction } from "convex/react";
import type { FunctionReference } from "convex/server";
import { useTranslations } from "next-intl";
import { AlertCircle, Download, Loader2, RefreshCw, Video } from "lucide-react";
import { Handle, Position, useReactFlow, type Node, type NodeProps } from "@xyflow/react";
import { api } from "@/convex/_generated/api";
import type { Id } from "@/convex/_generated/dataModel";
import { useCanvasSync } from "@/components/canvas/canvas-sync-context";
import { classifyError } from "@/lib/ai-errors";
import { getVideoModel, type VideoModelDurationSeconds } from "@/lib/ai-video-models";
import { toast } from "@/lib/toast";
import BaseNodeWrapper from "./base-node-wrapper";
// Data payload stored on an ai-video node. The underscore-prefixed fields are
// transient status values supplied alongside the node data; the rest describe
// the generation request and, once finished, the playable video URL.
type AiVideoNodeData = {
  prompt?: string;
  modelId?: string;
  durationSeconds?: VideoModelDurationSeconds;
  creditCost?: number;
  canvasId?: string;
  url?: string;
  _status?: string;
  _statusMessage?: string;
};
// Lifecycle states this component distinguishes when rendering.
type NodeStatus =
  | "idle"
  | "analyzing"
  | "clarifying"
  | "executing"
  | "done"
  | "error";
export type AiVideoNodeType = Node<AiVideoNodeData, "ai-video">;
/**
 * Canvas output node for AI video generation.
 *
 * Renders the generated video (or idle / loading / error states) and offers a
 * retry that re-invokes the `ai.generateVideo` Convex action using the
 * incoming video-prompt node as the source. Status and status message are read
 * from the `_status` / `_statusMessage` data fields.
 */
export default function AiVideoNode({ id, data, selected }: NodeProps<AiVideoNodeType>) {
  const t = useTranslations("aiVideoNode");
  const tToast = useTranslations("toasts");
  const nodeData = data as AiVideoNodeData;
  const { getEdges, getNode } = useReactFlow();
  const { status: syncStatus } = useCanvasSync();
  // NOTE(review): the generated Convex API types are bypassed with a manual
  // FunctionReference cast — presumably because codegen does not yet expose
  // the video action; confirm once the generated api surface is updated.
  const generateVideo = useAction(
    (api as unknown as {
      ai: {
        generateVideo: FunctionReference<
          "action",
          "public",
          {
            canvasId: Id<"canvases">;
            sourceNodeId: Id<"nodes">;
            outputNodeId: Id<"nodes">;
            prompt: string;
            modelId: string;
            durationSeconds: 5 | 10;
          },
          { queued: true; outputNodeId: Id<"nodes"> }
        >;
      };
    }).ai.generateVideo,
  );
  const status = (nodeData._status ?? "idle") as NodeStatus;
  const [isRetrying, setIsRetrying] = useState(false);
  // Client-only error shown when retry preconditions fail before the backend
  // is even called; the backend status message takes precedence when present.
  const [localError, setLocalError] = useState<string | null>(null);
  const classifiedError = classifyError(nodeData._statusMessage ?? localError);
  const isLoading =
    status === "executing" || status === "analyzing" || status === "clarifying" || isRetrying;
  const modelLabel =
    typeof nodeData.modelId === "string"
      ? getVideoModel(nodeData.modelId)?.label ?? nodeData.modelId
      : "-";
  // Re-runs the generation. Requires an online connection, a complete
  // prompt/model/duration on this node, and an incoming edge whose source is a
  // video-prompt node (which may supply the canvas id if this node lacks one).
  const handleRetry = useCallback(async () => {
    if (isRetrying) return;
    if (syncStatus.isOffline) {
      toast.warning(
        "Offline aktuell nicht unterstuetzt",
        "KI-Generierung benoetigt eine aktive Verbindung.",
      );
      return;
    }
    const prompt = nodeData.prompt?.trim();
    const modelId = nodeData.modelId;
    const durationSeconds = nodeData.durationSeconds;
    if (!prompt || !modelId || !durationSeconds) {
      setLocalError(t("errorFallback"));
      return;
    }
    const incomingEdge = getEdges().find((edge) => edge.target === id);
    if (!incomingEdge) {
      setLocalError(t("errorFallback"));
      return;
    }
    const sourceNode = getNode(incomingEdge.source);
    if (!sourceNode || sourceNode.type !== "video-prompt") {
      setLocalError(t("errorFallback"));
      return;
    }
    const sourceData = sourceNode.data as { canvasId?: string } | undefined;
    const canvasId = (nodeData.canvasId ?? sourceData?.canvasId) as Id<"canvases"> | undefined;
    if (!canvasId) {
      setLocalError(t("errorFallback"));
      return;
    }
    setLocalError(null);
    setIsRetrying(true);
    try {
      await toast.promise(
        generateVideo({
          canvasId,
          sourceNodeId: incomingEdge.source as Id<"nodes">,
          outputNodeId: id as Id<"nodes">,
          prompt,
          modelId,
          durationSeconds,
        }),
        {
          loading: tToast("ai.generating"),
          success: tToast("ai.generationQueued"),
          error: tToast("ai.generationFailed"),
        },
      );
    } catch (error) {
      const classified = classifyError(error);
      setLocalError(classified.rawMessage ?? tToast("ai.generationFailed"));
    } finally {
      setIsRetrying(false);
    }
  }, [
    generateVideo,
    getEdges,
    getNode,
    id,
    isRetrying,
    nodeData.canvasId,
    nodeData.durationSeconds,
    nodeData.modelId,
    nodeData.prompt,
    syncStatus.isOffline,
    t,
    tToast,
  ]);
  return (
    <BaseNodeWrapper
      nodeType="ai-video"
      selected={selected}
      status={nodeData._status}
      statusMessage={nodeData._statusMessage}
      className="flex h-full w-full min-h-0 min-w-0 flex-col"
    >
      {/* Incoming connection (expected from a video-prompt node). */}
      <Handle
        type="target"
        position={Position.Left}
        id="video-in"
        className="!h-3 !w-3 !bg-violet-600 !border-2 !border-background"
      />
      <div className="shrink-0 border-b border-border px-3 py-2">
        <div className="flex items-center gap-1.5 text-xs font-medium text-violet-700 dark:text-violet-300">
          <Video className="h-3.5 w-3.5" />
          {t("label")}
        </div>
      </div>
      {/* Main body: idle hint, spinner, error + retry button, or the player. */}
      <div className="relative min-h-0 flex-1 overflow-hidden bg-muted/30">
        {status === "idle" && !nodeData.url ? (
          <div className="absolute inset-0 flex items-center justify-center px-6 text-center text-xs text-muted-foreground">
            {t("idleHint")}
          </div>
        ) : null}
        {isLoading ? (
          <div className="absolute inset-0 flex flex-col items-center justify-center gap-2">
            <Loader2 className="h-7 w-7 animate-spin text-violet-500" />
            <p className="text-xs text-muted-foreground">{t("generating")}</p>
          </div>
        ) : null}
        {status === "error" && !isLoading ? (
          <div className="absolute inset-0 flex flex-col items-center justify-center gap-2 px-4">
            <AlertCircle className="h-7 w-7 text-destructive" />
            <p className="text-center text-xs text-destructive">
              {classifiedError.rawMessage ?? t("errorFallback")}
            </p>
            <button
              type="button"
              onClick={() => void handleRetry()}
              disabled={isRetrying}
              className="nodrag inline-flex items-center gap-1.5 rounded-md border border-border bg-background px-2.5 py-1.5 text-xs text-muted-foreground disabled:cursor-not-allowed disabled:opacity-60"
            >
              <RefreshCw className={`h-3 w-3${isRetrying ? " animate-spin" : ""}`} />
              {t("retryButton")}
            </button>
          </div>
        ) : null}
        {nodeData.url && !isLoading ? (
          <video
            src={nodeData.url}
            controls
            playsInline
            preload="metadata"
            className="h-full w-full object-contain"
          />
        ) : null}
      </div>
      {/* Footer: model / duration / credit metadata plus a download link. */}
      <div className="flex shrink-0 flex-col gap-1 border-t border-border px-3 py-2 text-[10px] text-muted-foreground">
        <p className="truncate" title={modelLabel}>
          {t("modelMeta", { model: modelLabel })}
        </p>
        {typeof nodeData.durationSeconds === "number" ? (
          <p>{t("durationMeta", { duration: nodeData.durationSeconds })}</p>
        ) : null}
        {typeof nodeData.creditCost === "number" ? (
          <p>{t("creditMeta", { credits: nodeData.creditCost })}</p>
        ) : null}
        {nodeData.prompt ? <p className="line-clamp-1">{nodeData.prompt}</p> : null}
        {nodeData.url ? (
          <a
            href={nodeData.url}
            download
            className="nodrag inline-flex items-center gap-1 text-xs text-violet-700 underline-offset-2 hover:underline dark:text-violet-300"
          >
            <Download className="h-3 w-3" />
            {t("downloadButton")}
          </a>
        ) : null}
      </div>
      {/* Outgoing connection so the finished video can feed further nodes. */}
      <Handle
        type="source"
        position={Position.Right}
        id="video-out"
        className="!h-3 !w-3 !bg-violet-600 !border-2 !border-background"
      />
    </BaseNodeWrapper>
  );
}

View File

@@ -0,0 +1,418 @@
"use client";
import { useCallback, useMemo, useState } from "react";
import { Handle, Position, useReactFlow, useStore, type Node, type NodeProps } from "@xyflow/react";
import { useAction } from "convex/react";
import type { FunctionReference } from "convex/server";
import { useRouter } from "next/navigation";
import { Coins, Loader2, Sparkles, Video } from "lucide-react";
import { useTranslations } from "next-intl";
import { useDebouncedCallback } from "@/hooks/use-debounced-callback";
import {
DEFAULT_VIDEO_MODEL_ID,
getAvailableVideoModels,
getVideoModel,
isVideoModelId,
type VideoModelDurationSeconds,
type VideoModelId,
} from "@/lib/ai-video-models";
import type { Id } from "@/convex/_generated/dataModel";
import { useCanvasPlacement } from "@/components/canvas/canvas-placement-context";
import { useCanvasSync } from "@/components/canvas/canvas-sync-context";
import { useAuthQuery } from "@/hooks/use-auth-query";
import { api } from "@/convex/_generated/api";
import { toast } from "@/lib/toast";
import { classifyError } from "@/lib/ai-errors";
import BaseNodeWrapper from "./base-node-wrapper";
import { Label } from "@/components/ui/label";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
// Data payload stored on a video-prompt node. The underscore-prefixed fields
// are transient status values supplied alongside the node data.
type VideoPromptNodeData = {
  prompt?: string;
  modelId?: string;
  durationSeconds?: number;
  hasAudio?: boolean;
  canvasId?: string;
  _status?: string;
  _statusMessage?: string;
};
export type VideoPromptNodeType = Node<VideoPromptNodeData, "video-prompt">;
/**
 * Coerces a stored duration to one of the two supported clip lengths.
 * Only an exact 10 maps to 10 seconds; every other value (including
 * undefined) falls back to the 5-second default.
 */
function normalizeDuration(value: number | undefined): VideoModelDurationSeconds {
  if (value === 10) {
    return 10;
  }
  return 5;
}
/**
 * Canvas node for composing an AI video generation request.
 *
 * The effective prompt comes from a connected text node when one exists
 * (read-only display) or from the node's own textarea otherwise. The user
 * picks a model and a 5s/10s duration; generating creates a connected
 * ai-video output node and invokes the `ai.generateVideo` Convex action.
 * Offline state and credit balance are checked client-side before the call.
 */
export default function VideoPromptNode({
  id,
  data,
  selected,
}: NodeProps<VideoPromptNodeType>) {
  const t = useTranslations("videoPromptNode");
  const tToast = useTranslations("toasts");
  const nodeData = data as VideoPromptNodeData;
  const router = useRouter();
  const { getNode } = useReactFlow();
  const { queueNodeDataUpdate, status } = useCanvasSync();
  const { createNodeConnectedFromSource } = useCanvasPlacement();
  const balance = useAuthQuery(api.credits.getBalance);
  const edges = useStore((store) => store.edges);
  const nodes = useStore((store) => store.nodes);
  // NOTE(review): the generated Convex API types are bypassed with a manual
  // FunctionReference cast — presumably because codegen does not yet expose
  // the video action; confirm once the generated api surface is updated.
  const generateVideo = useAction(
    (api as unknown as {
      ai: {
        generateVideo: FunctionReference<
          "action",
          "public",
          {
            canvasId: Id<"canvases">;
            sourceNodeId: Id<"nodes">;
            outputNodeId: Id<"nodes">;
            prompt: string;
            modelId: string;
            durationSeconds: 5 | 10;
          },
          { queued: true; outputNodeId: Id<"nodes"> }
        >;
      };
    }).ai.generateVideo,
  );
  const [prompt, setPrompt] = useState(nodeData.prompt ?? "");
  // Fall back to the default model when the stored id is missing or invalid.
  const [modelId, setModelId] = useState<VideoModelId>(
    isVideoModelId(nodeData.modelId ?? "")
      ? (nodeData.modelId as VideoModelId)
      : DEFAULT_VIDEO_MODEL_ID,
  );
  const [durationSeconds, setDurationSeconds] = useState<VideoModelDurationSeconds>(
    normalizeDuration(nodeData.durationSeconds),
  );
  const [isGenerating, setIsGenerating] = useState(false);
  const [error, setError] = useState<string | null>(null);
  // Scans incoming edges for a text node; the first text source with string
  // content supplies the prompt and marks the node as externally driven.
  const inputMeta = useMemo(() => {
    const incomingEdges = edges.filter((edge) => edge.target === id);
    let textPrompt: string | undefined;
    let hasTextInput = false;
    for (const edge of incomingEdges) {
      const sourceNode = nodes.find((node) => node.id === edge.source);
      if (sourceNode?.type !== "text") continue;
      hasTextInput = true;
      const sourceData = sourceNode.data as { content?: string };
      if (typeof sourceData.content === "string") {
        textPrompt = sourceData.content;
        break;
      }
    }
    return {
      hasTextInput,
      textPrompt: textPrompt ?? "",
    };
  }, [edges, id, nodes]);
  const effectivePrompt = inputMeta.hasTextInput ? inputMeta.textPrompt : prompt;
  const selectedModel = getVideoModel(modelId) ?? getVideoModel(DEFAULT_VIDEO_MODEL_ID);
  const creditCost = selectedModel?.creditCost[durationSeconds] ?? 0;
  // null means "balance not loaded yet"; in that case the UI is not blocked.
  const availableCredits =
    balance !== undefined ? balance.balance - balance.reserved : null;
  const hasEnoughCredits =
    availableCredits === null ? true : availableCredits >= creditCost;
  // Persists prompt/model/duration, debounced (500ms) to avoid one write per
  // keystroke; the transient _status fields are stripped before saving.
  const debouncedSave = useDebouncedCallback(
    (
      nextPrompt: string,
      nextModelId: VideoModelId,
      nextDurationSeconds: VideoModelDurationSeconds,
    ) => {
      const raw = data as Record<string, unknown>;
      const { _status, _statusMessage, ...rest } = raw;
      void _status;
      void _statusMessage;
      void queueNodeDataUpdate({
        nodeId: id as Id<"nodes">,
        data: {
          ...rest,
          prompt: nextPrompt,
          modelId: nextModelId,
          durationSeconds: nextDurationSeconds,
        },
      });
    },
    500,
  );
  const handlePromptChange = useCallback(
    (event: React.ChangeEvent<HTMLTextAreaElement>) => {
      const value = event.target.value;
      setPrompt(value);
      debouncedSave(value, modelId, durationSeconds);
    },
    [debouncedSave, durationSeconds, modelId],
  );
  const handleModelChange = useCallback(
    (value: string) => {
      if (!isVideoModelId(value)) return;
      setModelId(value);
      debouncedSave(prompt, value, durationSeconds);
    },
    [debouncedSave, durationSeconds, prompt],
  );
  const handleDurationChange = useCallback(
    (value: VideoModelDurationSeconds) => {
      setDurationSeconds(value);
      debouncedSave(prompt, modelId, value);
    },
    [debouncedSave, modelId, prompt],
  );
  const generateDisabled =
    !effectivePrompt.trim() || balance === undefined || !hasEnoughCredits || isGenerating;
  // Creates the ai-video output node connected from this node, then queues
  // the generation action. Offline, insufficient credits, daily cap and
  // concurrency limits each get a dedicated toast; other failures surface as
  // an inline error message.
  const handleGenerate = useCallback(async () => {
    if (!effectivePrompt.trim() || isGenerating) return;
    if (status.isOffline) {
      toast.warning(
        "Offline aktuell nicht unterstuetzt",
        "KI-Generierung benoetigt eine aktive Verbindung.",
      );
      return;
    }
    if (availableCredits !== null && !hasEnoughCredits) {
      toast.action(tToast("ai.insufficientCreditsTitle"), {
        description: tToast("ai.insufficientCreditsDesc", {
          needed: creditCost,
          available: availableCredits,
        }),
        label: tToast("billing.topUp"),
        onClick: () => router.push("/settings/billing"),
        type: "warning",
      });
      return;
    }
    setError(null);
    setIsGenerating(true);
    try {
      const canvasId = nodeData.canvasId as Id<"canvases">;
      if (!canvasId) {
        throw new Error("Canvas-ID fehlt in der Node");
      }
      const promptToUse = effectivePrompt.trim();
      if (!promptToUse) return;
      const currentNode = getNode(id);
      // Place the output node to the right of this node with a 32px gap.
      const offsetX = (currentNode?.measured?.width ?? 260) + 32;
      const position = {
        x: (currentNode?.position?.x ?? 0) + offsetX,
        y: currentNode?.position?.y ?? 0,
      };
      const clientRequestId = crypto.randomUUID();
      const outputNodeId = await createNodeConnectedFromSource({
        type: "ai-video",
        position,
        data: {
          prompt: promptToUse,
          modelId,
          durationSeconds,
          creditCost,
          canvasId,
        },
        clientRequestId,
        sourceNodeId: id as Id<"nodes">,
        sourceHandle: "video-prompt-out",
        targetHandle: "video-in",
      });
      await toast.promise(
        generateVideo({
          canvasId,
          sourceNodeId: id as Id<"nodes">,
          outputNodeId,
          prompt: promptToUse,
          modelId,
          durationSeconds,
        }),
        {
          loading: tToast("ai.generating"),
          success: tToast("ai.generationQueued"),
          error: tToast("ai.generationFailed"),
        },
      );
    } catch (err) {
      const classified = classifyError(err);
      if (classified.type === "dailyCap") {
        toast.error(
          tToast("billing.dailyLimitReachedTitle"),
          "Morgen stehen wieder Generierungen zur Verfuegung.",
        );
      } else if (classified.type === "concurrency") {
        toast.warning(
          tToast("ai.concurrentLimitReachedTitle"),
          tToast("ai.concurrentLimitReachedDesc"),
        );
      } else {
        setError(classified.rawMessage || tToast("ai.generationFailed"));
      }
    } finally {
      setIsGenerating(false);
    }
  }, [
    availableCredits,
    createNodeConnectedFromSource,
    creditCost,
    durationSeconds,
    effectivePrompt,
    generateVideo,
    getNode,
    hasEnoughCredits,
    id,
    isGenerating,
    modelId,
    nodeData.canvasId,
    router,
    status.isOffline,
    tToast,
  ]);
  return (
    <BaseNodeWrapper
      nodeType="video-prompt"
      selected={selected}
      status={nodeData._status}
      statusMessage={nodeData._statusMessage}
      className="min-w-[260px] border-violet-500/30"
    >
      {/* Optional incoming connection (e.g. from a text node). */}
      <Handle
        type="target"
        position={Position.Left}
        id="video-prompt-in"
        className="!h-3 !w-3 !bg-violet-600 !border-2 !border-background"
      />
      <div className="flex h-full flex-col gap-2 p-3">
        <div className="flex items-center gap-1.5 text-xs font-medium text-violet-700 dark:text-violet-300">
          <Video className="h-3.5 w-3.5" />
          {t("label")}
        </div>
        {/* A connected text node replaces the local textarea as prompt source. */}
        {inputMeta.hasTextInput ? (
          <div className="flex-1 overflow-auto rounded-md border border-violet-500/30 bg-violet-500/5 px-3 py-2">
            <p className="text-[11px] font-medium text-violet-700 dark:text-violet-300">
              {t("promptFromTextNode")}
            </p>
            <p className="mt-1 whitespace-pre-wrap text-sm text-foreground">
              {inputMeta.textPrompt.trim() || t("noPromptHint")}
            </p>
          </div>
        ) : (
          <textarea
            value={prompt}
            onChange={handlePromptChange}
            placeholder={t("promptPlaceholder")}
            className="nodrag nowheel min-h-[72px] w-full flex-1 resize-none rounded-md border border-border bg-background px-3 py-2 text-sm placeholder:text-muted-foreground focus:outline-none focus:ring-1 focus:ring-violet-500"
          />
        )}
        <div className="flex flex-col gap-1.5">
          <Label htmlFor={`video-model-${id}`} className="text-[11px] text-muted-foreground">
            {t("modelLabel")}
          </Label>
          <Select value={modelId} onValueChange={handleModelChange}>
            <SelectTrigger id={`video-model-${id}`} className="nodrag nowheel w-full" size="sm">
              <SelectValue />
            </SelectTrigger>
            <SelectContent className="nodrag">
              {getAvailableVideoModels("pro").map((model) => (
                <SelectItem key={model.id} value={model.id}>
                  {model.label}
                </SelectItem>
              ))}
            </SelectContent>
          </Select>
        </div>
        {/* Duration toggle: fixed 5s / 10s choices. */}
        <div className="flex flex-col gap-1.5">
          <Label className="text-[11px] text-muted-foreground">{t("durationLabel")}</Label>
          <div className="grid grid-cols-2 gap-1">
            <button
              type="button"
              onClick={() => handleDurationChange(5)}
              className={`nodrag rounded-md border px-2 py-1.5 text-xs ${
                durationSeconds === 5
                  ? "border-violet-500 bg-violet-500/10 text-violet-700 dark:text-violet-300"
                  : "border-border bg-background"
              }`}
            >
              {t("duration5s")}
            </button>
            <button
              type="button"
              onClick={() => handleDurationChange(10)}
              className={`nodrag rounded-md border px-2 py-1.5 text-xs ${
                durationSeconds === 10
                  ? "border-violet-500 bg-violet-500/10 text-violet-700 dark:text-violet-300"
                  : "border-border bg-background"
              }`}
            >
              {t("duration10s")}
            </button>
          </div>
        </div>
        {error ? <p className="text-xs text-destructive">{error}</p> : null}
        <button
          type="button"
          onClick={() => void handleGenerate()}
          disabled={generateDisabled}
          className="nodrag inline-flex items-center justify-center gap-2 rounded-md bg-violet-600 px-3 py-2 text-sm font-medium text-white disabled:cursor-not-allowed disabled:opacity-50"
        >
          {isGenerating ? (
            <>
              <Loader2 className="h-4 w-4 animate-spin" />
              {tToast("ai.generating")}
            </>
          ) : (
            <>
              <Sparkles className="h-4 w-4" />
              {t("generateButton")}
              <span className="inline-flex items-center gap-1 text-xs opacity-90">
                <Coins className="h-3 w-3" />
                {creditCost} Cr
              </span>
            </>
          )}
        </button>
        {availableCredits !== null && !hasEnoughCredits ? (
          <p className="text-center text-xs text-destructive">{t("insufficientCredits")}</p>
        ) : null}
      </div>
      {/* Outgoing connection to the generated ai-video node. */}
      <Handle
        type="source"
        position={Position.Right}
        id="video-prompt-out"
        className="!h-3 !w-3 !bg-violet-600 !border-2 !border-background"
      />
    </BaseNodeWrapper>
  );
}

View File

@@ -13,8 +13,23 @@ import {
} from "./openrouter";
import type { Id } from "./_generated/dataModel";
import { assertNodeBelongsToCanvasOrThrow } from "./ai_utils";
import {
createVideoTask,
downloadVideoAsBlob,
FreepikApiError,
getVideoTaskStatus,
} from "./freepik";
import { getVideoModel, isVideoModelId } from "../lib/ai-video-models";
import {
shouldLogVideoPollAttempt,
shouldLogVideoPollResult,
type VideoPollStatus,
} from "../lib/video-poll-logging";
import { normalizePublicTier } from "../lib/tier-credits";
// Retry / polling limits for generation workflows.
const MAX_IMAGE_RETRIES = 2;
// Video polling gives up after this many attempts...
const MAX_VIDEO_POLL_ATTEMPTS = 30;
// ...or after 10 minutes of wall-clock time, whichever is reached first.
const MAX_VIDEO_POLL_TOTAL_MS = 10 * 60 * 1000;
type ErrorCategory =
| "credits"
@@ -34,9 +49,36 @@ function getErrorCode(error: unknown): string | undefined {
const data = error.data as ErrorData;
return data?.code;
}
if (error instanceof FreepikApiError) {
return error.code;
}
return undefined;
}
/**
 * Extracts the provider `source` tag from a thrown value, if present.
 * FreepikApiError instances expose it directly; otherwise any object carrying
 * a string `source` property is honoured. Returns undefined in all other
 * cases.
 */
function getErrorSource(error: unknown): string | undefined {
  if (error instanceof FreepikApiError) {
    return error.source;
  }
  if (!error || typeof error !== "object") {
    return undefined;
  }
  const candidate = (error as { source?: unknown }).source;
  if (typeof candidate === "string") {
    return candidate;
  }
  return undefined;
}
/**
 * Pulls an HTTP-like numeric status out of a provider error. FreepikApiError
 * instances are read directly; any other object must carry a finite numeric
 * `status` property. Returns null when no usable status is found.
 */
function getProviderStatus(error: unknown): number | null {
  if (error instanceof FreepikApiError) {
    return typeof error.status === "number" ? error.status : null;
  }
  if (!error || typeof error !== "object") {
    return null;
  }
  const candidate = (error as { status?: unknown }).status;
  return typeof candidate === "number" && Number.isFinite(candidate) ? candidate : null;
}
function errorMessage(error: unknown): string {
if (error instanceof Error) return error.message;
return String(error ?? "Generation failed");
@@ -54,9 +96,25 @@ function categorizeError(error: unknown): {
retryable: boolean;
} {
const code = getErrorCode(error);
const source = getErrorSource(error);
const message = errorMessage(error);
const lower = message.toLowerCase();
const status = parseOpenRouterStatus(message);
const status = getProviderStatus(error) ?? parseOpenRouterStatus(message);
if (source === "freepik") {
if (code === "model_unavailable") {
return {
category: "provider",
retryable: status === 503,
};
}
if (code === "timeout") {
return { category: "timeout", retryable: true };
}
if (code === "transient") {
return { category: "transient", retryable: true };
}
}
if (
code === "CREDITS_TEST_DISABLED" ||
@@ -552,6 +610,7 @@ export const generateImage = action({
model: modelId,
nodeId: verifiedNodeId,
canvasId: verifiedCanvasId,
provider: "openrouter",
})
: null;
@@ -627,3 +686,553 @@ export const generateImage = action({
}
},
});
/**
 * Returns true when the user's subscription tier grants access to a video
 * model gated at `modelTier`. Tiers form a strict ladder:
 * free < starter < pro < max.
 */
function isVideoModelAllowedForTier(modelTier: "free" | "starter" | "pro", userTier: "free" | "starter" | "pro" | "max") {
  const ladder = ["free", "starter", "pro", "max"] as const;
  return ladder.indexOf(userTier) >= ladder.indexOf(modelTier);
}
/**
 * Stores the provider task id on a node's data blob so the polling action can
 * correlate provider status updates with the canvas node. Throws when the
 * node no longer exists.
 */
export const setVideoTaskInfo = internalMutation({
  args: {
    nodeId: v.id("nodes"),
    taskId: v.string(),
  },
  handler: async (ctx, args) => {
    const node = await ctx.db.get(args.nodeId);
    if (!node) {
      throw new Error("Node not found");
    }
    // Preserve all existing data fields; only the task id is merged in.
    const existingData =
      node.data && typeof node.data === "object" ? (node.data as Record<string, unknown>) : {};
    await ctx.db.patch(args.nodeId, {
      data: { ...existingData, taskId: args.taskId },
    });
  },
});
/**
 * Keeps a node in the "executing" state after a failed poll attempt while
 * recording the retry counter and a human-readable progress message.
 */
export const markVideoPollingRetry = internalMutation({
  args: {
    nodeId: v.id("nodes"),
    attempt: v.number(),
    maxAttempts: v.number(),
    failureMessage: v.string(),
  },
  handler: async (ctx, args) => {
    await ctx.db.patch(args.nodeId, {
      status: "executing",
      retryCount: args.attempt,
      statusMessage: `Retry ${args.attempt}/${args.maxAttempts} - ${args.failureMessage}`,
    });
  },
});
/**
 * Writes the terminal "done" state for a finished video generation: clears
 * the provider task id, records the stored video plus its generation
 * metadata, and resets the status message.
 *
 * Throws when the model id is unknown or the node no longer exists.
 */
export const finalizeVideoSuccess = internalMutation({
  args: {
    nodeId: v.id("nodes"),
    prompt: v.string(),
    modelId: v.string(),
    durationSeconds: v.union(v.literal(5), v.literal(10)),
    storageId: v.id("_storage"),
    retryCount: v.number(),
    creditCost: v.number(),
  },
  handler: async (ctx, args) => {
    const model = getVideoModel(args.modelId);
    if (!model) {
      throw new Error(`Unknown video model: ${args.modelId}`);
    }
    const existing = await ctx.db.get(args.nodeId);
    if (!existing) {
      throw new Error("Node not found");
    }
    // Merge into the existing data blob so unrelated fields survive.
    const previousData =
      existing.data && typeof existing.data === "object"
        ? (existing.data as Record<string, unknown>)
        : {};
    await ctx.db.patch(args.nodeId, {
      status: "done",
      retryCount: args.retryCount,
      statusMessage: undefined,
      data: {
        ...previousData,
        taskId: undefined,
        storageId: args.storageId,
        prompt: args.prompt,
        model: args.modelId,
        modelLabel: model.label,
        durationSeconds: args.durationSeconds,
        generatedAt: Date.now(),
        creditCost: args.creditCost,
      },
    });
  },
});
/**
 * Writes the terminal "error" state for a failed video generation and clears
 * the provider task id so the node is no longer considered pending. Throws
 * when the node no longer exists.
 */
export const finalizeVideoFailure = internalMutation({
  args: {
    nodeId: v.id("nodes"),
    retryCount: v.number(),
    statusMessage: v.string(),
  },
  handler: async (ctx, args) => {
    const existing = await ctx.db.get(args.nodeId);
    if (!existing) {
      throw new Error("Node not found");
    }
    // Merge into the existing data blob so unrelated fields survive.
    const previousData =
      existing.data && typeof existing.data === "object"
        ? (existing.data as Record<string, unknown>)
        : {};
    await ctx.db.patch(args.nodeId, {
      status: "error",
      retryCount: args.retryCount,
      statusMessage: args.statusMessage,
      data: { ...previousData, taskId: undefined },
    });
  },
});
/**
 * Kicks off a Freepik video generation task for an output node and schedules
 * the first poll. If anything fails before polling begins, the credit
 * reservation is released (best-effort), the node is marked failed, and the
 * user's concurrency slot is decremented when this run owns one.
 */
export const processVideoGeneration = internalAction({
  args: {
    outputNodeId: v.id("nodes"),
    prompt: v.string(),
    modelId: v.string(),
    durationSeconds: v.union(v.literal(5), v.literal(10)),
    creditCost: v.number(),
    reservationId: v.optional(v.id("creditTransactions")),
    shouldDecrementConcurrency: v.boolean(),
    userId: v.string(),
  },
  handler: async (ctx, args) => {
    const model = getVideoModel(args.modelId);
    if (!model) {
      throw new Error(`Unknown video model: ${args.modelId}`);
    }
    console.info("[processVideoGeneration] start", {
      outputNodeId: args.outputNodeId,
      modelId: args.modelId,
      endpoint: model.freepikEndpoint,
      durationSeconds: args.durationSeconds,
      promptLength: args.prompt.length,
      hasReservation: Boolean(args.reservationId),
      shouldDecrementConcurrency: args.shouldDecrementConcurrency,
    });
    try {
      const { task_id } = await createVideoTask({
        endpoint: model.freepikEndpoint,
        prompt: args.prompt,
        durationSeconds: args.durationSeconds,
      });
      console.info("[processVideoGeneration] task created", {
        outputNodeId: args.outputNodeId,
        taskId: task_id,
        modelId: args.modelId,
      });
      // Persist the provider task id on the node before polling starts.
      await ctx.runMutation(internal.ai.setVideoTaskInfo, {
        nodeId: args.outputNodeId,
        taskId: task_id,
      });
      // First poll after 5s; pollVideoTask reschedules itself from there.
      await ctx.scheduler.runAfter(5000, internal.ai.pollVideoTask, {
        taskId: task_id,
        outputNodeId: args.outputNodeId,
        prompt: args.prompt,
        modelId: args.modelId,
        durationSeconds: args.durationSeconds,
        creditCost: args.creditCost,
        reservationId: args.reservationId,
        shouldDecrementConcurrency: args.shouldDecrementConcurrency,
        userId: args.userId,
        attempt: 1,
        startedAtMs: Date.now(),
      });
    } catch (error) {
      console.warn("[processVideoGeneration] failed before polling", {
        outputNodeId: args.outputNodeId,
        modelId: args.modelId,
        errorMessage: errorMessage(error),
        errorCode: getErrorCode(error) ?? null,
        source: getErrorSource(error) ?? null,
        providerStatus: getProviderStatus(error),
        freepikBody: error instanceof FreepikApiError ? error.body : undefined,
      });
      // Give the reserved credits back first; failures here must not block
      // the node status update below.
      if (args.reservationId) {
        try {
          await ctx.runMutation(internal.credits.releaseInternal, {
            transactionId: args.reservationId,
          });
        } catch {
          // Keep node failure updates best-effort even if release fails.
        }
      }
      await ctx.runMutation(internal.ai.finalizeVideoFailure, {
        nodeId: args.outputNodeId,
        retryCount: 0,
        statusMessage: formatTerminalStatusMessage(error),
      });
      if (args.shouldDecrementConcurrency) {
        await ctx.runMutation(internal.credits.decrementConcurrency, {
          userId: args.userId,
        });
      }
    }
  },
});
/**
 * Recurring status poll for a queued Freepik video task.
 *
 * Re-schedules itself (via ctx.scheduler.runAfter) until the task reaches a
 * terminal state or the attempt/wall-clock budget is exhausted. On every
 * terminal outcome it (in order): releases or commits the credit reservation,
 * finalizes the output node, and decrements the concurrency counter when this
 * generation run incremented it. The ordering of these mutations is
 * deliberate — do not reorder without reviewing the failure paths.
 */
export const pollVideoTask = internalAction({
  args: {
    taskId: v.string(),
    outputNodeId: v.id("nodes"),
    prompt: v.string(),
    modelId: v.string(),
    durationSeconds: v.union(v.literal(5), v.literal(10)),
    creditCost: v.number(),
    // Present only when internal credits are enabled; refunded on failure.
    reservationId: v.optional(v.id("creditTransactions")),
    shouldDecrementConcurrency: v.boolean(),
    userId: v.string(),
    // 1-based poll counter; incremented on every re-schedule.
    attempt: v.number(),
    // Timestamp of the initial schedule; drives the wall-clock budget.
    startedAtMs: v.number(),
  },
  handler: async (ctx, args) => {
    const elapsedMs = Date.now() - args.startedAtMs;
    // Hard stop: give up once either the attempt budget or the total polling
    // time budget is exhausted. Refund first, then mark the node failed.
    if (args.attempt > MAX_VIDEO_POLL_ATTEMPTS || elapsedMs > MAX_VIDEO_POLL_TOTAL_MS) {
      if (args.reservationId) {
        try {
          await ctx.runMutation(internal.credits.releaseInternal, {
            transactionId: args.reservationId,
          });
        } catch {
          // Keep node status updates best-effort.
        }
      }
      await ctx.runMutation(internal.ai.finalizeVideoFailure, {
        nodeId: args.outputNodeId,
        retryCount: args.attempt,
        statusMessage: "Timeout: Video generation exceeded maximum polling time",
      });
      if (args.shouldDecrementConcurrency) {
        await ctx.runMutation(internal.credits.decrementConcurrency, {
          userId: args.userId,
        });
      }
      return;
    }
    try {
      // Sampled logging: first attempt and every fifth (see video-poll-logging).
      if (shouldLogVideoPollAttempt(args.attempt)) {
        console.info("[pollVideoTask] poll start", {
          outputNodeId: args.outputNodeId,
          taskId: args.taskId,
          attempt: args.attempt,
          elapsedMs,
        });
      }
      const model = getVideoModel(args.modelId);
      if (!model) {
        throw new Error(`Unknown video model: ${args.modelId}`);
      }
      const status = await getVideoTaskStatus({
        taskId: args.taskId,
        statusEndpointPath: model.statusEndpointPath,
        attempt: args.attempt,
      });
      if (shouldLogVideoPollResult(args.attempt, status.status as VideoPollStatus)) {
        console.info("[pollVideoTask] poll result", {
          outputNodeId: args.outputNodeId,
          taskId: args.taskId,
          attempt: args.attempt,
          status: status.status,
          generatedCount: status.generated?.length ?? 0,
          hasError: Boolean(status.error),
          statusError: status.error ?? null,
        });
      }
      // Provider-reported failure: refund, finalize, release concurrency slot.
      if (status.status === "FAILED") {
        if (args.reservationId) {
          try {
            await ctx.runMutation(internal.credits.releaseInternal, {
              transactionId: args.reservationId,
            });
          } catch {
            // Keep node status updates best-effort.
          }
        }
        await ctx.runMutation(internal.ai.finalizeVideoFailure, {
          nodeId: args.outputNodeId,
          retryCount: args.attempt,
          statusMessage: status.error?.trim() || "Provider: Video generation failed",
        });
        if (args.shouldDecrementConcurrency) {
          await ctx.runMutation(internal.credits.decrementConcurrency, {
            userId: args.userId,
          });
        }
        return;
      }
      if (status.status === "COMPLETED") {
        const generatedUrl = status.generated?.[0]?.url;
        if (!generatedUrl) {
          throw new Error("Freepik completed without generated video URL");
        }
        // Persist the video into Convex storage, then finalize the node before
        // committing the reservation — node state wins over billing on crash.
        const blob = await downloadVideoAsBlob(generatedUrl);
        const storageId = await ctx.storage.store(blob);
        await ctx.runMutation(internal.ai.finalizeVideoSuccess, {
          nodeId: args.outputNodeId,
          prompt: args.prompt,
          modelId: args.modelId,
          durationSeconds: args.durationSeconds,
          storageId: storageId as Id<"_storage">,
          retryCount: args.attempt,
          creditCost: args.creditCost,
        });
        if (args.reservationId) {
          await ctx.runMutation(internal.credits.commitInternal, {
            transactionId: args.reservationId,
            actualCost: args.creditCost,
          });
        }
        if (args.shouldDecrementConcurrency) {
          await ctx.runMutation(internal.credits.decrementConcurrency, {
            userId: args.userId,
          });
        }
        return;
      }
      // CREATED / IN_PROGRESS fall through to the re-schedule below.
    } catch (error) {
      console.warn("[pollVideoTask] poll failed", {
        outputNodeId: args.outputNodeId,
        taskId: args.taskId,
        attempt: args.attempt,
        elapsedMs,
        errorMessage: errorMessage(error),
        errorCode: getErrorCode(error) ?? null,
        source: getErrorSource(error) ?? null,
        providerStatus: getProviderStatus(error),
        retryable: categorizeError(error).retryable,
        freepikBody: error instanceof FreepikApiError ? error.body : undefined,
      });
      const { retryable } = categorizeError(error);
      // Retryable poll errors: surface retry progress on the node, then
      // re-schedule with backoff (5s → 10s → 20s, stepped by attempt count).
      if (retryable && args.attempt < MAX_VIDEO_POLL_ATTEMPTS) {
        await ctx.runMutation(internal.ai.markVideoPollingRetry, {
          nodeId: args.outputNodeId,
          attempt: args.attempt,
          maxAttempts: MAX_VIDEO_POLL_ATTEMPTS,
          failureMessage: errorMessage(error),
        });
        const retryDelayMs =
          args.attempt <= 5 ? 5000 : args.attempt <= 15 ? 10000 : 20000;
        await ctx.scheduler.runAfter(retryDelayMs, internal.ai.pollVideoTask, {
          ...args,
          attempt: args.attempt + 1,
        });
        return;
      }
      // Terminal poll error: refund + finalize, mirroring the FAILED branch.
      if (args.reservationId) {
        try {
          await ctx.runMutation(internal.credits.releaseInternal, {
            transactionId: args.reservationId,
          });
        } catch {
          // Keep node status updates best-effort.
        }
      }
      await ctx.runMutation(internal.ai.finalizeVideoFailure, {
        nodeId: args.outputNodeId,
        retryCount: args.attempt,
        statusMessage: formatTerminalStatusMessage(error),
      });
      if (args.shouldDecrementConcurrency) {
        await ctx.runMutation(internal.credits.decrementConcurrency, {
          userId: args.userId,
        });
      }
      return;
    }
    // Task still pending: same backoff schedule as the retry path above.
    const delayMs = args.attempt <= 5 ? 5000 : args.attempt <= 15 ? 10000 : 20000;
    await ctx.scheduler.runAfter(delayMs, internal.ai.pollVideoTask, {
      ...args,
      attempt: args.attempt + 1,
    });
  },
});
/**
 * Public entry point for starting an AI video generation.
 *
 * Validates the canvas/node/model/tier/prompt, reserves credits (or falls back
 * to usage counting when internal credits are disabled), marks the output node
 * as executing, and queues processVideoGeneration. Returns immediately with
 * { queued: true }; progress is delivered via node state updated by the
 * scheduled actions. On any failure before queueing, the reservation is
 * released and the node is finalized as failed.
 */
export const generateVideo = action({
  args: {
    canvasId: v.id("canvases"),
    sourceNodeId: v.id("nodes"),
    outputNodeId: v.id("nodes"),
    prompt: v.string(),
    modelId: v.string(),
    durationSeconds: v.union(v.literal(5), v.literal(10)),
  },
  handler: async (ctx, args): Promise<{ queued: true; outputNodeId: Id<"nodes"> }> => {
    const canvas = await ctx.runQuery(api.canvases.get, {
      canvasId: args.canvasId,
    });
    if (!canvas) {
      throw new Error("Canvas not found");
    }
    // The source node is only validated for canvas membership here; the prompt
    // itself arrives via args rather than being read from the source node.
    const sourceNode = await ctx.runQuery(
      api.nodes.get as FunctionReference<"query", "public">,
      {
        nodeId: args.sourceNodeId,
        includeStorageUrl: false,
      }
    );
    if (!sourceNode) {
      throw new Error("Source node not found");
    }
    assertNodeBelongsToCanvasOrThrow(sourceNode, args.canvasId);
    const outputNode = await ctx.runQuery(
      api.nodes.get as FunctionReference<"query", "public">,
      {
        nodeId: args.outputNodeId,
        includeStorageUrl: false,
      }
    );
    if (!outputNode) {
      throw new Error("Output node not found");
    }
    assertNodeBelongsToCanvasOrThrow(outputNode, args.canvasId);
    if (outputNode.type !== "ai-video") {
      throw new Error("Output node must be ai-video");
    }
    if (!isVideoModelId(args.modelId)) {
      throw new Error(`Unknown video model: ${args.modelId}`);
    }
    const model = getVideoModel(args.modelId);
    if (!model) {
      throw new Error(`Unknown video model: ${args.modelId}`);
    }
    // Gate the requested model on the caller's subscription tier.
    const subscription = await ctx.runQuery(api.credits.getSubscription, {});
    const userTier = normalizePublicTier(subscription?.tier);
    if (!isVideoModelAllowedForTier(model.tier, userTier)) {
      throw new Error(`Model ${args.modelId} requires ${model.tier} tier`);
    }
    const prompt = args.prompt.trim();
    if (!prompt) {
      throw new Error("Prompt is required");
    }
    // Billing is attributed to the canvas owner.
    // NOTE(review): confirm this is intended when a collaborator (not the
    // owner) triggers generation.
    const userId = canvas.ownerId;
    const creditCost = model.creditCost[args.durationSeconds];
    const internalCreditsEnabled = process.env.INTERNAL_CREDITS_ENABLED === "true";
    await ctx.runMutation(internal.credits.checkAbuseLimits, {});
    let usageIncremented = false;
    // With internal credits: reserve the estimated cost up front (released on
    // failure, committed on success by the polling action).
    const reservationId: Id<"creditTransactions"> | null = internalCreditsEnabled
      ? await ctx.runMutation(api.credits.reserve, {
          estimatedCost: creditCost,
          description: `Videogenerierung - ${model.label} (${args.durationSeconds}s)`,
          model: args.modelId,
          nodeId: args.outputNodeId,
          canvasId: args.canvasId,
          provider: "freepik",
          videoMeta: {
            model: args.modelId,
            durationSeconds: args.durationSeconds,
            hasAudio: false,
          },
        })
      : null;
    // Without internal credits: track usage/concurrency instead of reserving.
    if (!internalCreditsEnabled) {
      await ctx.runMutation(internal.credits.incrementUsage, {});
      usageIncremented = true;
    }
    try {
      await ctx.runMutation(internal.ai.markNodeExecuting, {
        nodeId: args.outputNodeId,
      });
      // Hand off to the background pipeline; the caller does not wait for it.
      await ctx.scheduler.runAfter(0, internal.ai.processVideoGeneration, {
        outputNodeId: args.outputNodeId,
        prompt,
        modelId: args.modelId,
        durationSeconds: args.durationSeconds,
        creditCost,
        reservationId: reservationId ?? undefined,
        shouldDecrementConcurrency: usageIncremented,
        userId,
      });
      return { queued: true, outputNodeId: args.outputNodeId };
    } catch (error) {
      // Queueing failed: undo the reservation/usage and fail the node, then
      // rethrow so the caller sees the original error.
      if (reservationId) {
        try {
          await ctx.runMutation(api.credits.release, {
            transactionId: reservationId,
          });
        } catch {
          // Prefer returning a clear node error over masking with cleanup failures.
        }
      }
      await ctx.runMutation(internal.ai.finalizeVideoFailure, {
        nodeId: args.outputNodeId,
        retryCount: 0,
        statusMessage: formatTerminalStatusMessage(error),
      });
      if (usageIncremented) {
        await ctx.runMutation(internal.credits.decrementConcurrency, {
          userId,
        });
      }
      throw error;
    }
  },
});

View File

@@ -420,6 +420,12 @@ export const reserve = mutation({
nodeId: v.optional(v.id("nodes")),
canvasId: v.optional(v.id("canvases")),
model: v.optional(v.string()),
provider: v.optional(v.union(v.literal("openrouter"), v.literal("freepik"))),
videoMeta: v.optional(v.object({
model: v.string(),
durationSeconds: v.number(),
hasAudio: v.boolean(),
})),
},
handler: async (ctx, args) => {
const user = await requireAuth(ctx);
@@ -502,6 +508,8 @@ export const reserve = mutation({
nodeId: args.nodeId,
canvasId: args.canvasId,
model: args.model,
provider: args.provider,
videoMeta: args.videoMeta,
});
return transactionId;

View File

@@ -2,8 +2,52 @@
import { v } from "convex/values";
import { action } from "./_generated/server";
import { shouldLogVideoPollResult, type VideoPollStatus } from "../lib/video-poll-logging";
// Base URL, per-request timeout, and retry budget for all Freepik HTTP calls.
const FREEPIK_BASE = "https://api.freepik.com";
const FREEPIK_REQUEST_TIMEOUT_MS = 30_000;
const FREEPIK_MAX_RETRIES = 2;

/** Lifecycle states reported by the Freepik task-status endpoints. */
export type FreepikVideoTaskStatus =
  | "CREATED"
  | "IN_PROGRESS"
  | "COMPLETED"
  | "FAILED";

/** Normalized shape of a Freepik task-status response. */
export interface FreepikVideoTaskStatusResponse {
  status: FreepikVideoTaskStatus;
  generated?: Array<{ url: string }>;
  error?: string;
}

/** Result of mapping a raw Freepik HTTP failure onto our error taxonomy. */
export interface FreepikMappedError {
  code: "model_unavailable" | "timeout" | "transient" | "unknown";
  message: string;
  retryable: boolean;
}

/**
 * Error raised for any Freepik API failure, carrying the mapped code,
 * retryability, the HTTP status (when known) and the raw response body
 * for diagnostics.
 */
export class FreepikApiError extends Error {
  readonly source = "freepik" as const;
  readonly status?: number;
  readonly code: FreepikMappedError["code"];
  readonly retryable: boolean;
  readonly body?: unknown;

  constructor({
    status,
    code,
    message,
    retryable,
    body,
  }: {
    status?: number;
    code: FreepikMappedError["code"];
    message: string;
    retryable: boolean;
    body?: unknown;
  }) {
    super(message);
    this.name = "FreepikApiError";
    this.status = status;
    this.code = code;
    this.retryable = retryable;
    this.body = body;
  }
}
// Asset categories accepted by the Freepik stock-search endpoints below.
type AssetType = "photo" | "vector" | "icon";
@@ -39,6 +83,495 @@ function parseSize(size?: string): { width?: number; height?: number } {
return { width, height };
}
/** Promise-based sleep used to space out Freepik retry attempts. */
function wait(ms: number): Promise<void> {
  return new Promise<void>((resolve) => setTimeout(resolve, ms));
}
/**
 * Reads FREEPIK_API_KEY from the environment.
 * Throws a non-retryable FreepikApiError when the key is missing or empty.
 */
function getFreepikApiKeyOrThrow(): string {
  const apiKey = process.env.FREEPIK_API_KEY;
  if (apiKey) {
    return apiKey;
  }
  throw new FreepikApiError({
    code: "model_unavailable",
    message: "FREEPIK_API_KEY not set",
    retryable: false,
  });
}
/**
 * Resolves a Freepik endpoint to an absolute URL.
 * Absolute http(s) URLs pass through; relative paths are joined onto
 * FREEPIK_BASE, inserting a slash when the path does not start with one.
 */
function normalizeFreepikEndpoint(path: string): string {
  const isAbsolute = path.startsWith("http://") || path.startsWith("https://");
  if (isAbsolute) {
    return path;
  }
  const separator = path.startsWith("/") ? "" : "/";
  return `${FREEPIK_BASE}${separator}${path}`;
}
/** Narrows an unknown value to a non-null object (arrays included). */
function isRecord(value: unknown): value is Record<string, unknown> {
  return typeof value === "object" && value !== null;
}

/** Returns the trimmed `error` or `message` string field of a record, if any. */
function pickMessageField(holder: Record<string, unknown>): string | undefined {
  const candidate =
    typeof holder.error === "string"
      ? holder.error
      : typeof holder.message === "string"
        ? holder.message
        : undefined;
  const trimmed = candidate?.trim();
  return trimmed && trimmed.length > 0 ? trimmed : undefined;
}

/**
 * Extracts a human-readable error detail from a Freepik response body.
 * Accepts a plain string, a record with `error`/`message`, or a record whose
 * `data` field carries those. Returns undefined when nothing usable exists.
 */
function extractErrorDetail(body: unknown): string | undefined {
  if (typeof body === "string") {
    const trimmed = body.trim();
    if (trimmed.length > 0) {
      return trimmed;
    }
    return undefined;
  }
  if (!isRecord(body)) {
    return undefined;
  }
  const direct = pickMessageField(body);
  if (direct) {
    return direct;
  }
  const data = body.data;
  return isRecord(data) ? pickMessageField(data) : undefined;
}

/**
 * Maps a Freepik HTTP status (plus optional response body) onto our error
 * taxonomy. Note: 404 is treated as retryable because task-status lookups can
 * 404 briefly right after task creation.
 */
export function mapFreepikError(status: number, body: unknown): FreepikMappedError {
  const detail = extractErrorDetail(body);
  switch (status) {
    case 401:
      return {
        code: "model_unavailable",
        message: "Freepik API-Key ungueltig",
        retryable: false,
      };
    case 400:
      return {
        code: "unknown",
        message: detail ?? "Ungueltige Parameter fuer dieses Modell",
        retryable: false,
      };
    case 404:
      return {
        code: "transient",
        message: detail ?? "Freepik Task noch nicht verfuegbar",
        retryable: true,
      };
    case 503:
      return {
        code: "model_unavailable",
        message: "Freepik temporaer nicht verfuegbar",
        retryable: true,
      };
    case 408:
    case 504:
      return {
        code: "timeout",
        message: detail ?? "Freepik timeout",
        retryable: true,
      };
    case 429:
      return {
        code: "transient",
        message: detail ?? "Freepik Rate-Limit erreicht",
        retryable: true,
      };
    default:
      if (status >= 500) {
        return {
          code: "transient",
          message: detail ?? "Freepik Serverfehler",
          retryable: true,
        };
      }
      return {
        code: "unknown",
        message: detail ?? "Unbekannter Freepik-Fehler",
        retryable: false,
      };
  }
}
/**
 * Heuristic: does this error look like a transport/network failure?
 * Matches common substrings ("fetch failed", "network", "connection",
 * "econn") case-insensitively; non-Error values never match.
 */
function isNetworkLikeError(error: unknown): boolean {
  if (!(error instanceof Error)) {
    return false;
  }
  const needle = error.message.toLowerCase();
  const markers = ["fetch failed", "network", "connection", "econn"];
  return markers.some((marker) => needle.includes(marker));
}
/**
 * Reads a response body as text and best-effort decodes it: JSON when it
 * parses, the raw string otherwise, undefined for an empty body.
 */
async function parseResponseBody(response: Response): Promise<unknown> {
  const raw = await response.text();
  if (raw.length === 0) {
    return undefined;
  }
  try {
    return JSON.parse(raw);
  } catch {
    return raw;
  }
}
/**
 * Performs a JSON request against the Freepik API with a per-attempt timeout
 * (FREEPIK_REQUEST_TIMEOUT_MS via AbortController) and bounded retries
 * (FREEPIK_MAX_RETRIES, linear backoff capped at 1200ms).
 *
 * Retries on: mapped-retryable HTTP errors, AbortError timeouts, and
 * network-looking failures. Everything else is thrown immediately as a
 * FreepikApiError (HTTP failures are mapped via mapFreepikError) or re-raised
 * unchanged.
 *
 * @param params.path      Endpoint path or absolute URL (see normalizeFreepikEndpoint).
 * @param params.method    "GET" or "POST".
 * @param params.body      Pre-serialized JSON body; sets Content-Type when present.
 * @param params.useApiKey Pass false to skip the x-freepik-api-key header.
 * @returns The parsed JSON response, cast to TResponse (not validated at runtime).
 * @throws FreepikApiError for HTTP/timeout/network failures.
 */
async function freepikJsonRequest<TResponse>(params: {
  path: string;
  method: "GET" | "POST";
  body?: string;
  useApiKey?: boolean;
}): Promise<TResponse> {
  const apiKey = params.useApiKey === false ? null : getFreepikApiKeyOrThrow();
  const url = normalizeFreepikEndpoint(params.path);
  for (let attempt = 0; attempt <= FREEPIK_MAX_RETRIES; attempt++) {
    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), FREEPIK_REQUEST_TIMEOUT_MS);
    try {
      const response = await fetch(url, {
        method: params.method,
        headers: {
          Accept: "application/json",
          ...(apiKey ? { "x-freepik-api-key": apiKey } : {}),
          ...(params.body ? { "Content-Type": "application/json" } : {}),
        },
        body: params.body,
        signal: controller.signal,
      });
      if (!response.ok) {
        const responseBody = await parseResponseBody(response);
        const mapped = mapFreepikError(response.status, responseBody);
        const mappedError = new FreepikApiError({
          status: response.status,
          code: mapped.code,
          message: mapped.message,
          retryable: mapped.retryable,
          body: responseBody,
        });
        if (mapped.retryable && attempt < FREEPIK_MAX_RETRIES) {
          await wait(Math.min(1200, 300 * (attempt + 1)));
          continue;
        }
        throw mappedError;
      }
      return (await response.json()) as TResponse;
    } catch (error) {
      // Already-mapped Freepik errors made their retry decision in the !ok
      // branch above; re-raise them unchanged instead of re-classifying by
      // message text below. Mirrors downloadVideoAsBlob.
      if (error instanceof FreepikApiError) {
        throw error;
      }
      const isTimeout = error instanceof Error && error.name === "AbortError";
      const retryable = isTimeout || isNetworkLikeError(error);
      if (retryable && attempt < FREEPIK_MAX_RETRIES) {
        await wait(Math.min(1200, 300 * (attempt + 1)));
        continue;
      }
      if (isTimeout) {
        throw new FreepikApiError({
          code: "timeout",
          message: "Freepik timeout",
          retryable: true,
        });
      }
      if (isNetworkLikeError(error)) {
        throw new FreepikApiError({
          code: "transient",
          message: error instanceof Error ? error.message : "Netzwerkfehler bei Freepik",
          retryable: true,
        });
      }
      throw error;
    } finally {
      clearTimeout(timeout);
    }
  }
  // Unreachable in practice (every loop exit returns or throws); kept as a
  // defensive terminal error.
  throw new FreepikApiError({
    code: "unknown",
    message: "Freepik request failed",
    retryable: false,
  });
}
/**
 * Builds the status-poll path for a task. Substitutes every "{task-id}"
 * placeholder when present, otherwise appends the (URL-encoded) task id as a
 * path segment. Throws a non-retryable FreepikApiError for a blank task id.
 */
function buildTaskStatusPath(statusEndpointPath: string, taskId: string): string {
  const trimmedTaskId = taskId.trim();
  if (!trimmedTaskId) {
    throw new FreepikApiError({
      code: "unknown",
      message: "Missing Freepik task_id for status polling",
      retryable: false,
    });
  }
  const encoded = encodeURIComponent(trimmedTaskId);
  if (statusEndpointPath.includes("{task-id}")) {
    return statusEndpointPath.replaceAll("{task-id}", encoded);
  }
  return statusEndpointPath.endsWith("/")
    ? `${statusEndpointPath}${encoded}`
    : `${statusEndpointPath}/${encoded}`;
}
/**
 * Creates a Freepik video-generation task.
 *
 * Sends prompt + duration (plus optional webhook_url / image_url for
 * image-to-video endpoints) to the given endpoint and returns the provider
 * task id. Accepts the id either nested under `data.task_id` or at the top
 * level; throws a non-retryable FreepikApiError when neither is present.
 *
 * NOTE(review): `duration` is sent as a bare number — assumed to be seconds
 * per the Freepik endpoint contracts; confirm against the endpoint docs.
 */
export async function createVideoTask(params: {
  endpoint: string;
  prompt: string;
  durationSeconds: 5 | 10;
  webhookUrl?: string;
  imageUrl?: string;
}): Promise<{ task_id: string }> {
  const payload: Record<string, unknown> = {
    prompt: params.prompt,
    duration: params.durationSeconds,
  };
  if (params.webhookUrl) {
    payload.webhook_url = params.webhookUrl;
  }
  if (params.imageUrl) {
    payload.image_url = params.imageUrl;
  }
  const result = await freepikJsonRequest<{ data?: { task_id?: string } }>({
    path: params.endpoint,
    method: "POST",
    body: JSON.stringify(payload),
  });
  // Log shape only (keys, lengths) — never the prompt or response payload.
  console.info("[freepik.createVideoTask] response", {
    endpoint: params.endpoint,
    durationSeconds: params.durationSeconds,
    hasImageUrl: Boolean(params.imageUrl),
    promptLength: params.prompt.length,
    responseKeys: isRecord(result) ? Object.keys(result) : [],
    dataKeys: isRecord(result.data) ? Object.keys(result.data) : [],
  });
  // Tolerate both envelope styles: { data: { task_id } } and { task_id }.
  const taskId =
    typeof result.data?.task_id === "string"
      ? result.data.task_id
      : typeof (result as { task_id?: unknown }).task_id === "string"
        ? (result as { task_id: string }).task_id
        : undefined;
  if (typeof taskId !== "string" || taskId.trim().length === 0) {
    throw new FreepikApiError({
      code: "unknown",
      message: "Freepik response missing task_id",
      retryable: false,
      body: result,
    });
  }
  return { task_id: taskId };
}
/**
 * Fetches and normalizes the status of a Freepik video task.
 *
 * Tolerates both response envelopes ({ data: {...} } and flat top-level
 * fields) for status, generated URLs, and error text. Throws a non-retryable
 * FreepikApiError when the status field is missing or not one of the four
 * known values. `attempt` is only used to sample logging.
 */
export async function getVideoTaskStatus(params: {
  taskId: string;
  statusEndpointPath: string;
  attempt?: number;
}): Promise<FreepikVideoTaskStatusResponse> {
  const statusPath = buildTaskStatusPath(params.statusEndpointPath, params.taskId);
  const result = await freepikJsonRequest<{
    data?: {
      status?: string;
      generated?: unknown;
      error?: string;
      message?: string;
    };
    status?: string;
    generated?: unknown;
    error?: string;
    message?: string;
  }>({
    path: statusPath,
    method: "GET",
  });
  // Prefer the nested envelope, fall back to top-level.
  const statusRaw =
    typeof result.data?.status === "string"
      ? result.data.status
      : typeof result.status === "string"
        ? result.status
        : undefined;
  // Accept only the four known lifecycle values; anything else is rejected.
  const status =
    statusRaw === "CREATED" ||
    statusRaw === "IN_PROGRESS" ||
    statusRaw === "COMPLETED" ||
    statusRaw === "FAILED"
      ? statusRaw
      : null;
  if (
    status &&
    shouldLogVideoPollResult(params.attempt ?? 1, status as VideoPollStatus)
  ) {
    // Shape-only logging (keys/counts), sampled for IN_PROGRESS polls.
    console.info("[freepik.getVideoTaskStatus] response", {
      taskId: params.taskId,
      statusPath,
      statusRaw: typeof statusRaw === "string" ? statusRaw : null,
      acceptedStatus: status,
      dataKeys: isRecord(result.data) ? Object.keys(result.data) : [],
      generatedCount: Array.isArray(result.data?.generated)
        ? result.data.generated.length
        : Array.isArray(result.generated)
          ? result.generated.length
          : 0,
      hasError:
        typeof result.data?.error === "string" || typeof result.error === "string",
      hasMessage:
        typeof result.data?.message === "string" || typeof result.message === "string",
    });
  }
  if (!status) {
    // Full payload is logged here because this is the diagnostic path.
    console.warn("[freepik.getVideoTaskStatus] unexpected response", {
      taskId: params.taskId,
      statusPath,
      result,
    });
    throw new FreepikApiError({
      code: "unknown",
      message: "Freepik task status missing or invalid",
      retryable: false,
      body: result,
    });
  }
  const generatedRaw = Array.isArray(result.data?.generated)
    ? result.data.generated
    : Array.isArray(result.generated)
      ? result.generated
      : undefined;
  // Normalize generated entries to { url }: accept bare URL strings or
  // { url } records, dropping anything else.
  const generated = Array.isArray(generatedRaw)
    ? generatedRaw
        .map((entry) => {
          const url =
            typeof entry === "string"
              ? entry
              : isRecord(entry) && typeof entry.url === "string"
                ? entry.url
                : undefined;
          if (!url) return null;
          return { url };
        })
        .filter((entry): entry is { url: string } => entry !== null)
    : undefined;
  // Error text: nested error > nested message > top-level error > message.
  const error =
    typeof result.data?.error === "string"
      ? result.data.error
      : typeof result.data?.message === "string"
        ? result.data.message
        : typeof result.error === "string"
          ? result.error
          : typeof result.message === "string"
            ? result.message
            : undefined;
  // Omit empty `generated` arrays and absent errors entirely.
  return {
    status,
    ...(generated && generated.length > 0 ? { generated } : {}),
    ...(error ? { error } : {}),
  };
}
/**
 * Downloads a generated video to a Blob with the same timeout/retry policy as
 * freepikJsonRequest (per-attempt abort after FREEPIK_REQUEST_TIMEOUT_MS, up
 * to FREEPIK_MAX_RETRIES retries with linear backoff capped at 1200ms).
 *
 * NOTE(review): the download URL is typically a CDN URL, yet HTTP failures
 * are mapped through mapFreepikError (Freepik semantics) — confirm that is
 * acceptable for non-Freepik hosts.
 */
export async function downloadVideoAsBlob(url: string): Promise<Blob> {
  for (let attempt = 0; attempt <= FREEPIK_MAX_RETRIES; attempt++) {
    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), FREEPIK_REQUEST_TIMEOUT_MS);
    try {
      const response = await fetch(url, {
        method: "GET",
        signal: controller.signal,
      });
      if (!response.ok) {
        const body = await parseResponseBody(response);
        const mapped = mapFreepikError(response.status, body);
        const mappedError = new FreepikApiError({
          status: response.status,
          code: mapped.code,
          message: mapped.message,
          retryable: mapped.retryable,
          body,
        });
        if (mapped.retryable && attempt < FREEPIK_MAX_RETRIES) {
          await wait(Math.min(1200, 300 * (attempt + 1)));
          continue;
        }
        throw mappedError;
      }
      return await response.blob();
    } catch (error) {
      const isTimeout = error instanceof Error && error.name === "AbortError";
      const retryable = isTimeout || isNetworkLikeError(error);
      if (retryable && attempt < FREEPIK_MAX_RETRIES) {
        await wait(Math.min(1200, 300 * (attempt + 1)));
        continue;
      }
      if (isTimeout) {
        throw new FreepikApiError({
          code: "timeout",
          message: "Freepik video download timeout",
          retryable: true,
        });
      }
      // Already-mapped errors pass through unchanged.
      if (error instanceof FreepikApiError) {
        throw error;
      }
      if (isNetworkLikeError(error)) {
        throw new FreepikApiError({
          code: "transient",
          message: error instanceof Error ? error.message : "Netzwerkfehler beim Video-Download",
          retryable: true,
        });
      }
      throw error;
    } finally {
      // Always cancel the pending abort timer for this attempt.
      clearTimeout(timeout);
    }
  }
  // Unreachable in practice; defensive terminal error.
  throw new FreepikApiError({
    code: "unknown",
    message: "Freepik video download failed",
    retryable: false,
  });
}
export const search = action({
args: {
term: v.string(),

View File

@@ -222,6 +222,12 @@ export default defineSchema({
canvasId: v.optional(v.id("canvases")), // Zugehöriger Canvas
openRouterCost: v.optional(v.number()), // Tatsächliche API-Kosten (Cent)
model: v.optional(v.string()), // OpenRouter Model ID
provider: v.optional(v.union(v.literal("openrouter"), v.literal("freepik"))),
videoMeta: v.optional(v.object({
model: v.string(),
durationSeconds: v.number(),
hasAudio: v.boolean(),
})),
})
.index("by_user", ["userId"])
.index("by_user_type", ["userId", "type"])

109
lib/ai-video-models.ts Normal file
View File

@@ -0,0 +1,109 @@
/** Canonical ids of the video models exposed in the UI. */
export type VideoModelId =
  | "wan-2-2-480p"
  | "wan-2-2-720p"
  | "kling-std-2-1"
  | "seedance-pro-1080p"
  | "kling-pro-2-6";

/** Subscription tier required to use a model. */
export type VideoModelTier = "free" | "starter" | "pro";

/** Supported clip lengths in seconds. */
export type VideoModelDurationSeconds = 5 | 10;

/** Static configuration for one video model. */
export interface VideoModel {
  id: VideoModelId;
  label: string;
  tier: VideoModelTier;
  freepikEndpoint: string;
  statusEndpointPath: string;
  // Credit price per clip length.
  creditCost: Record<VideoModelDurationSeconds, number>;
  supportsAudio: boolean;
  supportsImageToVideo: boolean;
  description: string;
}

/**
 * Model registry. `satisfies` validates every entry against VideoModel while
 * keeping literal key/value inference.
 *
 * NOTE(review): both WAN variants ("480p" and "720p") point at the same
 * wan-2-5 720p endpoint, and the Kling status paths drop the -std/-pro
 * suffix of their creation endpoints — confirm these endpoint values are
 * intentional.
 */
export const VIDEO_MODELS = {
  "wan-2-2-480p": {
    id: "wan-2-2-480p",
    label: "WAN 2.2 480p",
    tier: "free",
    freepikEndpoint: "/v1/ai/text-to-video/wan-2-5-t2v-720p",
    statusEndpointPath: "/v1/ai/text-to-video/wan-2-5-t2v-720p/{task-id}",
    creditCost: { 5: 28, 10: 56 },
    supportsAudio: false,
    supportsImageToVideo: false,
    description: "Schnell und guenstig - gut fuer Konzepte",
  },
  "wan-2-2-720p": {
    id: "wan-2-2-720p",
    label: "WAN 2.2 720p",
    tier: "free",
    freepikEndpoint: "/v1/ai/text-to-video/wan-2-5-t2v-720p",
    statusEndpointPath: "/v1/ai/text-to-video/wan-2-5-t2v-720p/{task-id}",
    creditCost: { 5: 52, 10: 104 },
    supportsAudio: false,
    supportsImageToVideo: false,
    description: "HD-Qualitaet, offenes Modell",
  },
  "kling-std-2-1": {
    id: "kling-std-2-1",
    label: "Kling Standard 2.1",
    tier: "starter",
    freepikEndpoint: "/v1/ai/image-to-video/kling-v2-1-std",
    statusEndpointPath: "/v1/ai/image-to-video/kling-v2-1/{task-id}",
    creditCost: { 5: 50, 10: 100 },
    supportsAudio: false,
    supportsImageToVideo: true,
    description: "Realistisch, stabile Bewegung",
  },
  "seedance-pro-1080p": {
    id: "seedance-pro-1080p",
    label: "Seedance Pro 1080p",
    tier: "starter",
    freepikEndpoint: "/v1/ai/video/seedance-1-5-pro-1080p",
    statusEndpointPath: "/v1/ai/video/seedance-1-5-pro-1080p/{task-id}",
    creditCost: { 5: 33, 10: 66 },
    supportsAudio: false,
    supportsImageToVideo: false,
    description: "Full-HD, gutes Preis-Leistungs-Verhaeltnis",
  },
  "kling-pro-2-6": {
    id: "kling-pro-2-6",
    label: "Kling Pro 2.6",
    tier: "pro",
    freepikEndpoint: "/v1/ai/image-to-video/kling-v2-6-pro",
    statusEndpointPath: "/v1/ai/image-to-video/kling-v2-6/{task-id}",
    creditCost: { 5: 59, 10: 118 },
    supportsAudio: false,
    supportsImageToVideo: true,
    description: "Beste Qualitaet, cineastische Bewegung",
  },
} as const satisfies Record<VideoModelId, VideoModel>;

/** Model preselected in newly created video-prompt nodes. */
export const DEFAULT_VIDEO_MODEL_ID: VideoModelId = "wan-2-2-720p";

// Fast membership lookup over the registry keys.
const KNOWN_MODEL_IDS: ReadonlySet<string> = new Set(Object.keys(VIDEO_MODELS));

/** Type guard: is the string a known VideoModelId? */
export function isVideoModelId(value: string): value is VideoModelId {
  return KNOWN_MODEL_IDS.has(value);
}

/** Looks up a model by id; undefined for unknown ids. */
export function getVideoModel(id: string): VideoModel | undefined {
  return isVideoModelId(id) ? VIDEO_MODELS[id] : undefined;
}

// Tier ordering used to decide which models a subscription may use.
const TIER_RANK: Record<VideoModelTier, number> = {
  free: 0,
  starter: 1,
  pro: 2,
};

/** All models usable at `tier` or below, in registry order. */
export function getAvailableVideoModels(tier: VideoModelTier): VideoModel[] {
  const ceiling = TIER_RANK[tier];
  return Object.values(VIDEO_MODELS).filter(
    (model) => TIER_RANK[model.tier] <= ceiling,
  );
}

View File

@@ -28,6 +28,8 @@ export type CanvasConnectionValidationReason =
| "incomplete"
| "self-loop"
| "unknown-node"
| "ai-video-source-invalid"
| "video-prompt-target-invalid"
| "adjustment-source-invalid"
| "adjustment-incoming-limit"
| "compare-incoming-limit"
@@ -41,7 +43,19 @@ export function validateCanvasConnectionPolicy(args: {
}): CanvasConnectionValidationReason | null {
const { sourceType, targetType, targetIncomingCount } = args;
if (isAdjustmentNodeType(targetType)) {
if (targetType === "ai-video" && sourceType !== "video-prompt") {
return "ai-video-source-invalid";
}
if (sourceType === "video-prompt" && targetType !== "ai-video") {
return "video-prompt-target-invalid";
}
if (targetType === "render" && !RENDER_ALLOWED_SOURCE_TYPES.has(sourceType)) {
return "render-source-invalid";
}
if (isAdjustmentNodeType(targetType) && targetType !== "render") {
if (!ADJUSTMENT_ALLOWED_SOURCE_TYPES.has(sourceType)) {
return "adjustment-source-invalid";
}
@@ -54,10 +68,6 @@ export function validateCanvasConnectionPolicy(args: {
return "compare-incoming-limit";
}
if (targetType === "render" && !RENDER_ALLOWED_SOURCE_TYPES.has(sourceType)) {
return "render-source-invalid";
}
if (
isAdjustmentNodeType(sourceType) &&
ADJUSTMENT_DISALLOWED_TARGET_TYPES.has(targetType)
@@ -78,6 +88,10 @@ export function getCanvasConnectionValidationMessage(
return "Node kann nicht mit sich selbst verbunden werden.";
case "unknown-node":
return "Verbindung enthaelt unbekannte Nodes.";
case "ai-video-source-invalid":
return "KI-Video-Ausgabe akzeptiert nur Eingaben von KI-Video.";
case "video-prompt-target-invalid":
return "KI-Video kann nur mit KI-Video-Ausgabe verbunden werden.";
case "adjustment-source-invalid":
return "Adjustment-Nodes akzeptieren nur Bild-, Asset-, KI-Bild- oder Adjustment-Input.";
case "adjustment-incoming-limit":

View File

@@ -98,6 +98,12 @@ export const NODE_CATALOG: readonly NodeCatalogEntry[] = [
category: "ai-output",
phase: 1,
}),
entry({
type: "video-prompt",
label: "KI-Video",
category: "ai-output",
phase: 1,
}),
entry({
type: "ai-text",
label: "KI-Text",
@@ -108,7 +114,7 @@ export const NODE_CATALOG: readonly NodeCatalogEntry[] = [
}),
entry({
type: "ai-video",
label: "KI-Video",
label: "KI-Video-Ausgabe",
category: "ai-output",
phase: 2,
systemOutput: true,

View File

@@ -20,6 +20,18 @@ export const CANVAS_NODE_TEMPLATES = [
height: 220,
defaultData: { prompt: "", model: "", aspectRatio: "1:1" },
},
{
type: "video-prompt",
label: "KI-Video",
width: 320,
height: 220,
defaultData: {
prompt: "",
modelId: "wan-2-2-720p",
durationSeconds: 5,
hasAudio: false,
},
},
{
type: "note",
label: "Notiz",

View File

@@ -2,6 +2,7 @@ export const PHASE1_CANVAS_NODE_TYPES = [
"image",
"text",
"prompt",
"video-prompt",
"ai-image",
"group",
"frame",
@@ -13,6 +14,7 @@ export const CANVAS_NODE_TYPES = [
"image",
"text",
"prompt",
"video-prompt",
"color",
"video",
"asset",

View File

@@ -102,7 +102,9 @@ export function convexEdgeToRF(edge: Doc<"edges">): RFEdge {
*/
const SOURCE_NODE_GLOW_RGB: Record<string, readonly [number, number, number]> = {
prompt: [139, 92, 246],
"video-prompt": [124, 58, 237],
"ai-image": [139, 92, 246],
"ai-video": [124, 58, 237],
image: [13, 148, 136],
text: [13, 148, 136],
note: [13, 148, 136],
@@ -208,7 +210,9 @@ export const NODE_HANDLE_MAP: Record<
image: { source: undefined, target: undefined },
text: { source: undefined, target: undefined },
prompt: { source: "prompt-out", target: "image-in" },
"video-prompt": { source: "video-prompt-out", target: "video-prompt-in" },
"ai-image": { source: "image-out", target: "prompt-in" },
"ai-video": { source: "video-out", target: "video-in" },
group: { source: undefined, target: undefined },
frame: { source: "frame-out", target: "frame-in" },
note: { source: undefined, target: undefined },
@@ -232,8 +236,19 @@ export const NODE_DEFAULTS: Record<
image: { width: 280, height: 200, data: {} },
text: { width: 256, height: 120, data: { content: "" } },
prompt: { width: 288, height: 220, data: { prompt: "", aspectRatio: "1:1" } },
"video-prompt": {
width: 288,
height: 220,
data: {
prompt: "",
modelId: "wan-2-2-720p",
durationSeconds: 5,
hasAudio: false,
},
},
// 1:1 viewport 320 + chrome 88 ≈ äußere Höhe (siehe lib/image-formats.ts)
"ai-image": { width: 320, height: 408, data: {} },
"ai-video": { width: 360, height: 280, data: {} },
group: { width: 400, height: 300, data: { label: "Gruppe" } },
frame: {
width: 400,

12
lib/video-poll-logging.ts Normal file
View File

@@ -0,0 +1,12 @@
/** Lifecycle values a video poll can report. */
export type VideoPollStatus = "CREATED" | "IN_PROGRESS" | "COMPLETED" | "FAILED";

/** Sample poll-start logs: the first attempt plus every fifth one. */
export function shouldLogVideoPollAttempt(attempt: number): boolean {
  if (attempt === 1) {
    return true;
  }
  return attempt % 5 === 0;
}

/**
 * Sample poll-result logs: any non-IN_PROGRESS result always logs;
 * IN_PROGRESS results log only on sampled attempts.
 */
export function shouldLogVideoPollResult(
  attempt: number,
  status: VideoPollStatus,
): boolean {
  if (status !== "IN_PROGRESS") {
    return true;
  }
  return shouldLogVideoPollAttempt(attempt);
}

View File

@@ -125,6 +125,29 @@
"failed": "Fehlgeschlagen"
}
},
"videoPromptNode": {
"label": "KI-Video",
"promptPlaceholder": "Beschreibe dein Video...",
"promptFromTextNode": "Prompt aus Text-Node",
"modelLabel": "Modell",
"durationLabel": "Länge",
"duration5s": "5 Sek.",
"duration10s": "10 Sek.",
"generateButton": "Video generieren",
"insufficientCredits": "Nicht genug Credits",
"noPromptHint": "Prompt eingeben oder Text-Node verbinden"
},
"aiVideoNode": {
"label": "KI-Video-Ausgabe",
"idleHint": "Verbinde eine KI-Video-Node und starte dort die Generierung.",
"generating": "Video wird generiert...",
"retryButton": "Erneut versuchen",
"downloadButton": "Video herunterladen",
"modelMeta": "Modell: {model}",
"durationMeta": "{duration} Sek.",
"creditMeta": "{credits} Credits",
"errorFallback": "Video-Generierung fehlgeschlagen"
},
"credits": {
"balance": "Guthaben",
"available": "Verfügbar",
@@ -205,6 +228,8 @@
"contentPolicyDesc": "Versuche, den Prompt umzuformulieren.",
"timeoutTitle": "Generierung abgelaufen",
"timeoutDesc": "Credits wurden nicht abgebucht.",
"providerIssuesTitle": "KI-Anbieter möglicherweise gestört",
"providerIssuesDesc": "Mehrere Bild- oder Video-Generierungen sind fehlgeschlagen.",
"openrouterIssuesTitle": "OpenRouter möglicherweise gestört",
"openrouterIssuesDesc": "Mehrere Generierungen fehlgeschlagen.",
"concurrentLimitReachedTitle": "Generierung bereits aktiv",

View File

@@ -125,6 +125,29 @@
"failed": "Failed"
}
},
"videoPromptNode": {
"label": "AI video",
"promptPlaceholder": "Describe your video...",
"promptFromTextNode": "Prompt from text node",
"modelLabel": "Model",
"durationLabel": "Length",
"duration5s": "5 sec",
"duration10s": "10 sec",
"generateButton": "Generate video",
"insufficientCredits": "Not enough credits",
"noPromptHint": "Enter a prompt or connect a text node"
},
"aiVideoNode": {
"label": "AI video output",
"idleHint": "Connect an AI video node and start generation there.",
"generating": "Generating video...",
"retryButton": "Try again",
"downloadButton": "Download video",
"modelMeta": "Model: {model}",
"durationMeta": "{duration} sec",
"creditMeta": "{credits} credits",
"errorFallback": "Video generation failed"
},
"credits": {
"balance": "Balance",
"available": "Available",
@@ -205,6 +228,8 @@
"contentPolicyDesc": "Try rephrasing your prompt.",
"timeoutTitle": "Generation timed out",
"timeoutDesc": "Credits were not charged.",
"providerIssuesTitle": "AI providers may be experiencing issues",
"providerIssuesDesc": "Several image or video generations have failed.",
"openrouterIssuesTitle": "OpenRouter may be experiencing issues",
"openrouterIssuesDesc": "Several generations have failed.",
"concurrentLimitReachedTitle": "Generation already active",

145
tests/ai-video-node.test.ts Normal file
View File

@@ -0,0 +1,145 @@
// @vitest-environment jsdom
// Test setup for the AiVideoNode canvas component: module mocks isolate the
// component from next-intl, Convex, canvas sync, toasts, and React Flow.
import React from "react";
import { act } from "react";
import { createRoot, type Root } from "react-dom/client";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
// vi.hoisted runs before the hoisted vi.mock factories below, so these mock
// functions already exist when the module mocks are installed.
const mocks = vi.hoisted(() => ({
  // Resolves as if the Convex action queued a generation for this output node.
  generateVideo: vi.fn(async () => ({ queued: true, outputNodeId: "ai-video-1" })),
  // Single edge: the video-prompt node feeds this ai-video output node.
  getEdges: vi.fn(() => [{ source: "video-prompt-1", target: "ai-video-1" }]),
  // Only the connected video-prompt node resolves; any other id yields null.
  getNode: vi.fn((id: string) => {
    if (id === "video-prompt-1") {
      return { id, type: "video-prompt", data: { canvasId: "canvas-1" } };
    }
    return null;
  }),
  // Pass-through toast.promise: awaiting it yields the wrapped promise's value.
  toastPromise: vi.fn(async <T,>(promise: Promise<T>) => await promise),
  toastWarning: vi.fn(),
}));
// The translator returns the key itself, so assertions can match on i18n keys.
vi.mock("next-intl", () => ({
  useTranslations: () => (key: string) => key,
}));
vi.mock("convex/react", () => ({
  useAction: () => mocks.generateVideo,
}));
vi.mock("@/convex/_generated/api", () => ({
  api: {
    ai: {
      generateVideo: "ai.generateVideo",
    },
  },
}));
// Healthy online sync status so interactive controls are not disabled.
vi.mock("@/components/canvas/canvas-sync-context", () => ({
  useCanvasSync: () => ({
    status: { isOffline: false, isSyncing: false, pendingCount: 0 },
  }),
}));
vi.mock("@/lib/toast", () => ({
  toast: {
    promise: mocks.toastPromise,
    warning: mocks.toastWarning,
  },
}));
// Strip the node chrome: render only the node's children in a plain div.
vi.mock("@/components/canvas/nodes/base-node-wrapper", () => ({
  default: ({ children }: { children: React.ReactNode }) => React.createElement("div", null, children),
}));
// Minimal React Flow surface: handles render nothing; the flow instance only
// exposes the two lookups the component actually calls.
vi.mock("@xyflow/react", () => ({
  Handle: () => null,
  Position: { Left: "left", Right: "right" },
  useReactFlow: () => ({
    getEdges: mocks.getEdges,
    getNode: mocks.getNode,
  }),
}));
import AiVideoNode from "@/components/canvas/nodes/ai-video-node";
// Opt in to React's act() environment so un-acted updates raise warnings.
(globalThis as typeof globalThis & { IS_REACT_ACT_ENVIRONMENT?: boolean }).IS_REACT_ACT_ENVIRONMENT = true;
describe("AiVideoNode", () => {
let container: HTMLDivElement | null = null;
let root: Root | null = null;
beforeEach(() => {
mocks.generateVideo.mockClear();
mocks.getEdges.mockClear();
mocks.getNode.mockClear();
mocks.toastPromise.mockClear();
mocks.toastWarning.mockClear();
});
afterEach(() => {
if (root) {
act(() => {
root?.unmount();
});
}
container?.remove();
container = null;
root = null;
});
it("retries generation from the connected video prompt node", async () => {
container = document.createElement("div");
document.body.appendChild(container);
root = createRoot(container);
await act(async () => {
root?.render(
React.createElement(AiVideoNode, {
id: "ai-video-1",
selected: false,
dragging: false,
draggable: true,
selectable: true,
deletable: true,
zIndex: 1,
isConnectable: true,
type: "ai-video",
data: {
prompt: "ein suesser Berner Sennenhund rennt ueber eine Wiese",
modelId: "wan-2-2-480p",
durationSeconds: 5,
canvasId: "canvas-1",
_status: "error",
_statusMessage: "Netzwerk: task not found yet",
} as Record<string, unknown>,
positionAbsoluteX: 0,
positionAbsoluteY: 0,
}),
);
});
const retryButton = Array.from(container.querySelectorAll("button")).find((element) =>
element.textContent?.includes("retryButton"),
);
if (!(retryButton instanceof HTMLButtonElement)) {
throw new Error("Retry button not found");
}
expect(retryButton.disabled).toBe(false);
await act(async () => {
retryButton.click();
});
expect(mocks.generateVideo).toHaveBeenCalledTimes(1);
expect(mocks.generateVideo).toHaveBeenCalledWith({
canvasId: "canvas-1",
sourceNodeId: "video-prompt-1",
outputNodeId: "ai-video-1",
prompt: "ein suesser Berner Sennenhund rennt ueber eine Wiese",
modelId: "wan-2-2-480p",
durationSeconds: 5,
});
});
});

View File

@@ -21,4 +21,60 @@ describe("canvas connection policy", () => {
getCanvasConnectionValidationMessage("compare-incoming-limit"),
).toBe("Compare-Nodes erlauben genau zwei eingehende Verbindungen.");
});
it("allows text to video-prompt", () => {
expect(
validateCanvasConnectionPolicy({
sourceType: "text",
targetType: "video-prompt",
targetIncomingCount: 0,
}),
).toBeNull();
});
it("allows video-prompt to ai-video", () => {
expect(
validateCanvasConnectionPolicy({
sourceType: "video-prompt",
targetType: "ai-video",
targetIncomingCount: 0,
}),
).toBeNull();
});
it("blocks direct video-prompt to image prompt flow", () => {
expect(
validateCanvasConnectionPolicy({
sourceType: "video-prompt",
targetType: "prompt",
targetIncomingCount: 0,
}),
).toBe("video-prompt-target-invalid");
});
it("blocks ai-video as adjustment source", () => {
expect(
validateCanvasConnectionPolicy({
sourceType: "ai-video",
targetType: "curves",
targetIncomingCount: 0,
}),
).toBe("adjustment-source-invalid");
});
it("blocks ai-video as render source", () => {
expect(
validateCanvasConnectionPolicy({
sourceType: "ai-video",
targetType: "render",
targetIncomingCount: 0,
}),
).toBe("render-source-invalid");
});
it("describes video-only ai-video input", () => {
expect(
getCanvasConnectionValidationMessage("ai-video-source-invalid"),
).toBe("KI-Video-Ausgabe akzeptiert nur Eingaben von KI-Video.");
});
});

View File

@@ -0,0 +1,219 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import {
createVideoTask,
downloadVideoAsBlob,
getVideoTaskStatus,
} from "@/convex/freepik";
// Shape of a canned HTTP response used to stub the global fetch.
type MockResponseInit = {
  ok: boolean;
  status: number;
  statusText?: string;
  json?: unknown;
  text?: string;
  blob?: Blob;
};
// Builds a minimal Response stand-in whose body accessors are vitest mocks.
// Fallbacks are computed lazily per call: text() serialises the JSON payload
// and blob() yields an empty Blob when none was supplied.
function createMockResponse(init: MockResponseInit): Response {
  const { ok, status } = init;
  const stub = {
    ok,
    status,
    statusText: init.statusText ?? "",
    headers: new Headers(),
    json: vi.fn(async () => init.json),
    text: vi.fn(async () => init.text ?? JSON.stringify(init.json ?? {})),
    blob: vi.fn(async () => init.blob ?? new Blob([])),
  };
  return stub as unknown as Response;
}
describe("freepik video client", () => {
const originalApiKey = process.env.FREEPIK_API_KEY;
const fetchMock = vi.fn<typeof fetch>();
beforeEach(() => {
process.env.FREEPIK_API_KEY = "test-key";
fetchMock.mockReset();
vi.stubGlobal("fetch", fetchMock);
});
afterEach(() => {
vi.unstubAllGlobals();
process.env.FREEPIK_API_KEY = originalApiKey;
});
it("creates a video task", async () => {
fetchMock.mockResolvedValueOnce(
createMockResponse({
ok: true,
status: 200,
json: { data: { task_id: "task_123" } },
}),
);
const result = await createVideoTask({
endpoint: "/v1/ai/video/seedance-1-5-pro-1080p",
prompt: "A cinematic city timelapse",
durationSeconds: 5,
});
expect(result.task_id).toBe("task_123");
expect(fetchMock).toHaveBeenCalledTimes(1);
});
it("accepts root-level task_id responses for current video endpoints", async () => {
fetchMock.mockResolvedValueOnce(
createMockResponse({
ok: true,
status: 200,
json: { task_id: "task_root", status: "CREATED" },
}),
);
const result = await createVideoTask({
endpoint: "/v1/ai/image-to-video/kling-v2-1-std",
prompt: "A cinematic city timelapse",
durationSeconds: 5,
});
expect(result.task_id).toBe("task_root");
});
it("reads completed video task status", async () => {
fetchMock.mockResolvedValueOnce(
createMockResponse({
ok: true,
status: 200,
json: {
data: {
status: "COMPLETED",
generated: [{ url: "https://cdn.example.com/video.mp4" }],
},
},
}),
);
const result = await getVideoTaskStatus({
taskId: "task_123",
statusEndpointPath: "/v1/ai/text-to-video/wan-2-5-t2v-720p/{task-id}",
});
expect(result.status).toBe("COMPLETED");
expect(result.generated?.[0]?.url).toBe("https://cdn.example.com/video.mp4");
expect(fetchMock).toHaveBeenCalledWith(
"https://api.freepik.com/v1/ai/text-to-video/wan-2-5-t2v-720p/task_123",
expect.any(Object),
);
});
it("downloads completed video as blob", async () => {
const blob = new Blob(["video-bytes"], { type: "video/mp4" });
fetchMock.mockResolvedValueOnce(
createMockResponse({
ok: true,
status: 200,
blob,
}),
);
const result = await downloadVideoAsBlob("https://cdn.example.com/video.mp4");
expect(result.type).toBe("video/mp4");
expect(fetchMock).toHaveBeenCalledTimes(1);
});
it("maps unauthorized responses to model_unavailable", async () => {
fetchMock.mockResolvedValueOnce(
createMockResponse({
ok: false,
status: 401,
statusText: "Unauthorized",
json: { message: "invalid api key" },
}),
);
await expect(
createVideoTask({
endpoint: "/v1/ai/video/seedance-1-5-pro-1080p",
prompt: "A cinematic city timelapse",
durationSeconds: 5,
}),
).rejects.toMatchObject({
source: "freepik",
code: "model_unavailable",
status: 401,
retryable: false,
});
expect(fetchMock).toHaveBeenCalledTimes(1);
});
it("retries once on 503 and succeeds", async () => {
fetchMock
.mockResolvedValueOnce(
createMockResponse({
ok: false,
status: 503,
statusText: "Service Unavailable",
json: { message: "temporary outage" },
}),
)
.mockResolvedValueOnce(
createMockResponse({
ok: true,
status: 200,
json: { data: { task_id: "task_after_retry" } },
}),
);
const result = await createVideoTask({
endpoint: "/v1/ai/video/seedance-1-5-pro-1080p",
prompt: "A cinematic city timelapse",
durationSeconds: 10,
});
expect(result.task_id).toBe("task_after_retry");
expect(fetchMock).toHaveBeenCalledTimes(2);
});
it("treats task 404 during polling as retryable transient provider state", async () => {
fetchMock
.mockResolvedValueOnce(
createMockResponse({
ok: false,
status: 404,
statusText: "Not Found",
json: { message: "Not found" },
}),
)
.mockResolvedValueOnce(
createMockResponse({
ok: false,
status: 404,
statusText: "Not Found",
json: { message: "Not found" },
}),
)
.mockResolvedValueOnce(
createMockResponse({
ok: false,
status: 404,
statusText: "Not Found",
json: { message: "Not found" },
}),
);
await expect(
getVideoTaskStatus({
taskId: "task_404",
statusEndpointPath: "/v1/ai/text-to-video/wan-2-5-t2v-720p/{task-id}",
}),
).rejects.toMatchObject({
source: "freepik",
code: "transient",
status: 404,
retryable: true,
});
expect(fetchMock).toHaveBeenCalledTimes(3);
});
});

View File

@@ -0,0 +1,73 @@
import { describe, expect, it } from "vitest";
import {
DEFAULT_VIDEO_MODEL_ID,
VIDEO_MODELS,
getAvailableVideoModels,
getVideoModel,
isVideoModelId,
} from "@/lib/ai-video-models";
describe("ai video models registry", () => {
it("contains all planned MVP models", () => {
expect(Object.keys(VIDEO_MODELS)).toHaveLength(5);
expect(Object.keys(VIDEO_MODELS)).toEqual([
"wan-2-2-480p",
"wan-2-2-720p",
"kling-std-2-1",
"seedance-pro-1080p",
"kling-pro-2-6",
]);
expect(isVideoModelId(DEFAULT_VIDEO_MODEL_ID)).toBe(true);
});
it("keeps credit costs consistent for 5s and 10s durations", () => {
for (const model of Object.values(VIDEO_MODELS)) {
expect(model.creditCost[5]).toBeGreaterThan(0);
expect(model.creditCost[10]).toBeGreaterThan(0);
expect(model.creditCost[10]).toBe(model.creditCost[5] * 2);
}
});
it("filters available models by tier", () => {
expect(getAvailableVideoModels("free").map((model) => model.id)).toEqual([
"wan-2-2-480p",
"wan-2-2-720p",
]);
expect(getAvailableVideoModels("starter").map((model) => model.id)).toEqual([
"wan-2-2-480p",
"wan-2-2-720p",
"kling-std-2-1",
"seedance-pro-1080p",
]);
expect(getAvailableVideoModels("pro").map((model) => model.id)).toEqual([
"wan-2-2-480p",
"wan-2-2-720p",
"kling-std-2-1",
"seedance-pro-1080p",
"kling-pro-2-6",
]);
});
it("supports lookup and model id guards", () => {
expect(isVideoModelId("wan-2-2-480p")).toBe(true);
expect(isVideoModelId("not-a-model")).toBe(false);
const validModel = getVideoModel("wan-2-2-720p");
expect(validModel?.label).toBe("WAN 2.2 720p");
expect(getVideoModel("not-a-model")).toBeUndefined();
});
it("stores model-specific status endpoints for polling", () => {
expect(getVideoModel("wan-2-2-480p")?.statusEndpointPath).toBe(
"/v1/ai/text-to-video/wan-2-5-t2v-720p/{task-id}",
);
expect(getVideoModel("seedance-pro-1080p")?.statusEndpointPath).toBe(
"/v1/ai/video/seedance-1-5-pro-1080p/{task-id}",
);
expect(getVideoModel("kling-std-2-1")?.statusEndpointPath).toBe(
"/v1/ai/image-to-video/kling-v2-1/{task-id}",
);
});
});

View File

@@ -0,0 +1,22 @@
import { describe, expect, it } from "vitest";
import {
shouldLogVideoPollAttempt,
shouldLogVideoPollResult,
} from "@/lib/video-poll-logging";
describe("video poll logging", () => {
it("logs only the first and every fifth in-progress attempt", () => {
expect(shouldLogVideoPollAttempt(1)).toBe(true);
expect(shouldLogVideoPollAttempt(2)).toBe(false);
expect(shouldLogVideoPollAttempt(5)).toBe(true);
expect(shouldLogVideoPollAttempt(6)).toBe(false);
});
it("always logs terminal poll results", () => {
expect(shouldLogVideoPollResult(2, "IN_PROGRESS")).toBe(false);
expect(shouldLogVideoPollResult(5, "IN_PROGRESS")).toBe(true);
expect(shouldLogVideoPollResult(17, "COMPLETED")).toBe(true);
expect(shouldLogVideoPollResult(3, "FAILED")).toBe(true);
});
});

View File

@@ -0,0 +1,235 @@
// @vitest-environment jsdom
import React from "react";
import { act } from "react";
import { createRoot, type Root } from "react-dom/client";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import type { Id } from "@/convex/_generated/dataModel";
// Test setup for the VideoPromptNode canvas component: module mocks isolate
// it from next-intl, routing, Convex, auth queries, canvas contexts, toasts,
// UI primitives, and React Flow.
// vi.hoisted runs before the hoisted vi.mock factories below, so these mock
// values already exist when the module mocks are installed.
const mocks = vi.hoisted(() => ({
  // Mutable store state fed into the mocked React Flow useStore selector.
  edges: [] as Array<{ source: string; target: string }>,
  nodes: [] as Array<{ id: string; type: string; data: Record<string, unknown> }>,
  // Credit balance returned by the mocked auth query; undefined = still loading.
  balance: { balance: 100, reserved: 0 } as { balance: number; reserved: number } | undefined,
  queueNodeDataUpdate: vi.fn(async () => undefined),
  // Placement context resolves with the id of the newly created output node.
  createNodeConnectedFromSource: vi.fn(async () => "ai-video-node-1" as Id<"nodes">),
  generateVideo: vi.fn(async () => ({ queued: true, outputNodeId: "ai-video-node-1" })),
  getEdges: vi.fn(() => [] as Array<{ source: string; target: string }>),
  // Only the prompt node under test resolves, with position/size metadata.
  getNode: vi.fn((id: string) =>
    id === "video-prompt-1"
      ? { id, position: { x: 100, y: 50 }, measured: { width: 260, height: 220 } }
      : null,
  ),
  push: vi.fn(),
  // Pass-through toast.promise: awaiting it yields the wrapped promise's value.
  toastPromise: vi.fn(async <T,>(promise: Promise<T>) => await promise),
  toastWarning: vi.fn(),
  toastAction: vi.fn(),
  toastError: vi.fn(),
}));
// The translator returns the key itself, so assertions can match on i18n keys.
vi.mock("next-intl", () => ({
  useTranslations: () => (key: string) => key,
}));
vi.mock("next/navigation", () => ({
  useRouter: () => ({ push: mocks.push }),
}));
vi.mock("convex/react", () => ({
  useAction: () => mocks.generateVideo,
}));
vi.mock("@/convex/_generated/api", () => ({
  api: {
    ai: {
      generateVideo: "ai.generateVideo",
    },
    credits: {
      getBalance: "credits.getBalance",
    },
  },
}));
vi.mock("@/hooks/use-auth-query", () => ({
  useAuthQuery: () => mocks.balance,
}));
// Debouncing is bypassed: the callback fires synchronously in tests.
vi.mock("@/hooks/use-debounced-callback", () => ({
  useDebouncedCallback: (callback: (...args: Array<unknown>) => void) => callback,
}));
// Healthy online sync status so interactive controls are not disabled.
vi.mock("@/components/canvas/canvas-sync-context", () => ({
  useCanvasSync: () => ({
    queueNodeDataUpdate: mocks.queueNodeDataUpdate,
    status: { isOffline: false, isSyncing: false, pendingCount: 0 },
  }),
}));
vi.mock("@/components/canvas/canvas-placement-context", () => ({
  useCanvasPlacement: () => ({
    createNodeConnectedFromSource: mocks.createNodeConnectedFromSource,
  }),
}));
vi.mock("@/lib/toast", () => ({
  toast: {
    promise: mocks.toastPromise,
    warning: mocks.toastWarning,
    action: mocks.toastAction,
    error: mocks.toastError,
  },
}));
// Error classification collapses everything to a generic error with the raw
// message preserved.
vi.mock("@/lib/ai-errors", () => ({
  classifyError: (error: unknown) => ({
    type: "generic",
    rawMessage: error instanceof Error ? error.message : String(error),
  }),
}));
vi.mock("@/components/ui/label", () => ({
  Label: ({ children, htmlFor }: { children: React.ReactNode; htmlFor?: string }) =>
    React.createElement("label", { htmlFor }, children),
}));
// The Select compound component is flattened to a native <select>/<option>
// pair so tests can drive it with ordinary DOM events.
vi.mock("@/components/ui/select", () => ({
  Select: ({ value, onValueChange, children }: {
    value: string;
    onValueChange: (value: string) => void;
    children: React.ReactNode;
  }) =>
    React.createElement(
      "select",
      {
        "aria-label": "video-model",
        value,
        onChange: (event: Event) => {
          onValueChange((event.target as HTMLSelectElement).value);
        },
      },
      children,
    ),
  SelectTrigger: ({ children }: { children: React.ReactNode }) => children,
  SelectValue: () => null,
  SelectContent: ({ children }: { children: React.ReactNode }) => children,
  SelectItem: ({ children, value }: { children: React.ReactNode; value: string }) =>
    React.createElement("option", { value }, children),
}));
// Strip the node chrome: render only the node's children in a plain div.
vi.mock("@/components/canvas/nodes/base-node-wrapper", () => ({
  default: ({ children }: { children: React.ReactNode }) => React.createElement("div", null, children),
}));
// Minimal React Flow surface: the store selector reads the mutable mock
// state above; the flow instance exposes only the lookups the node uses.
vi.mock("@xyflow/react", () => ({
  Handle: () => null,
  Position: { Left: "left", Right: "right" },
  useStore: (selector: (state: { edges: typeof mocks.edges; nodes: typeof mocks.nodes }) => unknown) =>
    selector({ edges: mocks.edges, nodes: mocks.nodes }),
  useReactFlow: () => ({
    getEdges: mocks.getEdges,
    getNode: mocks.getNode,
  }),
}));
import VideoPromptNode from "@/components/canvas/nodes/video-prompt-node";
// Opt in to React's act() environment so un-acted updates raise warnings.
(globalThis as typeof globalThis & { IS_REACT_ACT_ENVIRONMENT?: boolean }).IS_REACT_ACT_ENVIRONMENT = true;
describe("VideoPromptNode", () => {
let container: HTMLDivElement | null = null;
let root: Root | null = null;
beforeEach(() => {
mocks.edges = [];
mocks.nodes = [];
mocks.balance = { balance: 100, reserved: 0 };
mocks.queueNodeDataUpdate.mockClear();
mocks.createNodeConnectedFromSource.mockClear();
mocks.generateVideo.mockClear();
mocks.getEdges.mockClear();
mocks.getNode.mockClear();
mocks.push.mockClear();
mocks.toastPromise.mockClear();
mocks.toastWarning.mockClear();
mocks.toastAction.mockClear();
mocks.toastError.mockClear();
});
afterEach(() => {
if (root) {
act(() => {
root?.unmount();
});
}
container?.remove();
container = null;
root = null;
});
it("creates an ai-video node and queues video generation when generate is clicked", async () => {
container = document.createElement("div");
document.body.appendChild(container);
root = createRoot(container);
await act(async () => {
root?.render(
React.createElement(VideoPromptNode, {
id: "video-prompt-1",
selected: false,
dragging: false,
draggable: true,
selectable: true,
deletable: true,
zIndex: 1,
isConnectable: true,
type: "video-prompt",
data: {
prompt: "ein suesser Berner Sennenhund rennt ueber eine Wiese",
modelId: "wan-2-2-480p",
durationSeconds: 5,
canvasId: "canvas-1",
},
positionAbsoluteX: 0,
positionAbsoluteY: 0,
}),
);
});
const button = Array.from(container.querySelectorAll("button")).find((element) =>
element.textContent?.includes("generateButton"),
);
if (!(button instanceof HTMLButtonElement)) {
throw new Error("Generate button not found");
}
await act(async () => {
button.click();
});
expect(mocks.createNodeConnectedFromSource).toHaveBeenCalledTimes(1);
expect(mocks.createNodeConnectedFromSource).toHaveBeenCalledWith(
expect.objectContaining({
type: "ai-video",
sourceNodeId: "video-prompt-1",
sourceHandle: "video-prompt-out",
targetHandle: "video-in",
data: expect.objectContaining({
prompt: "ein suesser Berner Sennenhund rennt ueber eine Wiese",
modelId: "wan-2-2-480p",
durationSeconds: 5,
}),
}),
);
expect(mocks.generateVideo).toHaveBeenCalledTimes(1);
expect(mocks.generateVideo).toHaveBeenCalledWith({
canvasId: "canvas-1",
sourceNodeId: "video-prompt-1",
outputNodeId: "ai-video-node-1",
prompt: "ein suesser Berner Sennenhund rennt ueber eine Wiese",
modelId: "wan-2-2-480p",
durationSeconds: 5,
});
});
});