feat(canvas): add video-prompt node and enhance video generation support

- Introduced a new node type "video-prompt" for AI video generation, including its integration into the canvas command palette and node template picker.
- Updated connection validation to allow connections from text nodes to video-prompt and from video-prompt to ai-video nodes.
- Enhanced error handling and messaging for video generation failures, including specific cases for provider issues.
- Added tests to validate new video-prompt functionality and connection policies.
- Updated localization files to include new labels and prompts for video-prompt and ai-video nodes.
This commit is contained in:
2026-04-07 08:50:59 +02:00
parent 456b910532
commit ed08b976f9
28 changed files with 2899 additions and 9 deletions

109
lib/ai-video-models.ts Normal file
View File

@@ -0,0 +1,109 @@
/** Identifiers of every AI video-generation model the canvas supports. */
export type VideoModelId =
| "wan-2-2-480p"
| "wan-2-2-720p"
| "kling-std-2-1"
| "seedance-pro-1080p"
| "kling-pro-2-6";
/** Subscription tiers; higher tiers unlock all models of lower tiers. */
export type VideoModelTier = "free" | "starter" | "pro";
/** Clip lengths the backend accepts, in seconds. */
export type VideoModelDurationSeconds = 5 | 10;
/** Static metadata describing one video-generation model. */
export interface VideoModel {
// Stable identifier; also the key in the VIDEO_MODELS catalog.
id: VideoModelId;
// Human-readable name shown in the model picker.
label: string;
// Minimum subscription tier required to use this model.
tier: VideoModelTier;
// Freepik API path used to submit a generation task.
freepikEndpoint: string;
// Freepik API path template for polling task status ({task-id} placeholder).
statusEndpointPath: string;
// Credits charged per supported clip duration.
creditCost: Record<VideoModelDurationSeconds, number>;
// Whether the model can generate an audio track.
supportsAudio: boolean;
// Whether the model accepts an input image (image-to-video).
supportsImageToVideo: boolean;
// Short marketing-style description shown in the UI (German).
description: string;
}
// Catalog of all supported video models, keyed by VideoModelId.
// `as const satisfies` validates every entry against VideoModel while
// preserving the literal key and field types for inference.
export const VIDEO_MODELS = {
"wan-2-2-480p": {
id: "wan-2-2-480p",
label: "WAN 2.2 480p",
tier: "free",
// NOTE(review): this "480p" model submits to the *720p* WAN endpoint --
// identical to "wan-2-2-720p" below, differing only in credit cost.
// Also the label says "WAN 2.2" while the path says "wan-2-5".
// Verify against the Freepik API whether a dedicated 480p endpoint exists.
freepikEndpoint: "/v1/ai/text-to-video/wan-2-5-t2v-720p",
statusEndpointPath: "/v1/ai/text-to-video/wan-2-5-t2v-720p/{task-id}",
creditCost: { 5: 28, 10: 56 },
supportsAudio: false,
supportsImageToVideo: false,
description: "Schnell und guenstig - gut fuer Konzepte",
},
"wan-2-2-720p": {
id: "wan-2-2-720p",
label: "WAN 2.2 720p",
tier: "free",
// NOTE(review): label "WAN 2.2" vs. endpoint "wan-2-5" -- confirm which
// model version the endpoint actually serves.
freepikEndpoint: "/v1/ai/text-to-video/wan-2-5-t2v-720p",
statusEndpointPath: "/v1/ai/text-to-video/wan-2-5-t2v-720p/{task-id}",
creditCost: { 5: 52, 10: 104 },
supportsAudio: false,
supportsImageToVideo: false,
description: "HD-Qualitaet, offenes Modell",
},
"kling-std-2-1": {
id: "kling-std-2-1",
label: "Kling Standard 2.1",
tier: "starter",
// NOTE(review): submit path ends in "-std" but the status path does not;
// presumably the Freepik API shares one status route per model family --
// confirm, since a typo here would break polling.
freepikEndpoint: "/v1/ai/image-to-video/kling-v2-1-std",
statusEndpointPath: "/v1/ai/image-to-video/kling-v2-1/{task-id}",
creditCost: { 5: 50, 10: 100 },
supportsAudio: false,
supportsImageToVideo: true,
description: "Realistisch, stabile Bewegung",
},
"seedance-pro-1080p": {
id: "seedance-pro-1080p",
label: "Seedance Pro 1080p",
tier: "starter",
freepikEndpoint: "/v1/ai/video/seedance-1-5-pro-1080p",
statusEndpointPath: "/v1/ai/video/seedance-1-5-pro-1080p/{task-id}",
creditCost: { 5: 33, 10: 66 },
supportsAudio: false,
supportsImageToVideo: false,
description: "Full-HD, gutes Preis-Leistungs-Verhaeltnis",
},
"kling-pro-2-6": {
id: "kling-pro-2-6",
label: "Kling Pro 2.6",
tier: "pro",
// NOTE(review): same "-pro" suffix asymmetry as kling-std-2-1 -- confirm.
freepikEndpoint: "/v1/ai/image-to-video/kling-v2-6-pro",
statusEndpointPath: "/v1/ai/image-to-video/kling-v2-6/{task-id}",
creditCost: { 5: 59, 10: 118 },
supportsAudio: false,
supportsImageToVideo: true,
description: "Beste Qualitaet, cineastische Bewegung",
},
} as const satisfies Record<VideoModelId, VideoModel>;
/** Model preselected when a video-prompt node does not specify one. */
export const DEFAULT_VIDEO_MODEL_ID: VideoModelId = "wan-2-2-720p";

// Derived once from the catalog so the id list can never drift out of sync.
const VIDEO_MODEL_IDS = Object.keys(VIDEO_MODELS) as VideoModelId[];

// Typed as ReadonlySet<string> so lookups of arbitrary strings need no cast.
const VIDEO_MODEL_ID_SET: ReadonlySet<string> = new Set(VIDEO_MODEL_IDS);

/**
 * Type guard narrowing an arbitrary string to a known VideoModelId.
 */
export function isVideoModelId(value: string): value is VideoModelId {
return VIDEO_MODEL_ID_SET.has(value);
}
/**
 * Resolve a (possibly untrusted) model id string to its catalog entry.
 *
 * @param id - candidate model identifier, e.g. from persisted node data.
 * @returns the matching VideoModel, or undefined for unknown ids.
 */
export function getVideoModel(id: string): VideoModel | undefined {
return isVideoModelId(id) ? VIDEO_MODELS[id] : undefined;
}
// Numeric rank per tier; a plan may use every model at or below its rank.
const VIDEO_MODEL_TIER_ORDER: Record<VideoModelTier, number> = {
free: 0,
starter: 1,
pro: 2,
};

/**
 * List the models a subscription tier may use (its own tier and all
 * lower tiers), preserving catalog declaration order.
 */
export function getAvailableVideoModels(tier: VideoModelTier): VideoModel[] {
const limit = VIDEO_MODEL_TIER_ORDER[tier];
const available: VideoModel[] = [];
for (const id of VIDEO_MODEL_IDS) {
const model = VIDEO_MODELS[id];
if (VIDEO_MODEL_TIER_ORDER[model.tier] <= limit) {
available.push(model);
}
}
return available;
}

View File

@@ -28,6 +28,8 @@ export type CanvasConnectionValidationReason =
| "incomplete"
| "self-loop"
| "unknown-node"
| "ai-video-source-invalid"
| "video-prompt-target-invalid"
| "adjustment-source-invalid"
| "adjustment-incoming-limit"
| "compare-incoming-limit"
@@ -41,7 +43,19 @@ export function validateCanvasConnectionPolicy(args: {
}): CanvasConnectionValidationReason | null {
const { sourceType, targetType, targetIncomingCount } = args;
if (isAdjustmentNodeType(targetType)) {
if (targetType === "ai-video" && sourceType !== "video-prompt") {
return "ai-video-source-invalid";
}
if (sourceType === "video-prompt" && targetType !== "ai-video") {
return "video-prompt-target-invalid";
}
if (targetType === "render" && !RENDER_ALLOWED_SOURCE_TYPES.has(sourceType)) {
return "render-source-invalid";
}
if (isAdjustmentNodeType(targetType) && targetType !== "render") {
if (!ADJUSTMENT_ALLOWED_SOURCE_TYPES.has(sourceType)) {
return "adjustment-source-invalid";
}
@@ -54,10 +68,6 @@ export function validateCanvasConnectionPolicy(args: {
return "compare-incoming-limit";
}
if (targetType === "render" && !RENDER_ALLOWED_SOURCE_TYPES.has(sourceType)) {
return "render-source-invalid";
}
if (
isAdjustmentNodeType(sourceType) &&
ADJUSTMENT_DISALLOWED_TARGET_TYPES.has(targetType)
@@ -78,6 +88,10 @@ export function getCanvasConnectionValidationMessage(
return "Node kann nicht mit sich selbst verbunden werden.";
case "unknown-node":
return "Verbindung enthaelt unbekannte Nodes.";
case "ai-video-source-invalid":
return "KI-Video-Ausgabe akzeptiert nur Eingaben von KI-Video.";
case "video-prompt-target-invalid":
return "KI-Video kann nur mit KI-Video-Ausgabe verbunden werden.";
case "adjustment-source-invalid":
return "Adjustment-Nodes akzeptieren nur Bild-, Asset-, KI-Bild- oder Adjustment-Input.";
case "adjustment-incoming-limit":

View File

@@ -98,6 +98,12 @@ export const NODE_CATALOG: readonly NodeCatalogEntry[] = [
category: "ai-output",
phase: 1,
}),
entry({
type: "video-prompt",
label: "KI-Video",
category: "ai-output",
phase: 1,
}),
entry({
type: "ai-text",
label: "KI-Text",
@@ -108,7 +114,7 @@ export const NODE_CATALOG: readonly NodeCatalogEntry[] = [
}),
entry({
type: "ai-video",
label: "KI-Video",
label: "KI-Video-Ausgabe",
category: "ai-output",
phase: 2,
systemOutput: true,

View File

@@ -20,6 +20,18 @@ export const CANVAS_NODE_TEMPLATES = [
height: 220,
defaultData: { prompt: "", model: "", aspectRatio: "1:1" },
},
{
type: "video-prompt",
label: "KI-Video",
width: 320,
height: 220,
defaultData: {
prompt: "",
modelId: "wan-2-2-720p",
durationSeconds: 5,
hasAudio: false,
},
},
{
type: "note",
label: "Notiz",

View File

@@ -2,6 +2,7 @@ export const PHASE1_CANVAS_NODE_TYPES = [
"image",
"text",
"prompt",
"video-prompt",
"ai-image",
"group",
"frame",
@@ -13,6 +14,7 @@ export const CANVAS_NODE_TYPES = [
"image",
"text",
"prompt",
"video-prompt",
"color",
"video",
"asset",

View File

@@ -102,7 +102,9 @@ export function convexEdgeToRF(edge: Doc<"edges">): RFEdge {
*/
const SOURCE_NODE_GLOW_RGB: Record<string, readonly [number, number, number]> = {
prompt: [139, 92, 246],
"video-prompt": [124, 58, 237],
"ai-image": [139, 92, 246],
"ai-video": [124, 58, 237],
image: [13, 148, 136],
text: [13, 148, 136],
note: [13, 148, 136],
@@ -208,7 +210,9 @@ export const NODE_HANDLE_MAP: Record<
image: { source: undefined, target: undefined },
text: { source: undefined, target: undefined },
prompt: { source: "prompt-out", target: "image-in" },
"video-prompt": { source: "video-prompt-out", target: "video-prompt-in" },
"ai-image": { source: "image-out", target: "prompt-in" },
"ai-video": { source: "video-out", target: "video-in" },
group: { source: undefined, target: undefined },
frame: { source: "frame-out", target: "frame-in" },
note: { source: undefined, target: undefined },
@@ -232,8 +236,19 @@ export const NODE_DEFAULTS: Record<
image: { width: 280, height: 200, data: {} },
text: { width: 256, height: 120, data: { content: "" } },
prompt: { width: 288, height: 220, data: { prompt: "", aspectRatio: "1:1" } },
"video-prompt": {
width: 288,
height: 220,
data: {
prompt: "",
modelId: "wan-2-2-720p",
durationSeconds: 5,
hasAudio: false,
},
},
// 1:1 viewport 320 + chrome 88 ≈ äußere Höhe (siehe lib/image-formats.ts)
"ai-image": { width: 320, height: 408, data: {} },
"ai-video": { width: 360, height: 280, data: {} },
group: { width: 400, height: 300, data: { label: "Gruppe" } },
frame: {
width: 400,

12
lib/video-poll-logging.ts Normal file
View File

@@ -0,0 +1,12 @@
/** Lifecycle states reported by the video-generation task endpoint. */
export type VideoPollStatus = "CREATED" | "IN_PROGRESS" | "COMPLETED" | "FAILED";

/**
 * Throttle per-attempt log noise: log the very first attempt, then
 * only every fifth one.
 */
export function shouldLogVideoPollAttempt(attempt: number): boolean {
if (attempt === 1) {
return true;
}
return attempt % 5 === 0;
}

/**
 * Decide whether a poll result should be logged: any non-IN_PROGRESS
 * status always logs, while IN_PROGRESS follows the attempt throttle.
 */
export function shouldLogVideoPollResult(
attempt: number,
status: VideoPollStatus,
): boolean {
if (status !== "IN_PROGRESS") {
return true;
}
return shouldLogVideoPollAttempt(attempt);
}