feat(canvas): add video-prompt node and enhance video generation support
- Introduced a new node type "video-prompt" for AI video generation, including its integration into the canvas command palette and node template picker. - Updated connection validation to allow connections from text nodes to video-prompt and from video-prompt to ai-video nodes. - Enhanced error handling and messaging for video generation failures, including specific cases for provider issues. - Added tests to validate new video-prompt functionality and connection policies. - Updated localization files to include new labels and prompts for video-prompt and ai-video nodes.
This commit is contained in:
@@ -432,6 +432,64 @@ describe("useCanvasConnections", () => {
|
||||
expect(latestHandlersRef.current?.connectionDropMenu).toBeNull();
|
||||
});
|
||||
|
||||
it("rejects text to ai-video body drops", async () => {
|
||||
const runCreateEdgeMutation = vi.fn(async () => undefined);
|
||||
const showConnectionRejectedToast = vi.fn();
|
||||
|
||||
container = document.createElement("div");
|
||||
document.body.appendChild(container);
|
||||
root = createRoot(container);
|
||||
|
||||
await act(async () => {
|
||||
root?.render(
|
||||
<HookHarness
|
||||
helperResult={{
|
||||
sourceNodeId: "node-source",
|
||||
targetNodeId: "node-target",
|
||||
sourceHandle: undefined,
|
||||
targetHandle: undefined,
|
||||
}}
|
||||
nodes={[
|
||||
{ id: "node-source", type: "text", position: { x: 0, y: 0 }, data: {} },
|
||||
{ id: "node-target", type: "ai-video", position: { x: 300, y: 200 }, data: {} },
|
||||
]}
|
||||
runCreateEdgeMutation={runCreateEdgeMutation}
|
||||
showConnectionRejectedToast={showConnectionRejectedToast}
|
||||
/>,
|
||||
);
|
||||
});
|
||||
|
||||
await act(async () => {
|
||||
latestHandlersRef.current?.onConnectStart?.(
|
||||
{} as MouseEvent,
|
||||
{
|
||||
nodeId: "node-source",
|
||||
handleId: null,
|
||||
handleType: "source",
|
||||
} as never,
|
||||
);
|
||||
latestHandlersRef.current?.onConnectEnd(
|
||||
{ clientX: 400, clientY: 260 } as MouseEvent,
|
||||
{
|
||||
isValid: false,
|
||||
from: { x: 0, y: 0 },
|
||||
fromNode: { id: "node-source", type: "text" },
|
||||
fromHandle: { id: null, type: "source" },
|
||||
fromPosition: null,
|
||||
to: { x: 400, y: 260 },
|
||||
toHandle: null,
|
||||
toNode: null,
|
||||
toPosition: null,
|
||||
pointer: null,
|
||||
} as never,
|
||||
);
|
||||
});
|
||||
|
||||
expect(runCreateEdgeMutation).not.toHaveBeenCalled();
|
||||
expect(showConnectionRejectedToast).toHaveBeenCalledWith("ai-video-source-invalid");
|
||||
expect(latestHandlersRef.current?.connectionDropMenu).toBeNull();
|
||||
});
|
||||
|
||||
it("ignores onConnectEnd when no connect drag is active", async () => {
|
||||
const runCreateEdgeMutation = vi.fn(async () => undefined);
|
||||
const showConnectionRejectedToast = vi.fn();
|
||||
|
||||
@@ -740,4 +740,33 @@ describe("useCanvasEdgeInsertions", () => {
|
||||
expect(templateTypes).not.toContain("text");
|
||||
expect(templateTypes).not.toContain("ai-image");
|
||||
});
|
||||
|
||||
it("offers video-prompt as valid split for text to ai-video", async () => {
|
||||
container = document.createElement("div");
|
||||
document.body.appendChild(container);
|
||||
root = createRoot(container);
|
||||
|
||||
await act(async () => {
|
||||
root?.render(
|
||||
<HookHarness
|
||||
nodes={[
|
||||
createNode({ id: "source", type: "text", position: { x: 0, y: 0 } }),
|
||||
createNode({ id: "target", type: "ai-video", position: { x: 360, y: 0 } }),
|
||||
]}
|
||||
edges={[createEdge({ id: "edge-1", source: "source", target: "target" })]}
|
||||
/>,
|
||||
);
|
||||
});
|
||||
|
||||
await act(async () => {
|
||||
latestHandlersRef.current?.openEdgeInsertMenu({ edgeId: "edge-1", screenX: 20, screenY: 20 });
|
||||
});
|
||||
|
||||
const templateTypes = (latestHandlersRef.current?.edgeInsertTemplates ?? []).map(
|
||||
(template) => template.type,
|
||||
);
|
||||
|
||||
expect(templateTypes).toContain("video-prompt");
|
||||
expect(templateTypes).not.toContain("prompt");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -55,6 +55,7 @@ const CATALOG_ICONS: Partial<Record<string, LucideIcon>> = {
|
||||
image: Image,
|
||||
text: Type,
|
||||
prompt: Sparkles,
|
||||
"video-prompt": Video,
|
||||
color: Palette,
|
||||
video: Video,
|
||||
asset: Package,
|
||||
|
||||
@@ -26,7 +26,7 @@ export function useGenerationFailureWarnings(
|
||||
for (const node of convexNodes) {
|
||||
nextNodeStatusMap.set(node._id, node.status);
|
||||
|
||||
if (node.type !== "ai-image") {
|
||||
if (node.type !== "ai-image" && node.type !== "ai-video") {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ export function useGenerationFailureWarnings(
|
||||
}
|
||||
|
||||
if (recentFailures.length >= GENERATION_FAILURE_THRESHOLD) {
|
||||
toast.warning(t('ai.openrouterIssuesTitle'), t('ai.openrouterIssuesDesc'));
|
||||
toast.warning(t('ai.providerIssuesTitle'), t('ai.providerIssuesDesc'));
|
||||
recentGenerationFailureTimestampsRef.current = [];
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@ const NODE_ICONS: Record<CanvasNodeTemplate["type"], LucideIcon> = {
|
||||
image: Image,
|
||||
text: Type,
|
||||
prompt: Sparkles,
|
||||
"video-prompt": Video,
|
||||
note: StickyNote,
|
||||
frame: Frame,
|
||||
compare: GitCompare,
|
||||
@@ -46,6 +47,7 @@ const NODE_SEARCH_KEYWORDS: Partial<
|
||||
image: ["image", "photo", "foto"],
|
||||
text: ["text", "typo"],
|
||||
prompt: ["prompt", "ai", "generate", "ki-bild", "ki", "bild"],
|
||||
"video-prompt": ["video", "ai", "ki-video", "ki", "prompt"],
|
||||
note: ["note", "sticky", "notiz"],
|
||||
frame: ["frame", "artboard"],
|
||||
compare: ["compare", "before", "after", "vergleich"],
|
||||
|
||||
@@ -48,6 +48,7 @@ const CATALOG_ICONS: Partial<Record<string, LucideIcon>> = {
|
||||
image: Image,
|
||||
text: Type,
|
||||
prompt: Sparkles,
|
||||
"video-prompt": Video,
|
||||
color: Palette,
|
||||
video: Video,
|
||||
asset: Package,
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import ImageNode from "./nodes/image-node";
|
||||
import TextNode from "./nodes/text-node";
|
||||
import PromptNode from "./nodes/prompt-node";
|
||||
import VideoPromptNode from "./nodes/video-prompt-node";
|
||||
import AiImageNode from "./nodes/ai-image-node";
|
||||
import AiVideoNode from "./nodes/ai-video-node";
|
||||
import GroupNode from "./nodes/group-node";
|
||||
import FrameNode from "./nodes/frame-node";
|
||||
import NoteNode from "./nodes/note-node";
|
||||
@@ -25,7 +27,9 @@ export const nodeTypes = {
|
||||
image: ImageNode,
|
||||
text: TextNode,
|
||||
prompt: PromptNode,
|
||||
"video-prompt": VideoPromptNode,
|
||||
"ai-image": AiImageNode,
|
||||
"ai-video": AiVideoNode,
|
||||
group: GroupNode,
|
||||
frame: FrameNode,
|
||||
note: NoteNode,
|
||||
|
||||
251
components/canvas/nodes/ai-video-node.tsx
Normal file
251
components/canvas/nodes/ai-video-node.tsx
Normal file
@@ -0,0 +1,251 @@
|
||||
"use client";
|
||||
|
||||
import { useCallback, useState } from "react";
|
||||
import { useAction } from "convex/react";
|
||||
import type { FunctionReference } from "convex/server";
|
||||
import { useTranslations } from "next-intl";
|
||||
import { AlertCircle, Download, Loader2, RefreshCw, Video } from "lucide-react";
|
||||
import { Handle, Position, useReactFlow, type Node, type NodeProps } from "@xyflow/react";
|
||||
|
||||
import { api } from "@/convex/_generated/api";
|
||||
import type { Id } from "@/convex/_generated/dataModel";
|
||||
import { useCanvasSync } from "@/components/canvas/canvas-sync-context";
|
||||
import { classifyError } from "@/lib/ai-errors";
|
||||
import { getVideoModel, type VideoModelDurationSeconds } from "@/lib/ai-video-models";
|
||||
import { toast } from "@/lib/toast";
|
||||
import BaseNodeWrapper from "./base-node-wrapper";
|
||||
|
||||
// Persisted payload of an "ai-video" canvas node. All fields are optional
// because the node may be created empty and filled in by the generation
// pipeline later.
type AiVideoNodeData = {
  prompt?: string; // prompt the video was (or will be) generated from
  modelId?: string; // video model identifier, resolved via getVideoModel()
  durationSeconds?: VideoModelDurationSeconds; // requested clip length
  creditCost?: number; // credits charged for this generation, shown as meta
  canvasId?: string; // owning canvas; used as fallback when retrying
  url?: string; // playable video URL once generation completed
  _status?: string; // transient pipeline status (see NodeStatus) — not user data
  _statusMessage?: string; // transient error/status detail from the backend
};

// Lifecycle states the generation pipeline writes into `_status`.
type NodeStatus =
  | "idle"
  | "analyzing"
  | "clarifying"
  | "executing"
  | "done"
  | "error";

export type AiVideoNodeType = Node<AiVideoNodeData, "ai-video">;
|
||||
|
||||
/**
 * Output node for AI video generation on the canvas.
 *
 * Renders the generation lifecycle driven by `data._status`: an idle hint,
 * a spinner while the pipeline is analyzing/clarifying/executing, an error
 * panel with a retry button, and finally a <video> player once `data.url`
 * is set, plus a metadata footer (model, duration, credits, prompt,
 * download link).
 *
 * Retrying re-invokes the `ai.generateVideo` Convex action using the
 * settings stored on this node and the incoming edge from a connected
 * "video-prompt" node.
 */
export default function AiVideoNode({ id, data, selected }: NodeProps<AiVideoNodeType>) {
  const t = useTranslations("aiVideoNode");
  const tToast = useTranslations("toasts");
  const nodeData = data as AiVideoNodeData;
  const { getEdges, getNode } = useReactFlow();
  const { status: syncStatus } = useCanvasSync();
  // The generated `api` object is widened through `unknown` to reach
  // `ai.generateVideo` with an explicit FunctionReference shape.
  // NOTE(review): presumably the typed reference is missing from the
  // generated api at this point — confirm against convex/ai.ts and drop
  // the cast once codegen includes it.
  const generateVideo = useAction(
    (api as unknown as {
      ai: {
        generateVideo: FunctionReference<
          "action",
          "public",
          {
            canvasId: Id<"canvases">;
            sourceNodeId: Id<"nodes">;
            outputNodeId: Id<"nodes">;
            prompt: string;
            modelId: string;
            durationSeconds: 5 | 10;
          },
          { queued: true; outputNodeId: Id<"nodes"> }
        >;
      };
    }).ai.generateVideo,
  );
  const status = (nodeData._status ?? "idle") as NodeStatus;
  const [isRetrying, setIsRetrying] = useState(false);
  // Error raised locally by a retry attempt; backend `_statusMessage` wins
  // when both are present.
  const [localError, setLocalError] = useState<string | null>(null);
  const classifiedError = classifyError(nodeData._statusMessage ?? localError);
  const isLoading =
    status === "executing" || status === "analyzing" || status === "clarifying" || isRetrying;

  // Human-readable model label; falls back to the raw id when the model is
  // unknown, and "-" when no model is stored on the node.
  const modelLabel =
    typeof nodeData.modelId === "string"
      ? getVideoModel(nodeData.modelId)?.label ?? nodeData.modelId
      : "-";

  // Re-run the generation for this output node. Validates offline state,
  // stored settings, the incoming "video-prompt" edge, and the canvas id
  // before calling the Convex action.
  const handleRetry = useCallback(async () => {
    if (isRetrying) return;

    if (syncStatus.isOffline) {
      toast.warning(
        "Offline aktuell nicht unterstuetzt",
        "KI-Generierung benoetigt eine aktive Verbindung.",
      );
      return;
    }

    const prompt = nodeData.prompt?.trim();
    const modelId = nodeData.modelId;
    const durationSeconds = nodeData.durationSeconds;

    if (!prompt || !modelId || !durationSeconds) {
      setLocalError(t("errorFallback"));
      return;
    }

    // The retry is only valid when this node is still fed by a
    // "video-prompt" node — find the first incoming edge and check its source.
    const incomingEdge = getEdges().find((edge) => edge.target === id);
    if (!incomingEdge) {
      setLocalError(t("errorFallback"));
      return;
    }

    const sourceNode = getNode(incomingEdge.source);
    if (!sourceNode || sourceNode.type !== "video-prompt") {
      setLocalError(t("errorFallback"));
      return;
    }

    // Prefer the canvas id stored on this node; fall back to the source
    // node's copy.
    const sourceData = sourceNode.data as { canvasId?: string } | undefined;
    const canvasId = (nodeData.canvasId ?? sourceData?.canvasId) as Id<"canvases"> | undefined;
    if (!canvasId) {
      setLocalError(t("errorFallback"));
      return;
    }

    setLocalError(null);
    setIsRetrying(true);

    try {
      // toast.promise surfaces loading/queued/failed states while the
      // action is in flight; the action only queues the generation.
      await toast.promise(
        generateVideo({
          canvasId,
          sourceNodeId: incomingEdge.source as Id<"nodes">,
          outputNodeId: id as Id<"nodes">,
          prompt,
          modelId,
          durationSeconds,
        }),
        {
          loading: tToast("ai.generating"),
          success: tToast("ai.generationQueued"),
          error: tToast("ai.generationFailed"),
        },
      );
    } catch (error) {
      const classified = classifyError(error);
      setLocalError(classified.rawMessage ?? tToast("ai.generationFailed"));
    } finally {
      setIsRetrying(false);
    }
  }, [
    generateVideo,
    getEdges,
    getNode,
    id,
    isRetrying,
    nodeData.canvasId,
    nodeData.durationSeconds,
    nodeData.modelId,
    nodeData.prompt,
    syncStatus.isOffline,
    t,
    tToast,
  ]);

  return (
    <BaseNodeWrapper
      nodeType="ai-video"
      selected={selected}
      status={nodeData._status}
      statusMessage={nodeData._statusMessage}
      className="flex h-full w-full min-h-0 min-w-0 flex-col"
    >
      {/* Input handle: receives the connection from a video-prompt node. */}
      <Handle
        type="target"
        position={Position.Left}
        id="video-in"
        className="!h-3 !w-3 !bg-violet-600 !border-2 !border-background"
      />

      <div className="shrink-0 border-b border-border px-3 py-2">
        <div className="flex items-center gap-1.5 text-xs font-medium text-violet-700 dark:text-violet-300">
          <Video className="h-3.5 w-3.5" />
          {t("label")}
        </div>
      </div>

      {/* Main area: exactly one of hint / spinner / error / player is shown,
          chosen by status, isLoading and url. */}
      <div className="relative min-h-0 flex-1 overflow-hidden bg-muted/30">
        {status === "idle" && !nodeData.url ? (
          <div className="absolute inset-0 flex items-center justify-center px-6 text-center text-xs text-muted-foreground">
            {t("idleHint")}
          </div>
        ) : null}

        {isLoading ? (
          <div className="absolute inset-0 flex flex-col items-center justify-center gap-2">
            <Loader2 className="h-7 w-7 animate-spin text-violet-500" />
            <p className="text-xs text-muted-foreground">{t("generating")}</p>
          </div>
        ) : null}

        {status === "error" && !isLoading ? (
          <div className="absolute inset-0 flex flex-col items-center justify-center gap-2 px-4">
            <AlertCircle className="h-7 w-7 text-destructive" />
            <p className="text-center text-xs text-destructive">
              {classifiedError.rawMessage ?? t("errorFallback")}
            </p>
            <button
              type="button"
              onClick={() => void handleRetry()}
              disabled={isRetrying}
              className="nodrag inline-flex items-center gap-1.5 rounded-md border border-border bg-background px-2.5 py-1.5 text-xs text-muted-foreground disabled:cursor-not-allowed disabled:opacity-60"
            >
              <RefreshCw className={`h-3 w-3${isRetrying ? " animate-spin" : ""}`} />
              {t("retryButton")}
            </button>
          </div>
        ) : null}

        {nodeData.url && !isLoading ? (
          <video
            src={nodeData.url}
            controls
            playsInline
            preload="metadata"
            className="h-full w-full object-contain"
          />
        ) : null}
      </div>

      {/* Metadata footer: model, optional duration/credits/prompt, download. */}
      <div className="flex shrink-0 flex-col gap-1 border-t border-border px-3 py-2 text-[10px] text-muted-foreground">
        <p className="truncate" title={modelLabel}>
          {t("modelMeta", { model: modelLabel })}
        </p>
        {typeof nodeData.durationSeconds === "number" ? (
          <p>{t("durationMeta", { duration: nodeData.durationSeconds })}</p>
        ) : null}
        {typeof nodeData.creditCost === "number" ? (
          <p>{t("creditMeta", { credits: nodeData.creditCost })}</p>
        ) : null}
        {nodeData.prompt ? <p className="line-clamp-1">{nodeData.prompt}</p> : null}
        {nodeData.url ? (
          <a
            href={nodeData.url}
            download
            className="nodrag inline-flex items-center gap-1 text-xs text-violet-700 underline-offset-2 hover:underline dark:text-violet-300"
          >
            <Download className="h-3 w-3" />
            {t("downloadButton")}
          </a>
        ) : null}
      </div>

      {/* Output handle: lets the generated video feed downstream nodes. */}
      <Handle
        type="source"
        position={Position.Right}
        id="video-out"
        className="!h-3 !w-3 !bg-violet-600 !border-2 !border-background"
      />
    </BaseNodeWrapper>
  );
}
|
||||
418
components/canvas/nodes/video-prompt-node.tsx
Normal file
418
components/canvas/nodes/video-prompt-node.tsx
Normal file
@@ -0,0 +1,418 @@
|
||||
"use client";
|
||||
|
||||
import { useCallback, useMemo, useState } from "react";
|
||||
import { Handle, Position, useReactFlow, useStore, type Node, type NodeProps } from "@xyflow/react";
|
||||
import { useAction } from "convex/react";
|
||||
import type { FunctionReference } from "convex/server";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { Coins, Loader2, Sparkles, Video } from "lucide-react";
|
||||
import { useTranslations } from "next-intl";
|
||||
|
||||
import { useDebouncedCallback } from "@/hooks/use-debounced-callback";
|
||||
import {
|
||||
DEFAULT_VIDEO_MODEL_ID,
|
||||
getAvailableVideoModels,
|
||||
getVideoModel,
|
||||
isVideoModelId,
|
||||
type VideoModelDurationSeconds,
|
||||
type VideoModelId,
|
||||
} from "@/lib/ai-video-models";
|
||||
import type { Id } from "@/convex/_generated/dataModel";
|
||||
import { useCanvasPlacement } from "@/components/canvas/canvas-placement-context";
|
||||
import { useCanvasSync } from "@/components/canvas/canvas-sync-context";
|
||||
import { useAuthQuery } from "@/hooks/use-auth-query";
|
||||
import { api } from "@/convex/_generated/api";
|
||||
import { toast } from "@/lib/toast";
|
||||
import { classifyError } from "@/lib/ai-errors";
|
||||
import BaseNodeWrapper from "./base-node-wrapper";
|
||||
import { Label } from "@/components/ui/label";
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/ui/select";
|
||||
|
||||
// Persisted payload of a "video-prompt" canvas node: the user's prompt and
// generation settings, saved via the canvas sync queue.
type VideoPromptNodeData = {
  prompt?: string; // manually typed prompt (ignored when a text node feeds in)
  modelId?: string; // selected video model id; validated with isVideoModelId()
  durationSeconds?: number; // raw stored duration; normalized to 5 | 10 on load
  hasAudio?: boolean; // NOTE(review): stored but not read in this component — confirm consumer
  canvasId?: string; // owning canvas; required to invoke generation
  _status?: string; // transient pipeline status — not user data
  _statusMessage?: string; // transient status detail from the backend
};

export type VideoPromptNodeType = Node<VideoPromptNodeData, "video-prompt">;
||||
|
||||
function normalizeDuration(value: number | undefined): VideoModelDurationSeconds {
|
||||
return value === 10 ? 10 : 5;
|
||||
}
|
||||
|
||||
/**
 * Canvas node where the user configures and triggers an AI video generation.
 *
 * The prompt either comes from a connected upstream "text" node (read-only
 * display) or from a local textarea. Model and duration selections, plus the
 * prompt, are debounced into the canvas sync queue. "Generate" checks
 * offline state and credit balance, creates a connected "ai-video" output
 * node, then queues the `ai.generateVideo` Convex action targeting it.
 */
export default function VideoPromptNode({
  id,
  data,
  selected,
}: NodeProps<VideoPromptNodeType>) {
  const t = useTranslations("videoPromptNode");
  const tToast = useTranslations("toasts");
  const nodeData = data as VideoPromptNodeData;
  const router = useRouter();
  const { getNode } = useReactFlow();
  const { queueNodeDataUpdate, status } = useCanvasSync();
  const { createNodeConnectedFromSource } = useCanvasPlacement();
  const balance = useAuthQuery(api.credits.getBalance);
  // Subscribe to the flow store so the derived prompt updates live as edges
  // or upstream text nodes change.
  const edges = useStore((store) => store.edges);
  const nodes = useStore((store) => store.nodes);
  // The generated `api` object is widened through `unknown` to reach
  // `ai.generateVideo` with an explicit FunctionReference shape.
  // NOTE(review): same cast as in ai-video-node.tsx — confirm against
  // convex/ai.ts and drop once codegen provides the typed reference.
  const generateVideo = useAction(
    (api as unknown as {
      ai: {
        generateVideo: FunctionReference<
          "action",
          "public",
          {
            canvasId: Id<"canvases">;
            sourceNodeId: Id<"nodes">;
            outputNodeId: Id<"nodes">;
            prompt: string;
            modelId: string;
            durationSeconds: 5 | 10;
          },
          { queued: true; outputNodeId: Id<"nodes"> }
        >;
      };
    }).ai.generateVideo,
  );

  // Local editing state, seeded from the persisted node data.
  const [prompt, setPrompt] = useState(nodeData.prompt ?? "");
  const [modelId, setModelId] = useState<VideoModelId>(
    isVideoModelId(nodeData.modelId ?? "")
      ? (nodeData.modelId as VideoModelId)
      : DEFAULT_VIDEO_MODEL_ID,
  );
  const [durationSeconds, setDurationSeconds] = useState<VideoModelDurationSeconds>(
    normalizeDuration(nodeData.durationSeconds),
  );
  const [isGenerating, setIsGenerating] = useState(false);
  const [error, setError] = useState<string | null>(null);

  // Derive the upstream text input: the first incoming "text" node that has
  // a string `content` wins; hasTextInput is true if ANY text node is
  // connected, even when its content is not (yet) a string.
  const inputMeta = useMemo(() => {
    const incomingEdges = edges.filter((edge) => edge.target === id);
    let textPrompt: string | undefined;
    let hasTextInput = false;

    for (const edge of incomingEdges) {
      const sourceNode = nodes.find((node) => node.id === edge.source);
      if (sourceNode?.type !== "text") continue;
      hasTextInput = true;
      const sourceData = sourceNode.data as { content?: string };
      if (typeof sourceData.content === "string") {
        textPrompt = sourceData.content;
        break;
      }
    }

    return {
      hasTextInput,
      textPrompt: textPrompt ?? "",
    };
  }, [edges, id, nodes]);

  // A connected text node overrides the locally typed prompt.
  const effectivePrompt = inputMeta.hasTextInput ? inputMeta.textPrompt : prompt;
  const selectedModel = getVideoModel(modelId) ?? getVideoModel(DEFAULT_VIDEO_MODEL_ID);
  const creditCost = selectedModel?.creditCost[durationSeconds] ?? 0;
  // null while the balance query is still loading; in that case the credit
  // gate is considered passed and the button is instead disabled via
  // `balance === undefined` below.
  const availableCredits =
    balance !== undefined ? balance.balance - balance.reserved : null;
  const hasEnoughCredits =
    availableCredits === null ? true : availableCredits >= creditCost;

  // Persist prompt/model/duration changes 500ms after the last edit,
  // preserving other node data but stripping the transient _status fields.
  const debouncedSave = useDebouncedCallback(
    (
      nextPrompt: string,
      nextModelId: VideoModelId,
      nextDurationSeconds: VideoModelDurationSeconds,
    ) => {
      const raw = data as Record<string, unknown>;
      const { _status, _statusMessage, ...rest } = raw;
      void _status;
      void _statusMessage;

      void queueNodeDataUpdate({
        nodeId: id as Id<"nodes">,
        data: {
          ...rest,
          prompt: nextPrompt,
          modelId: nextModelId,
          durationSeconds: nextDurationSeconds,
        },
      });
    },
    500,
  );

  const handlePromptChange = useCallback(
    (event: React.ChangeEvent<HTMLTextAreaElement>) => {
      const value = event.target.value;
      setPrompt(value);
      debouncedSave(value, modelId, durationSeconds);
    },
    [debouncedSave, durationSeconds, modelId],
  );

  const handleModelChange = useCallback(
    (value: string) => {
      // Ignore values the Select could theoretically emit that are not
      // known video model ids.
      if (!isVideoModelId(value)) return;
      setModelId(value);
      debouncedSave(prompt, value, durationSeconds);
    },
    [debouncedSave, durationSeconds, prompt],
  );

  const handleDurationChange = useCallback(
    (value: VideoModelDurationSeconds) => {
      setDurationSeconds(value);
      debouncedSave(prompt, modelId, value);
    },
    [debouncedSave, modelId, prompt],
  );

  const generateDisabled =
    !effectivePrompt.trim() || balance === undefined || !hasEnoughCredits || isGenerating;

  // Trigger generation: gate on offline/credits, create the connected
  // "ai-video" output node, then queue the Convex action against it.
  const handleGenerate = useCallback(async () => {
    if (!effectivePrompt.trim() || isGenerating) return;

    if (status.isOffline) {
      toast.warning(
        "Offline aktuell nicht unterstuetzt",
        "KI-Generierung benoetigt eine aktive Verbindung.",
      );
      return;
    }

    if (availableCredits !== null && !hasEnoughCredits) {
      // Actionable toast with a shortcut to the billing page.
      toast.action(tToast("ai.insufficientCreditsTitle"), {
        description: tToast("ai.insufficientCreditsDesc", {
          needed: creditCost,
          available: availableCredits,
        }),
        label: tToast("billing.topUp"),
        onClick: () => router.push("/settings/billing"),
        type: "warning",
      });
      return;
    }

    setError(null);
    setIsGenerating(true);

    try {
      const canvasId = nodeData.canvasId as Id<"canvases">;
      if (!canvasId) {
        throw new Error("Canvas-ID fehlt in der Node");
      }

      const promptToUse = effectivePrompt.trim();
      if (!promptToUse) return;

      // Place the output node to the right of this node, offset by the
      // measured width (fallback 260px) plus a 32px gap.
      const currentNode = getNode(id);
      const offsetX = (currentNode?.measured?.width ?? 260) + 32;
      const position = {
        x: (currentNode?.position?.x ?? 0) + offsetX,
        y: currentNode?.position?.y ?? 0,
      };

      // clientRequestId makes the node creation idempotent on retries.
      const clientRequestId = crypto.randomUUID();
      const outputNodeId = await createNodeConnectedFromSource({
        type: "ai-video",
        position,
        data: {
          prompt: promptToUse,
          modelId,
          durationSeconds,
          creditCost,
          canvasId,
        },
        clientRequestId,
        sourceNodeId: id as Id<"nodes">,
        sourceHandle: "video-prompt-out",
        targetHandle: "video-in",
      });

      await toast.promise(
        generateVideo({
          canvasId,
          sourceNodeId: id as Id<"nodes">,
          outputNodeId,
          prompt: promptToUse,
          modelId,
          durationSeconds,
        }),
        {
          loading: tToast("ai.generating"),
          success: tToast("ai.generationQueued"),
          error: tToast("ai.generationFailed"),
        },
      );
    } catch (err) {
      const classified = classifyError(err);

      // Rate/cap errors get dedicated toasts; everything else is shown
      // inline on the node.
      if (classified.type === "dailyCap") {
        toast.error(
          tToast("billing.dailyLimitReachedTitle"),
          "Morgen stehen wieder Generierungen zur Verfuegung.",
        );
      } else if (classified.type === "concurrency") {
        toast.warning(
          tToast("ai.concurrentLimitReachedTitle"),
          tToast("ai.concurrentLimitReachedDesc"),
        );
      } else {
        setError(classified.rawMessage || tToast("ai.generationFailed"));
      }
    } finally {
      setIsGenerating(false);
    }
  }, [
    availableCredits,
    createNodeConnectedFromSource,
    creditCost,
    durationSeconds,
    effectivePrompt,
    generateVideo,
    getNode,
    hasEnoughCredits,
    id,
    isGenerating,
    modelId,
    nodeData.canvasId,
    router,
    status.isOffline,
    tToast,
  ]);

  return (
    <BaseNodeWrapper
      nodeType="video-prompt"
      selected={selected}
      status={nodeData._status}
      statusMessage={nodeData._statusMessage}
      className="min-w-[260px] border-violet-500/30"
    >
      {/* Input handle: optional upstream text node providing the prompt. */}
      <Handle
        type="target"
        position={Position.Left}
        id="video-prompt-in"
        className="!h-3 !w-3 !bg-violet-600 !border-2 !border-background"
      />

      <div className="flex h-full flex-col gap-2 p-3">
        <div className="flex items-center gap-1.5 text-xs font-medium text-violet-700 dark:text-violet-300">
          <Video className="h-3.5 w-3.5" />
          {t("label")}
        </div>

        {/* Prompt area: read-only display when driven by a text node,
            editable textarea otherwise. */}
        {inputMeta.hasTextInput ? (
          <div className="flex-1 overflow-auto rounded-md border border-violet-500/30 bg-violet-500/5 px-3 py-2">
            <p className="text-[11px] font-medium text-violet-700 dark:text-violet-300">
              {t("promptFromTextNode")}
            </p>
            <p className="mt-1 whitespace-pre-wrap text-sm text-foreground">
              {inputMeta.textPrompt.trim() || t("noPromptHint")}
            </p>
          </div>
        ) : (
          <textarea
            value={prompt}
            onChange={handlePromptChange}
            placeholder={t("promptPlaceholder")}
            className="nodrag nowheel min-h-[72px] w-full flex-1 resize-none rounded-md border border-border bg-background px-3 py-2 text-sm placeholder:text-muted-foreground focus:outline-none focus:ring-1 focus:ring-violet-500"
          />
        )}

        {/* Model picker. NOTE(review): tier is hard-coded to "pro" — confirm
            this should not depend on the user's actual plan. */}
        <div className="flex flex-col gap-1.5">
          <Label htmlFor={`video-model-${id}`} className="text-[11px] text-muted-foreground">
            {t("modelLabel")}
          </Label>
          <Select value={modelId} onValueChange={handleModelChange}>
            <SelectTrigger id={`video-model-${id}`} className="nodrag nowheel w-full" size="sm">
              <SelectValue />
            </SelectTrigger>
            <SelectContent className="nodrag">
              {getAvailableVideoModels("pro").map((model) => (
                <SelectItem key={model.id} value={model.id}>
                  {model.label}
                </SelectItem>
              ))}
            </SelectContent>
          </Select>
        </div>

        {/* Duration toggle: the two supported clip lengths. */}
        <div className="flex flex-col gap-1.5">
          <Label className="text-[11px] text-muted-foreground">{t("durationLabel")}</Label>
          <div className="grid grid-cols-2 gap-1">
            <button
              type="button"
              onClick={() => handleDurationChange(5)}
              className={`nodrag rounded-md border px-2 py-1.5 text-xs ${
                durationSeconds === 5
                  ? "border-violet-500 bg-violet-500/10 text-violet-700 dark:text-violet-300"
                  : "border-border bg-background"
              }`}
            >
              {t("duration5s")}
            </button>
            <button
              type="button"
              onClick={() => handleDurationChange(10)}
              className={`nodrag rounded-md border px-2 py-1.5 text-xs ${
                durationSeconds === 10
                  ? "border-violet-500 bg-violet-500/10 text-violet-700 dark:text-violet-300"
                  : "border-border bg-background"
              }`}
            >
              {t("duration10s")}
            </button>
          </div>
        </div>

        {error ? <p className="text-xs text-destructive">{error}</p> : null}

        {/* Generate button with inline credit cost. */}
        <button
          type="button"
          onClick={() => void handleGenerate()}
          disabled={generateDisabled}
          className="nodrag inline-flex items-center justify-center gap-2 rounded-md bg-violet-600 px-3 py-2 text-sm font-medium text-white disabled:cursor-not-allowed disabled:opacity-50"
        >
          {isGenerating ? (
            <>
              <Loader2 className="h-4 w-4 animate-spin" />
              {tToast("ai.generating")}
            </>
          ) : (
            <>
              <Sparkles className="h-4 w-4" />
              {t("generateButton")}
              <span className="inline-flex items-center gap-1 text-xs opacity-90">
                <Coins className="h-3 w-3" />
                {creditCost} Cr
              </span>
            </>
          )}
        </button>

        {availableCredits !== null && !hasEnoughCredits ? (
          <p className="text-center text-xs text-destructive">{t("insufficientCredits")}</p>
        ) : null}
      </div>

      {/* Output handle: connects to the generated ai-video node. */}
      <Handle
        type="source"
        position={Position.Right}
        id="video-prompt-out"
        className="!h-3 !w-3 !bg-violet-600 !border-2 !border-background"
      />
    </BaseNodeWrapper>
  );
}
|
||||
Reference in New Issue
Block a user