feat(agent): add structured outputs and media archive support

This commit is contained in:
2026-04-10 19:01:04 +02:00
parent a1df097f9c
commit 9732022461
34 changed files with 3276 additions and 482 deletions

View File

@@ -13,7 +13,11 @@ Geteilte Hilfsfunktionen, Typ-Definitionen und Konfiguration. Keine React-Kompon
| `canvas-node-types.ts` | TypeScript-Typen und Union-Typen für Canvas-Nodes |
| `canvas-node-templates.ts` | Default-Daten für neue Nodes (beim Einfügen aus Palette) |
| `canvas-connection-policy.ts` | Validierungsregeln für Edge-Verbindungen zwischen Nodes |
| `agent-templates.ts` | Typsichere Agent-Registry für statische Agent-Node-Metadaten |
| `agent-definitions.ts` | Runtime-Registry für Agent-Definitionen (Struktur, Regeln, Blueprints, Docs-Pfad) |
| `agent-templates.ts` | UI-Projektion auf Agent-Metadaten aus `agent-definitions.ts` |
| `agent-prompting.ts` | Pure Prompt-Builder (`summarizeIncomingContext`, `buildAnalyzeMessages`, `buildExecuteMessages`) |
| `agent-run-contract.ts` | Normalisierung für Clarifications, Execution Plan und strukturierte Agent-Outputs |
| `generated/agent-doc-segments.ts` | Generierte Prompt-Segmente aus `components/agents/*.md` (nicht manuell editieren) |
| `ai-models.ts` | Client-seitige Bild-Modell-Definitionen (muss mit `convex/openrouter.ts` in sync bleiben) |
| `ai-video-models.ts` | Video-Modell-Registry: 5 MVP-Modelle mit Endpunkten, Credit-Kosten, Tier-Zugang |
| `video-poll-logging.ts` | Log-Volumen-Steuerung für Video-Polling (vermeidet excessive Konsolenausgabe) |
@@ -39,6 +43,21 @@ Geteilte Hilfsfunktionen, Typ-Definitionen und Konfiguration. Keine React-Kompon
---
## Agent Runtime: Dual Model
Die Agent-Runtime folgt einem dualen Modell:
- **TS-Verträge als Struktur-Single-Source:** `lib/agent-definitions.ts` + `lib/agent-run-contract.ts` definieren IDs, Regeln, Blueprints und Normalisierung.
- **Markdown-Segmente als kuratierter Prompt-Input:** markierte Segmente in `components/agents/*.md` werden via `scripts/compile-agent-docs.ts` in `lib/generated/agent-doc-segments.ts` kompiliert.
Wichtig:
- `convex/agents.ts` liest nur die generierte TS-Datei, nicht Raw-Markdown.
- Nur markierte `AGENT_PROMPT_SEGMENT`-Blöcke beeinflussen Analyze/Execute-Prompts.
- `agent-templates.ts` ist bewusst nur eine UI-Projektion aus `agent-definitions.ts`.
---
## `canvas-utils.ts` — Wichtigste Datei
Alle Adapter-Funktionen zwischen Convex-Datenmodell und React Flow. Details in `components/canvas/CLAUDE.md`.

207
lib/agent-definitions.ts Normal file
View File

@@ -0,0 +1,207 @@
/** Union of all registered agent ids; extend this union when registering a new agent. */
export type AgentDefinitionId = "campaign-distributor";

/**
 * Declarative contract for one structured output artifact an agent can emit.
 * Consumed by prompt builders (as hints) and by output normalization.
 */
export type AgentOutputBlueprint = {
  // Machine-readable artifact discriminator, e.g. "social-caption-pack".
  artifactType: string;
  requiredSections: readonly string[];
  requiredMetadataKeys: readonly string[];
  // User-visible quality claims the executor must return explicitly.
  qualityChecks: readonly string[];
};

/** One operator-tunable parameter rendered in the agent's configuration UI. */
export type AgentOperatorParameter = {
  key: string;
  label: string;
  type: "multi-select" | "select";
  options: readonly string[];
  // Single string for "select", string list for "multi-select".
  defaultValue: string | readonly string[];
  description: string;
};

/**
 * Runtime registry entry for one agent: identity, UI metadata, accepted
 * inputs, prompt rules, and default output blueprints. This is the structural
 * single source of truth; `lib/agent-templates.ts` projects it into the UI and
 * `lib/agent-run-contract.ts` normalizes run results against it.
 */
export type AgentDefinition = {
  id: AgentDefinitionId;
  // Bump when rules or blueprints change in a way consumers must notice.
  version: number;
  metadata: {
    name: string;
    description: string;
    emoji: string;
    color: string;
    vibe: string;
  };
  docs: {
    // Curated markdown compiled into lib/generated/agent-doc-segments.ts.
    markdownPath: string;
  };
  acceptedSourceNodeTypes: readonly string[];
  briefFieldOrder: readonly string[];
  channelCatalog: readonly string[];
  operatorParameters: readonly AgentOperatorParameter[];
  analysisRules: readonly string[];
  executionRules: readonly string[];
  defaultOutputBlueprints: readonly AgentOutputBlueprint[];
  // Static reference metadata surfaced in the UI (not used by the runtime).
  uiReference: {
    tools: readonly string[];
    expectedInputs: readonly string[];
    expectedOutputs: readonly string[];
    notes: readonly string[];
  };
};
/**
 * Registry of all agent definitions — the structural single source of truth
 * for agent ids, rules, blueprints, and UI metadata.
 *
 * Uses `satisfies` instead of an explicit type annotation: the previous
 * `: readonly AgentDefinition[] = [...] as const` widened the literal to the
 * annotated type (making the `as const` moot). `satisfies` keeps full
 * validation against `AgentDefinition` while preserving narrow literal types
 * (e.g. `AGENT_DEFINITIONS[0].id` stays `"campaign-distributor"`), which is
 * backward-compatible for all callers.
 */
export const AGENT_DEFINITIONS = [
  {
    id: "campaign-distributor",
    version: 1,
    metadata: {
      name: "Campaign Distributor",
      description:
        "Turns LemonSpace visual variants and optional campaign context into channel-native distribution packages.",
      emoji: "lemon",
      color: "yellow",
      vibe: "Transforms canvas outputs into channel-native campaign content that can ship immediately.",
    },
    docs: {
      markdownPath: "components/agents/campaign-distributor.md",
    },
    acceptedSourceNodeTypes: [
      "image",
      "asset",
      "video",
      "text",
      "note",
      "frame",
      "compare",
      "render",
      "ai-image",
      "ai-video",
    ],
    briefFieldOrder: [
      "briefing",
      "audience",
      "tone",
      "targetChannels",
      "hardConstraints",
    ],
    channelCatalog: [
      "Instagram Feed",
      "Instagram Stories",
      "Instagram Reels",
      "LinkedIn",
      "X (Twitter)",
      "TikTok",
      "Pinterest",
      "WhatsApp Business",
      "Telegram",
      "E-Mail Newsletter",
      "Discord",
    ],
    operatorParameters: [
      {
        key: "targetChannels",
        label: "Target channels",
        type: "multi-select",
        options: [
          "Instagram Feed",
          "Instagram Stories",
          "Instagram Reels",
          "LinkedIn",
          "X (Twitter)",
          "TikTok",
          "Pinterest",
          "WhatsApp Business",
          "Telegram",
          "E-Mail Newsletter",
          "Discord",
        ],
        defaultValue: ["Instagram Feed", "LinkedIn", "E-Mail Newsletter"],
        description: "Controls which channels receive one structured output each.",
      },
      {
        key: "variantsPerChannel",
        label: "Variants per channel",
        type: "select",
        options: ["1", "2", "3"],
        defaultValue: "1",
        description: "Controls how many alternative copy variants are produced per selected channel.",
      },
      {
        key: "toneOverride",
        label: "Tone override",
        type: "select",
        options: ["auto", "professional", "casual", "inspiring", "direct"],
        defaultValue: "auto",
        description: "Forces a global tone while still adapting output to channel format constraints.",
      },
    ],
    analysisRules: [
      "Validate that at least one visual source is present and request clarification only when required context is missing.",
      "Detect output language from briefing context and default to English when ambiguous.",
      "Assign assets to channels by format fit and visual intent, and surface assignment rationale.",
      "Produce one execution step per selected channel with explicit goal, sections, and quality checks.",
      "Record assumptions whenever brief details are missing, and never hide uncertainty.",
    ],
    executionRules: [
      "Generate one structured output payload per execution step and keep titles channel-specific.",
      "Respect requiredSections and requiredMetadataKeys for the selected blueprint.",
      "Keep language and tone aligned with brief constraints and toneOverride settings.",
      "State format mismatches explicitly and provide a practical remediation note.",
      "Return qualityChecks as explicit user-visible claims, not hidden reasoning.",
    ],
    defaultOutputBlueprints: [
      {
        artifactType: "social-caption-pack",
        requiredSections: ["Hook", "Caption", "Hashtags", "CTA", "Format note"],
        requiredMetadataKeys: [
          "objective",
          "targetAudience",
          "channel",
          "assetRef",
          "language",
          "tone",
          "recommendedFormat",
        ],
        qualityChecks: [
          "matches_channel_constraints",
          "uses_clear_cta",
          "references_assigned_asset",
          "avoids_unverified_claims",
        ],
      },
      {
        artifactType: "messenger-copy",
        requiredSections: ["Opening", "Message", "CTA", "Format note"],
        requiredMetadataKeys: ["objective", "channel", "assetRef", "language", "sendWindow"],
        qualityChecks: ["fits_channel_tone", "contains_one_clear_action", "is_high_signal"],
      },
      {
        artifactType: "newsletter-block",
        requiredSections: ["Subject", "Preview line", "Body block", "CTA"],
        requiredMetadataKeys: ["objective", "channel", "assetRef", "language", "recommendedSendTime"],
        qualityChecks: ["is_publish_ready", "respects_reader_time", "contains_single_primary_cta"],
      },
    ],
    uiReference: {
      tools: ["WebFetch", "WebSearch", "Read", "Write", "Edit"],
      expectedInputs: [
        "Visual node outputs (image, ai-image, render, compare)",
        "Optional briefing context (text, note)",
        "Asset labels, prompts, dimensions, and format hints",
      ],
      expectedOutputs: [
        "Per-channel structured delivery packages",
        "Asset assignment rationale",
        "Channel-ready captions, CTA, and format notes",
        "Newsletter-ready subject, preview line, and body block",
      ],
      notes: [
        "Primary outputs are structured agent-output nodes, not raw ai-text nodes.",
        "Language defaults to English when briefing language is ambiguous.",
        "Assumptions must be explicit when required context is missing.",
      ],
    },
  },
] as const satisfies readonly AgentDefinition[];
// Index for O(1) definition lookup by agent id.
const DEFINITION_INDEX = new Map<AgentDefinitionId, AgentDefinition>();
for (const entry of AGENT_DEFINITIONS) {
  DEFINITION_INDEX.set(entry.id, entry);
}

/**
 * Looks up a registered agent definition by id.
 *
 * @param id - Candidate id; not required to be a known AgentDefinitionId.
 * @returns The matching definition, or undefined for unknown ids.
 */
export function getAgentDefinition(id: string): AgentDefinition | undefined {
  return DEFINITION_INDEX.get(id as AgentDefinitionId);
}

253
lib/agent-prompting.ts Normal file
View File

@@ -0,0 +1,253 @@
import type { AgentDefinition } from "@/lib/agent-definitions";
import type {
AgentBriefConstraints,
AgentClarificationAnswerMap,
AgentExecutionPlan,
AgentLocale,
} from "@/lib/agent-run-contract";
import {
AGENT_DOC_SEGMENTS,
type AgentDocPromptSegments,
} from "@/lib/generated/agent-doc-segments";
/** Minimal chat message shape sent to OpenRouter. */
export type OpenRouterMessage = {
  role: "system" | "user" | "assistant";
  content: string;
};

/**
 * Canvas node projected into prompt context.
 * `data` is untyped here and is filtered through PROMPT_DATA_WHITELIST
 * before any of it reaches a prompt.
 */
export type PromptContextNode = {
  nodeId: string;
  type: string;
  status?: string;
  data?: unknown;
};
// Fixed rendering order for the compiled markdown prompt segments.
const PROMPT_SEGMENT_ORDER = ["role", "style-rules", "decision-framework", "channel-notes"] as const;

// Per node type: the only data fields allowed to appear in prompts.
// Acts as a noise/leak filter — node types not listed contribute no data.
const PROMPT_DATA_WHITELIST: Record<string, readonly string[]> = {
  image: ["url", "mimeType", "width", "height", "prompt"],
  asset: ["url", "mimeType", "width", "height", "title"],
  video: ["url", "durationSeconds", "width", "height"],
  text: ["content"],
  note: ["content", "color"],
  frame: ["label", "exportWidth", "exportHeight", "backgroundColor"],
  compare: ["leftNodeId", "rightNodeId", "sliderPosition"],
  render: ["url", "format", "width", "height"],
  "ai-image": ["prompt", "model", "modelTier", "creditCost"],
  "ai-video": ["prompt", "model", "modelLabel", "durationSeconds", "creditCost"],
};
/** Returns the trimmed string, or "" when the value is not a string. */
function trimText(value: unknown): string {
  if (typeof value !== "string") {
    return "";
  }
  return value.trim();
}
/**
 * Renders a scalar for single-line prompt output.
 * Strings are trimmed, whitespace-collapsed, and capped at 220 characters;
 * numbers and booleans are stringified; every other type yields "".
 */
function formatScalarValue(value: unknown): string {
  switch (typeof value) {
    case "string": {
      const collapsed = value.trim().replace(/\s+/g, " ");
      return collapsed.slice(0, 220);
    }
    case "number":
    case "boolean":
      return String(value);
    default:
      return "";
  }
}
/** Pretty-prints a value as a 2-space-indented JSON block for prompt bodies. */
function formatJsonBlock(value: unknown): string {
  const indentWidth = 2;
  return JSON.stringify(value, null, indentWidth);
}
/**
 * Resolves prompt segments for a definition with this precedence:
 * explicit override, then compiled AGENT_DOC_SEGMENTS, then empty segments.
 */
function resolvePromptSegments(
  definition: AgentDefinition,
  provided?: AgentDocPromptSegments,
): AgentDocPromptSegments {
  if (provided) {
    return provided;
  }
  const compiled = AGENT_DOC_SEGMENTS[definition.id];
  if (!compiled) {
    // Fall back to empty segments so prompt builders never crash on a
    // definition whose docs were not compiled.
    return {
      role: "",
      "style-rules": "",
      "decision-framework": "",
      "channel-notes": "",
    };
  }
  return compiled;
}
/**
 * Projects a node's data object onto the whitelisted fields for its type.
 * Non-object data, unknown node types, and values that format to "" are
 * all skipped.
 */
function extractWhitelistedFields(nodeType: string, data: unknown): Array<{ key: string; value: string }> {
  if (!data || typeof data !== "object" || Array.isArray(data)) {
    return [];
  }
  const source = data as Record<string, unknown>;
  const allowedKeys = PROMPT_DATA_WHITELIST[nodeType] ?? [];
  return allowedKeys.flatMap((key) => {
    const formatted = formatScalarValue(source[key]);
    return formatted === "" ? [] : [{ key, value: formatted }];
  });
}
/** Serializes the segments in canonical order as "key:\n<content>" paragraphs. */
function formatPromptSegments(segments: AgentDocPromptSegments): string {
  const parts: string[] = [];
  for (const segmentKey of PROMPT_SEGMENT_ORDER) {
    parts.push(`${segmentKey}:\n${segments[segmentKey]}`);
  }
  return parts.join("\n\n");
}
/** Instruction pinning the output language for all model-generated fields. */
function getOutputLanguageInstruction(locale: AgentLocale): string {
  const languageLabel = locale === "de" ? "German (de-DE)" : "English (en-US)";
  return `Write all generated fields in ${languageLabel}, including step titles, channel labels, output types, clarification prompts, and body content.`;
}
/**
 * One numbered line per default blueprint summarizing its contract.
 * Empty lists render as "none" so the model always sees every field.
 */
function formatBlueprintHints(definition: AgentDefinition): string {
  const lines = definition.defaultOutputBlueprints.map((blueprint, position) => {
    const pieces = [
      `${position + 1}. artifactType=${blueprint.artifactType}`,
      `requiredSections=${blueprint.requiredSections.join(", ") || "none"}`,
      `requiredMetadataKeys=${blueprint.requiredMetadataKeys.join(", ") || "none"}`,
      `qualityChecks=${blueprint.qualityChecks.join(", ") || "none"}`,
    ];
    return pieces.join("; ");
  });
  return lines.join("\n");
}
/**
 * Builds a deterministic plain-text summary of the incoming context nodes.
 * Nodes are sorted by nodeId (then type) so the prompt is stable across
 * runs, and node data is reduced to whitelisted fields only.
 */
export function summarizeIncomingContext(nodes: PromptContextNode[]): string {
  if (nodes.length === 0) {
    return "No incoming nodes connected to this agent.";
  }
  const ordered = [...nodes].sort((a, b) =>
    a.nodeId === b.nodeId ? a.type.localeCompare(b.type) : a.nodeId.localeCompare(b.nodeId),
  );
  const lines: string[] = [`Incoming context nodes: ${ordered.length}`];
  ordered.forEach((node, position) => {
    const statusLabel = trimText(node.status) || "unknown";
    lines.push(`${position + 1}. nodeId=${node.nodeId}, type=${node.type}, status=${statusLabel}`);
    const fields = extractWhitelistedFields(node.type, node.data);
    if (fields.length === 0) {
      lines.push(" data: (no whitelisted fields)");
      return;
    }
    for (const field of fields) {
      lines.push(` - ${field.key}: ${field.value}`);
    }
  });
  return lines.join("\n");
}
/**
 * Builds the analyze-phase [system, user] message pair for OpenRouter.
 *
 * The system message carries the agent's identity, language instruction,
 * compiled markdown prompt segments, analysis rules, brief field order, and
 * blueprint hints; the user message carries the run-specific state (brief,
 * clarification answers, and incoming node context). The model is instructed
 * to answer with structured JSON.
 */
export function buildAnalyzeMessages(input: {
  definition: AgentDefinition;
  locale: AgentLocale;
  briefConstraints: AgentBriefConstraints;
  clarificationAnswers: AgentClarificationAnswerMap;
  incomingContextSummary: string;
  incomingContextCount: number;
  // Optional override; defaults to the compiled AGENT_DOC_SEGMENTS entry.
  promptSegments?: AgentDocPromptSegments;
}): OpenRouterMessage[] {
  const segments = resolvePromptSegments(input.definition, input.promptSegments);
  return [
    {
      role: "system",
      // Paragraphs are blank-line separated so the model sees distinct sections.
      content: [
        `You are the LemonSpace Agent Analyzer for ${input.definition.metadata.name}.`,
        input.definition.metadata.description,
        getOutputLanguageInstruction(input.locale),
        "Use the following compiled prompt segments:",
        formatPromptSegments(segments),
        `analysis rules:\n- ${input.definition.analysisRules.join("\n- ")}`,
        `brief field order: ${input.definition.briefFieldOrder.join(", ")}`,
        `default output blueprints:\n${formatBlueprintHints(input.definition)}`,
        "Return structured JSON matching the schema.",
      ].join("\n\n"),
    },
    {
      role: "user",
      content: [
        `Brief + constraints:\n${formatJsonBlock(input.briefConstraints)}`,
        `Current clarification answers:\n${formatJsonBlock(input.clarificationAnswers)}`,
        `Incoming context node count: ${input.incomingContextCount}`,
        "Incoming node context summary:",
        input.incomingContextSummary,
      ].join("\n\n"),
    },
  ];
}
/**
 * One numbered line per execution step listing its full contract.
 * Empty section/check lists render as "none".
 */
function formatExecutionRequirements(plan: AgentExecutionPlan): string {
  const lines: string[] = [];
  plan.steps.forEach((step, position) => {
    const sectionList = step.requiredSections.join(", ") || "none";
    const checkList = step.qualityChecks.join(", ") || "none";
    lines.push(
      `${position + 1}. id=${step.id}; title: ${step.title}; channel: ${step.channel}; outputType: ${step.outputType}; artifactType: ${step.artifactType}; goal: ${step.goal}; requiredSections: ${sectionList}; qualityChecks: ${checkList}`,
    );
  });
  return lines.join("\n");
}
/**
 * Builds the execute-phase [system, user] message pair for OpenRouter.
 *
 * The system message carries the executor identity, language instruction,
 * compiled prompt segments, and execution rules; the user message carries the
 * brief, clarification answers, the execution plan summary, per-step
 * requirements, and the incoming node context. The model is instructed to
 * return one output payload per execution step, keyed by step id.
 */
export function buildExecuteMessages(input: {
  definition: AgentDefinition;
  locale: AgentLocale;
  briefConstraints: AgentBriefConstraints;
  clarificationAnswers: AgentClarificationAnswerMap;
  incomingContextSummary: string;
  executionPlan: AgentExecutionPlan;
  // Optional override; defaults to the compiled AGENT_DOC_SEGMENTS entry.
  promptSegments?: AgentDocPromptSegments;
}): OpenRouterMessage[] {
  const segments = resolvePromptSegments(input.definition, input.promptSegments);
  return [
    {
      role: "system",
      // Paragraphs are blank-line separated so the model sees distinct sections.
      content: [
        `You are the LemonSpace Agent Executor for ${input.definition.metadata.name}.`,
        getOutputLanguageInstruction(input.locale),
        "Use the following compiled prompt segments:",
        formatPromptSegments(segments),
        `execution rules:\n- ${input.definition.executionRules.join("\n- ")}`,
        "Return one output payload per execution step keyed by step id.",
      ].join("\n\n"),
    },
    {
      role: "user",
      content: [
        `Brief + constraints:\n${formatJsonBlock(input.briefConstraints)}`,
        `Clarification answers:\n${formatJsonBlock(input.clarificationAnswers)}`,
        `Execution plan summary: ${input.executionPlan.summary}`,
        `Per-step requirements:\n${formatExecutionRequirements(input.executionPlan)}`,
        "Incoming node context summary:",
        input.incomingContextSummary,
      ].join("\n\n"),
    },
  ];
}

View File

@@ -13,11 +13,39 @@ export type AgentOutputDraft = {
body?: string;
};
/** One labeled content section inside a structured agent output. */
export type AgentOutputSection = {
  id: string;
  label: string;
  content: string;
};

/**
 * Fully normalized structured output for one execution step.
 * All fields are guaranteed present after normalizeAgentStructuredOutput.
 */
export type AgentStructuredOutput = {
  title: string;
  channel: string;
  artifactType: string;
  // Short preview; derived from the first section when the draft omits it.
  previewText: string;
  sections: AgentOutputSection[];
  // After normalization each value is either a scalar string or a string list.
  metadata: Record<string, string | string[]>;
  qualityChecks: string[];
  // Plain-text body; derived from sections/preview/title when the draft omits it.
  body: string;
};

/** Loosely-typed draft as produced by the model, before normalization. */
export type AgentStructuredOutputDraft = Partial<
  AgentStructuredOutput & {
    sections: Array<Partial<AgentOutputSection> | null>;
    metadata: Record<string, unknown>;
  }
>;

/** One planned execution step; exactly one structured output is produced per step. */
export type AgentExecutionStep = {
  id: string;
  title: string;
  channel: string;
  outputType: string;
  artifactType: string;
  goal: string;
  requiredSections: string[];
  qualityChecks: string[];
};
export type AgentExecutionPlan = {
@@ -44,6 +72,7 @@ export type AgentAnalyzeResult = {
// Fallbacks applied during normalization so downstream consumers never see
// empty required fields.
const SAFE_FALLBACK_TITLE = "Untitled";
const SAFE_FALLBACK_CHANNEL = "general";
const SAFE_FALLBACK_OUTPUT_TYPE = "text";
const SAFE_FALLBACK_GOAL = "Deliver channel-ready output.";
function trimString(value: unknown): string {
return typeof value === "string" ? value.trim() : "";
@@ -82,6 +111,91 @@ function normalizeStringArray(raw: unknown, options?: { lowerCase?: boolean }):
return normalized;
}
function normalizeOutputSections(raw: unknown): AgentOutputSection[] {
if (!Array.isArray(raw)) {
return [];
}
const sections: AgentOutputSection[] = [];
const seenIds = new Set<string>();
for (const item of raw) {
if (!item || typeof item !== "object" || Array.isArray(item)) {
continue;
}
const sectionRecord = item as Record<string, unknown>;
const label = trimString(sectionRecord.label);
const content = trimString(sectionRecord.content);
if (label === "" || content === "") {
continue;
}
const normalizedBaseId = normalizeStepId(sectionRecord.id) || normalizeStepId(label) || "section";
let sectionId = normalizedBaseId;
let suffix = 2;
while (seenIds.has(sectionId)) {
sectionId = `${normalizedBaseId}-${suffix}`;
suffix += 1;
}
seenIds.add(sectionId);
sections.push({
id: sectionId,
label,
content,
});
}
return sections;
}
/**
 * Normalizes metadata to string or string-list values.
 * Keys are trimmed and empty keys dropped; a non-empty scalar string wins
 * over a list interpretation; entries that normalize to nothing are removed.
 */
function normalizeStructuredMetadata(raw: unknown): Record<string, string | string[]> {
  if (!raw || typeof raw !== "object" || Array.isArray(raw)) {
    return {};
  }
  const normalized: Record<string, string | string[]> = {};
  for (const [rawKey, rawValue] of Object.entries(raw as Record<string, unknown>)) {
    const key = trimString(rawKey);
    if (!key) {
      continue;
    }
    const scalar = trimString(rawValue);
    if (scalar) {
      normalized[key] = scalar;
      continue;
    }
    const list = normalizeStringArray(rawValue);
    if (list.length > 0) {
      normalized[key] = list;
    }
  }
  return normalized;
}
/** Preview text defaults to the content of the first section, or "" if none. */
function derivePreviewTextFromSections(sections: AgentOutputSection[]): string {
  const [first] = sections;
  return first?.content ?? "";
}
/**
 * Derives a plain-text body: joined "label:\ncontent" paragraphs when
 * sections exist, otherwise the preview text, otherwise the title.
 */
function deriveBodyFromStructuredOutput(input: {
  sections: AgentOutputSection[];
  previewText: string;
  title: string;
}): string {
  const { sections, previewText, title } = input;
  if (sections.length === 0) {
    return previewText !== "" ? previewText : title;
  }
  const paragraphs = sections.map((section) => `${section.label}:\n${section.content}`);
  return paragraphs.join("\n\n");
}
export function normalizeAgentBriefConstraints(raw: unknown): AgentBriefConstraints {
const rawRecord =
raw && typeof raw === "object" && !Array.isArray(raw)
@@ -183,6 +297,13 @@ export function normalizeAgentExecutionPlan(raw: unknown): AgentExecutionPlan {
title: trimString(itemRecord.title) || SAFE_FALLBACK_TITLE,
channel: trimString(itemRecord.channel) || SAFE_FALLBACK_CHANNEL,
outputType: trimString(itemRecord.outputType) || SAFE_FALLBACK_OUTPUT_TYPE,
artifactType:
trimString(itemRecord.artifactType) ||
trimString(itemRecord.outputType) ||
SAFE_FALLBACK_OUTPUT_TYPE,
goal: trimString(itemRecord.goal) || SAFE_FALLBACK_GOAL,
requiredSections: normalizeStringArray(itemRecord.requiredSections),
qualityChecks: normalizeStringArray(itemRecord.qualityChecks),
});
}
@@ -229,3 +350,39 @@ export function normalizeAgentOutputDraft(
body: trimString(draft.body),
};
}
/**
 * Normalizes a model-produced structured output draft into a complete
 * AgentStructuredOutput, filling missing identity fields from the provided
 * fallback (then from safe constants) and deriving previewText/body when
 * the draft omits them.
 */
export function normalizeAgentStructuredOutput(
  draft: AgentStructuredOutputDraft,
  fallback: {
    title: string;
    channel: string;
    artifactType: string;
  },
): AgentStructuredOutput {
  const sections = normalizeOutputSections(draft.sections);
  const resolvedTitle = trimString(draft.title) || trimString(fallback.title) || SAFE_FALLBACK_TITLE;
  const resolvedChannel =
    trimString(draft.channel) || trimString(fallback.channel) || SAFE_FALLBACK_CHANNEL;
  const resolvedArtifactType =
    trimString(draft.artifactType) || trimString(fallback.artifactType) || SAFE_FALLBACK_OUTPUT_TYPE;
  const resolvedPreview = trimString(draft.previewText) || derivePreviewTextFromSections(sections);
  const resolvedBody =
    trimString(draft.body) ||
    deriveBodyFromStructuredOutput({ sections, previewText: resolvedPreview, title: resolvedTitle });
  return {
    title: resolvedTitle,
    channel: resolvedChannel,
    artifactType: resolvedArtifactType,
    previewText: resolvedPreview,
    sections,
    metadata: normalizeStructuredMetadata(draft.metadata),
    qualityChecks: normalizeStringArray(draft.qualityChecks),
    body: resolvedBody,
  };
}

View File

@@ -1,3 +1,5 @@
import { AGENT_DEFINITIONS } from "@/lib/agent-definitions";
export type AgentTemplateId = "campaign-distributor";
export type AgentTemplate = {
@@ -15,46 +17,19 @@ export type AgentTemplate = {
};
export const AGENT_TEMPLATES: readonly AgentTemplate[] = [
{
id: "campaign-distributor",
name: "Campaign Distributor",
description:
"Develops and distributes LemonSpace campaign content across social media and messenger channels.",
emoji: "lemon",
color: "yellow",
vibe: "Transforms canvas outputs into campaign-ready channel content.",
tools: ["WebFetch", "WebSearch", "Read", "Write", "Edit"],
channels: [
"Instagram Feed",
"Instagram Stories",
"Instagram Reels",
"LinkedIn",
"Twitter / X",
"TikTok",
"Pinterest",
"WhatsApp Business",
"Telegram",
"E-Mail Newsletter",
"Discord",
],
expectedInputs: [
"Render-Node-Export",
"Compare-Varianten",
"KI-Bild-Output",
"Frame-Dimensionen",
],
expectedOutputs: [
"Caption-Pakete",
"Kanal-Matrix",
"Posting-Plan",
"Hashtag-Sets",
"Messenger-Texte",
],
notes: [
"MVP: static input-only node, no execution flow.",
"agent-output remains pending until runtime orchestration exists.",
],
},
...AGENT_DEFINITIONS.map((definition) => ({
id: definition.id,
name: definition.metadata.name,
description: definition.metadata.description,
emoji: definition.metadata.emoji,
color: definition.metadata.color,
vibe: definition.metadata.vibe,
tools: definition.uiReference.tools,
channels: definition.channelCatalog,
expectedInputs: definition.uiReference.expectedInputs,
expectedOutputs: definition.uiReference.expectedOutputs,
notes: definition.uiReference.notes,
})),
] as const;
const AGENT_TEMPLATE_BY_ID = new Map<AgentTemplateId, AgentTemplate>(
@@ -62,8 +37,5 @@ const AGENT_TEMPLATE_BY_ID = new Map<AgentTemplateId, AgentTemplate>(
);
export function getAgentTemplate(id: string): AgentTemplate | undefined {
if (id === "campaign-distributor") {
return AGENT_TEMPLATE_BY_ID.get(id);
}
return undefined;
return AGENT_TEMPLATE_BY_ID.get(id as AgentTemplateId);
}

View File

@@ -0,0 +1,21 @@
// This file is generated by scripts/compile-agent-docs.ts
// Do not edit manually.
import type { AgentDefinitionId } from "@/lib/agent-definitions";
/** Keys of the curated prompt segments extracted from components/agents/*.md. */
export type AgentDocPromptSegmentKey =
  | "role"
  | "style-rules"
  | "decision-framework"
  | "channel-notes";

/** One compiled segment string per segment key. */
export type AgentDocPromptSegments = Record<AgentDocPromptSegmentKey, string>;

// Compiled prompt segments per agent id. NOTE(review): generated artifact —
// change components/agents/*.md and re-run scripts/compile-agent-docs.ts
// instead of editing these literals.
export const AGENT_DOC_SEGMENTS: Record<AgentDefinitionId, AgentDocPromptSegments> = {
  "campaign-distributor": {
    "role": `You are the Campaign Distributor for LemonSpace, an AI creative canvas used by small design and marketing teams. Your mission is to transform visual canvas outputs and optional campaign briefing into channel-native distribution packages that are ready to publish, mapped to the best-fitting asset, and explicit about assumptions when context is missing.`,
    "style-rules": `Write specific, decisive, and immediately usable copy. Prefer concrete verbs over vague language, keep claims honest, and never invent product facts, statistics, or deadlines that were not provided. Adapt tone by channel while preserving campaign intent, and keep each deliverable concise enough to be practical for operators.`,
    "decision-framework": `Reason in this order: (1) validate required visual context, (2) detect language from brief and default to English if ambiguous, (3) assign assets to channels by format fit and visual intent, (4) select the best output blueprint per channel, (5) generate publish-ready sections and metadata, (6) surface assumptions and format risks explicitly. Ask clarifying questions only when required fields are missing or conflicting. For each selected channel, produce one structured deliverable with artifactType, previewText, sections, metadata, and qualityChecks.`,
    "channel-notes": `Instagram needs hook-first visual storytelling with clear CTA and practical hashtag sets. LinkedIn needs professional framing, strong insight opening, and comment-driving close without hype language. X needs brevity and thread-aware sequencing when 280 characters are exceeded. TikTok needs native conversational phrasing and 9:16 adaptation notes. WhatsApp and Telegram need direct, high-signal copy with one clear action. Newsletter needs subject cue, preview line, and a reusable body block that fits any email builder. If asset format mismatches channel constraints, flag it and suggest a fix.`,
  },
};