Add Activepieces integration for workflow automation

- Add Activepieces fork with SmoothSchedule custom piece
- Create integrations app with Activepieces service layer
- Add embed token endpoint for iframe integration
- Create Automations page with embedded workflow builder
- Add sidebar visibility fix for embed mode
- Add list inactive customers endpoint to Public API
- Include SmoothSchedule triggers: event created/updated/cancelled
- Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
poduck
2025-12-18 22:59:37 -05:00
parent 9848268d34
commit 3aa7199503
16292 changed files with 1284892 additions and 4708 deletions
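
The commit message above adds a custom SmoothSchedule piece (triggers for event created/updated/cancelled and actions such as creating events or listing resources, services, and customers) to the Activepieces fork. The piece source itself is not part of the excerpted diff below, so the following is only a minimal sketch of how such a piece is typically registered with the Activepieces pieces framework; the auth style, base URL, and webhook handling are illustrative assumptions, not the code from this commit.

```typescript
// Minimal sketch of a SmoothSchedule piece for Activepieces (assumptions:
// API-key auth, a placeholder Public API URL, and an empty webhook setup).
import {
  createPiece,
  createAction,
  createTrigger,
  PieceAuth,
  TriggerStrategy,
} from '@activepieces/pieces-framework';
import { httpClient, HttpMethod } from '@activepieces/pieces-common';

// Hypothetical API-key auth against the SmoothSchedule Public API.
const smoothscheduleAuth = PieceAuth.SecretText({
  displayName: 'API Key',
  description: 'API key for the SmoothSchedule Public API.',
  required: true,
});

// "Event Created" trigger: the flow runs with the webhook payload whenever
// SmoothSchedule calls the webhook URL that Activepieces provisions.
const eventCreated = createTrigger({
  auth: smoothscheduleAuth,
  name: 'event_created',
  displayName: 'Event Created',
  description: 'Fires when a new event is created in SmoothSchedule.',
  type: TriggerStrategy.WEBHOOK,
  props: {},
  sampleData: {},
  async onEnable(context) {
    // Register context.webhookUrl with SmoothSchedule here (endpoint assumed).
  },
  async onDisable(context) {
    // Unregister the webhook here.
  },
  async run(context) {
    return [context.payload.body];
  },
});

// "List Resources" action: a plain authenticated GET against the Public API.
const listResources = createAction({
  auth: smoothscheduleAuth,
  name: 'list_resources',
  displayName: 'List Resources',
  description: 'Lists resources available for scheduling.',
  props: {},
  async run(context) {
    const response = await httpClient.sendRequest({
      method: HttpMethod.GET,
      // Placeholder URL; the real Public API host is not shown in this diff.
      url: 'https://example.smoothschedule.test/api/v1/resources',
      headers: { Authorization: `Bearer ${context.auth}` },
    });
    return response.body;
  },
});

export const smoothschedule = createPiece({
  displayName: 'SmoothSchedule',
  auth: smoothscheduleAuth,
  minimumSupportedRelease: '0.36.1',
  logoUrl: 'https://example.smoothschedule.test/logo.png', // placeholder
  authors: [],
  actions: [listResources],
  triggers: [eventCreated],
});
```

The embed token endpoint and the Automations page mentioned above live on the SmoothSchedule side rather than in the piece; they are not shown in the files excerpted below.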

@@ -0,0 +1,33 @@
{
"extends": [
"../../../../.eslintrc.base.json"
],
"ignorePatterns": [
"!**/*"
],
"overrides": [
{
"files": [
"*.ts",
"*.tsx",
"*.js",
"*.jsx"
],
"rules": {}
},
{
"files": [
"*.ts",
"*.tsx"
],
"rules": {}
},
{
"files": [
"*.js",
"*.jsx"
],
"rules": {}
}
]
}

@@ -0,0 +1,7 @@
# pieces-eden-ai
This library was generated with [Nx](https://nx.dev).
## Building
Run `nx build pieces-eden-ai` to build the library.

@@ -0,0 +1,4 @@
{
"name": "@activepieces/piece-eden-ai",
"version": "0.0.8"
}

@@ -0,0 +1,65 @@
{
"name": "pieces-eden-ai",
"$schema": "../../../../node_modules/nx/schemas/project-schema.json",
"sourceRoot": "packages/pieces/community/eden-ai/src",
"projectType": "library",
"release": {
"version": {
"manifestRootsToUpdate": [
"dist/{projectRoot}"
],
"currentVersionResolver": "git-tag",
"fallbackCurrentVersionResolver": "disk"
}
},
"tags": [],
"targets": {
"build": {
"executor": "@nx/js:tsc",
"outputs": [
"{options.outputPath}"
],
"options": {
"outputPath": "dist/packages/pieces/community/eden-ai",
"tsConfig": "packages/pieces/community/eden-ai/tsconfig.lib.json",
"packageJson": "packages/pieces/community/eden-ai/package.json",
"main": "packages/pieces/community/eden-ai/src/index.ts",
"assets": [
"packages/pieces/community/eden-ai/*.md",
{
"input": "packages/pieces/community/eden-ai/src/i18n",
"output": "./src/i18n",
"glob": "**/!(i18n.json)"
}
],
"buildableProjectDepsInPackageJsonType": "dependencies",
"updateBuildableProjectDepsInPackageJson": true
},
"dependsOn": [
"^build",
"prebuild"
]
},
"nx-release-publish": {
"options": {
"packageRoot": "dist/{projectRoot}"
}
},
"lint": {
"executor": "@nx/eslint:lint",
"outputs": [
"{options.outputFile}"
]
},
"prebuild": {
"executor": "nx:run-commands",
"options": {
"cwd": "packages/pieces/community/eden-ai",
"command": "bun install --no-save --silent"
},
"dependsOn": [
"^build"
]
}
}
}

@@ -0,0 +1,130 @@
{
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI ist eine Plattform, die eine Reihe von KI-Diensten bereitstellt, darunter Textgenerierung, Zusammenfassung, Übersetzung und mehr.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "Du kannst deinen API-Schlüssel von deinem [Eden AI Dashboard](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Text generieren",
"Summarize Text": "Text zusammenfassen",
"Extract Keywords in Text": "Schlüsselwörter in Text extrahieren",
"Detect Language of Text": "Sprache des Textes erkennen",
"Extract Named Entities in Text": "Benannte Entitäten im Text extrahieren",
"Moderate Text": "Text moderieren",
"Spell Check": "Rechtschreibprüfung",
"Translate Text": "Text übersetzen",
"Invoice Parser": "Rechnungs-Parser",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Text in Bild extrahieren (OCR)",
"Image Generation": "Bildgenerierung",
"Generate Audio From Text": "Audio aus Text generieren",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Erzeugen Sie Textvervollständigungen mit verschiedenen KI-Anbietern über Eden AI-Chat-Endpunkt.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Entpacken Sie Schlüsselsätze und erstellen Sie Zusammenfassungen aus langen Textabschnitten mit verschiedenen KI-Anbietern.",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Identifizieren Sie wichtige Begriffe in einem Text mit Eden AI. Unterstützt mehrere Anbieter, Sprachen und Modelle.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Erkennen der Sprache, die in einem Text mit Eden AI verwendet wird. Unterstützt mehrere Anbieter und Modelle.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Identifizieren Sie Entitäten (Namen, Orte) im Text mit Eden AI. Unterstützt mehrere Anbieter, Sprachen und Modelle.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Erkenne expliziten oder richtlinienverletzenden Text mit Eden AI. Unterstützt mehrere Anbieter, Sprachen und Modelle.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Rechtschreibfehler oder Grammatikfehler mit Eden AI identifizieren und korrigieren. Unterstützt mehrere Anbieter, Sprachen und Modelle.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Mit Eden AI Text in verschiedene Sprachen übersetzen. Unterstützt mehrere Anbieter, Sprachen und Modelle.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Extrahieren Sie strukturierte Rechnungsdaten aus Dateien mit Eden AI. Unterstützt mehrere Anbieter, Sprachen und Dokumententypen.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Extrahieren Sie strukturierte Daten aus Belegen und Dokumenten mit Eden AI. Unterstützt die generelle Datenextraktion mit Begrenzungsboxen.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Extrahieren Sie Text aus Bildern (OCR) mit Eden AI. Unterstützt mehrere Anbieter, Sprachen und Boarding-Box-Koordinaten.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Erstellen Sie Bilder aus Text-Eingabeaufforderungen mit Eden AI. Unterstützt mehrere Anbieter, Modelle und Auflösungen.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Konvertieren Sie Text in gesprochene Audiodateien mit Eden AI. Unterstützt mehrere Anbieter, Sprachen und Spracheinstellungen.",
"Provider": "Anbieter",
"Prompt": "Prompt",
"System Prompt": "System-Prompt",
"Model": "Modell",
"Temperature": "Temperatur",
"Max Completion Tokens": "Max. Abschluss-Token",
"Reasoning Effort": "Grund dafür",
"Fallback Providers": "Fallback-Anbieter",
"Include Image": "Bild einbeziehen",
"Image URL": "Bild-URL",
"Text to Summarize": "Zusammenfassender Text",
"Number of Summary Sentences": "Anzahl der Zusammenfassungs-Sätze",
"Text Language": "Textsprache",
"Specific Model": "Spezifisches Modell",
"Include Original Response": "Originalantwort einbeziehen",
"Text to Analyze": "Zu analysierender Text",
"Text to Moderate": "Text zum Moderieren",
"Text to Check": "Zu überprüfender Text",
"Text to Translate": "Zu übersetzender Text",
"Source Language": "Quellsprache",
"Target Language": "Zielsprache",
"File URL": "Datei-URL",
"Document Type": "Dokumententyp",
"Document Language": "Dokumentsprache",
"PDF Password": "PDF Passwort",
"Convert to PDF": "In PDF konvertieren",
"Attributes as List": "Attribute als Liste",
"Resolution": "Auflösung",
"Number of Images": "Anzahl der Bilder",
"Text": "Text",
"Language": "Sprache",
"Voice Gender": "Sprach-Geschlecht",
"Speaking Rate": "Sprechrate",
"Voice Pitch": "Stimmenzahl",
"Audio Volume": "Audio-Lautstärke",
"Audio Format": "Audioformat",
"Sampling Rate": "Sampling-Rate",
"The AI provider to use for text generation.": "Der für die Texterzeugung zu verwendende AI-Provider.",
"The main prompt or question you want the AI to respond to.": "Die Hauptaufgabe oder Frage, auf die die KI antworten soll.",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "Systemmeldung, um das Verhalten und den Kontext für den AI-Assistenten einzustellen (z.B. \"Sie sind ein hilfreicher Coding-Assistent\").",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Spezifisches zu verwendendes Modell (z.B. gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leer lassen für Provider-spezifische Standards.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Steuert den Zufall (0.0-2.0). Höhere Werte machen die Ausgabe kreativer.",
"Maximum number of tokens to generate in the response.": "Maximale Anzahl an Token, die in der Antwort generiert werden sollen.",
"Level of reasoning depth for the response.": "Ebene der Argumentationstiefe für die Antwort.",
"Alternative providers to try if the main provider fails.": "Alternativer Provider, um zu versuchen, wenn der Hauptanbieter fehlschlägt.",
"Include an image in your prompt (for vision-capable models).": "Fügen Sie ein Bild in Ihre Eingabeaufforderung ein (für visionfähige Modelle).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL des Bildes, das in die Eingabeaufforderung aufgenommen werden soll (nur verwendet, wenn \"Include Image\" aktiviert ist).",
"The AI provider to use for text summarization.": "Der KI-Anbieter, der zur Textzusammenfassung verwendet wird.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "Der Textinhalt, den Sie zusammenfassen möchten. Kann Artikel, Dokumente oder ein beliebiger Text sein.",
"How many sentences should the summary contain (1-20).": "Wie viele Sätze sollte die Zusammenfassung enthalten (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "Die Sprache des Eingabetextes. Wählen Sie \"Automatische Erkennung\" bei Unsicherheit.",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Spezifisches zu verwendendes Modell (z.B. gpt-4, gpt-4o, summarize-xlarge). Leer lassen für Standardeinstellung.",
"Alternative providers to try if the main provider fails (up to 5).": "Alternativer Anbieter zu versuchen, wenn der Hauptanbieter nicht funktioniert (bis zu 5).",
"Include the raw provider response in the output for debugging.": "Fügen Sie die Antwort des Rohanbieters in die Ausgabe zum Debuggen ein.",
"The AI provider to use for keyword extraction.": "Der AI-Provider, der zur Keyword-Extraktion verwendet wird.",
"The text to extract keywords from.": "Der Text, aus dem Keywords extrahiert werden.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Spezifisches zu verwendendes Modell (z.B. gpt-4o, gpt-4, grok-2-latest). Leer lassen für Standardeinstellung.",
"The AI provider to use for language detection.": "Der für die Spracherkennung zu verwendende AI-Provider.",
"The text to detect language for.": "Der Text, für den die Sprache erkannt werden soll.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Spezifisches zu verwendendes Modell (z.B. gpt-4o, grok-2-latest). Leer lassen für Standardeinstellung.",
"The AI provider to use for named entity recognition.": "Der AI-Provider, der für die benannte Entitätserkennung verwendet wird.",
"The text to extract entities from.": "Der Text, aus dem Entitäten extrahiert werden.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Spezifisches zu verwendendes Modell (z.B. gpt-4o, gemini-1.5-flash, grok-2-latest). Leer lassen für Standardeinstellung.",
"The AI provider to use for text moderation.": "Der für Textmoderation zu verwendende AI-Provider.",
"The text to analyze for explicit or policy-violating content.": "Der zu analysierende Text für explizite oder Policy-verletzende Inhalte.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Spezifisches zu verwendendes Modell (z.B. text-moderation-neuesten, text-moderation-stable). Leer lassen für Standardeinstellung.",
"The AI provider to use for spell checking and grammar correction.": "Der AI Provider zur Rechtschreibprüfung und Grammatikkorrektur.",
"The text to check for spelling or grammar errors.": "Der zu prüfende Text auf Rechtschreibfehler oder Grammatikfehler.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Spezifisches zu verwendendes Modell (z.B. gpt-4o, gpt-4, grok-2-latest, Befehl). Leer lassen für Standardeinstellung.",
"The AI provider to use for text translation.": "Der für die Textübersetzung zu verwendende AI-Anbieter.",
"The text to translate.": "Der zu übersetzende Text.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "Die Sprache des Eingabetextes. Wählen Sie \"Automatische Erkennung\", um die Sprache automatisch zu erkennen.",
"The language to translate the text into.": "Die Sprache, in die der Text übersetzt werden soll.",
"The AI provider to use for financial document parsing.": "Der AI-Dienstleister für das Parsen von Finanzdokumenten.",
"Public URL to the financial document file (PDF, image, etc).": "Öffentliche URL für die Finanzdokument-Datei (PDF, Bild, etc).",
"The type of financial document to parse.": "Die Art des zu analysierenden Finanzdokuments.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "Die Sprache des Dokuments. Wählen Sie \"Automatische Erkennung\", wenn Sie sich nicht sicher sind.",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Spezifisches zu verwendendes Modell (z.B. gpt-4o, gpt-4o-mini, gpt-4-turbo). Leer lassen für Standardeinstellung.",
"Password for protected PDF files (if applicable).": "Passwort für geschützte PDF-Dateien (falls zutreffend).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Konvertieren Sie DOC/DOCX-Dateien in ein PDF-Format, um die Kompatibilität zu verbessern.",
"The AI provider to use for data extraction.": "Der zur Datenextraktion zu verwendende AI-Provider.",
"Public URL to the document file (PDF, image, etc).": "Öffentliche URL zur Dokumentdatei (PDF, Bild usw.).",
"Return extracted data with each attribute as a list instead of list of objects.": "Gibt extrahierte Daten mit jedem Attribut als Liste anstelle von Objekten zurück.",
"The AI provider to use for text extraction.": "Der AI-Anbieter, der zur Textextraktion verwendet wird.",
"Public URL to the image or document file.": "Öffentliche URL zum Bild oder zur Dokumentdatei.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "Die Sprache des Textes im Bild. Wählen Sie \"Automatische Erkennung\", wenn Sie sich nicht sicher sind.",
"The AI provider to use for image generation.": "Der für die Bildgenerierung zu verwendende AI-Provider.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Beschreibung des gewünschten Bildes. Seien Sie spezifisch und beschreibend für die besten Ergebnisse.",
"The image resolution (e.g., 512x512, 1024x1024).": "Die Bildauflösung (z.B. 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Anzahl der zu generierenden Bilder (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Spezifisches Modell, das für die Bildgenerierung verwendet werden soll. Leer lassen für die Standardeinstellung des Anbieters.",
"The AI provider to use for text-to-speech synthesis.": "Der für Text-zu-Sprach-Synthese zu verwendende AI-Provider.",
"The text to convert to speech.": "Der zu konvertierende Text in Sprache.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "Die Sprache und Gebietsschema für die Sprachsynthese (Standardeinstellung de-US wenn nicht angegeben).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Wählen Sie das Sprach-Geschlecht für Sprachsynthese (Standardmäßig weiblich wenn nicht angegeben).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Sprachrate anpassen (-100 bis 100, wobei 0 die normale Geschwindigkeit ist).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Stimmfeld anpassen (-100 bis 100, wobei 0 normal ist).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Audio-Lautstärke anpassen (-100 bis 100, wobei 0 normal ist).",
"The audio format for the generated speech (default: MP3).": "Das Audioformat für die generierte Sprache (Standard: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Audio-Abtastrate in Hz (0-200000, 0 für den Provider-Standard)."
}

@@ -0,0 +1,130 @@
{
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI es una plataforma que proporciona una amplia gama de servicios de IA, incluyendo generación de texto, resumen, traducción y más.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "Puedes obtener tu clave API de tu [panel de control de Eden AI](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Generar texto",
"Summarize Text": "Resumir texto",
"Extract Keywords in Text": "Extraer palabras clave en el texto",
"Detect Language of Text": "Detectar idioma del texto",
"Extract Named Entities in Text": "Extraer entidades con nombre en texto",
"Moderate Text": "Moderar texto",
"Spell Check": "Revisión ortográfica",
"Translate Text": "Traducir texto",
"Invoice Parser": "Analizador de Facturas",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Extraer texto en imagen (OCR)",
"Image Generation": "Generación de imágenes",
"Generate Audio From Text": "Generar audio desde texto",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Generar terminaciones de texto utilizando varios proveedores de IA a través del punto final del chat de Eden AI.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Extraiga frases clave y cree resúmenes a partir de pasajes de texto largos usando varios proveedores de IA.",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Identificar términos importantes en un texto usando Eden AI. Soporta múltiples proveedores, idiomas y modelos.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Detectar el idioma utilizado en un texto usando Eden AI. Soporta múltiples proveedores y modelos.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Identificar entidades (nombres, lugares) en texto usando Eden AI. Soporta múltiples proveedores, idiomas y modelos.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Detectar texto explícito o violador de políticas usando Eden IA. Soporta múltiples proveedores, idiomas y modelos.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Identificar y corregir errores ortográficos o gramaticales usando Eden AI. Soporta múltiples proveedores, idiomas y modelos.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Traducir texto a diferentes idiomas utilizando Eden AI. Soporta múltiples proveedores, idiomas y modelos.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Extraer datos de factura estructurada de archivos usando Eden AI. Soporta múltiples proveedores, idiomas y tipos de documentos.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Extraer datos estructurados de recibos y documentos usando Eden AI. Permite la extracción general de datos con cajas de límites.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Extraer texto de imágenes (OCR) usando Eden AI. Soporta múltiples proveedores, idiomas y coordenadas de casillas limitadas.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Crea imágenes desde instrucciones de texto usando Eden AI. Soporta múltiples proveedores, modelos y resoluciones.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Convierte texto a audio hablado usando Eden AI. Soporta múltiples proveedores, idiomas y personalización de voz.",
"Provider": "Proveedor",
"Prompt": "Petición",
"System Prompt": "Prompt del sistema",
"Model": "Modelo",
"Temperature": "Temperatura",
"Max Completion Tokens": "Máximas fichas de finalización",
"Reasoning Effort": "Razonando Effort",
"Fallback Providers": "Proveedores de Fallback",
"Include Image": "Incluye imagen",
"Image URL": "URL de imagen",
"Text to Summarize": "Texto a resumir",
"Number of Summary Sentences": "Número de sentencias resumidas",
"Text Language": "Idioma del texto",
"Specific Model": "Modelo específico",
"Include Original Response": "Incluye respuesta original",
"Text to Analyze": "Texto a analizar",
"Text to Moderate": "Texto a moderar",
"Text to Check": "Texto a revisar",
"Text to Translate": "Texto a traducir",
"Source Language": "Idioma de origen",
"Target Language": "Idioma de destino",
"File URL": "URL del archivo",
"Document Type": "Tipo de documento",
"Document Language": "Idioma del documento",
"PDF Password": "Contraseña PDF",
"Convert to PDF": "Convertir a PDF",
"Attributes as List": "Atributos como lista",
"Resolution": "Resolución",
"Number of Images": "Número de imágenes",
"Text": "Texto",
"Language": "Idioma",
"Voice Gender": "Género de voz",
"Speaking Rate": "Tasa de voz",
"Voice Pitch": "Tono de voz",
"Audio Volume": "Volumen de audio",
"Audio Format": "Formato de audio",
"Sampling Rate": "Tasa de muestreo",
"The AI provider to use for text generation.": "El proveedor de IA a utilizar para la generación de texto.",
"The main prompt or question you want the AI to respond to.": "El prompt o pregunta principal a la que quieres que responda la IA .",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "Mensaje del sistema para establecer el comportamiento y contexto para el asistente de IA (por ejemplo, \"Eres un asistente de codificación útil\").",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Modelo específico a usar (por ejemplo, gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Dejar vacío para valores predeterminados específicos del proveedor.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Controla la aleatoria (0.0-2.0). Valores más altos hacen que la salida sea más creativa.",
"Maximum number of tokens to generate in the response.": "Número máximo de fichas a generar en la respuesta.",
"Level of reasoning depth for the response.": "Nivel de profundidad de razonamiento para la respuesta.",
"Alternative providers to try if the main provider fails.": "Proveedores alternativos para intentar si el proveedor principal falla.",
"Include an image in your prompt (for vision-capable models).": "Incluye una imagen en tu prompt (para modelos con visión).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL de la imagen a incluir en el prompt (sólo se utiliza si \"Imagen incluida\" está habilitada).",
"The AI provider to use for text summarization.": "El proveedor de IA a utilizar para resumir texto.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "El contenido de texto que desea resumir. Puede ser artículos, documentos o cualquier texto de forma larga.",
"How many sentences should the summary contain (1-20).": "Cuántas frases debe contener el resumen (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "El idioma del texto de entrada. Elija \"Detección automática\" si no está seguro.",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Modelo específico a usar (por ejemplo, gpt-4, gpt-4o, summarize-xlarge). Dejar vacío por defecto.",
"Alternative providers to try if the main provider fails (up to 5).": "Proveedores alternativos para probar si el proveedor principal falla (hasta 5).",
"Include the raw provider response in the output for debugging.": "Incluye la respuesta del proveedor crudo en la salida para la depuración.",
"The AI provider to use for keyword extraction.": "El proveedor de IA a utilizar para la extracción de palabras clave.",
"The text to extract keywords from.": "El texto del que extraer palabras clave.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Modelo específico a usar (por ejemplo, gpt-4o, gpt-4, grok-2-latest). Dejar vacío por defecto.",
"The AI provider to use for language detection.": "El proveedor de IA a utilizar para la detección de idiomas.",
"The text to detect language for.": "El texto para el que detectar el idioma.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Modelo específico a usar (por ejemplo, gpt-4o, grok-2-latest). Dejar vacío por defecto.",
"The AI provider to use for named entity recognition.": "El proveedor de IA a utilizar para el reconocimiento de entidad nombrada.",
"The text to extract entities from.": "El texto del que extraer entidades.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Modelo específico a usar (por ejemplo, gpt-4o, gemini-1.5-flash, grok-2-latest). Dejar vacío por defecto.",
"The AI provider to use for text moderation.": "El proveedor de IA a usar para moderación de texto.",
"The text to analyze for explicit or policy-violating content.": "El texto a analizar para contenido explícito o violador de políticas.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Modelo específico a usar (por ejemplo, text-moderation-latest, text-moderation-stable). Dejar vacío para el valor predeterminado.",
"The AI provider to use for spell checking and grammar correction.": "El proveedor de IA que se utilizará para la comprobación de hechizos y corrección de gramáticas.",
"The text to check for spelling or grammar errors.": "El texto para comprobar si hay errores ortográficos o gramáticos.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Modelo específico a usar (por ejemplo, gpt-4o, gpt-4, grok-2-latest, comando). Dejar vacío por defecto.",
"The AI provider to use for text translation.": "El proveedor de IA que se utilizará para la traducción de texto.",
"The text to translate.": "El texto a traducir.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "El idioma del texto de entrada. Elija \"Detección automática\" para detectar automáticamente el idioma.",
"The language to translate the text into.": "El idioma al que traducir el texto.",
"The AI provider to use for financial document parsing.": "El proveedor de IA a utilizar para análisis de documentos financieros.",
"Public URL to the financial document file (PDF, image, etc).": "URL pública al archivo de documentos financieros (PDF, imagen, etc).",
"The type of financial document to parse.": "El tipo de documento financiero a analizar.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "El idioma del documento. Elija \"Detección automática\" si no está seguro.",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Modelo específico a usar (por ejemplo, gpt-4o, gpt-4o-mini, gpt-4-turbo). Dejar vacío por defecto.",
"Password for protected PDF files (if applicable).": "Contraseña para archivos PDF protegidos (si corresponde).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Convierte archivos DOC/DOCX a formato PDF para una mejor compatibilidad.",
"The AI provider to use for data extraction.": "El proveedor de IA a utilizar para la extracción de datos.",
"Public URL to the document file (PDF, image, etc).": "URL pública al archivo de documento (PDF, imagen, etc).",
"Return extracted data with each attribute as a list instead of list of objects.": "Devuelve datos extraídos con cada atributo como una lista en lugar de la lista de objetos.",
"The AI provider to use for text extraction.": "El proveedor de IA a utilizar para la extracción de texto.",
"Public URL to the image or document file.": "URL pública de la imagen o archivo de documentos.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "El idioma del texto en la imagen. Elija \"Detección automática\" si no está seguro.",
"The AI provider to use for image generation.": "El proveedor de IA a utilizar para la generación de imágenes.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Descripción de la(s) imagen(es) deseada(s). Sé específico y descriptivo para mejores resultados.",
"The image resolution (e.g., 512x512, 1024x1024).": "La resolución de la imagen (por ejemplo, 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Número de imágenes a generar (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Modelo específico a usar para la generación de imágenes. Dejar vacío para el valor predeterminado del proveedor.",
"The AI provider to use for text-to-speech synthesis.": "El proveedor de IA a usar para síntesis de texto a voz.",
"The text to convert to speech.": "El texto a convertir en voz.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "El idioma y la configuración regional para la síntesis de voz (por defecto en es-US si no se especifica).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Elija el género de voz para la síntesis de voz (predeterminada para la mujer si no se especifica).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Ajuste la velocidad de intervención (-100 a 100, donde 0 es velocidad normal).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Ajuste el tono de voz (-100 a 100, donde 0 es pitch normal).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Ajuste el volumen de audio (-100 a 100, donde 0 es volumen normal).",
"The audio format for the generated speech (default: MP3).": "El formato de audio para la voz generada (por defecto: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Tasa de muestreo de audio en Hz (0-200000, 0 para el valor predeterminado del proveedor)."
}

@@ -0,0 +1,130 @@
{
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI est une plateforme qui fournit une gamme de services d'Amnesty International, y compris la génération de textes, le résumé, la traduction, et plus encore.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "Vous pouvez obtenir votre clé API depuis votre [tableau de bord IA Eden](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Générer du texte",
"Summarize Text": "Résumer le texte",
"Extract Keywords in Text": "Extraire les mots-clés dans le texte",
"Detect Language of Text": "Détecter la langue du texte",
"Extract Named Entities in Text": "Extraire les entités nommées dans le texte",
"Moderate Text": "Modérer le texte",
"Spell Check": "Vérification orthographique",
"Translate Text": "Traduire le texte",
"Invoice Parser": "Analyseur de facture",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Extraire le texte dans l'image (OCR)",
"Image Generation": "Génération de l'image",
"Generate Audio From Text": "Générer l'audio à partir du texte",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Générer des complétions de texte à l'aide de divers fournisseurs d'AI via le point de terminaison de chat Eden IA.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Extraire les phrases clés et créer des résumés à partir de passages de texte longs en utilisant différents fournisseurs d'AI .",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Identifiez des termes importants dans un texte utilisant Eden AI. Supporte plusieurs fournisseurs, langues et modèles.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Détecte la langue utilisée dans un texte en utilisant Eden AI. Supporte plusieurs fournisseurs et modèles.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Identifier les entités (noms, places) en texte en utilisant Eden AI. Supporte plusieurs fournisseurs, langues et modèles.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Détecte le texte explicite ou violant la politique en utilisant Eden AI. Supporte plusieurs fournisseurs, langages et modèles.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Identifier et corriger les erreurs d'orthographe ou de grammaire en utilisant Eden AI. Supporte plusieurs fournisseurs, langues et modèles.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Traduire du texte en différentes langues en utilisant Eden AI. Prend en charge plusieurs fournisseurs, langues et modèles.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Extrait les données de factures structurées à partir de fichiers utilisant Eden AI. Supporte plusieurs fournisseurs, langues et types de documents.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Extrait les données structurées des reçus et des documents à l'aide d'Eden AI. Supporte l'extraction de données générales avec des boîtes de rebondissement.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Extrait du texte des images (OCR) en utilisant Eden AI. Prend en charge de multiples fournisseurs, langages et coordonnées des boîtes englobantes.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Créez des images à partir de messages texte en utilisant Eden AI. Prend en charge de multiples fournisseurs, modèles et résolutions.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Convertissez du texte en audio parlé en utilisant Eden AI. Supporte plusieurs fournisseurs, langues et personnalisation de la voix.",
"Provider": "Fournisseur",
"Prompt": "Prompt",
"System Prompt": "Message du système",
"Model": "Modélisation",
"Temperature": "Température",
"Max Completion Tokens": "Maximum de jetons d'achèvement",
"Reasoning Effort": "Effort de Raison",
"Fallback Providers": "Fournisseurs de secours",
"Include Image": "Inclure l'image",
"Image URL": "URL de l'image",
"Text to Summarize": "Texte à résumer",
"Number of Summary Sentences": "Nombre de phrases sommaires",
"Text Language": "Langue du texte",
"Specific Model": "Modèle spécifique",
"Include Original Response": "Inclure la réponse originale",
"Text to Analyze": "Texte à analyser",
"Text to Moderate": "Texte à modérer",
"Text to Check": "Texte à vérifier",
"Text to Translate": "Texte à traduire",
"Source Language": "Langue source",
"Target Language": "Langue cible",
"File URL": "URL du fichier",
"Document Type": "Type de document",
"Document Language": "Langue du document",
"PDF Password": "Mot de passe PDF",
"Convert to PDF": "Convertir en PDF",
"Attributes as List": "Attributs en tant que liste",
"Resolution": "Résolution",
"Number of Images": "Nombre d'images",
"Text": "Texte du texte",
"Language": "Langue",
"Voice Gender": "Sexe vocal",
"Speaking Rate": "Taux de parole",
"Voice Pitch": "Hauteur vocale",
"Audio Volume": "Volume audio",
"Audio Format": "Format audio",
"Sampling Rate": "Taux d'échantillonnage",
"The AI provider to use for text generation.": "Le fournisseur d'Amnesty International à utiliser pour la génération de texte.",
"The main prompt or question you want the AI to respond to.": "L'invite principale ou la question à laquelle vous voulez que l'IA réponde.",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "Message système pour définir le comportement et le contexte de l'assistant IA (par exemple, \"Vous êtes un assistant de codage utile\").",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Modèle spécifique à utiliser (par exemple, gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Laisser vide pour les valeurs par défaut spécifiques au fournisseur.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Contrôle le caractère aléatoire (0.0-2.0). Des valeurs plus élevées rendent la sortie plus créative.",
"Maximum number of tokens to generate in the response.": "Nombre maximum de jetons à générer dans la réponse.",
"Level of reasoning depth for the response.": "Niveau de la profondeur de raisonnement pour la réponse.",
"Alternative providers to try if the main provider fails.": "Fournisseurs alternatifs pour essayer si le fournisseur principal échoue.",
"Include an image in your prompt (for vision-capable models).": "Inclure une image dans votre invite (pour les modèles capables de la visionne).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL de l'image à inclure dans l'invite (uniquement utilisée si \"Include Image\" est activé).",
"The AI provider to use for text summarization.": "Le prestataire d'Amnesty International à utiliser pour résumer le texte.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "Le contenu du texte que vous voulez résumer. Peut être des articles, des documents ou tout texte de forme longue.",
"How many sentences should the summary contain (1-20).": "Combien de phrases le résumé devrait-il contenir (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "La langue du texte de saisie. Choisissez \"Détection automatique\" si vous n'êtes pas sûr.",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Modèle spécifique à utiliser (par exemple, gpt-4, gpt-4o, summarize-xlarge). Laisser vide pour défaut.",
"Alternative providers to try if the main provider fails (up to 5).": "Fournisseurs alternatifs pour essayer si le fournisseur principal échoue (jusqu'à 5).",
"Include the raw provider response in the output for debugging.": "Inclure la réponse du fournisseur brut dans la sortie pour le débogage.",
"The AI provider to use for keyword extraction.": "Le fournisseur d'AI à utiliser pour l'extraction de mots-clés.",
"The text to extract keywords from.": "Le texte à partir duquel extraire les mots-clés.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Modèle spécifique à utiliser (par exemple, gpt-4o, gpt-4, grok-2-latest). Laisser vide pour défaut.",
"The AI provider to use for language detection.": "Le fournisseur d'AI à utiliser pour la détection de langage.",
"The text to detect language for.": "Le texte pour lequel détecter la langue.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Modèle spécifique à utiliser (par exemple, gpt-4o, grok-2-latest). Laisser vide par défaut.",
"The AI provider to use for named entity recognition.": "Le fournisseur d'Amnesty International à utiliser pour la reconnaissance d'entités nommées.",
"The text to extract entities from.": "Le texte depuis lequel extraire les entités.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Modèle spécifique à utiliser (par exemple, gpt-4o, gemini-1.5-flash, grok-2-latest). Laisser vide par défaut.",
"The AI provider to use for text moderation.": "Le fournisseur d'Amnesty International à utiliser pour la modération de texte.",
"The text to analyze for explicit or policy-violating content.": "Le texte à analyser pour des contenus explicites ou violant la politique.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Modèle spécifique à utiliser (par exemple, text-moderation-latest, text-moderation-stable). Laisser vide pour défaut.",
"The AI provider to use for spell checking and grammar correction.": "Le prestataire d'AI à utiliser pour la correction orthographique et la correction grammaticale.",
"The text to check for spelling or grammar errors.": "Le texte à vérifier s'il y a des erreurs d'orthographe ou de grammaire.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Modèle spécifique à utiliser (par exemple, gpt-4o, gpt-4, grok-2-latest, commande). Laisser vide pour défaut.",
"The AI provider to use for text translation.": "Le fournisseur d'Amnesty International à utiliser pour la traduction de texte.",
"The text to translate.": "Le texte à traduire.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "La langue du texte de saisie. Choisissez \"Détection automatique\" pour détecter automatiquement la langue.",
"The language to translate the text into.": "La langue dans laquelle traduire le texte.",
"The AI provider to use for financial document parsing.": "Le prestataire dAmnesty International doit utiliser pour analyser les documents financiers.",
"Public URL to the financial document file (PDF, image, etc).": "URL publique du fichier de documents financiers (PDF, image, etc).",
"The type of financial document to parse.": "Le type de document financier à analyser.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "La langue du document. Choisissez \"Détection automatique\" en cas d'incertitude.",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Modèle spécifique à utiliser (par exemple, gpt-4o, gpt-4o-mini, gpt-4-turbo). Laisser vide par défaut.",
"Password for protected PDF files (if applicable).": "Mot de passe pour les fichiers PDF protégés (le cas échéant).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Convertissez les fichiers DOC/DOCX au format PDF pour une meilleure compatibilité.",
"The AI provider to use for data extraction.": "Le fournisseur d'Amnesty International à utiliser pour l'extraction de données.",
"Public URL to the document file (PDF, image, etc).": "URL publique du fichier de document (PDF, image, etc).",
"Return extracted data with each attribute as a list instead of list of objects.": "Retourne les données extraites avec chaque attribut comme une liste au lieu de la liste des objets.",
"The AI provider to use for text extraction.": "Le fournisseur d'Amnesty International à utiliser pour l'extraction de texte.",
"Public URL to the image or document file.": "URL publique du fichier image ou document.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "La langue du texte dans l'image. Choisissez \"Détection automatique\" si vous n'êtes pas sûr.",
"The AI provider to use for image generation.": "Le fournisseur d'AI à utiliser pour la génération d'image.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Description des images désirées. Soyez spécifique et descriptif pour de meilleurs résultats.",
"The image resolution (e.g., 512x512, 1024x1024).": "La résolution de l'image (par exemple, 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Nombre d'images à générer (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Modèle spécifique à utiliser pour la génération d'image. Laisser vide pour le fournisseur par défaut.",
"The AI provider to use for text-to-speech synthesis.": "Le fournisseur d'AI à utiliser pour la synthèse vocale.",
"The text to convert to speech.": "Le texte à convertir en voix.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "La langue et la locale pour la synthèse vocale (valeur par défaut en-US si elle n'est pas spécifiée).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Choisissez le sexe de la voix pour la synthèse vocale (par défaut la femme si elle n'est pas spécifiée).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Ajuster la fréquence de parole (-100 à 100, où 0 est la vitesse normale).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Ajuster la hauteur de la voix (-100 à 100, où 0 est un pas normal).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Régler le volume audio (-100 à 100, où 0 est le volume normal).",
"The audio format for the generated speech (default: MP3).": "Le format audio pour la parole générée (par défaut: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Taux d'échantillonnage audio en Hz (0-200000, 0 par défaut du fournisseur)."
}

@@ -0,0 +1,130 @@
{
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AIは、テキスト生成、要約、翻訳など、幅広いAIサービスを提供するプラットフォームです。",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).",
"Generate Text": "テキストの生成",
"Summarize Text": "テキストの要約",
"Extract Keywords in Text": "テキスト内のキーワードを抽出",
"Detect Language of Text": "テキストの言語を検出する",
"Extract Named Entities in Text": "テキスト内の名前付きエンティティを抽出",
"Moderate Text": "テキストをモデレートする",
"Spell Check": "スペルチェック",
"Translate Text": "テキストを翻訳",
"Invoice Parser": "請求書パーサー",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "画像内のテキストを抽出 (OCR)",
"Image Generation": "画像の生成",
"Generate Audio From Text": "テキストからオーディオを生成",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Eden AIチャットエンドポイントを介して、さまざまなAIプロバイダーを使用してテキスト補完を生成します。",
"Extract key sentences and create summaries from long text passages using various AI providers.": "キーセンテンスを抽出し、さまざまなAIプロバイダーを使用して長い文章から要約を作成します。",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "エデンAIを使用してテキスト内の重要な用語を特定します。複数のプロバイダー、言語、およびモデルに対応しています。",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "エデンAIを使用してテキストで使用されている言語を検出します。複数のプロバイダーとモデルに対応しています。",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "エデンAIを使用してテキスト内のエンティティ名前、場所を特定します。複数のプロバイダー、言語、モデルに対応しています。",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "エデンAIを使用して明示的またはポリシー違反のテキストを検出します。複数のプロバイダー、言語、およびモデルに対応しています。",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Eden AIを使用してスペルや文法エラーを特定、修正します。複数のプロバイダー、言語、およびモデルに対応しています。",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "エデンAIを使用してテキストを異なる言語に翻訳します。複数のプロバイダー、言語、モデルに対応しています。",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Eden AIを使用してファイルから構造化された請求書データを抽出します。複数のプロバイダー、言語、ドキュメントタイプに対応しています。",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "エデンAIを使用してレシートや文書から構造化されたデータを抽出する バウンディングボックスを使用した一般的なデータ抽出に対応しています。",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "エデンAIを使用して画像(OCR)からテキストを抽出します。複数のプロバイダー、言語、境界ボックスの座標をサポートしています。",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "エデンAIを使用してテキストプロンプトから画像を作成します。複数のプロバイダー、モデル、解像度に対応しています。",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "エデンAIを使用してテキストを音声に変換します。複数のプロバイダー、言語、音声カスタマイズに対応しています。",
"Provider": "プロバイダー",
"Prompt": "Prompt",
"System Prompt": "システムのプロンプト表示",
"Model": "モデル",
"Temperature": "温度",
"Max Completion Tokens": "最大完了トークン",
"Reasoning Effort": "Reasoning Workfort",
"Fallback Providers": "フォールバックプロバイダー",
"Include Image": "画像を含める",
"Image URL": "画像URL",
"Text to Summarize": "要約するテキスト",
"Number of Summary Sentences": "概要文の数",
"Text Language": "テキスト言語",
"Specific Model": "特定のモデル",
"Include Original Response": "元の回答を含める",
"Text to Analyze": "分析するテキスト",
"Text to Moderate": "テキストをモデレートする",
"Text to Check": "チェックするテキスト",
"Text to Translate": "翻訳するテキスト",
"Source Language": "ソース言語",
"Target Language": "翻訳先言語",
"File URL": "ファイル URL",
"Document Type": "ドキュメントタイプ",
"Document Language": "ドキュメントの言語",
"PDF Password": "PDFパスワード",
"Convert to PDF": "PDFに変換",
"Attributes as List": "リストとしての属性",
"Resolution": "解像度",
"Number of Images": "画像の数",
"Text": "テキスト",
"Language": "言語",
"Voice Gender": "ボイスジェンダー",
"Speaking Rate": "スピーキングレート",
"Voice Pitch": "Voice Pitch",
"Audio Volume": "音声音量",
"Audio Format": "オーディオ形式",
"Sampling Rate": "サンプリング率",
"The AI provider to use for text generation.": "テキスト生成に使用する AI プロバイダー。",
"The main prompt or question you want the AI to respond to.": "AIに応答するためのメインプロンプトまたは質問。",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "AIアシスタントの動作とコンテキストを設定するためのシステムメッセージ (例: 「あなたは役に立つコーディングアシスタントです」)。",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "使用する特定のモデル (例: gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). プロバイダ固有のデフォルトの場合は空のままにします。",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "ランダム性(0.0-2.0)をコントロールします。値を大きくすると、出力がクリエイティブになります。",
"Maximum number of tokens to generate in the response.": "応答で生成するトークンの最大数。",
"Level of reasoning depth for the response.": "応答のための推論の深さのレベル。",
"Alternative providers to try if the main provider fails.": "メインプロバイダが失敗した場合に試してみる代替プロバイダ。",
"Include an image in your prompt (for vision-capable models).": "プロンプトに画像を含める (視覚対応モデル用)。",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "プロンプトに含める画像のURL(「画像を含める」が有効な場合のみ使用されます)。",
"The AI provider to use for text summarization.": "テキスト要約に使用する AI プロバイダー。",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "要約したいテキストコンテンツ。記事、文書、または任意の長い形式のテキストであることができます。",
"How many sentences should the summary contain (1-20).": "要約に含まれる文数(1-20)",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "入力テキストの言語。不明な場合は「自動検出」を選択してください。",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "使用する特定のモデル (例: gpt-4, gpt-4o, summarize-xlarge). デフォルトは空のままにします。",
"Alternative providers to try if the main provider fails (up to 5).": "メインプロバイダが失敗した場合に試してみる代替プロバイダ(最大5)です。",
"Include the raw provider response in the output for debugging.": "デバッグ用に生プロバイダの応答を出力に含めます。",
"The AI provider to use for keyword extraction.": "キーワード抽出に使用する AI プロバイダー。",
"The text to extract keywords from.": "キーワードを抽出するテキスト。",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "使用する特定のモデル (例: gpt-4o, gpt-4, grok-2-latest). デフォルトは空のままにします。",
"The AI provider to use for language detection.": "言語検出に使用する AI プロバイダー。",
"The text to detect language for.": "言語を検出するテキスト。",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "使用する特定のモデル (例: gpt-4o, grok-2-latest). デフォルトは空のままにします。",
"The AI provider to use for named entity recognition.": "指定されたエンティティ認識に使用する AI プロバイダー。",
"The text to extract entities from.": "エンティティを抽出するテキスト。",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "使用する特定のモデル (例: gpt-4o, gemini-1.5フラッシュ, grok-2-latest). デフォルトは空のままにします。",
"The AI provider to use for text moderation.": "テキストモデレーションに使用する AI プロバイダー。",
"The text to analyze for explicit or policy-violating content.": "明示的またはポリシー違反のコンテンツを分析するテキスト。",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "使用する特定のモデル (例: text-moderation-latest, text-moderation-stable). デフォルトは空のままにします。",
"The AI provider to use for spell checking and grammar correction.": "スペルチェックと文法補正に使用する AI プロバイダー。",
"The text to check for spelling or grammar errors.": "スペルミスまたは文法エラーをチェックするテキスト。",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "使用する特定のモデル (例: gpt-4o, gpt-4, grok-2-latest, command). デフォルトは空のままにします。",
"The AI provider to use for text translation.": "テキスト翻訳に使用する AI プロバイダー。",
"The text to translate.": "翻訳するテキスト",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "入力テキストの言語。言語を自動的に検出するには「自動検出」を選択してください。",
"The language to translate the text into.": "テキストを翻訳する言語",
"The AI provider to use for financial document parsing.": "金融文書の解析に使用する AI プロバイダー。",
"Public URL to the financial document file (PDF, image, etc).": "財務文書ファイルへの公開 URL (PDF、画像など)。",
"The type of financial document to parse.": "解析する金融文書の種類。",
"The language of the document. Choose \"Auto Detection\" if unsure.": "ドキュメントの言語。わからない場合は「自動検出」を選択してください。",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "使用する特定のモデル (例: gpt-4o, gpt-4o-mini, gpt-4-turbo). デフォルトは空のままにします。",
"Password for protected PDF files (if applicable).": "保護された PDF ファイルのパスワード (該当する場合)。",
"Convert DOC/DOCX files to PDF format for better compatibility.": "互換性を向上させるためにDOC/DOCXファイルをPDF形式に変換します。",
"The AI provider to use for data extraction.": "データ抽出に使用する AI プロバイダー。",
"Public URL to the document file (PDF, image, etc).": "ドキュメントファイルへの公開 URL (PDF、画像など)。",
"Return extracted data with each attribute as a list instead of list of objects.": "オブジェクトのリストではなく、各属性で抽出されたデータをリストとして返します。",
"The AI provider to use for text extraction.": "テキスト抽出に使用する AI プロバイダー。",
"Public URL to the image or document file.": "画像またはドキュメントファイルへの公開 URL",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "画像内のテキストの言語。わからない場合は「自動検出」を選択してください。",
"The AI provider to use for image generation.": "画像生成に使用するAIプロバイダ。",
"Description of the desired image(s). Be specific and descriptive for best results.": "目的の画像の説明。最良の結果を得るためには具体的で説明的である。",
"The image resolution (e.g., 512x512, 1024x1024).": "画像解像度(例:512x512、1024x1024)。",
"Number of images to generate (1-10).": "生成する画像の数 (1-10)。",
"Specific model to use for image generation. Leave empty for provider default.": "画像生成に使用する特定のモデル。プロバイダのデフォルトは空のままにします。",
"The AI provider to use for text-to-speech synthesis.": "テキスト読み上げ合成に使用する AI プロバイダー。",
"The text to convert to speech.": "音声に変換するテキスト.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "音声合成の言語とロケール(デフォルトでは、指定されていない場合は en-US になります)。",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "音声合成のための音声ジェンダーを選択します(指定されていない場合はデフォルトは format@@0 になります)。",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "スピーキングレート(-100~100、0が通常の速度)を調整します。",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "音声ピッチ(-100から100、0が通常のピッチ)を調整します。",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "オーディオの音量を調整する(-100から100、0は通常の音量)。",
"The audio format for the generated speech (default: MP3).": "生成された音声のオーディオフォーマット (デフォルト: MP3)。",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "オーディオサンプリングレート0-200000、プロバイダのデフォルトは0。"
}

View File

@@ -0,0 +1,130 @@
{
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI is een platform dat voorziet in een scala aan AI-diensten, waaronder het genereren van teksten, samenvattingen, vertalingen en meer.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "U kunt uw API-sleutel verkrijgen van uw [Eden AI dashboard](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Genereer tekst",
"Summarize Text": "Samenvatting tekst",
"Extract Keywords in Text": "Trefwoorden in tekst uitpakken",
"Detect Language of Text": "Detecteer taal van tekst",
"Extract Named Entities in Text": "Naamloze entiteiten in tekst uitpakken",
"Moderate Text": "Moderate tekst",
"Spell Check": "Spellingscontrole",
"Translate Text": "Tekst vertalen",
"Invoice Parser": "Factuur parser",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Afvoer tekst in afbeelding (OCR)",
"Image Generation": "Afbeelding genereren",
"Generate Audio From Text": "Audio van tekst genereren",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Genereer tekstaanvullingen met behulp van verschillende AI-aanbieders via Eden AI-chateindpunt.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Haal sleutelzinnen uit en maak samenvattingen uit lange tekstpassages met behulp van verschillende AI-providers.",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Belangrijke termen identificeren in een tekst met Eden AI. Ondersteunt meerdere providers, talen en modellen.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Detecteer de taal die gebruikt wordt in een tekst met behulp van Eden AI. Ondersteunt meerdere providers en modellen.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Herken entiteiten (namen, plaatsen) in tekst met behulp van Eden AI. Ondersteunt meerdere providers, talen en modellen.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Detecteren van expliciete of beleidsschending met behulp van Eden AI. Ondersteunt meerdere providers, talen en modellen.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Fouten van spelling of grammatica identificeren en corrigeren met Eden AI. Ondersteunt meerdere providers, talen en modellen.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Vertaal tekst met behulp van Eden AI. Ondersteunt meerdere providers, talen en modellen.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Haal gestructureerde factuurgegevens uit bestanden met Eden AI. Ondersteunt meerdere providers, talen en documenttypes.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Haal gestructureerde gegevens uit bonnen en documenten met behulp van Eden AI uit. Ondersteunt algemene data extractie met gebonden dozen.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Pak tekst uit vanuit afbeeldingen (OCR) met behulp van Eden AI. Ondersteunt meerdere providers, talen en selectievakcoördinaten.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Maak afbeeldingen van tekst prompts met behulp van Eden AI. Ondersteunt meerdere providers, modellen en resoluties.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Zet tekst om naar gesproken audio met behulp van Eden AI. Ondersteunt meerdere providers, talen en spraak aanpassingen.",
"Provider": "Leverancier",
"Prompt": "Prompt",
"System Prompt": "Systeem Vragen",
"Model": "Model",
"Temperature": "Temperatuur",
"Max Completion Tokens": "Max voltooiingstokens",
"Reasoning Effort": "Redenen inspanning",
"Fallback Providers": "Terugval aanbieders",
"Include Image": "Afbeelding opnemen",
"Image URL": "Afbeelding URL",
"Text to Summarize": "Tekst om samen te vatten",
"Number of Summary Sentences": "Aantal samenvattingen",
"Text Language": "Tekst taal",
"Specific Model": "Specifiek model",
"Include Original Response": "Voeg Originele Response toe",
"Text to Analyze": "Tekst om te analyseren",
"Text to Moderate": "Tekst om te modereren",
"Text to Check": "Tekst om te controleren",
"Text to Translate": "Tekst te vertalen",
"Source Language": "Bron taal",
"Target Language": "Doel taal",
"File URL": "Bestand URL",
"Document Type": "Document type",
"Document Language": "Document taal",
"PDF Password": "PDF wachtwoord",
"Convert to PDF": "Converteren naar PDF",
"Attributes as List": "Attributen als Lijst",
"Resolution": "Resolutie",
"Number of Images": "Aantal afbeeldingen",
"Text": "Tekstveld",
"Language": "Taal",
"Voice Gender": "Gesproken geslacht",
"Speaking Rate": "Spreken snelheid",
"Voice Pitch": "Spraakhoogte",
"Audio Volume": "Audio volume",
"Audio Format": "Audio-formaat",
"Sampling Rate": "Sampling snelheid",
"The AI provider to use for text generation.": "De AI-aanbieder om te gebruiken voor tekstgeneratie.",
"The main prompt or question you want the AI to respond to.": "De belangrijkste vraag of vraag waarop de AI moet reageren.",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "Systeembericht om het gedrag en de context in te stellen voor de AI-assistent (bijv. \"U bent een handige programmeerassistent\").",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Specifiek model dat moet worden gebruikt (bijv. gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Laat leeg voor provider-specifieke standaarden.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Bepaalt willekeurigheid (0,0-2.0). Hogere waarden maken output creatiever.",
"Maximum number of tokens to generate in the response.": "Maximaal aantal tokens om te genereren in het antwoord.",
"Level of reasoning depth for the response.": "Niveau van redenering en diepgang van het antwoord.",
"Alternative providers to try if the main provider fails.": "Alternatieve aanbieders proberen als de hoofdprovider faalt.",
"Include an image in your prompt (for vision-capable models).": "Een afbeelding opnemen in uw prompt (voor zichtbare modellen).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL van de afbeelding om op te nemen in de prompt (wordt alleen gebruikt als \"Inclusief afbeelding\" is ingeschakeld).",
"The AI provider to use for text summarization.": "De AI-provider om te gebruiken voor tekstsamenvatting.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "De tekst inhoud die je wilt samenvatten. Kan artikelen, documenten of een lange tekst zijn.",
"How many sentences should the summary contain (1-20).": "Hoeveel zinnen moet de samenvatting bevatten (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "De taal van de invoertekst. Kies \"Auto Detection\" indien niet zeker.",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Specifiek te gebruiken model (bijv. gpt-4, gpt-4o, summarize-xlarge). Laat leeg voor de standaard.",
"Alternative providers to try if the main provider fails (up to 5).": "Alternatieve aanbieders om te proberen als de hoofdprovider faalt (tot 5).",
"Include the raw provider response in the output for debugging.": "Inclusief het ruwe provider-antwoord in de output voor debugging.",
"The AI provider to use for keyword extraction.": "De AI-aanbieder om te gebruiken voor sleutelwoordextractie.",
"The text to extract keywords from.": "De tekst om trefwoorden uit te pakken.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Specifieke te gebruiken model (bijv. gpt-4o, gpt-4, grok-2-latest). Laat leeg voor de standaardwaarde.",
"The AI provider to use for language detection.": "De AI-provider om te gebruiken voor taaldetectie.",
"The text to detect language for.": "De tekst om de taal voor op te detecteren.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Specifieke te gebruiken model (bijv. gpt-4o, grok-2-latest). Laat leeg voor de standaardwaarde.",
"The AI provider to use for named entity recognition.": "De AI provider om te gebruiken voor benoemde entiteit erkenning.",
"The text to extract entities from.": "De tekst om entiteiten uit te pakken.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Specifiek te gebruiken model (bijv. gpt-4o, gemini-1.5-flash, grok-2-latest). Laat leeg voor standaard.",
"The AI provider to use for text moderation.": "De AI-aanbieder om te gebruiken voor tekstmoderatie.",
"The text to analyze for explicit or policy-violating content.": "De tekst om expliciete of beleidsschendende inhoud te analyseren.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Specifiek model dat moet worden gebruikt (bijv. text-moderation-latest, text-moderation-stabiel). Laat leeg voor de standaardwaarde.",
"The AI provider to use for spell checking and grammar correction.": "De AI provider te gebruiken voor spellingscontrole en grammatica-correctie.",
"The text to check for spelling or grammar errors.": "De tekst om fouten in spelling of grammatica te controleren.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Specifiek model dat moet worden gebruikt (bijv. gpt-4o, gpt-4, grok-2-latest, commando). Laat leeg voor standaard.",
"The AI provider to use for text translation.": "De AI-provider om te gebruiken voor tekstvertaling.",
"The text to translate.": "De tekst om te vertalen.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "De taal van de invoertekst. Kies \"Auto Detection\" om de taal automatisch te detecteren.",
"The language to translate the text into.": "De taal waar de tekst naartoe moet worden vertaald.",
"The AI provider to use for financial document parsing.": "De AI-aanbieder om te gebruiken voor het parseren van financiële documenten.",
"Public URL to the financial document file (PDF, image, etc).": "Openbare URL naar het financiële documentbestand (PDF, afbeelding, enz.)",
"The type of financial document to parse.": "Het soort financieel document om te parsen.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "De taal van het document. Kies \"Automatische detectie\" als het niet zeker is.",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Specifiek te gebruiken model (bijv. gpt-4o, gpt-4o-mini, gpt-4-turbo). Laat leeg voor standaard.",
"Password for protected PDF files (if applicable).": "Wachtwoord voor beschermde PDF-bestanden (indien van toepassing).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Converteer DOC/DOCX-bestanden naar PDF-formaat voor betere compatibiliteit.",
"The AI provider to use for data extraction.": "De AI-provider om te gebruiken voor data-extractie.",
"Public URL to the document file (PDF, image, etc).": "Openbare URL naar het documentbestand (PDF, afbeelding, enz.)",
"Return extracted data with each attribute as a list instead of list of objects.": "Resultaat data met elk attribuut als een lijst in plaats van objecten",
"The AI provider to use for text extraction.": "De AI-aanbieder om tekstextractie te gebruiken.",
"Public URL to the image or document file.": "Openbare URL naar het afbeeldings- of documentbestand.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "De taal van de tekst in de afbeelding. Kies \"Automatische detectie\" indien niet zeker.",
"The AI provider to use for image generation.": "De AI provider om te gebruiken voor het genereren van afbeeldingen.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Beschrijving van de gewenste afbeelding(en). Wees specifiek en beschrijvend voor het beste resultaat.",
"The image resolution (e.g., 512x512, 1024x1024).": "De afbeeldingsresolutie (bijv. 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Aantal te genereren afbeeldingen (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Specifiek model om te gebruiken voor het genereren van afbeeldingen. Laat leeg voor de standaard provider",
"The AI provider to use for text-to-speech synthesis.": "De AI-aanbieder voor tekst-naar-spraaksynthese gebruikt.",
"The text to convert to speech.": "De tekst om te converteren naar spraak.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "De taal en lokalisatie voor de spraaksynthese (standaard nl-NL als deze niet is opgegeven).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Kies het geslacht van de stem voor spraaksynthese (standaard vrouwelijk als deze niet is opgegeven).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Pas de spreeksnelheid aan (-100 tot 100, waarbij 0 de normale snelheid is).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Pas de stemhoogte aan (-100 tot 100, waar 0 de normale toonhoogte is).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Audiovolume aanpassen (-100 tot 100, waarbij 0 het normale volume is).",
"The audio format for the generated speech (default: MP3).": "De audioopmaak voor de gegenereerde spraak (standaard: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Audio sampling rate in Hz (0-200000, 0 voor provider standaard)."
}

View File

@@ -0,0 +1,130 @@
{
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI é uma plataforma que fornece uma variedade de serviços de IA, incluindo geração de texto, resumo, tradução e muito mais.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "Você pode obter sua chave de API do seu [Painel de Controle AI Eden](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Gerar texto",
"Summarize Text": "Resumir Texto",
"Extract Keywords in Text": "Extrair palavras-chave em texto",
"Detect Language of Text": "Detectar Idioma do Texto",
"Extract Named Entities in Text": "Extrair Entidades Nomeadas em Texto",
"Moderate Text": "Moderar Texto",
"Spell Check": "Verificador Ortográfico",
"Translate Text": "Traduzir texto",
"Invoice Parser": "Analisador de Fatura",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Extrair Texto na Imagem (OCR)",
"Image Generation": "Geração de imagem",
"Generate Audio From Text": "Gerar áudio a partir do texto",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Gere complementos de textos usando vários provedores de IA através do ponto de bate-papo Eden AI.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Extraia frases chave e crie resumos de passagens de texto longo usando vários provedores de IA.",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Identifica termos importantes em um texto usando Eden AI. Suporta vários provedores, idiomas e modelos.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Detecta a linguagem usada em um texto usando o Eden AI. Suporta vários provedores e modelos.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Identifica entidades (nomes, lugares) em texto usando Eden AI. Suporta vários provedores, idiomas e modelos.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Detecta texto explícito ou violador de políticas usando Eden AI. Suporta vários provedores, idiomas e modelos.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Identificar e corrigir erros de ortografia ou gramática usando Eden AI. Suporta vários provedores, idiomas e modelos.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Traduza o texto para diferentes idiomas usando Eden AI. Suporta vários provedores, idiomas e modelos.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Extrair dados estruturados de faturas de arquivos que usam Eden AI. Suporta vários provedores, idiomas e tipos de documentos.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Extraia dados estruturados de recibos e documentos usando o Eden AI. Suporta extração geral de dados com caixas limitadas.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Extrair texto das imagens (OCR) usando Eden AI. Suporta vários provedores, idiomas e coordenadas delimitadoras.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Cria imagens de prompts de texto usando Eden AI. Suporta vários provedores, modelos e resoluções.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Converta texto para áudio falado usando Eden AI. Suporta vários provedores, idiomas e personalização de voz.",
"Provider": "Fornecedor",
"Prompt": "Aviso",
"System Prompt": "Solicitação de Sistema",
"Model": "Modelo",
"Temperature": "Temperatura",
"Max Completion Tokens": "Max Tokens de Conclusão",
"Reasoning Effort": "Esforço Justo",
"Fallback Providers": "Provedores de Fallback",
"Include Image": "Incluir Imagem",
"Image URL": "URL da imagem",
"Text to Summarize": "Texto para Resumir",
"Number of Summary Sentences": "Número de frases resumidas",
"Text Language": "Idioma do texto",
"Specific Model": "Modelo Específico",
"Include Original Response": "Incluir Resposta Original",
"Text to Analyze": "Texto para analisar",
"Text to Moderate": "Texto para moderar",
"Text to Check": "Texto a verificar",
"Text to Translate": "Texto para traduzir",
"Source Language": "Língua Original",
"Target Language": "Idioma de destino",
"File URL": "URL do Arquivo",
"Document Type": "Tipo de documento",
"Document Language": "Idioma do Documento",
"PDF Password": "Senha do PDF",
"Convert to PDF": "Converter para PDF",
"Attributes as List": "Atributos como Lista",
"Resolution": "Resolução:",
"Number of Images": "Número de imagens",
"Text": "texto",
"Language": "IDIOMA",
"Voice Gender": "Gênero de Voz",
"Speaking Rate": "Taxa de Falar",
"Voice Pitch": "Tom de voz",
"Audio Volume": "Volume do Áudio",
"Audio Format": "Formato de Áudio",
"Sampling Rate": "Taxa de amostragem",
"The AI provider to use for text generation.": "O provedor de IA a usar para a geração de texto.",
"The main prompt or question you want the AI to respond to.": "O principal aviso ou pergunta a que deseja que a IA responda.",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "Mensagem de sistema para definir o comportamento e o contexto para o assistente de AI, por exemplo: \"Você é um útil assistente de codificação\").",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Modelo específico a ser usado (por exemplo, gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Deixe em branco para padrões específicos do provedor.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Controla a aleatoriedade (0.0-2.0). Valores maiores tornam a saída mais criativa.",
"Maximum number of tokens to generate in the response.": "Número máximo de tokens para gerar na resposta.",
"Level of reasoning depth for the response.": "Nível de profundidade de raciocínio para a resposta.",
"Alternative providers to try if the main provider fails.": "Provedores alternativos para tentar se o provedor principal falhar.",
"Include an image in your prompt (for vision-capable models).": "Inclua uma imagem no seu prompt (para modelos capazes de visão).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL da imagem a ser incluída na tela (usado somente se \"Incluir Imagem\" estiver ativado).",
"The AI provider to use for text summarization.": "O fornecedor de IA a usar para o resumo de texto.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "O conteúdo do texto que você deseja resumir. Pode ser artigos, documentos ou qualquer texto de forma longa.",
"How many sentences should the summary contain (1-20).": "Quantas frases o resumo deve conter (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "O idioma do texto de entrada. Escolha \"Detecção Automática\" se estiver incerto.",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Modelo específico a ser usado (por exemplo, gpt-4, gpt-4o, summarize-xlarge). Deixe em branco para o padrão.",
"Alternative providers to try if the main provider fails (up to 5).": "Provedores alternativos para tentar se o provedor principal falhar (até 5).",
"Include the raw provider response in the output for debugging.": "Incluir a resposta do provedor bruto na saída para depuração.",
"The AI provider to use for keyword extraction.": "O provedor de IA a usar para extração de palavra-chave.",
"The text to extract keywords from.": "O texto para extrair palavras-chave.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Modelo específico para usar (por exemplo, gpt-4o, gpt-4, grok-2-latest). Deixe em branco para o padrão.",
"The AI provider to use for language detection.": "O provedor de IA a usar para detecção de idiomas.",
"The text to detect language for.": "O texto para detectar o idioma.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Modelo específico para usar (por exemplo, gpt-4o, grok-2-latest). Deixe em branco para o padrão.",
"The AI provider to use for named entity recognition.": "O fornecedor de IA a usar para reconhecimento de entidade nomeada.",
"The text to extract entities from.": "O texto para extrair as entidades.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Modelo específico para usar (por exemplo, gpt-4o, gemini-1.5-flash, grok-2-latest). Deixe em branco para o padrão.",
"The AI provider to use for text moderation.": "O provedor de I.A. para usar para moderação de texto.",
"The text to analyze for explicit or policy-violating content.": "O texto para análise de conteúdo explícito ou violador de políticas.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Modelo específico para usar (por exemplo, text-moderation-recent text-moderation-stable). Deixe em branco para padrão.",
"The AI provider to use for spell checking and grammar correction.": "O fornecedor de IA a usar para verificação ortográfica e correção gramática.",
"The text to check for spelling or grammar errors.": "O texto para verificar se há erros de ortografia ou gramática.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Modelo específico a ser usado (por exemplo, gpt-4o, gpt-4, grok-2-latest, comando). Deixe em branco para o padrão.",
"The AI provider to use for text translation.": "O fornecedor de IA a usar para tradução de texto.",
"The text to translate.": "O texto a traduzir.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "O idioma do texto de entrada. Escolha \"Detecção Automática\" para detectar automaticamente o idioma.",
"The language to translate the text into.": "O idioma para traduzir o texto.",
"The AI provider to use for financial document parsing.": "O provedor de Inteligência Artificial para a análise de documentos financeiros.",
"Public URL to the financial document file (PDF, image, etc).": "URL pública para o arquivo do documento financeiro (PDF, imagem, etc).",
"The type of financial document to parse.": "O tipo de documento financeiro para analisar.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "O idioma do documento. Escolha \"Detecção Automática\" se não tiver certeza.",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Modelo específico para usar (por exemplo, gpt-4o, gpt-4o-mini, gpt-4-turbo). Deixe em branco para usar o padrão.",
"Password for protected PDF files (if applicable).": "Senha para arquivos PDF protegidos (se aplicável).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Converta arquivos DOC/DOCX para formato PDF para melhor compatibilidade.",
"The AI provider to use for data extraction.": "O fornecedor de IA a usar para extração de dados.",
"Public URL to the document file (PDF, image, etc).": "URL pública para o arquivo de documento (PDF, imagem, etc).",
"Return extracted data with each attribute as a list instead of list of objects.": "Retornar dados extraídos com cada atributo como uma lista em vez de uma lista de objetos.",
"The AI provider to use for text extraction.": "O provedor de IA a usar para extração de texto.",
"Public URL to the image or document file.": "URL pública para o arquivo de imagem ou documento.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "O idioma do texto na imagem. Escolha \"Detecção Automática\" se não tiver certeza.",
"The AI provider to use for image generation.": "O provedor de IA a usar para a geração de imagens.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Descrição da(s) imagem(s) desejada(s). Seja específica e descritiva para os melhores resultados.",
"The image resolution (e.g., 512x512, 1024x1024).": "A resolução da imagem (ex.: 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Número de imagens a gerar (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Modelo específico para a geração de imagem. Deixe em branco para o padrão do provedor.",
"The AI provider to use for text-to-speech synthesis.": "O fornecedor de IA a usar para a síntese de texto-para-voz.",
"The text to convert to speech.": "O texto para converter em fala.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "O idioma e a localidade da síntese de fala (o padrão é pt-BR se não especificado).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Escolha o gênero da voz para sintetização da fala (o padrão é feminino se não for especificado).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Ajuste a taxa de fala (-100 a 100, onde 0 é a velocidade escalar normal).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Ajustar tom de voz (-100 a 100, onde 0 é o tom normal).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Ajustar volume de áudio (-100 a 100, onde 0 é o volume normal).",
"The audio format for the generated speech (default: MP3).": "Formato de áudio para a voz gerada (padrão: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Taxa de amostragem de áudio em Hz (0-200000, 0 para padrão do provedor)."
}

View File

@@ -0,0 +1,131 @@
{
"Eden AI": "Eden AI",
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI это платформа, предоставляющая широкий спектр услуг, включая создание текстов, сводку, перевод и многое другое.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "Вы можете получить свой API ключ с помощью [Eden AI dashboard](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Сгенерировать текст",
"Summarize Text": "Суммировать текст",
"Extract Keywords in Text": "Извлечь ключевые слова в тексте",
"Detect Language of Text": "Обнаружить язык текста",
"Extract Named Entities in Text": "Извлечь названия сущностей в тексте",
"Moderate Text": "Умеренный текст",
"Spell Check": "Проверка правописания",
"Translate Text": "Перевести текст",
"Invoice Parser": "Парсер счета",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Извлечь текст в изображении (OCR)",
"Image Generation": "Генерация изображений",
"Generate Audio From Text": "Генерировать аудио из текста",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Генерируйте дополнения текста с помощью различных AI провайдеров через Eden AI чата.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Извлеките ключевые предложения и создавайте сводки из длинных отрывков с использованием различных AI провайдеров.",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Определите важные термины в тексте с помощью Eden AI. Поддерживает несколько провайдеров, языков и моделей.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Обнаружение языка, используемого в тексте с помощью Eden AI. Поддерживает нескольких провайдеров и моделей.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Определите сущности (имена, места) в тексте с помощью Eden AI. Поддерживает несколько провайдеров, языков и моделей.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Обнаружение явно или нарушающего политику текста с помощью Eden AI. Поддерживает несколько провайдеров, языков и моделей.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "This is a unique combination of advanced speech recognition and advanced advanced speech recognition system that lets you memorize thousands of foreign words quickly and easily.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Translate text into different languages using Eden AI. Supports multiple providers, languages and models.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Извлечь структурированные данные счетов из файлов с помощью Eden AI. Поддерживает нескольких провайдеров, языков и типов документов.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Извлечь структурированные данные из квитанций и документов с помощью Eden AI. Поддерживает общее извлечение данных с помощью карманных ящиков.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Извлечь текст из изображений (OCR) с помощью Eden AI. Поддерживает несколько провайдеров, языков и границ.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Создавайте изображения из текстовых подсказок, используя Eden AI. Поддерживает несколько провайдеров, моделей и разрешений.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Конвертируйте текст в произношение аудио с помощью Eden AI. Поддерживает несколько провайдеров, языков и голосовых настроек.",
"Provider": "Поставщик",
"Prompt": "Prompt",
"System Prompt": "Системная подсказка",
"Model": "Модель",
"Temperature": "Температура",
"Max Completion Tokens": "Макс. токены завершения",
"Reasoning Effort": "Разумный Усилий",
"Fallback Providers": "Резервные провайдеры",
"Include Image": "Включить изображение",
"Image URL": "URL изображения",
"Text to Summarize": "Сводка текста",
"Number of Summary Sentences": "Количество резюме предложений",
"Text Language": "Язык текста",
"Specific Model": "Конкретная модель",
"Include Original Response": "Включить исходный ответ",
"Text to Analyze": "Текст для анализа",
"Text to Moderate": "Текст для модерации",
"Text to Check": "Текст для проверки",
"Text to Translate": "Текст для перевода",
"Source Language": "Исходный язык",
"Target Language": "Язык цели",
"File URL": "URL файла",
"Document Type": "Тип документа",
"Document Language": "Язык документа",
"PDF Password": "Пароль PDF",
"Convert to PDF": "Конвертировать в PDF",
"Attributes as List": "Атрибуты как список",
"Resolution": "Разрешение",
"Number of Images": "Количество изображений",
"Text": "Текст",
"Language": "Язык",
"Voice Gender": "Пол голоса",
"Speaking Rate": "Коэффициент разговорного курса",
"Voice Pitch": "Голосовой сигнал",
"Audio Volume": "Громкость аудио",
"Audio Format": "Формат аудио",
"Sampling Rate": "Частота отбора проб",
"The AI provider to use for text generation.": "Провайдер AI для генерации текста.",
"The main prompt or question you want the AI to respond to.": "Главная подсказка или вопрос, на который вы хотите ИИ ответить.",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "Системное сообщение, устанавливающее поведение и контекст помощника ИИ (например, «Вы полезный помощник по программированию»).",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Конкретная модель для использования (например, gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Оставьте пустым для конкретных значений по умолчанию.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Контролирует случайность (0.0-2.0). Высокие значения делают вывод более креативным.",
"Maximum number of tokens to generate in the response.": "Максимальное количество маркеров для генерации в ответе.",
"Level of reasoning depth for the response.": "Уровень разумного обоснования ответа.",
"Alternative providers to try if the main provider fails.": "Альтернативные провайдеры для проверки при сбое основного провайдера.",
"Include an image in your prompt (for vision-capable models).": "Включите изображение в ваш запрос (для моделей с поддержкой обзора).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL изображения для включения в запрос (используется только в том случае, если включена \"Включить изображение\").",
"The AI provider to use for text summarization.": "Провайдер ИИ использует для текстового резюме.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "Текстовое содержимое, которое вы хотите суммировать. Может быть статьями, документами или любым длинным.",
"How many sentences should the summary contain (1-20).": "Сколько предложений должно содержать резюме (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "Язык текста ввода. Если не уверен, выберите \"Автоматическое обнаружение\".",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Конкретная модель для использования (например, gpt-4, gpt-4o, summarize-xlarge). Оставьте пустым по умолчанию.",
"Alternative providers to try if the main provider fails (up to 5).": "Альтернативные провайдеры для проверки неудачи основного провайдера (до 5).",
"Include the raw provider response in the output for debugging.": "Включить ответ поставщика сырья в вывод для отладки.",
"The AI provider to use for keyword extraction.": "Провайдер ИИ для извлечения ключевых слов.",
"The text to extract keywords from.": "Текст для извлечения ключевых слов.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Конкретная модель для использования (например, gpt-4o, gpt-4, grok-2-latest). Оставьте пустым по умолчанию.",
"The AI provider to use for language detection.": "Провайдер AI для определения языка.",
"The text to detect language for.": "Текст для определения языка.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Конкретная модель для использования (например, gpt-4o, grok-2-latest). Оставьте пустым по умолчанию.",
"The AI provider to use for named entity recognition.": "Провайдер AI использует для распознавания названных сущностей.",
"The text to extract entities from.": "Текст для извлечения сущностей.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Конкретная модель для использования (например, gpt-4o, gemini-1.5-flash, grok-2-latest). Оставьте пустым по умолчанию.",
"The AI provider to use for text moderation.": "Провайдер ИИ для модерации текста.",
"The text to analyze for explicit or policy-violating content.": "Текст для анализа содержимого, явно нарушающего политику.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Конкретная модель для использования (например, text-moderation-latest, text-moderation-stable). Оставьте пустым по умолчанию.",
"The AI provider to use for spell checking and grammar correction.": "AI провайдер для проверки орфографии и грамматической коррекции.",
"The text to check for spelling or grammar errors.": "Текст для проверки правописания или грамматических ошибок.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Конкретная модель для использования (например, gpt-4o, gpt-4, grok-2-latest, команда). Оставьте пустым по умолчанию.",
"The AI provider to use for text translation.": "Провайдер ИИ для перевода текста.",
"The text to translate.": "Текст для перевода.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "Язык текста ввода. Выберите \"Автоматическое обнаружение\", чтобы автоматически определить язык.",
"The language to translate the text into.": "Язык для перевода текста.",
"The AI provider to use for financial document parsing.": "Провайдер ИИ для анализа финансовых документов.",
"Public URL to the financial document file (PDF, image, etc).": "Публичный URL-адрес файла финансового документа (PDF, изображение, и т.д.).",
"The type of financial document to parse.": "Тип финансового документа для анализа.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "Язык документа. Если не уверен, выберите \"Автоматическое обнаружение\".",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Конкретная модель для использования (например, gpt-4o, gpt-4o-mini, gpt-4-turbo). Оставьте пустым по умолчанию.",
"Password for protected PDF files (if applicable).": "Пароль для защищенных файлов PDF (если это применимо).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Конвертируйте файлы DOC/DOCX в формат PDF для лучшей совместимости.",
"The AI provider to use for data extraction.": "Провайдер ИИ для использования для извлечения данных.",
"Public URL to the document file (PDF, image, etc).": "Публичный URL-адрес файла документа (PDF, изображение, и т.д.).",
"Return extracted data with each attribute as a list instead of list of objects.": "Возврат извлеченных данных с каждым атрибутом в виде списка объектов.",
"The AI provider to use for text extraction.": "Провайдер ИИ для извлечения текста.",
"Public URL to the image or document file.": "Публичный URL к файлу изображения или документа.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "Язык текста изображения. Если не уверен, выберите \"Автоматическое обнаружение\".",
"The AI provider to use for image generation.": "Провайдер AI для генерации изображений.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Описание желаемого изображения(ей). Укажите конкретный и описательный для получения наилучшего результата.",
"The image resolution (e.g., 512x512, 1024x1024).": "Разрешение изображения (например, 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Количество изображений для генерации (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Конкретная модель для генерации изображений. Оставьте пустым для поставщика по умолчанию.",
"The AI provider to use for text-to-speech synthesis.": "AI провайдер для синтеза речи.",
"The text to convert to speech.": "Текст для преобразования в речь.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "Язык и локаль для синтеза речи (по умолчанию ru-US если не указано).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Выберите пол голоса для синтеза речи (по умолчанию женщина, если не указана).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Регулировка скорости разговора (-100 to 100, где 0 - нормальная скорость).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Регулировка высоты голоса (-100 до 100, где 0 - нормальная подставка).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Регулировка громкости звука (-100 to 100, где 0 нормальная громкость).",
"The audio format for the generated speech (default: MP3).": "Формат аудио для сгенерированной речи (по умолчанию: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Аудио частота дискретизации в Гц (0-200000, 0 для провайдеров по умолчанию)."
}

View File

@@ -0,0 +1,130 @@
{
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Generate Text",
"Summarize Text": "Summarize Text",
"Extract Keywords in Text": "Extract Keywords in Text",
"Detect Language of Text": "Detect Language of Text",
"Extract Named Entities in Text": "Extract Named Entities in Text",
"Moderate Text": "Moderate Text",
"Spell Check": "Spell Check",
"Translate Text": "Translate Text",
"Invoice Parser": "Invoice Parser",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Extract Text in Image (OCR)",
"Image Generation": "Image Generation",
"Generate Audio From Text": "Generate Audio From Text",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Generate text completions using various AI providers through Eden AI chat endpoint.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Extract key sentences and create summaries from long text passages using various AI providers.",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Detect the language used in a text using Eden AI. Supports multiple providers and models.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.",
"Provider": "Provider",
"Prompt": "Prompt",
"System Prompt": "System Prompt",
"Model": "Model",
"Temperature": "Temperature",
"Max Completion Tokens": "Max Completion Tokens",
"Reasoning Effort": "Reasoning Effort",
"Fallback Providers": "Fallback Providers",
"Include Image": "Include Image",
"Image URL": "Image URL",
"Text to Summarize": "Text to Summarize",
"Number of Summary Sentences": "Number of Summary Sentences",
"Text Language": "Text Language",
"Specific Model": "Specific Model",
"Include Original Response": "Include Original Response",
"Text to Analyze": "Text to Analyze",
"Text to Moderate": "Text to Moderate",
"Text to Check": "Text to Check",
"Text to Translate": "Text to Translate",
"Source Language": "Source Language",
"Target Language": "Target Language",
"File URL": "File URL",
"Document Type": "Document Type",
"Document Language": "Document Language",
"PDF Password": "PDF Password",
"Convert to PDF": "Convert to PDF",
"Attributes as List": "Attributes as List",
"Resolution": "Resolution",
"Number of Images": "Number of Images",
"Text": "Text",
"Language": "Language",
"Voice Gender": "Voice Gender",
"Speaking Rate": "Speaking Rate",
"Voice Pitch": "Voice Pitch",
"Audio Volume": "Audio Volume",
"Audio Format": "Audio Format",
"Sampling Rate": "Sampling Rate",
"The AI provider to use for text generation.": "The AI provider to use for text generation.",
"The main prompt or question you want the AI to respond to.": "The main prompt or question you want the AI to respond to.",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Controls randomness (0.0-2.0). Higher values make output more creative.",
"Maximum number of tokens to generate in the response.": "Maximum number of tokens to generate in the response.",
"Level of reasoning depth for the response.": "Level of reasoning depth for the response.",
"Alternative providers to try if the main provider fails.": "Alternative providers to try if the main provider fails.",
"Include an image in your prompt (for vision-capable models).": "Include an image in your prompt (for vision-capable models).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL of the image to include in the prompt (only used if \"Include Image\" is enabled).",
"The AI provider to use for text summarization.": "The AI provider to use for text summarization.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "The text content you want to summarize. Can be articles, documents, or any long-form text.",
"How many sentences should the summary contain (1-20).": "How many sentences should the summary contain (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "The language of the input text. Choose \"Auto Detection\" if unsure.",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.",
"Alternative providers to try if the main provider fails (up to 5).": "Alternative providers to try if the main provider fails (up to 5).",
"Include the raw provider response in the output for debugging.": "Include the raw provider response in the output for debugging.",
"The AI provider to use for keyword extraction.": "The AI provider to use for keyword extraction.",
"The text to extract keywords from.": "The text to extract keywords from.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.",
"The AI provider to use for language detection.": "The AI provider to use for language detection.",
"The text to detect language for.": "The text to detect language for.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.",
"The AI provider to use for named entity recognition.": "The AI provider to use for named entity recognition.",
"The text to extract entities from.": "The text to extract entities from.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.",
"The AI provider to use for text moderation.": "The AI provider to use for text moderation.",
"The text to analyze for explicit or policy-violating content.": "The text to analyze for explicit or policy-violating content.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.",
"The AI provider to use for spell checking and grammar correction.": "The AI provider to use for spell checking and grammar correction.",
"The text to check for spelling or grammar errors.": "The text to check for spelling or grammar errors.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.",
"The AI provider to use for text translation.": "The AI provider to use for text translation.",
"The text to translate.": "The text to translate.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "The language of the input text. Choose \"Auto Detection\" to automatically detect the language.",
"The language to translate the text into.": "The language to translate the text into.",
"The AI provider to use for financial document parsing.": "The AI provider to use for financial document parsing.",
"Public URL to the financial document file (PDF, image, etc).": "Public URL to the financial document file (PDF, image, etc).",
"The type of financial document to parse.": "The type of financial document to parse.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "The language of the document. Choose \"Auto Detection\" if unsure.",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.",
"Password for protected PDF files (if applicable).": "Password for protected PDF files (if applicable).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Convert DOC/DOCX files to PDF format for better compatibility.",
"The AI provider to use for data extraction.": "The AI provider to use for data extraction.",
"Public URL to the document file (PDF, image, etc).": "Public URL to the document file (PDF, image, etc).",
"Return extracted data with each attribute as a list instead of list of objects.": "Return extracted data with each attribute as a list instead of list of objects.",
"The AI provider to use for text extraction.": "The AI provider to use for text extraction.",
"Public URL to the image or document file.": "Public URL to the image or document file.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "The language of the text in the image. Choose \"Auto Detection\" if unsure.",
"The AI provider to use for image generation.": "The AI provider to use for image generation.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Description of the desired image(s). Be specific and descriptive for best results.",
"The image resolution (e.g., 512x512, 1024x1024).": "The image resolution (e.g., 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Number of images to generate (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Specific model to use for image generation. Leave empty for provider default.",
"The AI provider to use for text-to-speech synthesis.": "The AI provider to use for text-to-speech synthesis.",
"The text to convert to speech.": "The text to convert to speech.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "The language and locale for the speech synthesis (defaults to en-US if not specified).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Choose the voice gender for speech synthesis (defaults to Female if not specified).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Adjust speaking rate (-100 to 100, where 0 is normal speed).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Adjust voice pitch (-100 to 100, where 0 is normal pitch).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Adjust audio volume (-100 to 100, where 0 is normal volume).",
"The audio format for the generated speech (default: MP3).": "The audio format for the generated speech (default: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Audio sampling rate in Hz (0-200000, 0 for provider default)."
}

View File

@@ -0,0 +1,131 @@
{
"Eden AI": "Eden AI",
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Generate Text",
"Summarize Text": "Summarize Text",
"Extract Keywords in Text": "Extract Keywords in Text",
"Detect Language of Text": "Detect Language of Text",
"Extract Named Entities in Text": "Extract Named Entities in Text",
"Moderate Text": "Moderate Text",
"Spell Check": "Spell Check",
"Translate Text": "Translate Text",
"Invoice Parser": "Invoice Parser",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Extract Text in Image (OCR)",
"Image Generation": "Image Generation",
"Generate Audio From Text": "Generate Audio From Text",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Generate text completions using various AI providers through Eden AI chat endpoint.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Extract key sentences and create summaries from long text passages using various AI providers.",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Detect the language used in a text using Eden AI. Supports multiple providers and models.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.",
"Provider": "Provider",
"Prompt": "Prompt",
"System Prompt": "System Prompt",
"Model": "Model",
"Temperature": "Temperature",
"Max Completion Tokens": "Max Completion Tokens",
"Reasoning Effort": "Reasoning Effort",
"Fallback Providers": "Fallback Providers",
"Include Image": "Include Image",
"Image URL": "Image URL",
"Text to Summarize": "Text to Summarize",
"Number of Summary Sentences": "Number of Summary Sentences",
"Text Language": "Text Language",
"Specific Model": "Specific Model",
"Include Original Response": "Include Original Response",
"Text to Analyze": "Text to Analyze",
"Text to Moderate": "Text to Moderate",
"Text to Check": "Text to Check",
"Text to Translate": "Text to Translate",
"Source Language": "Source Language",
"Target Language": "Target Language",
"File URL": "File URL",
"Document Type": "Document Type",
"Document Language": "Document Language",
"PDF Password": "PDF Password",
"Convert to PDF": "Convert to PDF",
"Attributes as List": "Attributes as List",
"Resolution": "Resolution",
"Number of Images": "Number of Images",
"Text": "Text",
"Language": "Language",
"Voice Gender": "Voice Gender",
"Speaking Rate": "Speaking Rate",
"Voice Pitch": "Voice Pitch",
"Audio Volume": "Audio Volume",
"Audio Format": "Audio Format",
"Sampling Rate": "Sampling Rate",
"The AI provider to use for text generation.": "The AI provider to use for text generation.",
"The main prompt or question you want the AI to respond to.": "The main prompt or question you want the AI to respond to.",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Controls randomness (0.0-2.0). Higher values make output more creative.",
"Maximum number of tokens to generate in the response.": "Maximum number of tokens to generate in the response.",
"Level of reasoning depth for the response.": "Level of reasoning depth for the response.",
"Alternative providers to try if the main provider fails.": "Alternative providers to try if the main provider fails.",
"Include an image in your prompt (for vision-capable models).": "Include an image in your prompt (for vision-capable models).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL of the image to include in the prompt (only used if \"Include Image\" is enabled).",
"The AI provider to use for text summarization.": "The AI provider to use for text summarization.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "The text content you want to summarize. Can be articles, documents, or any long-form text.",
"How many sentences should the summary contain (1-20).": "How many sentences should the summary contain (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "The language of the input text. Choose \"Auto Detection\" if unsure.",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.",
"Alternative providers to try if the main provider fails (up to 5).": "Alternative providers to try if the main provider fails (up to 5).",
"Include the raw provider response in the output for debugging.": "Include the raw provider response in the output for debugging.",
"The AI provider to use for keyword extraction.": "The AI provider to use for keyword extraction.",
"The text to extract keywords from.": "The text to extract keywords from.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.",
"The AI provider to use for language detection.": "The AI provider to use for language detection.",
"The text to detect language for.": "The text to detect language for.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.",
"The AI provider to use for named entity recognition.": "The AI provider to use for named entity recognition.",
"The text to extract entities from.": "The text to extract entities from.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.",
"The AI provider to use for text moderation.": "The AI provider to use for text moderation.",
"The text to analyze for explicit or policy-violating content.": "The text to analyze for explicit or policy-violating content.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.",
"The AI provider to use for spell checking and grammar correction.": "The AI provider to use for spell checking and grammar correction.",
"The text to check for spelling or grammar errors.": "The text to check for spelling or grammar errors.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.",
"The AI provider to use for text translation.": "The AI provider to use for text translation.",
"The text to translate.": "The text to translate.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "The language of the input text. Choose \"Auto Detection\" to automatically detect the language.",
"The language to translate the text into.": "The language to translate the text into.",
"The AI provider to use for financial document parsing.": "The AI provider to use for financial document parsing.",
"Public URL to the financial document file (PDF, image, etc).": "Public URL to the financial document file (PDF, image, etc).",
"The type of financial document to parse.": "The type of financial document to parse.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "The language of the document. Choose \"Auto Detection\" if unsure.",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.",
"Password for protected PDF files (if applicable).": "Password for protected PDF files (if applicable).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Convert DOC/DOCX files to PDF format for better compatibility.",
"The AI provider to use for data extraction.": "The AI provider to use for data extraction.",
"Public URL to the document file (PDF, image, etc).": "Public URL to the document file (PDF, image, etc).",
"Return extracted data with each attribute as a list instead of list of objects.": "Return extracted data with each attribute as a list instead of list of objects.",
"The AI provider to use for text extraction.": "The AI provider to use for text extraction.",
"Public URL to the image or document file.": "Public URL to the image or document file.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "The language of the text in the image. Choose \"Auto Detection\" if unsure.",
"The AI provider to use for image generation.": "The AI provider to use for image generation.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Description of the desired image(s). Be specific and descriptive for best results.",
"The image resolution (e.g., 512x512, 1024x1024).": "The image resolution (e.g., 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Number of images to generate (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Specific model to use for image generation. Leave empty for provider default.",
"The AI provider to use for text-to-speech synthesis.": "The AI provider to use for text-to-speech synthesis.",
"The text to convert to speech.": "The text to convert to speech.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "The language and locale for the speech synthesis (defaults to en-US if not specified).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Choose the voice gender for speech synthesis (defaults to Female if not specified).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Adjust speaking rate (-100 to 100, where 0 is normal speed).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Adjust voice pitch (-100 to 100, where 0 is normal pitch).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Adjust audio volume (-100 to 100, where 0 is normal volume).",
"The audio format for the generated speech (default: MP3).": "The audio format for the generated speech (default: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Audio sampling rate in Hz (0-200000, 0 for provider default)."
}


@@ -0,0 +1,130 @@
{
"Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.": "Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.",
"You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).": "You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).",
"Generate Text": "Generate Text",
"Summarize Text": "Summarize Text",
"Extract Keywords in Text": "Extract Keywords in Text",
"Detect Language of Text": "Detect Language of Text",
"Extract Named Entities in Text": "Extract Named Entities in Text",
"Moderate Text": "Moderate Text",
"Spell Check": "Spell Check",
"Translate Text": "Translate Text",
"Invoice Parser": "Invoice Parser",
"Receipt Parser": "Receipt Parser",
"Extract Text in Image (OCR)": "Extract Text in Image (OCR)",
"Image Generation": "Image Generation",
"Generate Audio From Text": "Generate Audio From Text",
"Generate text completions using various AI providers through Eden AI chat endpoint.": "Generate text completions using various AI providers through Eden AI chat endpoint.",
"Extract key sentences and create summaries from long text passages using various AI providers.": "Extract key sentences and create summaries from long text passages using various AI providers.",
"Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.": "Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.",
"Detect the language used in a text using Eden AI. Supports multiple providers and models.": "Detect the language used in a text using Eden AI. Supports multiple providers and models.",
"Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.": "Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.",
"Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.": "Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.",
"Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.": "Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.",
"Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.": "Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.",
"Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.": "Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.",
"Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.": "Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.",
"Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.": "Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.",
"Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.": "Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.",
"Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.": "Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.",
"Provider": "Provider",
"Prompt": "Prompt",
"System Prompt": "System Prompt",
"Model": "Model",
"Temperature": "Temperature",
"Max Completion Tokens": "Max Completion Tokens",
"Reasoning Effort": "Reasoning Effort",
"Fallback Providers": "Fallback Providers",
"Include Image": "Include Image",
"Image URL": "Image URL",
"Text to Summarize": "Text to Summarize",
"Number of Summary Sentences": "Number of Summary Sentences",
"Text Language": "Text Language",
"Specific Model": "Specific Model",
"Include Original Response": "Include Original Response",
"Text to Analyze": "Text to Analyze",
"Text to Moderate": "Text to Moderate",
"Text to Check": "Text to Check",
"Text to Translate": "Text to Translate",
"Source Language": "Source Language",
"Target Language": "Target Language",
"File URL": "File URL",
"Document Type": "Document Type",
"Document Language": "Document Language",
"PDF Password": "PDF Password",
"Convert to PDF": "Convert to PDF",
"Attributes as List": "Attributes as List",
"Resolution": "Resolution",
"Number of Images": "Number of Images",
"Text": "文本",
"Language": "Language",
"Voice Gender": "Voice Gender",
"Speaking Rate": "Speaking Rate",
"Voice Pitch": "Voice Pitch",
"Audio Volume": "Audio Volume",
"Audio Format": "Audio Format",
"Sampling Rate": "Sampling Rate",
"The AI provider to use for text generation.": "The AI provider to use for text generation.",
"The main prompt or question you want the AI to respond to.": "The main prompt or question you want the AI to respond to.",
"System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").": "System message to set the behavior and context for the AI assistant (e.g., \"You are a helpful coding assistant\").",
"Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.": "Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.",
"Controls randomness (0.0-2.0). Higher values make output more creative.": "Controls randomness (0.0-2.0). Higher values make output more creative.",
"Maximum number of tokens to generate in the response.": "Maximum number of tokens to generate in the response.",
"Level of reasoning depth for the response.": "Level of reasoning depth for the response.",
"Alternative providers to try if the main provider fails.": "Alternative providers to try if the main provider fails.",
"Include an image in your prompt (for vision-capable models).": "Include an image in your prompt (for vision-capable models).",
"URL of the image to include in the prompt (only used if \"Include Image\" is enabled).": "URL of the image to include in the prompt (only used if \"Include Image\" is enabled).",
"The AI provider to use for text summarization.": "The AI provider to use for text summarization.",
"The text content you want to summarize. Can be articles, documents, or any long-form text.": "The text content you want to summarize. Can be articles, documents, or any long-form text.",
"How many sentences should the summary contain (1-20).": "How many sentences should the summary contain (1-20).",
"The language of the input text. Choose \"Auto Detection\" if unsure.": "The language of the input text. Choose \"Auto Detection\" if unsure.",
"Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.": "Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.",
"Alternative providers to try if the main provider fails (up to 5).": "Alternative providers to try if the main provider fails (up to 5).",
"Include the raw provider response in the output for debugging.": "Include the raw provider response in the output for debugging.",
"The AI provider to use for keyword extraction.": "The AI provider to use for keyword extraction.",
"The text to extract keywords from.": "The text to extract keywords from.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.",
"The AI provider to use for language detection.": "The AI provider to use for language detection.",
"The text to detect language for.": "The text to detect language for.",
"Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.",
"The AI provider to use for named entity recognition.": "The AI provider to use for named entity recognition.",
"The text to extract entities from.": "The text to extract entities from.",
"Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.",
"The AI provider to use for text moderation.": "The AI provider to use for text moderation.",
"The text to analyze for explicit or policy-violating content.": "The text to analyze for explicit or policy-violating content.",
"Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.": "Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.",
"The AI provider to use for spell checking and grammar correction.": "The AI provider to use for spell checking and grammar correction.",
"The text to check for spelling or grammar errors.": "The text to check for spelling or grammar errors.",
"Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.",
"The AI provider to use for text translation.": "The AI provider to use for text translation.",
"The text to translate.": "The text to translate.",
"The language of the input text. Choose \"Auto Detection\" to automatically detect the language.": "The language of the input text. Choose \"Auto Detection\" to automatically detect the language.",
"The language to translate the text into.": "The language to translate the text into.",
"The AI provider to use for financial document parsing.": "The AI provider to use for financial document parsing.",
"Public URL to the financial document file (PDF, image, etc).": "Public URL to the financial document file (PDF, image, etc).",
"The type of financial document to parse.": "The type of financial document to parse.",
"The language of the document. Choose \"Auto Detection\" if unsure.": "The language of the document. Choose \"Auto Detection\" if unsure.",
"Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.": "Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.",
"Password for protected PDF files (if applicable).": "Password for protected PDF files (if applicable).",
"Convert DOC/DOCX files to PDF format for better compatibility.": "Convert DOC/DOCX files to PDF format for better compatibility.",
"The AI provider to use for data extraction.": "The AI provider to use for data extraction.",
"Public URL to the document file (PDF, image, etc).": "Public URL to the document file (PDF, image, etc).",
"Return extracted data with each attribute as a list instead of list of objects.": "Return extracted data with each attribute as a list instead of list of objects.",
"The AI provider to use for text extraction.": "The AI provider to use for text extraction.",
"Public URL to the image or document file.": "Public URL to the image or document file.",
"The language of the text in the image. Choose \"Auto Detection\" if unsure.": "The language of the text in the image. Choose \"Auto Detection\" if unsure.",
"The AI provider to use for image generation.": "The AI provider to use for image generation.",
"Description of the desired image(s). Be specific and descriptive for best results.": "Description of the desired image(s). Be specific and descriptive for best results.",
"The image resolution (e.g., 512x512, 1024x1024).": "The image resolution (e.g., 512x512, 1024x1024).",
"Number of images to generate (1-10).": "Number of images to generate (1-10).",
"Specific model to use for image generation. Leave empty for provider default.": "Specific model to use for image generation. Leave empty for provider default.",
"The AI provider to use for text-to-speech synthesis.": "The AI provider to use for text-to-speech synthesis.",
"The text to convert to speech.": "The text to convert to speech.",
"The language and locale for the speech synthesis (defaults to en-US if not specified).": "The language and locale for the speech synthesis (defaults to en-US if not specified).",
"Choose the voice gender for speech synthesis (defaults to Female if not specified).": "Choose the voice gender for speech synthesis (defaults to Female if not specified).",
"Adjust speaking rate (-100 to 100, where 0 is normal speed).": "Adjust speaking rate (-100 to 100, where 0 is normal speed).",
"Adjust voice pitch (-100 to 100, where 0 is normal pitch).": "Adjust voice pitch (-100 to 100, where 0 is normal pitch).",
"Adjust audio volume (-100 to 100, where 0 is normal volume).": "Adjust audio volume (-100 to 100, where 0 is normal volume).",
"The audio format for the generated speech (default: MP3).": "The audio format for the generated speech (default: MP3).",
"Audio sampling rate in Hz (0-200000, 0 for provider default).": "Audio sampling rate in Hz (0-200000, 0 for provider default)."
}


@@ -0,0 +1,73 @@
import { createPiece, PieceAuth } from "@activepieces/pieces-framework";
import { generateTextAction } from './lib/actions/generate-text';
import { summarizeTextAction } from './lib/actions/summarize-text';
import { extractKeywordsAction } from './lib/actions/extract-keywords';
import { detectLanguageAction } from './lib/actions/detect-language';
import { extractEntitiesAction } from './lib/actions/extract-entities';
import { moderateTextAction } from './lib/actions/moderate-text';
import { spellCheckAction } from './lib/actions/spell-check';
import { translateTextAction } from './lib/actions/translate-text';
import { invoiceParserAction } from './lib/actions/invoice-parser';
import { receiptParserAction } from './lib/actions/receipt-parser';
import { ocrImageAction } from './lib/actions/ocr-image';
import { imageGenerationAction } from './lib/actions/image-generation';
import { textToSpeechAction } from './lib/actions/text-to-speech';
import { HttpMethod, httpClient } from '@activepieces/pieces-common';
import { PieceCategory } from "@activepieces/shared";
export const edenAiAuth = PieceAuth.SecretText({
displayName: 'Eden AI API Key',
description: `You can obtain your API key from your [Eden AI dashboard](https://app.edenai.run/admin/account/developer).`,
required: true,
validate: async ({ auth }) => {
if (!auth || typeof auth !== 'string' || auth.length < 10) {
return { valid: false, error: 'Invalid API key format.' };
}
try {
const response = await httpClient.sendRequest({
method: HttpMethod.POST,
url: 'https://api.edenai.run/v2/translation/language_detection',
headers: {
'Authorization': `Bearer ${auth}`,
'Content-Type': 'application/json',
},
body: { providers: 'google', text: 'hello' },
timeout: 10000,
});
if (response.status >= 200 && response.status < 300) {
return { valid: true };
}
return { valid: false, error: 'Invalid Eden AI API key.' };
} catch (e: any) {
return { valid: false, error: 'Invalid Eden AI API key or network error.' };
}
},
});
export const edenAi = createPiece({
displayName: "Eden AI",
auth: edenAiAuth,
minimumSupportedRelease: '0.36.1',
logoUrl: "https://cdn.activepieces.com/pieces/eden-ai.png",
authors: ["sparkybug"],
description: "Eden AI is a platform that provides a range of AI services, including text generation, summarization, translation, and more.",
categories: [PieceCategory.ARTIFICIAL_INTELLIGENCE],
actions: [
generateTextAction,
summarizeTextAction,
extractKeywordsAction,
detectLanguageAction,
extractEntitiesAction,
moderateTextAction,
spellCheckAction,
translateTextAction,
invoiceParserAction,
receiptParserAction,
ocrImageAction,
imageGenerationAction,
textToSpeechAction,
],
triggers: [],
});
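
The validator above is a minimal call to the language-detection endpoint. As a rough standalone sketch (assuming a Node 18+ runtime with global fetch; the function name and placeholder key are illustrative and not part of the piece), the same check could be written as:

// Hypothetical standalone key check mirroring the validate() callback above.
async function checkEdenAiKey(apiKey: string): Promise<boolean> {
  const response = await fetch('https://api.edenai.run/v2/translation/language_detection', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ providers: 'google', text: 'hello' }),
  });
  // Any 2xx status is treated as a valid key, matching the piece's validator.
  return response.ok;
}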


@@ -0,0 +1,139 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const LANGUAGE_DETECTION_PROVIDERS = [
{ label: 'Amazon', value: 'amazon' },
{ label: 'Google', value: 'google' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'ModernMT', value: 'modernmt' },
{ label: 'OpenAI', value: 'openai' },
{ label: 'XAI Grok', value: 'xai' },
{ label: 'OneAI', value: 'oneai' },
];
function normalizeLanguageDetectionResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, languages: [], status: 'fail', raw: response };
}
const languages = (providerResult.items || []).map((item: any) => ({
language: item.language || '',
display_name: item.display_name || '',
confidence: item.confidence || 0,
}));
return {
provider,
languages,
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
export const detectLanguageAction = createAction({
auth: edenAiAuth,
name: 'detect_language',
displayName: 'Detect Language of Text',
description: 'Detect the language used in a text using Eden AI. Supports multiple providers and models.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for language detection.',
required: true,
refreshers: [],
options: createStaticDropdown(LANGUAGE_DETECTION_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Text to Analyze',
description: 'The text to detect language for.',
required: true,
}),
model: Property.ShortText({
displayName: 'Specific Model',
description: 'Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.',
required: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(LANGUAGE_DETECTION_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Text is required'),
model: z.string().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
model,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
};
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/translation/language_detection',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeLanguageDetectionResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your input text and parameters.');
}
throw new Error(`Failed to detect language: ${err.message || err}`);
}
},
});
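
normalizeLanguageDetectionResponse assumes the Eden AI response is keyed by provider name, with detected languages under items. A hypothetical round trip (all values invented for illustration) looks like this:

// Hypothetical raw response for providers: 'google' (values are made up).
const exampleDetectionResponse = {
  google: {
    status: 'success',
    items: [{ language: 'en', display_name: 'English', confidence: 0.99 }],
  },
};
// normalizeLanguageDetectionResponse('google', exampleDetectionResponse) would return:
// {
//   provider: 'google',
//   languages: [{ language: 'en', display_name: 'English', confidence: 0.99 }],
//   status: 'success',
//   original_response: null,
//   raw: exampleDetectionResponse,
// }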


@@ -0,0 +1,182 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const NER_PROVIDERS = [
{ label: 'Amazon', value: 'amazon' },
{ label: 'Google', value: 'google' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'OpenAI', value: 'openai' },
{ label: 'Tenstorrent', value: 'tenstorrent' },
{ label: 'XAI Grok', value: 'xai' },
{ label: 'OneAI', value: 'oneai' },
];
const NER_LANGUAGES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Arabic', value: 'ar' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Chinese (Simplified)', value: 'zh-Hans' },
{ label: 'Chinese (Taiwan)', value: 'zh-TW' },
{ label: 'Chinese (Traditional)', value: 'zh-Hant' },
{ label: 'Czech', value: 'cs' },
{ label: 'Danish', value: 'da' },
{ label: 'Dutch', value: 'nl' },
{ label: 'English', value: 'en' },
{ label: 'Finnish', value: 'fi' },
{ label: 'French', value: 'fr' },
{ label: 'German', value: 'de' },
{ label: 'Hindi', value: 'hi' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Italian', value: 'it' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Korean', value: 'ko' },
{ label: 'Norwegian', value: 'no' },
{ label: 'Norwegian Bokmål', value: 'nb' },
{ label: 'Polish', value: 'pl' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Portuguese (Brazil)', value: 'pt-BR' },
{ label: 'Portuguese (Portugal)', value: 'pt-PT' },
{ label: 'Russian', value: 'ru' },
{ label: 'Spanish', value: 'es' },
{ label: 'Swedish', value: 'sv' },
{ label: 'Turkish', value: 'tr' },
];
function normalizeNerResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, entities: [], status: 'fail', raw: response };
}
const entities = (providerResult.items || []).map((item: any) => ({
entity: item.entity || '',
category: item.category || '',
importance: item.importance || 0,
}));
return {
provider,
entities,
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
export const extractEntitiesAction = createAction({
name: 'extract_entities',
displayName: 'Extract Named Entities in Text',
description: 'Identify entities (names, places) in text using Eden AI. Supports multiple providers, languages, and models.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for named entity recognition.',
required: true,
refreshers: [],
options: createStaticDropdown(NER_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Text to Analyze',
description: 'The text to extract entities from.',
required: true,
}),
language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Text Language',
description: 'The language of the input text. Choose "Auto Detection" if unsure.',
required: false,
refreshers: [],
options: createStaticDropdown(NER_LANGUAGES),
defaultValue: 'auto-detect',
}),
model: Property.ShortText({
displayName: 'Specific Model',
description: 'Specific model to use (e.g., gpt-4o, gemini-1.5-flash, grok-2-latest). Leave empty for default.',
required: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(NER_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
auth: edenAiAuth,
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Text is required'),
language: z.string().nullish(),
model: z.string().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
language,
model,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
};
if (language && language !== 'auto-detect') body['language'] = language;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/text/named_entity_recognition',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeNerResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your input text and parameters.');
}
throw new Error(`Failed to extract entities: ${err.message || err}`);
}
},
});


@@ -0,0 +1,191 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const KEYWORD_EXTRACTION_PROVIDERS = [
{ label: 'Amazon', value: 'amazon' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'OpenAI', value: 'openai' },
{ label: 'Tenstorrent', value: 'tenstorrent' },
{ label: 'XAI Grok', value: 'xai' },
{ label: 'Emvista', value: 'emvista' },
{ label: 'Cortical.io', value: 'corticalio' },
{ label: 'OneAI', value: 'oneai' },
];
const KEYWORD_EXTRACTION_LANGUAGES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Afrikaans', value: 'af' },
{ label: 'Arabic', value: 'ar' },
{ label: 'Bulgarian', value: 'bg' },
{ label: 'Catalan', value: 'ca' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Chinese (Simplified)', value: 'zh-Hans' },
{ label: 'Chinese (Taiwan)', value: 'zh-TW' },
{ label: 'Croatian', value: 'hr' },
{ label: 'Danish', value: 'da' },
{ label: 'Dutch', value: 'nl' },
{ label: 'English', value: 'en' },
{ label: 'Estonian', value: 'et' },
{ label: 'Finnish', value: 'fi' },
{ label: 'French', value: 'fr' },
{ label: 'German', value: 'de' },
{ label: 'Modern Greek', value: 'el' },
{ label: 'Hindi', value: 'hi' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Indonesian', value: 'id' },
{ label: 'Italian', value: 'it' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Korean', value: 'ko' },
{ label: 'Latvian', value: 'lv' },
{ label: 'Norwegian', value: 'no' },
{ label: 'Norwegian Bokmål', value: 'nb' },
{ label: 'Polish', value: 'pl' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Portuguese (Brazil)', value: 'pt-BR' },
{ label: 'Portuguese (Portugal)', value: 'pt-PT' },
{ label: 'Romanian', value: 'ro' },
{ label: 'Russian', value: 'ru' },
{ label: 'Slovak', value: 'sk' },
{ label: 'Slovenian', value: 'sl' },
{ label: 'Spanish', value: 'es' },
{ label: 'Swedish', value: 'sv' },
{ label: 'Turkish', value: 'tr' },
];
function normalizeKeywordResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, keywords: [], status: 'fail', raw: response };
}
const keywords = (providerResult.items || []).map((item: any) => ({
keyword: item.keyword || '',
importance: item.importance || 0,
}));
return {
provider,
keywords,
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
export const extractKeywordsAction = createAction({
auth: edenAiAuth,
name: 'extract_keywords',
displayName: 'Extract Keywords in Text',
description: 'Identify important terms in a text using Eden AI. Supports multiple providers, languages, and models.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for keyword extraction.',
required: true,
refreshers: [],
options: createStaticDropdown(KEYWORD_EXTRACTION_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Text to Analyze',
description: 'The text to extract keywords from.',
required: true,
}),
language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Text Language',
description: 'The language of the input text. Choose "Auto Detection" if unsure.',
required: false,
refreshers: [],
options: createStaticDropdown(KEYWORD_EXTRACTION_LANGUAGES),
defaultValue: 'auto-detect',
}),
model: Property.ShortText({
displayName: 'Specific Model',
description: 'Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest). Leave empty for default.',
required: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(KEYWORD_EXTRACTION_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Text is required'),
language: z.string().nullish(),
model: z.string().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
language,
model,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
};
if (language && language !== 'auto-detect') body['language'] = language;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/text/keyword_extraction',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeKeywordResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your input text and parameters.');
}
throw new Error(`Failed to extract keywords: ${err.message || err}`);
}
},
});


@@ -0,0 +1,226 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const CHAT_PROVIDERS = [
{ label: 'OpenAI GPT-4o', value: 'openai' },
{ label: 'Anthropic Claude', value: 'anthropic' },
{ label: 'Google Gemini', value: 'google' },
{ label: 'Meta Llama', value: 'meta' },
{ label: 'Mistral', value: 'mistral' },
{ label: 'Cohere', value: 'cohere' },
{ label: 'XAI Grok', value: 'xai' },
{ label: 'Amazon Nova', value: 'amazon' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'DeepSeek', value: 'deepseek' },
{ label: 'Groq', value: 'groq' }
];
const REASONING_EFFORT_OPTIONS = [
{ label: 'Low - Quick responses', value: 'low' },
{ label: 'Medium - Balanced approach', value: 'medium' },
{ label: 'High - In-depth reasoning', value: 'high' }
];
function normalizeChatResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, content: '', usage: null, raw: response };
}
const choices = providerResult.choices || [];
const firstChoice = choices[0];
const message = firstChoice?.message;
return {
provider,
content: message?.content || '',
role: message?.role || 'assistant',
finish_reason: firstChoice?.finish_reason || '',
usage: providerResult.usage || null,
model: providerResult.model || '',
raw: response
};
}
export const generateTextAction = createAction({
auth: edenAiAuth,
name: 'generate_text',
displayName: 'Generate Text',
description:
'Generate text completions using various AI providers through Eden AI chat endpoint.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for text generation.',
required: true,
refreshers: [],
options: createStaticDropdown(CHAT_PROVIDERS)
}),
prompt: Property.LongText({
displayName: 'Prompt',
description: 'The main prompt or question you want the AI to respond to.',
required: true,
}),
system_prompt: Property.LongText({
displayName: 'System Prompt',
description:
'System message to set the behavior and context for the AI assistant (e.g., "You are a helpful coding assistant").',
required: false
}),
model: Property.ShortText({
displayName: 'Model',
description:
'Specific model to use (e.g., gpt-4o, claude-3-sonnet-latest, gemini-2.0-flash). Leave empty for provider-specific defaults.',
required: false
}),
temperature: Property.Number({
displayName: 'Temperature',
description:
'Controls randomness (0.0-2.0). Higher values make output more creative.',
required: false,
defaultValue: 0.7
}),
max_completion_tokens: Property.Number({
displayName: 'Max Completion Tokens',
description: 'Maximum number of tokens to generate in the response.',
required: false,
defaultValue: 1000
}),
reasoning_effort: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Reasoning Effort',
description: 'Level of reasoning depth for the response.',
required: false,
refreshers: [],
options: createStaticDropdown(REASONING_EFFORT_OPTIONS)
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails.',
required: false,
refreshers: [],
options: createStaticDropdown(CHAT_PROVIDERS)
}),
include_image: Property.Checkbox({
displayName: 'Include Image',
description: 'Include an image in your prompt (for vision-capable models).',
required: false,
defaultValue: false,
}),
image_url: Property.ShortText({
displayName: 'Image URL',
description: 'URL of the image to include in the prompt (only used if "Include Image" is enabled).',
required: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
prompt: z.string().min(1, 'Prompt is required'),
temperature: z.number().min(0).max(2).nullish(),
max_completion_tokens: z.number().min(1).nullish(),
image_url: z.string().url().nullish()
});
const {
provider,
prompt,
system_prompt,
model,
temperature,
max_completion_tokens,
reasoning_effort,
fallback_providers,
include_image,
image_url
} = propsValue;
const messages: any[] = [];
if (system_prompt) {
messages.push({
role: 'system',
content: [{ type: 'text', text: system_prompt }]
});
}
const userContent: any[] = [{ type: 'text', text: prompt }];
if (include_image && image_url) {
userContent.push({
type: 'image_url',
image_url: { url: image_url }
});
}
messages.push({
role: 'user',
content: userContent
});
const body: Record<string, any> = {
providers: provider,
messages
};
const defaultModels: Record<string, string> = {
'openai': 'gpt-4o',
'anthropic': 'claude-3-sonnet-latest',
'google': 'gemini-2.0-flash',
'meta': 'llama-3.1-70b-instruct',
'mistral': 'mistral-large-latest',
'cohere': 'command-r-plus',
'xai': 'grok-2-latest',
'amazon': 'nova-pro-v1:0',
'microsoft': 'gpt-4o',
'deepseek': 'deepseek-chat',
'groq': 'llama-3.1-70b-versatile'
};
body['model'] = model || defaultModels[provider] || 'gpt-4o';
if (temperature !== undefined) body['temperature'] = temperature;
if (max_completion_tokens !== undefined) body['max_completion_tokens'] = max_completion_tokens;
if (reasoning_effort) body['reasoning_effort'] = reasoning_effort;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers;
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/llm/chat',
body
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeChatResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error(
'Invalid API key. Please check your Eden AI credentials.'
);
}
if (err.message && typeof err.message === 'string') {
throw new Error(`Failed to generate text: ${err.message}`);
}
throw new Error(`Failed to generate text: ${JSON.stringify(err)}`);
}
}
});
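
To make the request construction above concrete: a call with a system prompt, no image, and the default settings builds a body of roughly the shape below (the prompt text and values are placeholders, not output from a real run):

// Illustrative /llm/chat body for provider 'openai' with a system prompt.
const exampleChatBody = {
  providers: 'openai',
  messages: [
    { role: 'system', content: [{ type: 'text', text: 'You are a helpful coding assistant' }] },
    { role: 'user', content: [{ type: 'text', text: 'Write a haiku about automation' }] },
  ],
  model: 'gpt-4o', // falls back to defaultModels[provider] when Model is left empty
  temperature: 0.7,
  max_completion_tokens: 1000,
};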


@@ -0,0 +1,196 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const IMAGE_GENERATION_PROVIDERS = [
{ label: 'OpenAI', value: 'openai' },
{ label: 'StabilityAI', value: 'stabilityai' },
{ label: 'Replicate', value: 'replicate' },
{ label: 'Amazon', value: 'amazon' },
{ label: 'Leonardo', value: 'leonardo' },
{ label: 'MiniMax', value: 'minimax' },
{ label: 'ByteDance', value: 'bytedance' },
];
const IMAGE_GENERATION_MODELS = [
{ label: 'DALL-E 3 (OpenAI)', value: 'dall-e-3' },
{ label: 'DALL-E 2 (OpenAI)', value: 'dall-e-2' },
{ label: 'Stable Diffusion v1.6 (StabilityAI)', value: 'stable-diffusion-v1-6' },
{ label: 'Stable Diffusion XL 1024 v1.0 (StabilityAI)', value: 'stable-diffusion-xl-1024-v1-0' },
{ label: 'Classic (Replicate)', value: 'classic' },
{ label: 'Anime Style (Replicate)', value: 'anime-style' },
{ label: 'Vintedois Diffusion (Replicate)', value: 'vintedois-diffusion' },
{ label: 'Titan Image Generator v1 Premium (Amazon)', value: 'titan-image-generator-v1_premium' },
{ label: 'Titan Image Generator v1 Standard (Amazon)', value: 'titan-image-generator-v1_standard' },
{ label: 'Leonardo Phoenix', value: 'Leonardo Phoenix' },
{ label: 'Leonardo Lightning XL', value: 'Leonardo Lightning XL' },
{ label: 'Leonardo Anime XL', value: 'Leonardo Anime XL' },
{ label: 'Leonardo Kino XL', value: 'Leonardo Kino XL' },
{ label: 'Leonardo Vision XL', value: 'Leonardo Vision XL' },
{ label: 'Leonardo Diffusion XL', value: 'Leonardo Diffusion XL' },
{ label: 'AlbedoBase XL', value: 'AlbedoBase XL' },
{ label: 'SDXL 0.9', value: 'SDXL 0.9' },
{ label: 'Image-01 (MiniMax)', value: 'image-01' },
{ label: 'SeeDream 3.0 T2I (ByteDance)', value: 'seedream-3-0-t2i-250415' },
];
const IMAGE_GENERATION_RESOLUTIONS = [
{ label: '256x256', value: '256x256' },
{ label: '512x512', value: '512x512' },
{ label: '1024x1024', value: '1024x1024' },
{ label: '1024x1792 (Portrait)', value: '1024x1792' },
{ label: '1792x1024 (Landscape)', value: '1792x1024' },
];
function normalizeImageGenerationResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, items: [], status: 'fail', raw: response };
}
return {
provider,
items: providerResult.items || [],
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
export const imageGenerationAction = createAction({
auth: edenAiAuth,
name: 'image_generation',
displayName: 'Image Generation',
description: 'Create images from text prompts using Eden AI. Supports multiple providers, models, and resolutions.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for image generation.',
required: true,
refreshers: [],
options: createStaticDropdown(IMAGE_GENERATION_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Prompt',
description: 'Description of the desired image(s). Be specific and descriptive for best results.',
required: true,
}),
resolution: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Resolution',
description: 'The image resolution (e.g., 512x512, 1024x1024).',
required: true,
refreshers: [],
options: createStaticDropdown(IMAGE_GENERATION_RESOLUTIONS),
defaultValue: '1024x1024',
}),
num_images: Property.Number({
displayName: 'Number of Images',
description: 'Number of images to generate (1-10).',
required: false,
defaultValue: 1,
}),
model: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Specific Model',
description: 'Specific model to use for image generation. Leave empty for provider default.',
required: false,
refreshers: [],
options: createStaticDropdown(IMAGE_GENERATION_MODELS),
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(IMAGE_GENERATION_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Prompt text is required'),
resolution: z.string().min(1, 'Resolution is required'),
num_images: z.number().int().min(1).max(10).nullish(),
model: z.string().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
resolution,
num_images,
model,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
resolution,
};
if (num_images && num_images !== 1) body['num_images'] = num_images;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/image/generation/',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeImageGenerationResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your prompt and parameters.');
}
if (err.message && typeof err.message === 'string') {
throw new Error(`Failed to generate image: ${err.message}`);
}
throw new Error(`Failed to generate image: ${JSON.stringify(err)}`);
}
},
});


@@ -0,0 +1,318 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const FINANCIAL_PARSER_PROVIDERS = [
{ label: 'Affinda', value: 'affinda' },
{ label: 'Amazon', value: 'amazon' },
{ label: 'Base64', value: 'base64' },
{ label: 'Google', value: 'google' },
{ label: 'Klippa', value: 'klippa' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'Mindee', value: 'mindee' },
{ label: 'Tabscanner', value: 'tabscanner' },
{ label: 'Veryfi', value: 'veryfi' },
{ label: 'EagleDoc', value: 'eagledoc' },
{ label: 'Extracta', value: 'extracta' },
{ label: 'OpenAI', value: 'openai' },
];
const FINANCIAL_PARSER_LANGUAGES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Afrikaans', value: 'af' },
{ label: 'Albanian', value: 'sq' },
{ label: 'Amharic', value: 'am' },
{ label: 'Arabic', value: 'ar' },
{ label: 'Armenian', value: 'hy' },
{ label: 'Azerbaijani', value: 'az' },
{ label: 'Basque', value: 'eu' },
{ label: 'Belarusian', value: 'be' },
{ label: 'Bengali', value: 'bn' },
{ label: 'Bosnian', value: 'bs' },
{ label: 'Bulgarian', value: 'bg' },
{ label: 'Burmese', value: 'my' },
{ label: 'Catalan', value: 'ca' },
{ label: 'Catalan (Spain)', value: 'ca-ES' },
{ label: 'Cebuano', value: 'ceb' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Chinese (China)', value: 'zh-CN' },
{ label: 'Chinese (Taiwan)', value: 'zh-TW' },
{ label: 'Corsican', value: 'co' },
{ label: 'Croatian', value: 'hr' },
{ label: 'Czech', value: 'cs' },
{ label: 'Danish', value: 'da' },
{ label: 'Danish (Denmark)', value: 'da-DK' },
{ label: 'Dutch', value: 'nl' },
{ label: 'Dutch (Netherlands)', value: 'nl-NL' },
{ label: 'English', value: 'en' },
{ label: 'English (United Kingdom)', value: 'en-GB' },
{ label: 'English (United States)', value: 'en-US' },
{ label: 'Esperanto', value: 'eo' },
{ label: 'Estonian', value: 'et' },
{ label: 'Finnish', value: 'fi' },
{ label: 'French', value: 'fr' },
{ label: 'French (Canada)', value: 'fr-CA' },
{ label: 'French (France)', value: 'fr-FR' },
{ label: 'French (Switzerland)', value: 'fr-CH' },
{ label: 'Galician', value: 'gl' },
{ label: 'Georgian', value: 'ka' },
{ label: 'German', value: 'de' },
{ label: 'German (Germany)', value: 'de-DE' },
{ label: 'German (Switzerland)', value: 'de-CH' },
{ label: 'Modern Greek', value: 'el' },
{ label: 'Gujarati', value: 'gu' },
{ label: 'Haitian', value: 'ht' },
{ label: 'Hausa', value: 'ha' },
{ label: 'Hawaiian', value: 'haw' },
{ label: 'Hebrew', value: 'he' },
{ label: 'Hindi', value: 'hi' },
{ label: 'Hmong', value: 'hmn' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Icelandic', value: 'is' },
{ label: 'Igbo', value: 'ig' },
{ label: 'Indonesian', value: 'id' },
{ label: 'Irish', value: 'ga' },
{ label: 'Italian', value: 'it' },
{ label: 'Italian (Italy)', value: 'it-IT' },
{ label: 'Italian (Switzerland)', value: 'it-CH' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Javanese', value: 'jv' },
{ label: 'Kannada', value: 'kn' },
{ label: 'Kazakh', value: 'kk' },
{ label: 'Khmer', value: 'km' },
{ label: 'Kinyarwanda', value: 'rw' },
{ label: 'Kirghiz', value: 'ky' },
{ label: 'Korean', value: 'ko' },
{ label: 'Kurdish', value: 'ku' },
{ label: 'Lao', value: 'lo' },
{ label: 'Latin', value: 'la' },
{ label: 'Latvian', value: 'lv' },
{ label: 'Lithuanian', value: 'lt' },
{ label: 'Luxembourgish', value: 'lb' },
{ label: 'Macedonian', value: 'mk' },
{ label: 'Malagasy', value: 'mg' },
{ label: 'Malay', value: 'ms' },
{ label: 'Malayalam', value: 'ml' },
{ label: 'Maltese', value: 'mt' },
{ label: 'Maori', value: 'mi' },
{ label: 'Marathi', value: 'mr' },
{ label: 'Mongolian', value: 'mn' },
{ label: 'Nepali', value: 'ne' },
{ label: 'Norwegian', value: 'no' },
{ label: 'Nyanja', value: 'ny' },
{ label: 'Oriya', value: 'or' },
{ label: 'Panjabi', value: 'pa' },
{ label: 'Persian', value: 'fa' },
{ label: 'Polish', value: 'pl' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Portuguese (Portugal)', value: 'pt-PT' },
{ label: 'Pushto', value: 'ps' },
{ label: 'Romanian', value: 'ro' },
{ label: 'Russian', value: 'ru' },
{ label: 'Samoan', value: 'sm' },
{ label: 'Scottish Gaelic', value: 'gd' },
{ label: 'Serbian', value: 'sr' },
{ label: 'Shona', value: 'sn' },
{ label: 'Sindhi', value: 'sd' },
{ label: 'Sinhala', value: 'si' },
{ label: 'Slovak', value: 'sk' },
{ label: 'Slovenian', value: 'sl' },
{ label: 'Somali', value: 'so' },
{ label: 'Southern Sotho', value: 'st' },
{ label: 'Spanish', value: 'es' },
{ label: 'Spanish (Spain)', value: 'es-ES' },
{ label: 'Sundanese', value: 'su' },
{ label: 'Swahili', value: 'sw' },
{ label: 'Swedish', value: 'sv' },
{ label: 'Tagalog', value: 'tl' },
{ label: 'Tajik', value: 'tg' },
{ label: 'Tamil', value: 'ta' },
{ label: 'Tatar', value: 'tt' },
{ label: 'Telugu', value: 'te' },
{ label: 'Thai', value: 'th' },
{ label: 'Turkish', value: 'tr' },
{ label: 'Turkmen', value: 'tk' },
{ label: 'Uighur', value: 'ug' },
{ label: 'Ukrainian', value: 'uk' },
{ label: 'Urdu', value: 'ur' },
{ label: 'Uzbek', value: 'uz' },
{ label: 'Vietnamese', value: 'vi' },
{ label: 'Welsh', value: 'cy' },
{ label: 'Western Frisian', value: 'fy' },
{ label: 'Xhosa', value: 'xh' },
{ label: 'Yiddish', value: 'yi' },
{ label: 'Yoruba', value: 'yo' },
{ label: 'Zulu', value: 'zu' },
];
const DOCUMENT_TYPES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Invoice', value: 'invoice' },
{ label: 'Receipt', value: 'receipt' },
];
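// Flattens the provider-keyed Eden AI response into { provider, extracted_data, status, ... }.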
function normalizeFinancialParserResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, extracted_data: [], status: 'fail', raw: response };
}
return {
provider,
extracted_data: providerResult.extracted_data || [],
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
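/**
 * Invoice Parser action.
 * Sends a POST to /ocr/financial_parser with at least { providers, file_url } and a
 * document_type (falling back to 'invoice' when auto-detect is selected). Language,
 * file_password, convert_to_pdf, fallback_providers and per-provider settings are
 * appended only when provided; the result is flattened by normalizeFinancialParserResponse.
 */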
export const invoiceParserAction = createAction({
auth: edenAiAuth,
name: 'invoice_parser',
displayName: 'Invoice Parser',
description: 'Extract structured invoice data from files using Eden AI. Supports multiple providers, languages, and document types.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for financial document parsing.',
required: true,
refreshers: [],
options: createStaticDropdown(FINANCIAL_PARSER_PROVIDERS),
}),
file_url: Property.ShortText({
displayName: 'File URL',
      description: 'Public URL to the financial document file (PDF, image, etc.).',
required: true,
}),
document_type: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Document Type',
description: 'The type of financial document to parse.',
required: false,
refreshers: [],
options: createStaticDropdown(DOCUMENT_TYPES),
defaultValue: 'invoice',
}),
language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Document Language',
description: 'The language of the document. Choose "Auto Detection" if unsure.',
required: false,
refreshers: [],
options: createStaticDropdown(FINANCIAL_PARSER_LANGUAGES),
defaultValue: 'auto-detect',
}),
model: Property.ShortText({
displayName: 'Specific Model',
description: 'Specific model to use (e.g., gpt-4o, gpt-4o-mini, gpt-4-turbo). Leave empty for default.',
required: false,
}),
file_password: Property.ShortText({
displayName: 'PDF Password',
description: 'Password for protected PDF files (if applicable).',
required: false,
}),
convert_to_pdf: Property.Checkbox({
displayName: 'Convert to PDF',
description: 'Convert DOC/DOCX files to PDF format for better compatibility.',
required: false,
defaultValue: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(FINANCIAL_PARSER_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
file_url: z.string().url('Valid file URL is required'),
document_type: z.string().nullish(),
language: z.string().nullish(),
model: z.string().nullish(),
file_password: z.string().nullish(),
convert_to_pdf: z.boolean().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
file_url,
document_type,
language,
model,
file_password,
convert_to_pdf,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
file_url,
};
    // The financial parser always needs a document_type; 'auto-detect' falls back to 'invoice'.
    if (document_type && document_type !== 'auto-detect') {
      body['document_type'] = document_type;
    } else {
      body['document_type'] = 'invoice';
    }
if (language && language !== 'auto-detect') body['language'] = language;
if (file_password) body['file_password'] = file_password;
if (convert_to_pdf) body['convert_to_pdf'] = convert_to_pdf;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/ocr/financial_parser',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeFinancialParserResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your file URL and parameters.');
}
      throw new Error(`Failed to extract financial document data: ${err.message || JSON.stringify(err)}`);
}
},
});


@@ -0,0 +1,258 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const MODERATION_PROVIDERS = [
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'OpenAI', value: 'openai' },
{ label: 'Google', value: 'google' },
];
const MODERATION_LANGUAGES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Afrikaans', value: 'af' },
{ label: 'Albanian', value: 'sq' },
{ label: 'Amharic', value: 'am' },
{ label: 'Arabic', value: 'ar' },
{ label: 'Armenian', value: 'hy' },
{ label: 'Assamese', value: 'as' },
{ label: 'Azerbaijani', value: 'az' },
{ label: 'Basque', value: 'eu' },
{ label: 'Belarusian', value: 'be' },
{ label: 'Bengali', value: 'bn' },
{ label: 'Bosnian', value: 'bs' },
{ label: 'Breton', value: 'br' },
{ label: 'Bulgarian', value: 'bg' },
{ label: 'Catalan', value: 'ca' },
{ label: 'Central Kurdish', value: 'ckb' },
{ label: 'Cherokee', value: 'chr' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Croatian', value: 'hr' },
{ label: 'Czech', value: 'cs' },
{ label: 'Danish', value: 'da' },
{ label: 'Dutch', value: 'nl' },
{ label: 'English', value: 'en' },
{ label: 'Estonian', value: 'et' },
{ label: 'Filipino', value: 'fil' },
{ label: 'Finnish', value: 'fi' },
{ label: 'French', value: 'fr' },
{ label: 'Fulah', value: 'ff' },
{ label: 'Galician', value: 'gl' },
{ label: 'Georgian', value: 'ka' },
{ label: 'German', value: 'de' },
{ label: 'Gujarati', value: 'gu' },
{ label: 'Hausa', value: 'ha' },
{ label: 'Hebrew', value: 'he' },
{ label: 'Hindi', value: 'hi' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Icelandic', value: 'is' },
{ label: 'Igbo', value: 'ig' },
{ label: 'Indonesian', value: 'id' },
{ label: 'Inuktitut', value: 'iu' },
{ label: 'Irish', value: 'ga' },
{ label: 'Italian', value: 'it' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Kannada', value: 'kn' },
{ label: 'Kazakh', value: 'kk' },
{ label: 'Khmer', value: 'km' },
{ label: 'Kinyarwanda', value: 'rw' },
{ label: 'Kirghiz', value: 'ky' },
{ label: 'Konkani', value: 'kok' },
{ label: 'Korean', value: 'ko' },
{ label: 'Lao', value: 'lo' },
{ label: 'Latvian', value: 'lv' },
{ label: 'Lithuanian', value: 'lt' },
{ label: 'Luxembourgish', value: 'lb' },
{ label: 'Macedonian', value: 'mk' },
{ label: 'Malay', value: 'ms' },
{ label: 'Malayalam', value: 'ml' },
{ label: 'Maltese', value: 'mt' },
{ label: 'Maori', value: 'mi' },
{ label: 'Marathi', value: 'mr' },
{ label: 'Modern Greek', value: 'el' },
{ label: 'Mongolian', value: 'mn' },
{ label: 'Nepali', value: 'ne' },
{ label: 'Norwegian Bokmål', value: 'nb' },
{ label: 'Norwegian Nynorsk', value: 'nn' },
{ label: 'Oriya', value: 'or' },
{ label: 'Panjabi', value: 'pa' },
{ label: 'Pedi', value: 'nso' },
{ label: 'Persian', value: 'fa' },
{ label: 'Polish', value: 'pl' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Pushto', value: 'ps' },
{ label: 'Quechua', value: 'qu' },
{ label: 'Romanian', value: 'ro' },
{ label: 'Russian', value: 'ru' },
{ label: 'Scottish Gaelic', value: 'gd' },
{ label: 'Serbian', value: 'sr' },
{ label: 'Sindhi', value: 'sd' },
{ label: 'Sinhala', value: 'si' },
{ label: 'Slovak', value: 'sk' },
{ label: 'Slovenian', value: 'sl' },
{ label: 'Southern Sotho', value: 'st' },
{ label: 'Spanish', value: 'es' },
{ label: 'Swahili', value: 'sw' },
{ label: 'Swedish', value: 'sv' },
{ label: 'Tajik', value: 'tg' },
{ label: 'Tamil', value: 'ta' },
{ label: 'Tatar', value: 'tt' },
{ label: 'Telugu', value: 'te' },
{ label: 'Thai', value: 'th' },
{ label: 'Tigrinya', value: 'ti' },
{ label: 'Tswana', value: 'tn' },
{ label: 'Turkish', value: 'tr' },
{ label: 'Turkmen', value: 'tk' },
{ label: 'Uighur', value: 'ug' },
{ label: 'Ukrainian', value: 'uk' },
{ label: 'Urdu', value: 'ur' },
{ label: 'Uzbek', value: 'uz' },
{ label: 'Vietnamese', value: 'vi' },
{ label: 'Welsh', value: 'cy' },
{ label: 'Wolof', value: 'wo' },
{ label: 'Xhosa', value: 'xh' },
{ label: 'Yoruba', value: 'yo' },
{ label: 'Zulu', value: 'zu' },
];
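// Flattens the provider-keyed moderation response and normalizes each detected item.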
function normalizeModerationResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, nsfw_likelihood: 0, items: [], status: 'fail', raw: response };
}
const items = (providerResult.items || []).map((item: any) => ({
label: item.label || '',
likelihood: item.likelihood || 0,
category: item.category || '',
subcategory: item.subcategory || '',
likelihood_score: item.likelihood_score || 0,
}));
return {
provider,
nsfw_likelihood: providerResult.nsfw_likelihood || 0,
nsfw_likelihood_score: providerResult.nsfw_likelihood_score || 0,
items,
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
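/**
 * Moderate Text action.
 * Sends a POST to /text/moderation with { providers, text } plus optional language,
 * per-provider settings (model) and fallback_providers. The result is reduced to
 * nsfw_likelihood, per-category items and status by normalizeModerationResponse.
 */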
export const moderateTextAction = createAction({
auth: edenAiAuth,
name: 'moderate_text',
displayName: 'Moderate Text',
description: 'Detect explicit or policy-violating text using Eden AI. Supports multiple providers, languages, and models.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for text moderation.',
required: true,
refreshers: [],
options: createStaticDropdown(MODERATION_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Text to Moderate',
description: 'The text to analyze for explicit or policy-violating content.',
required: true,
}),
language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Text Language',
description: 'The language of the input text. Choose "Auto Detection" if unsure.',
required: false,
refreshers: [],
options: createStaticDropdown(MODERATION_LANGUAGES),
defaultValue: 'auto-detect',
}),
model: Property.ShortText({
displayName: 'Specific Model',
description: 'Specific model to use (e.g., text-moderation-latest, text-moderation-stable). Leave empty for default.',
required: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(MODERATION_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Text is required'),
language: z.string().nullish(),
model: z.string().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
language,
model,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
};
if (language && language !== 'auto-detect') body['language'] = language;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/text/moderation',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeModerationResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your input text and parameters.');
}
      throw new Error(`Failed to moderate text: ${err.message || JSON.stringify(err)}`);
}
},
});


@@ -0,0 +1,370 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const OCR_PROVIDERS = [
{ label: 'Amazon', value: 'amazon' },
{ label: 'Clarifai', value: 'clarifai' },
{ label: 'Google', value: 'google' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'SentiSight', value: 'sentisight' },
{ label: 'API4AI', value: 'api4ai' },
{ label: 'Mistral', value: 'mistral' },
];
const OCR_LANGUAGES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Abaza', value: 'abq' },
{ label: 'Adyghe', value: 'ady' },
{ label: 'Afrikaans', value: 'af' },
{ label: 'Albanian', value: 'sq' },
{ label: 'Angika', value: 'anp' },
{ label: 'Arabic', value: 'ar' },
{ label: 'Arabic (Pseudo-Accents)', value: 'ar-XA' },
{ label: 'Assamese', value: 'as' },
{ label: 'Asturian', value: 'ast' },
{ label: 'Avaric', value: 'av' },
{ label: 'Awadhi', value: 'awa' },
{ label: 'Azerbaijani', value: 'az' },
{ label: 'Bagheli', value: 'bfy' },
{ label: 'Basque', value: 'eu' },
{ label: 'Belarusian', value: 'be' },
{ label: 'Belarusian (Cyrillic)', value: 'be-cyrl' },
{ label: 'Belarusian (Latin)', value: 'be-latn' },
{ label: 'Bengali', value: 'bn' },
{ label: 'Bhojpuri', value: 'bho' },
{ label: 'Bihari languages', value: 'bh' },
{ label: 'Bislama', value: 'bi' },
{ label: 'Bodo (India)', value: 'brx' },
{ label: 'Bosnian', value: 'bs' },
{ label: 'Braj', value: 'bra' },
{ label: 'Breton', value: 'br' },
{ label: 'Bulgarian', value: 'bg' },
{ label: 'Bundeli', value: 'bns' },
{ label: 'Buriat', value: 'bua' },
{ label: 'Camling', value: 'rab' },
{ label: 'Catalan', value: 'ca' },
{ label: 'Cebuano', value: 'ceb' },
{ label: 'Chamorro', value: 'ch' },
{ label: 'Chechen', value: 'ce' },
{ label: 'Chhattisgarhi', value: 'hne' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Chinese (China)', value: 'zh-CN' },
{ label: 'Chinese (Simplified)', value: 'zh-Hans' },
{ label: 'Chinese (Taiwan)', value: 'zh-TW' },
{ label: 'Chinese (Traditional)', value: 'zh-Hant' },
{ label: 'Cornish', value: 'kw' },
{ label: 'Corsican', value: 'co' },
{ label: 'Crimean Tatar', value: 'crh' },
{ label: 'Croatian', value: 'hr' },
{ label: 'Czech', value: 'cs' },
{ label: 'Danish', value: 'da' },
{ label: 'Danish (Denmark)', value: 'da-DK' },
{ label: 'Dargwa', value: 'dar' },
{ label: 'Dari', value: 'prs' },
{ label: 'Dhimal', value: 'dhi' },
{ label: 'Dogri', value: 'doi' },
{ label: 'Dutch', value: 'nl' },
{ label: 'Dutch (Netherlands)', value: 'nl-NL' },
{ label: 'English', value: 'en' },
{ label: 'English (United States)', value: 'en-US' },
{ label: 'Erzya', value: 'myv' },
{ label: 'Estonian', value: 'et' },
{ label: 'Faroese', value: 'fo' },
{ label: 'Fijian', value: 'fj' },
{ label: 'Filipino', value: 'fil' },
{ label: 'Finnish', value: 'fi' },
{ label: 'Finnish (Finland)', value: 'fi-FI' },
{ label: 'French', value: 'fr' },
{ label: 'French (France)', value: 'fr-FR' },
{ label: 'Friulian', value: 'fur' },
{ label: 'Gagauz', value: 'gag' },
{ label: 'Galician', value: 'gl' },
{ label: 'German', value: 'de' },
{ label: 'German (Germany)', value: 'de-DE' },
{ label: 'Gilbertese', value: 'gil' },
{ label: 'Goan Konkani', value: 'gom' },
{ label: 'Gondi', value: 'gon' },
{ label: 'Gurung', value: 'gvr' },
{ label: 'Haitian', value: 'ht' },
{ label: 'Halbi', value: 'hlb' },
{ label: 'Hani', value: 'hni' },
{ label: 'Haryanvi', value: 'bgc' },
{ label: 'Hawaiian', value: 'haw' },
{ label: 'Hindi', value: 'hi' },
{ label: 'Hmong Daw', value: 'mww' },
{ label: 'Ho', value: 'hoc' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Hungarian (Hungary)', value: 'hu-HU' },
{ label: 'Icelandic', value: 'is' },
{ label: 'Inari Sami', value: 'smn' },
{ label: 'Indonesian', value: 'id' },
{ label: 'Ingush', value: 'inh' },
{ label: 'Interlingua', value: 'ia' },
{ label: 'Inuktitut', value: 'iu' },
{ label: 'Irish', value: 'ga' },
{ label: 'Italian', value: 'it' },
{ label: 'Italian (Italy)', value: 'it-IT' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Japanese (Japan)', value: 'ja-JP' },
{ label: 'Jaunsari', value: 'jns' },
{ label: 'Javanese', value: 'jv' },
{ label: "K'iche'", value: 'quc' },
{ label: 'Kabardian', value: 'kbd' },
{ label: 'Kabuverdianu', value: 'kea' },
{ label: 'Kachin', value: 'kac' },
{ label: 'Kalaallisut', value: 'kl' },
{ label: 'Kangri', value: 'xnr' },
{ label: 'Kara-Kalpak', value: 'kaa' },
{ label: 'Kara-Kalpak (Cyrillic)', value: 'kaa-Cyrl' },
{ label: 'Karachay-Balkar', value: 'krc' },
{ label: 'Kashubian', value: 'csb' },
{ label: 'Kazakh', value: 'kk' },
{ label: 'Kazakh (Cyrillic)', value: 'kk-cyrl' },
{ label: 'Kazakh (Latin)', value: 'kk-latn' },
{ label: 'Khaling', value: 'klr' },
{ label: 'Khasi', value: 'kha' },
{ label: 'Kirghiz', value: 'ky' },
{ label: 'Korean', value: 'ko' },
{ label: 'Korean (South Korea)', value: 'ko-KR' },
{ label: 'Korku', value: 'kfq' },
{ label: 'Koryak', value: 'kpy' },
{ label: 'Kosraean', value: 'kos' },
{ label: 'Kumarbhag Paharia', value: 'kmj' },
{ label: 'Kumyk', value: 'kum' },
{ label: 'Kurdish', value: 'ku' },
{ label: 'Kurdish (Arabic)', value: 'ku-arab' },
{ label: 'Kurdish (Latin)', value: 'ku-latn' },
{ label: 'Kurukh', value: 'kru' },
{ label: 'Kölsch', value: 'ksh' },
{ label: 'Lak', value: 'lbe' },
{ label: 'Lakota', value: 'lkt' },
{ label: 'Latin', value: 'la' },
{ label: 'Latvian', value: 'lv' },
{ label: 'Lezghian', value: 'lez' },
{ label: 'Lithuanian', value: 'lt' },
{ label: 'Lower Sorbian', value: 'dsb' },
{ label: 'Lule Sami', value: 'smj' },
{ label: 'Luxembourgish', value: 'lb' },
{ label: 'Mahasu Pahari', value: 'bfz' },
{ label: 'Maithili', value: 'mai' },
{ label: 'Malay', value: 'ms' },
{ label: 'Maltese', value: 'mt' },
{ label: 'Manx', value: 'gv' },
{ label: 'Maori', value: 'mi' },
{ label: 'Marathi', value: 'mr' },
{ label: 'Marshallese', value: 'mh' },
{ label: 'Mongolian', value: 'mn' },
{ label: 'Montenegrin', value: 'cnr' },
{ label: 'Neapolitan', value: 'nap' },
{ label: 'Nepali', value: 'ne' },
{ label: 'Newari', value: 'new' },
{ label: 'Niuean', value: 'niu' },
{ label: 'Nogai', value: 'nog' },
{ label: 'Northern Sami', value: 'se' },
{ label: 'Norwegian', value: 'no' },
{ label: 'Occitan', value: 'oc' },
{ label: 'Old English', value: 'ang' },
{ label: 'Ossetian', value: 'os' },
{ label: 'Pali', value: 'pi' },
{ label: 'Panjabi', value: 'pa' },
{ label: 'Persian', value: 'fa' },
{ label: 'Polish', value: 'pl' },
{ label: 'Polish (Poland)', value: 'pl-PO' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Portuguese (Portugal)', value: 'pt-PT' },
{ label: 'Pushto', value: 'ps' },
{ label: 'Romanian', value: 'ro' },
{ label: 'Romansh', value: 'rm' },
{ label: 'Russian', value: 'ru' },
{ label: 'Russian (Russia)', value: 'ru-RU' },
{ label: 'Sadri', value: 'sck' },
{ label: 'Samoan', value: 'sm' },
{ label: 'Sanskrit', value: 'sa' },
{ label: 'Santali', value: 'sat' },
{ label: 'Scots', value: 'sco' },
{ label: 'Scottish Gaelic', value: 'gd' },
{ label: 'Serbian', value: 'sr' },
{ label: 'Serbian (Cyrillic, Montenegro)', value: 'sr-Cyrl-ME' },
{ label: 'Serbian (Latin)', value: 'sr-latn' },
{ label: 'Serbian (Latin, Montenegro)', value: 'sr-Latn-ME' },
{ label: 'Sherpa', value: 'xsr' },
{ label: 'Sirmauri', value: 'srx' },
{ label: 'Skolt Sami', value: 'sms' },
{ label: 'Slovak', value: 'sk' },
{ label: 'Slovenian', value: 'sl' },
{ label: 'Somali', value: 'so' },
{ label: 'Southern Sami', value: 'sma' },
{ label: 'Spanish', value: 'es' },
{ label: 'Spanish (Spain)', value: 'es-ES' },
{ label: 'Swahili', value: 'sw' },
{ label: 'Swedish', value: 'sv' },
{ label: 'Swedish (Sweden)', value: 'sv-SE' },
{ label: 'Tabassaran', value: 'tab' },
{ label: 'Tagalog', value: 'tl' },
{ label: 'Tajik', value: 'tg' },
{ label: 'Tatar', value: 'tt' },
{ label: 'Tetum', value: 'tet' },
{ label: 'Thangmi', value: 'thf' },
{ label: 'Tonga', value: 'to' },
{ label: 'Turkish', value: 'tr' },
{ label: 'Turkish (Türkiye)', value: 'tr-TR' },
{ label: 'Turkmen', value: 'tk' },
{ label: 'Tuvinian', value: 'tyv' },
{ label: 'Uighur', value: 'ug' },
{ label: 'Ukrainian', value: 'uk' },
{ label: 'Upper Sorbian', value: 'hsb' },
{ label: 'Urdu', value: 'ur' },
{ label: 'Uzbek', value: 'uz' },
{ label: 'Uzbek (Arabic)', value: 'uz-arab' },
{ label: 'Uzbek (Cyrillic)', value: 'uz-cyrl' },
{ label: 'Vietnamese', value: 'vi' },
{ label: 'Volapük', value: 'vo' },
{ label: 'Walser', value: 'wae' },
{ label: 'Welsh', value: 'cy' },
{ label: 'Western Frisian', value: 'fy' },
{ label: 'Yucateco', value: 'yua' },
{ label: 'Zhuang', value: 'za' },
{ label: 'Zulu', value: 'zu' },
{ label: 'Czechia', value: 'cz-CZ' },
{ label: 'Greece', value: 'gr-GR' },
];
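// Flattens the provider-keyed OCR response into { provider, text, bounding_boxes, status, ... }.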
function normalizeOcrResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, text: '', bounding_boxes: [], status: 'fail', raw: response };
}
return {
provider,
text: providerResult.text || '',
bounding_boxes: providerResult.bounding_boxes || [],
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
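/**
 * Extract Text in Image (OCR) action.
 * Sends a POST to /ocr/ocr with { providers, file_url } plus optional language,
 * file_password, attributes_as_list and fallback_providers. The flattened result
 * exposes the recognized text and bounding boxes for the chosen provider.
 */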
export const ocrImageAction = createAction({
name: 'ocr_image',
auth: edenAiAuth,
displayName: 'Extract Text in Image (OCR)',
description: 'Extract text from images (OCR) using Eden AI. Supports multiple providers, languages, and bounding box coordinates.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for text extraction.',
required: true,
refreshers: [],
options: createStaticDropdown(OCR_PROVIDERS),
}),
file_url: Property.ShortText({
displayName: 'File URL',
description: 'Public URL to the image or document file.',
required: true,
}),
language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Document Language',
description: 'The language of the text in the image. Choose "Auto Detection" if unsure.',
required: false,
refreshers: [],
options: createStaticDropdown(OCR_LANGUAGES),
defaultValue: 'auto-detect',
}),
file_password: Property.ShortText({
displayName: 'PDF Password',
description: 'Password for protected PDF files (if applicable).',
required: false,
}),
attributes_as_list: Property.Checkbox({
displayName: 'Attributes as List',
      description: 'Return extracted data with each attribute as a list instead of a list of objects.',
required: false,
defaultValue: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(OCR_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
file_url: z.string().url('Valid file URL is required'),
language: z.string().nullish(),
file_password: z.string().nullish(),
attributes_as_list: z.boolean().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
file_url,
language,
file_password,
attributes_as_list,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
file_url,
};
if (language && language !== 'auto-detect') body['language'] = language;
if (file_password) body['file_password'] = file_password;
if (attributes_as_list) body['attributes_as_list'] = attributes_as_list;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/ocr/ocr',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeOcrResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your file URL and parameters.');
}
      throw new Error(`Failed to extract text from image: ${err.message || JSON.stringify(err)}`);
}
},
});


@@ -0,0 +1,144 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const DATA_EXTRACTION_PROVIDERS = [
{ label: 'Amazon', value: 'amazon' },
{ label: 'Base64', value: 'base64' },
];
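// Flattens the provider-keyed data-extraction response into { provider, fields, confidence_score, ... }.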
function normalizeDataExtractionResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, fields: [], status: 'fail', raw: response };
}
return {
provider,
fields: providerResult.fields || [],
confidence_score: providerResult.confidence_score || 0,
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
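/**
 * Receipt Parser action.
 * Uses the generic /ocr/data_extraction endpoint with { providers, file_url } plus
 * optional file_password, convert_to_pdf, attributes_as_list and fallback_providers.
 * normalizeDataExtractionResponse returns the extracted fields and an overall
 * confidence_score.
 */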
export const receiptParserAction = createAction({
auth: edenAiAuth,
name: 'receipt_parser',
displayName: 'Receipt Parser',
description: 'Extract structured data from receipts and documents using Eden AI. Supports general data extraction with bounding boxes.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for data extraction.',
required: true,
refreshers: [],
options: createStaticDropdown(DATA_EXTRACTION_PROVIDERS),
}),
file_url: Property.ShortText({
displayName: 'File URL',
      description: 'Public URL to the document file (PDF, image, etc.).',
required: true,
}),
file_password: Property.ShortText({
displayName: 'PDF Password',
description: 'Password for protected PDF files (if applicable).',
required: false,
}),
convert_to_pdf: Property.Checkbox({
displayName: 'Convert to PDF',
description: 'Convert DOC/DOCX files to PDF format for better compatibility.',
required: false,
defaultValue: false,
}),
attributes_as_list: Property.Checkbox({
displayName: 'Attributes as List',
      description: 'Return extracted data with each attribute as a list instead of a list of objects.',
required: false,
defaultValue: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(DATA_EXTRACTION_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
file_url: z.string().url('Valid file URL is required'),
file_password: z.string().nullish(),
convert_to_pdf: z.boolean().nullish(),
attributes_as_list: z.boolean().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
file_url,
file_password,
convert_to_pdf,
attributes_as_list,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
file_url,
};
if (file_password) body['file_password'] = file_password;
if (convert_to_pdf) body['convert_to_pdf'] = convert_to_pdf;
if (attributes_as_list) body['attributes_as_list'] = attributes_as_list;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/ocr/data_extraction',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeDataExtractionResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your file URL and parameters.');
}
      throw new Error(`Failed to extract data from document: ${err.message || JSON.stringify(err)}`);
}
},
});


@@ -0,0 +1,277 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const SPELL_CHECK_PROVIDERS = [
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'OpenAI', value: 'openai' },
{ label: 'ProWritingAid', value: 'prowritingaid' },
{ label: 'Cohere', value: 'cohere' },
{ label: 'Sapling', value: 'sapling' },
{ label: 'XAI Grok', value: 'xai' },
];
const SPELL_CHECK_LANGUAGES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Afrikaans', value: 'af' },
{ label: 'Albanian', value: 'sq' },
{ label: 'Amharic', value: 'am' },
{ label: 'Arabic', value: 'ar' },
{ label: 'Armenian', value: 'hy' },
{ label: 'Azerbaijani', value: 'az' },
{ label: 'Basque', value: 'eu' },
{ label: 'Belarusian', value: 'be' },
{ label: 'Bengali', value: 'bn' },
{ label: 'Bosnian', value: 'bs' },
{ label: 'Bulgarian', value: 'bg' },
{ label: 'Burmese', value: 'my' },
{ label: 'Catalan', value: 'ca' },
{ label: 'Cebuano', value: 'ceb' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Chinese (China)', value: 'zh-CN' },
{ label: 'Chinese (Simplified)', value: 'zh-hans' },
{ label: 'Chinese (Taiwan)', value: 'zh-TW' },
{ label: 'Chinese (Traditional)', value: 'zh-hant' },
{ label: 'Corsican', value: 'co' },
{ label: 'Croatian', value: 'hr' },
{ label: 'Czech', value: 'cs' },
{ label: 'Danish', value: 'da' },
{ label: 'Dutch', value: 'nl' },
{ label: 'English', value: 'en' },
{ label: 'English (United Kingdom)', value: 'en-gb' },
{ label: 'Esperanto', value: 'eo' },
{ label: 'Estonian', value: 'et' },
{ label: 'Finnish', value: 'fi' },
{ label: 'French', value: 'fr' },
{ label: 'Galician', value: 'gl' },
{ label: 'Georgian', value: 'ka' },
{ label: 'German', value: 'de' },
{ label: 'Modern Greek', value: 'el' },
{ label: 'Gujarati', value: 'gu' },
{ label: 'Haitian', value: 'ht' },
{ label: 'Hausa', value: 'ha' },
{ label: 'Hawaiian', value: 'haw' },
{ label: 'Hebrew', value: 'he' },
{ label: 'Hindi', value: 'hi' },
{ label: 'Hmong', value: 'hmn' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Icelandic', value: 'is' },
{ label: 'Igbo', value: 'ig' },
{ label: 'Indonesian', value: 'id' },
{ label: 'Irish', value: 'ga' },
{ label: 'Italian', value: 'it' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Japanese (JP)', value: 'jp' },
{ label: 'Javanese', value: 'jv' },
{ label: 'Kannada', value: 'kn' },
{ label: 'Kazakh', value: 'kk' },
{ label: 'Khmer', value: 'km' },
{ label: 'Kinyarwanda', value: 'rw' },
{ label: 'Kirghiz', value: 'ky' },
{ label: 'Korean', value: 'ko' },
{ label: 'Kurdish', value: 'ku' },
{ label: 'Lao', value: 'lo' },
{ label: 'Latin', value: 'la' },
{ label: 'Latvian', value: 'lv' },
{ label: 'Lithuanian', value: 'lt' },
{ label: 'Luxembourgish', value: 'lb' },
{ label: 'Macedonian', value: 'mk' },
{ label: 'Malagasy', value: 'mg' },
{ label: 'Malay', value: 'ms' },
{ label: 'Malayalam', value: 'ml' },
{ label: 'Maltese', value: 'mt' },
{ label: 'Maori', value: 'mi' },
{ label: 'Marathi', value: 'mr' },
{ label: 'Mongolian', value: 'mn' },
{ label: 'Nepali', value: 'ne' },
{ label: 'Norwegian', value: 'no' },
{ label: 'Norwegian Bokmål', value: 'nb' },
{ label: 'Nyanja', value: 'ny' },
{ label: 'Oriya', value: 'or' },
{ label: 'Panjabi', value: 'pa' },
{ label: 'Persian', value: 'fa' },
{ label: 'Polish', value: 'pl' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Portuguese (Brazil)', value: 'pt-br' },
{ label: 'Portuguese (Portugal)', value: 'pt-pt' },
{ label: 'Pushto', value: 'ps' },
{ label: 'Romanian', value: 'ro' },
{ label: 'Russian', value: 'ru' },
{ label: 'Samoan', value: 'sm' },
{ label: 'Scottish Gaelic', value: 'gd' },
{ label: 'Serbian', value: 'sr' },
{ label: 'Shona', value: 'sn' },
{ label: 'Sindhi', value: 'sd' },
{ label: 'Sinhala', value: 'si' },
{ label: 'Slovak', value: 'sk' },
{ label: 'Slovenian', value: 'sl' },
{ label: 'Somali', value: 'so' },
{ label: 'Southern Sotho', value: 'st' },
{ label: 'Spanish', value: 'es' },
{ label: 'Sundanese', value: 'su' },
{ label: 'Swahili', value: 'sw' },
{ label: 'Swedish', value: 'sv' },
{ label: 'Tagalog', value: 'tl' },
{ label: 'Tajik', value: 'tg' },
{ label: 'Tamil', value: 'ta' },
{ label: 'Tatar', value: 'tt' },
{ label: 'Telugu', value: 'te' },
{ label: 'Thai', value: 'th' },
{ label: 'Turkish', value: 'tr' },
{ label: 'Turkmen', value: 'tk' },
{ label: 'Uighur', value: 'ug' },
{ label: 'Ukrainian', value: 'uk' },
{ label: 'Urdu', value: 'ur' },
{ label: 'Uzbek', value: 'uz' },
{ label: 'Vietnamese', value: 'vi' },
{ label: 'Welsh', value: 'cy' },
{ label: 'Western Frisian', value: 'fy' },
{ label: 'Xhosa', value: 'xh' },
{ label: 'Yiddish', value: 'yi' },
{ label: 'Yoruba', value: 'yo' },
{ label: 'Zulu', value: 'zu' },
];
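// Flattens the provider-keyed spell-check response, normalizing items and their suggestions.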
function normalizeSpellCheckResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, text: '', items: [], status: 'fail', raw: response };
}
const items = (providerResult.items || []).map((item: any) => ({
text: item.text || '',
type: item.type || '',
offset: item.offset || 0,
length: item.length || 0,
suggestions: (item.suggestions || []).map((suggestion: any) => ({
suggestion: suggestion.suggestion || '',
score: suggestion.score || 0,
})),
}));
return {
provider,
text: providerResult.text || '',
items,
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
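/**
 * Spell Check action.
 * Sends a POST to /text/spell_check with { providers, text } plus optional language,
 * per-provider settings (model) and fallback_providers. Each detected error is
 * normalized to { text, type, offset, length, suggestions[] }.
 */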
export const spellCheckAction = createAction({
name: 'spell_check',
auth: edenAiAuth,
displayName: 'Spell Check',
description: 'Identify and correct spelling or grammar errors using Eden AI. Supports multiple providers, languages, and models.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for spell checking and grammar correction.',
required: true,
refreshers: [],
options: createStaticDropdown(SPELL_CHECK_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Text to Check',
description: 'The text to check for spelling or grammar errors.',
required: true,
}),
language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Text Language',
description: 'The language of the input text. Choose "Auto Detection" if unsure.',
required: false,
refreshers: [],
options: createStaticDropdown(SPELL_CHECK_LANGUAGES),
defaultValue: 'auto-detect',
}),
model: Property.ShortText({
displayName: 'Specific Model',
description: 'Specific model to use (e.g., gpt-4o, gpt-4, grok-2-latest, command). Leave empty for default.',
required: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(SPELL_CHECK_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Text is required'),
language: z.string().nullish(),
model: z.string().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
language,
model,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
};
if (language && language !== 'auto-detect') body['language'] = language;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/text/spell_check',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeSpellCheckResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your input text and parameters.');
}
      throw new Error(`Failed to check spelling: ${err.message || JSON.stringify(err)}`);
}
},
});


@@ -0,0 +1,192 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const SUMMARIZE_PROVIDERS = [
{ label: 'OpenAI GPT-4', value: 'openai' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'Cohere', value: 'cohere' },
{ label: 'XAI Grok', value: 'xai' },
{ label: 'Anthropic Claude', value: 'anthropic' },
{ label: 'Aleph Alpha', value: 'alephalpha' },
{ label: 'Writesonic', value: 'writesonic' },
{ label: 'MeaningCloud', value: 'meaningcloud' },
{ label: 'Emvista', value: 'emvista' },
{ label: 'OneAI', value: 'oneai' },
];
const SUMMARIZE_LANGUAGES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Bulgarian', value: 'bg' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Chinese (Simplified)', value: 'zh-Hans' },
{ label: 'Czech', value: 'cs' },
{ label: 'Danish', value: 'da' },
{ label: 'Dutch', value: 'nl' },
{ label: 'English', value: 'en' },
{ label: 'Estonian', value: 'et' },
{ label: 'Finnish', value: 'fi' },
{ label: 'French', value: 'fr' },
{ label: 'German', value: 'de' },
{ label: 'Modern Greek', value: 'el' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Italian', value: 'it' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Korean', value: 'ko' },
{ label: 'Latvian', value: 'lv' },
{ label: 'Polish', value: 'pl' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Portuguese (Brazil)', value: 'pt-BR' },
{ label: 'Portuguese (Portugal)', value: 'pt-PT' },
{ label: 'Romanian', value: 'ro' },
{ label: 'Russian', value: 'ru' },
{ label: 'Slovak', value: 'sk' },
{ label: 'Slovenian', value: 'sl' },
{ label: 'Spanish', value: 'es' },
{ label: 'Swedish', value: 'sv' },
];
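// Flattens the provider-keyed summarize response into { provider, summary, status, ... }.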
function normalizeSummarizeResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, summary: '', status: 'fail', raw: response };
}
return {
provider,
summary: providerResult.result || '',
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
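/**
 * Summarize Text action.
 * Sends a POST to /text/summarize with { providers, text } plus optional
 * output_sentences, language, per-provider settings and fallback_providers. The
 * summary string is read from the provider's `result` field by normalizeSummarizeResponse.
 */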
export const summarizeTextAction = createAction({
name: 'summarize_text',
auth: edenAiAuth,
displayName: 'Summarize Text',
description: 'Extract key sentences and create summaries from long text passages using various AI providers.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for text summarization.',
required: true,
refreshers: [],
options: createStaticDropdown(SUMMARIZE_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Text to Summarize',
description: 'The text content you want to summarize. Can be articles, documents, or any long-form text.',
required: true,
}),
output_sentences: Property.Number({
displayName: 'Number of Summary Sentences',
description: 'How many sentences should the summary contain (1-20).',
required: false,
defaultValue: 3,
}),
language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Text Language',
description: 'The language of the input text. Choose "Auto Detection" if unsure.',
required: false,
refreshers: [],
options: createStaticDropdown(SUMMARIZE_LANGUAGES),
defaultValue: 'auto-detect',
}),
model: Property.ShortText({
displayName: 'Specific Model',
description: 'Specific model to use (e.g., gpt-4, gpt-4o, summarize-xlarge). Leave empty for default.',
required: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(SUMMARIZE_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Text to summarize is required'),
output_sentences: z.number().min(1).max(20).nullish(),
language: z.string().nullish(),
model: z.string().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
output_sentences,
language,
model,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
};
    if (output_sentences != null) body['output_sentences'] = output_sentences;
if (language && language !== 'auto-detect') body['language'] = language;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/text/summarize',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeSummarizeResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
throw new Error(`Eden AI API error: ${err.response.body.error}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your input text and parameters.');
}
// Fallback for object errors - properly stringify
if (err.message && typeof err.message === 'string') {
throw new Error(`Failed to summarize text: ${err.message}`);
}
throw new Error(`Failed to summarize text: ${JSON.stringify(err)}`);
}
},
});


@@ -0,0 +1,476 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const TEXT_TO_SPEECH_PROVIDERS = [
{ label: 'Amazon', value: 'amazon' },
{ label: 'Google', value: 'google' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'LovoAI', value: 'lovoai' },
{ label: 'ElevenLabs', value: 'elevenlabs' },
{ label: 'OpenAI', value: 'openai' },
{ label: 'Deepgram', value: 'deepgram' },
];
const TEXT_TO_SPEECH_LANGUAGES = [
{ label: 'Afrikaans', value: 'af' },
{ label: 'Albanian', value: 'sq' },
{ label: 'Amharic', value: 'am' },
{ label: 'Arabic', value: 'ar' },
{ label: 'Armenian', value: 'hy' },
{ label: 'Azerbaijani', value: 'az' },
{ label: 'Basque', value: 'eu' },
{ label: 'Belarusian', value: 'be' },
{ label: 'Bengali', value: 'bn' },
{ label: 'Bosnian', value: 'bs' },
{ label: 'Bulgarian', value: 'bg' },
{ label: 'Burmese', value: 'my' },
{ label: 'Catalan', value: 'ca' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Croatian', value: 'hr' },
{ label: 'Czech', value: 'cs' },
{ label: 'Danish', value: 'da' },
{ label: 'Dutch', value: 'nl' },
{ label: 'English', value: 'en' },
{ label: 'Estonian', value: 'et' },
{ label: 'Filipino', value: 'fil' },
{ label: 'Finnish', value: 'fi' },
{ label: 'French', value: 'fr' },
{ label: 'Galician', value: 'gl' },
{ label: 'Georgian', value: 'ka' },
{ label: 'German', value: 'de' },
{ label: 'Modern Greek', value: 'el' },
{ label: 'Gujarati', value: 'gu' },
{ label: 'Hebrew', value: 'he' },
{ label: 'Hindi', value: 'hi' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Icelandic', value: 'is' },
{ label: 'Indonesian', value: 'id' },
{ label: 'Irish', value: 'ga' },
{ label: 'Italian', value: 'it' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Javanese', value: 'jv' },
{ label: 'Kannada', value: 'kn' },
{ label: 'Kazakh', value: 'kk' },
{ label: 'Khmer', value: 'km' },
{ label: 'Korean', value: 'ko' },
{ label: 'Lao', value: 'lo' },
{ label: 'Latvian', value: 'lv' },
{ label: 'Lithuanian', value: 'lt' },
{ label: 'Macedonian', value: 'mk' },
{ label: 'Malay', value: 'ms' },
{ label: 'Malayalam', value: 'ml' },
{ label: 'Maltese', value: 'mt' },
{ label: 'Mandarin Chinese', value: 'cmn' },
{ label: 'Maori', value: 'mi' },
{ label: 'Marathi', value: 'mr' },
{ label: 'Mongolian', value: 'mn' },
{ label: 'Nepali', value: 'ne' },
{ label: 'Norwegian', value: 'no' },
{ label: 'Norwegian Bokmål', value: 'nb' },
{ label: 'Panjabi', value: 'pa' },
{ label: 'Persian', value: 'fa' },
{ label: 'Polish', value: 'pl' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Pushto', value: 'ps' },
{ label: 'Romanian', value: 'ro' },
{ label: 'Russian', value: 'ru' },
{ label: 'Serbian', value: 'sr' },
{ label: 'Sinhala', value: 'si' },
{ label: 'Slovak', value: 'sk' },
{ label: 'Slovenian', value: 'sl' },
{ label: 'Somali', value: 'so' },
{ label: 'Spanish', value: 'es' },
{ label: 'Standard Arabic', value: 'arb' },
{ label: 'Sundanese', value: 'su' },
{ label: 'Swahili', value: 'sw' },
{ label: 'Swedish', value: 'sv' },
{ label: 'Tagalog', value: 'tl' },
{ label: 'Tamil', value: 'ta' },
{ label: 'Telugu', value: 'te' },
{ label: 'Thai', value: 'th' },
{ label: 'Turkish', value: 'tr' },
{ label: 'Ukrainian', value: 'uk' },
{ label: 'Urdu', value: 'ur' },
{ label: 'Uzbek', value: 'uz' },
{ label: 'Vietnamese', value: 'vi' },
{ label: 'Welsh', value: 'cy' },
{ label: 'Wu Chinese', value: 'wuu' },
{ label: 'Xhosa', value: 'xh' },
{ label: 'Yue Chinese', value: 'yue' },
{ label: 'Zulu', value: 'zu' },
{ label: 'Afrikaans (South Africa)', value: 'af-ZA' },
{ label: 'Albanian (Albania)', value: 'sq-AL' },
{ label: 'Amharic (Ethiopia)', value: 'am-ET' },
{ label: 'Arabic (Algeria)', value: 'ar-DZ' },
{ label: 'Arabic (Bahrain)', value: 'ar-BH' },
{ label: 'Arabic (Egypt)', value: 'ar-EG' },
{ label: 'Arabic (Iraq)', value: 'ar-IQ' },
{ label: 'Arabic (Jordan)', value: 'ar-JO' },
{ label: 'Arabic (Kuwait)', value: 'ar-KW' },
{ label: 'Arabic (Lebanon)', value: 'ar-LB' },
{ label: 'Arabic (Libya)', value: 'ar-LY' },
{ label: 'Arabic (Morocco)', value: 'ar-MA' },
{ label: 'Arabic (Oman)', value: 'ar-OM' },
{ label: 'Arabic (Pseudo-Accents)', value: 'ar-XA' },
{ label: 'Arabic (Qatar)', value: 'ar-QA' },
{ label: 'Arabic (Saudi Arabia)', value: 'ar-SA' },
{ label: 'Arabic (Syria)', value: 'ar-SY' },
{ label: 'Arabic (Tunisia)', value: 'ar-TN' },
{ label: 'Arabic (UAE)', value: 'ar-AE' },
{ label: 'Arabic (Yemen)', value: 'ar-YE' },
{ label: 'Armenian (Armenia)', value: 'hy-AM' },
{ label: 'Azerbaijani (Azerbaijan)', value: 'az-AZ' },
{ label: 'Bangla (Bangladesh)', value: 'bn-BD' },
{ label: 'Bangla (India)', value: 'bn-IN' },
{ label: 'Basque (Spain)', value: 'eu-ES' },
{ label: 'Bosnian (Bosnia & Herzegovina)', value: 'bs-BA' },
{ label: 'Bulgarian (Bulgaria)', value: 'bg-BG' },
{ label: 'Burmese (Myanmar)', value: 'my-MM' },
{ label: 'Cantonese (China)', value: 'yue-CN' },
{ label: 'Cantonese (Hong Kong)', value: 'yue-HK' },
{ label: 'Catalan (Spain)', value: 'ca-ES' },
{ label: 'Chinese (China)', value: 'zh-CN' },
{ label: 'Chinese (China - Henan)', value: 'zh-CN-henan' },
{ label: 'Chinese (China - Shandong)', value: 'zh-CN-shandong' },
{ label: 'Chinese (China - Sichuan)', value: 'zh-CN-sichuan' },
{ label: 'Chinese (Hong Kong)', value: 'zh-HK' },
{ label: 'Chinese (Taiwan)', value: 'zh-TW' },
{ label: 'Croatian (Croatia)', value: 'hr-HR' },
{ label: 'Czech (Czechia)', value: 'cs-CZ' },
{ label: 'Danish (Denmark)', value: 'da-DK' },
{ label: 'Dutch (Belgium)', value: 'nl-BE' },
{ label: 'Dutch (Netherlands)', value: 'nl-NL' },
{ label: 'English (Australia)', value: 'en-AU' },
{ label: 'English (Canada)', value: 'en-CA' },
{ label: 'English (Curaçao)', value: 'en-AN' },
{ label: 'English (Hong Kong)', value: 'en-HK' },
{ label: 'English (India)', value: 'en-IN' },
{ label: 'English (Ireland)', value: 'en-IE' },
{ label: 'English (Kenya)', value: 'en-KE' },
{ label: 'English (New Zealand)', value: 'en-NZ' },
{ label: 'English (Nigeria)', value: 'en-NG' },
{ label: 'English (Philippines)', value: 'en-PH' },
{ label: 'English (Singapore)', value: 'en-SG' },
{ label: 'English (South Africa)', value: 'en-ZA' },
{ label: 'English (Tanzania)', value: 'en-TZ' },
{ label: 'English (United Kingdom)', value: 'en-GB' },
{ label: 'English (United States)', value: 'en-US' },
{ label: 'Estonian (Estonia)', value: 'et-EE' },
{ label: 'Filipino (Philippines)', value: 'fil-PH' },
{ label: 'Finnish (Finland)', value: 'fi-FI' },
{ label: 'French (Belgium)', value: 'fr-BE' },
{ label: 'French (Canada)', value: 'fr-CA' },
{ label: 'French (France)', value: 'fr-FR' },
{ label: 'French (Switzerland)', value: 'fr-CH' },
{ label: 'Galician (Spain)', value: 'gl-ES' },
{ label: 'Georgian (Georgia)', value: 'ka-GE' },
{ label: 'German (Austria)', value: 'de-AT' },
{ label: 'German (Germany)', value: 'de-DE' },
{ label: 'German (Switzerland)', value: 'de-CH' },
{ label: 'Greek (Greece)', value: 'el-GR' },
{ label: 'Gujarati (India)', value: 'gu-IN' },
{ label: 'Hebrew (Israel)', value: 'he-IL' },
{ label: 'Hindi (India)', value: 'hi-IN' },
{ label: 'Hungarian (Hungary)', value: 'hu-HU' },
{ label: 'Icelandic (Iceland)', value: 'is-IS' },
{ label: 'Indonesian (Indonesia)', value: 'id-ID' },
{ label: 'Irish (Ireland)', value: 'ga-IE' },
{ label: 'Italian (Italy)', value: 'it-IT' },
{ label: 'Japanese (Japan)', value: 'ja-JP' },
{ label: 'Javanese (Indonesia)', value: 'jv-ID' },
{ label: 'Kannada (India)', value: 'kn-IN' },
{ label: 'Kazakh (Kazakhstan)', value: 'kk-KZ' },
{ label: 'Khmer (Cambodia)', value: 'km-KH' },
{ label: 'Korean (South Korea)', value: 'ko-KR' },
{ label: 'Lao (Laos)', value: 'lo-LA' },
{ label: 'Latvian (Latvia)', value: 'lv-LV' },
{ label: 'Lithuanian (Lithuania)', value: 'lt-LT' },
{ label: 'Macedonian (North Macedonia)', value: 'mk-MK' },
{ label: 'Malay (Malaysia)', value: 'ms-MY' },
{ label: 'Malayalam (India)', value: 'ml-IN' },
{ label: 'Maltese (Malta)', value: 'mt-MT' },
{ label: 'Mandarin Chinese (China)', value: 'cmn-CN' },
{ label: 'Mandarin Chinese (Taiwan)', value: 'cmn-TW' },
{ label: 'Marathi (India)', value: 'mr-IN' },
{ label: 'Mongolian (Mongolia)', value: 'mn-MN' },
{ label: 'Nepali (Nepal)', value: 'ne-NP' },
{ label: 'Norwegian Bokmål (Norway)', value: 'nb-NO' },
{ label: 'Pashto (Afghanistan)', value: 'ps-AF' },
{ label: 'Persian (Iran)', value: 'fa-IR' },
{ label: 'Polish (Poland)', value: 'pl-PL' },
{ label: 'Portuguese (Brazil)', value: 'pt-BR' },
{ label: 'Portuguese (Portugal)', value: 'pt-PT' },
{ label: 'Punjabi (India)', value: 'pa-IN' },
{ label: 'Romanian (Romania)', value: 'ro-RO' },
{ label: 'Russian (Russia)', value: 'ru-RU' },
{ label: 'Serbian (Serbia)', value: 'sr-RS' },
{ label: 'Sinhala (Sri Lanka)', value: 'si-LK' },
{ label: 'Slovak (Slovakia)', value: 'sk-SK' },
{ label: 'Slovenian (Slovenia)', value: 'sl-SI' },
{ label: 'Somali (Somalia)', value: 'so-SO' },
{ label: 'Spanish (Argentina)', value: 'es-AR' },
{ label: 'Spanish (Bolivia)', value: 'es-BO' },
{ label: 'Spanish (Chile)', value: 'es-CL' },
{ label: 'Spanish (Colombia)', value: 'es-CO' },
{ label: 'Spanish (Costa Rica)', value: 'es-CR' },
{ label: 'Spanish (Cuba)', value: 'es-CU' },
{ label: 'Spanish (Dominican Republic)', value: 'es-DO' },
{ label: 'Spanish (Ecuador)', value: 'es-EC' },
{ label: 'Spanish (El Salvador)', value: 'es-SV' },
{ label: 'Spanish (Equatorial Guinea)', value: 'es-GQ' },
{ label: 'Spanish (Guatemala)', value: 'es-GT' },
{ label: 'Spanish (Honduras)', value: 'es-HN' },
{ label: 'Spanish (Mexico)', value: 'es-MX' },
{ label: 'Spanish (Nicaragua)', value: 'es-NI' },
{ label: 'Spanish (Panama)', value: 'es-PA' },
{ label: 'Spanish (Paraguay)', value: 'es-PY' },
{ label: 'Spanish (Peru)', value: 'es-PE' },
{ label: 'Spanish (Puerto Rico)', value: 'es-PR' },
{ label: 'Spanish (Spain)', value: 'es-ES' },
{ label: 'Spanish (United States)', value: 'es-US' },
{ label: 'Spanish (Uruguay)', value: 'es-UY' },
{ label: 'Spanish (Venezuela)', value: 'es-VE' },
{ label: 'Sundanese (Indonesia)', value: 'su-ID' },
{ label: 'Swahili (Kenya)', value: 'sw-KE' },
{ label: 'Swahili (Tanzania)', value: 'sw-TZ' },
{ label: 'Swedish (Sweden)', value: 'sv-SE' },
{ label: 'Tamil (India)', value: 'ta-IN' },
{ label: 'Tamil (Malaysia)', value: 'ta-MY' },
{ label: 'Tamil (Singapore)', value: 'ta-SG' },
{ label: 'Tamil (Sri Lanka)', value: 'ta-LK' },
{ label: 'Telugu (India)', value: 'te-IN' },
{ label: 'Thai (Thailand)', value: 'th-TH' },
{ label: 'Turkish (Türkiye)', value: 'tr-TR' },
{ label: 'Ukrainian (Ukraine)', value: 'uk-UA' },
{ label: 'Urdu (India)', value: 'ur-IN' },
{ label: 'Urdu (Pakistan)', value: 'ur-PK' },
{ label: 'Uzbek (United Kingdom)', value: 'uz-UK' },
{ label: 'Uzbek (Uzbekistan)', value: 'uz-UZ' },
{ label: 'Vietnamese (Vietnam)', value: 'vi-VN' },
{ label: 'Welsh (United Kingdom)', value: 'cy-GB' },
{ label: 'Wu Chinese (China)', value: 'wuu-CN' },
{ label: 'Xhosa (South Africa)', value: 'xh-ZA' },
{ label: 'Zulu (South Africa)', value: 'zu-ZA' },
];
const VOICE_OPTIONS = [
{ label: 'Male', value: 'MALE' },
{ label: 'Female', value: 'FEMALE' },
];
const AUDIO_FORMATS = [
{ label: 'MP3', value: 'mp3' },
{ label: 'WAV', value: 'wav' },
{ label: 'AAC', value: 'aac' },
{ label: 'OGG', value: 'ogg' },
{ label: 'FLAC', value: 'flac' },
];
function normalizeTextToSpeechResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, audio: '', audio_resource_url: '', voice_type: 0, status: 'fail', raw: response };
}
return {
provider,
audio: providerResult.audio || '',
audio_resource_url: providerResult.audio_resource_url || '',
voice_type: providerResult.voice_type || 0,
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
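// Illustrative sketch of the raw Eden AI payload this helper normalizes; the shape is
// assumed from the fields read above, with "google" as a hypothetical provider key:
// {
//   "google": {
//     "audio": "<base64-encoded audio>",
//     "audio_resource_url": "https://example.com/audio.mp3",
//     "voice_type": 1,
//     "status": "success"
//   }
// }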
export const textToSpeechAction = createAction({
name: 'text_to_speech',
auth: edenAiAuth,
displayName: 'Generate Audio From Text',
description: 'Convert text to spoken audio using Eden AI. Supports multiple providers, languages, and voice customization.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for text-to-speech synthesis.',
required: true,
refreshers: [],
options: createStaticDropdown(TEXT_TO_SPEECH_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Text',
description: 'The text to convert to speech.',
required: true,
}),
language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Language',
description: 'The language and locale for the speech synthesis (defaults to en-US if not specified).',
required: false,
refreshers: [],
options: createStaticDropdown(TEXT_TO_SPEECH_LANGUAGES),
}),
option: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Voice Gender',
description: 'Choose the voice gender for speech synthesis (defaults to Female if not specified).',
required: false,
refreshers: [],
options: createStaticDropdown(VOICE_OPTIONS),
}),
rate: Property.Number({
displayName: 'Speaking Rate',
description: 'Adjust speaking rate (-100 to 100, where 0 is normal speed).',
required: false,
defaultValue: 0,
}),
pitch: Property.Number({
displayName: 'Voice Pitch',
description: 'Adjust voice pitch (-100 to 100, where 0 is normal pitch).',
required: false,
defaultValue: 0,
}),
volume: Property.Number({
displayName: 'Audio Volume',
description: 'Adjust audio volume (-100 to 100, where 0 is normal volume).',
required: false,
defaultValue: 0,
}),
audio_format: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Audio Format',
description: 'The audio format for the generated speech (default: MP3).',
required: false,
refreshers: [],
options: createStaticDropdown(AUDIO_FORMATS),
}),
sampling_rate: Property.Number({
displayName: 'Sampling Rate',
description: 'Audio sampling rate in Hz (0-200000, 0 for provider default).',
required: false,
defaultValue: 0,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(TEXT_TO_SPEECH_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Text is required'),
language: z.string().nullish(),
option: z.string().nullish(),
rate: z.number().int().min(-100).max(100).nullish(),
pitch: z.number().int().min(-100).max(100).nullish(),
volume: z.number().int().min(-100).max(100).nullish(),
audio_format: z.string().nullish(),
sampling_rate: z.number().int().min(0).max(200000).nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
language,
option,
rate,
pitch,
volume,
audio_format,
sampling_rate,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
};
// Apply the documented defaults when the optional props are left unset (en-US, female voice).
const defaultLanguage = language || 'en-US';
const defaultOption = option || 'FEMALE';
body['language'] = defaultLanguage;
body['option'] = defaultOption;
// Only forward values the user actually changed; 0 (or empty) means the provider default.
if (rate !== undefined && rate !== 0) body['rate'] = rate;
if (pitch !== undefined && pitch !== 0) body['pitch'] = pitch;
if (volume !== undefined && volume !== 0) body['volume'] = volume;
if (audio_format) body['audio_format'] = audio_format;
if (sampling_rate !== undefined && sampling_rate !== 0) body['sampling_rate'] = sampling_rate;
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/audio/text_to_speech/',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeTextToSpeechResponse(provider, response);
} catch (err: any) {
let errorMessage = 'Unknown error occurred';
if (err.response?.body?.error) {
const errorBody = err.response.body.error;
if (errorBody.message) {
if (typeof errorBody.message === 'string') {
errorMessage = errorBody.message;
} else if (errorBody.message.non_field_errors) {
errorMessage = errorBody.message.non_field_errors.join(', ');
} else if (typeof errorBody.message === 'object') {
const fieldErrors = Object.entries(errorBody.message)
.map(([field, errors]) => `${field}: ${Array.isArray(errors) ? errors.join(', ') : errors}`)
.join('; ');
errorMessage = fieldErrors;
}
} else if (typeof errorBody === 'string') {
errorMessage = errorBody;
}
throw new Error(`Eden AI API error: ${errorMessage}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your text and parameters.');
}
if (err.message && typeof err.message === 'string') {
throw new Error(`Failed to generate audio: ${err.message}`);
}
throw new Error(`Failed to generate audio: ${JSON.stringify(err)}`);
}
},
});

View File

@@ -0,0 +1,313 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { HttpMethod, propsValidation } from '@activepieces/pieces-common';
import { edenAiApiCall } from '../common/client';
import { createStaticDropdown } from '../common/providers';
import { z } from 'zod';
import { edenAiAuth } from '../..';
const TRANSLATION_PROVIDERS = [
{ label: 'Amazon', value: 'amazon' },
{ label: 'Google', value: 'google' },
{ label: 'Microsoft', value: 'microsoft' },
{ label: 'DeepL', value: 'deepl' },
{ label: 'ModernMT', value: 'modernmt' },
{ label: 'OpenAI', value: 'openai' },
{ label: 'XAI Grok', value: 'xai' },
];
const TRANSLATION_LANGUAGES = [
{ label: 'Auto Detection', value: 'auto-detect' },
{ label: 'Afrikaans', value: 'af' },
{ label: 'Albanian', value: 'sq' },
{ label: 'Amharic', value: 'am' },
{ label: 'Arabic', value: 'ar' },
{ label: 'Armenian', value: 'hy' },
{ label: 'Assamese', value: 'as' },
{ label: 'Azerbaijani', value: 'az' },
{ label: 'Bashkir', value: 'ba' },
{ label: 'Basque', value: 'eu' },
{ label: 'Belarusian', value: 'be' },
{ label: 'Bengali', value: 'bn' },
{ label: 'Bosnian', value: 'bs' },
{ label: 'Bulgarian', value: 'bg' },
{ label: 'Burmese', value: 'my' },
{ label: 'Catalan', value: 'ca' },
{ label: 'Cebuano', value: 'ceb' },
{ label: 'Chinese', value: 'zh' },
{ label: 'Chinese (China)', value: 'zh-CN' },
{ label: 'Chinese (Simplified)', value: 'zh-Hans' },
{ label: 'Chinese (Taiwan)', value: 'zh-TW' },
{ label: 'Chinese (Traditional)', value: 'zh-Hant' },
{ label: 'Corsican', value: 'co' },
{ label: 'Croatian', value: 'hr' },
{ label: 'Czech', value: 'cs' },
{ label: 'Danish', value: 'da' },
{ label: 'Dari', value: 'prs' },
{ label: 'Dhivehi', value: 'dv' },
{ label: 'Dutch', value: 'nl' },
{ label: 'English', value: 'en' },
{ label: 'English (United Kingdom)', value: 'en-GB' },
{ label: 'English (United States)', value: 'en-US' },
{ label: 'Esperanto', value: 'eo' },
{ label: 'Estonian', value: 'et' },
{ label: 'Faroese', value: 'fo' },
{ label: 'Fijian', value: 'fj' },
{ label: 'Filipino', value: 'fil' },
{ label: 'Finnish', value: 'fi' },
{ label: 'French', value: 'fr' },
{ label: 'French (Canada)', value: 'fr-CA' },
{ label: 'Galician', value: 'gl' },
{ label: 'Georgian', value: 'ka' },
{ label: 'German', value: 'de' },
{ label: 'Modern Greek', value: 'el' },
{ label: 'Gujarati', value: 'gu' },
{ label: 'Haitian', value: 'ht' },
{ label: 'Hausa', value: 'ha' },
{ label: 'Hawaiian', value: 'haw' },
{ label: 'Hebrew', value: 'he' },
{ label: 'Hindi', value: 'hi' },
{ label: 'Hmong', value: 'hmn' },
{ label: 'Hmong Daw', value: 'mww' },
{ label: 'Hungarian', value: 'hu' },
{ label: 'Icelandic', value: 'is' },
{ label: 'Igbo', value: 'ig' },
{ label: 'Indonesian', value: 'id' },
{ label: 'Inuinnaqtun', value: 'ikt' },
{ label: 'Inuktitut', value: 'iu' },
{ label: 'Inuktitut (Latin)', value: 'iu-Latn' },
{ label: 'Irish', value: 'ga' },
{ label: 'Italian', value: 'it' },
{ label: 'Japanese', value: 'ja' },
{ label: 'Javanese', value: 'jv' },
{ label: 'Kannada', value: 'kn' },
{ label: 'Kazakh', value: 'kk' },
{ label: 'Khmer', value: 'km' },
{ label: 'Kinyarwanda', value: 'rw' },
{ label: 'Kirghiz', value: 'ky' },
{ label: 'Klingon', value: 'tlh' },
{ label: 'Klingon (Klingon)', value: 'tlh-Piqd' },
{ label: 'Klingon (Latin)', value: 'tlh-Latn' },
{ label: 'Korean', value: 'ko' },
{ label: 'Kurdish', value: 'ku' },
{ label: 'Northern Kurdish', value: 'kmr' },
{ label: 'Lao', value: 'lo' },
{ label: 'Latin', value: 'la' },
{ label: 'Latvian', value: 'lv' },
{ label: 'Literary Chinese', value: 'lzh' },
{ label: 'Lithuanian', value: 'lt' },
{ label: 'Luxembourgish', value: 'lb' },
{ label: 'Macedonian', value: 'mk' },
{ label: 'Malagasy', value: 'mg' },
{ label: 'Malay', value: 'ms' },
{ label: 'Malayalam', value: 'ml' },
{ label: 'Maltese', value: 'mt' },
{ label: 'Maori', value: 'mi' },
{ label: 'Marathi', value: 'mr' },
{ label: 'Mongolian', value: 'mn' },
{ label: 'Mongolian (Cyrillic)', value: 'mn-Cyrl' },
{ label: 'Mongolian (Mongolian)', value: 'mn-Mong' },
{ label: 'Nepali', value: 'ne' },
{ label: 'Norwegian', value: 'no' },
{ label: 'Norwegian Bokmål', value: 'nb' },
{ label: 'Norwegian Nynorsk', value: 'nn' },
{ label: 'Nyanja', value: 'ny' },
{ label: 'Oriya', value: 'or' },
{ label: 'Panjabi', value: 'pa' },
{ label: 'Persian', value: 'fa' },
{ label: 'Persian (Afghanistan)', value: 'fa-AF' },
{ label: 'Polish', value: 'pl' },
{ label: 'Portuguese', value: 'pt' },
{ label: 'Portuguese (Brazil)', value: 'pt-BR' },
{ label: 'Portuguese (Portugal)', value: 'pt-PT' },
{ label: 'Pushto', value: 'ps' },
{ label: 'Querétaro Otomi', value: 'otq' },
{ label: 'Romanian', value: 'ro' },
{ label: 'Russian', value: 'ru' },
{ label: 'Samoan', value: 'sm' },
{ label: 'Scottish Gaelic', value: 'gd' },
{ label: 'Serbian', value: 'sr' },
{ label: 'Serbian (Cyrillic)', value: 'sr-Cyrl' },
{ label: 'Serbian (Latin)', value: 'sr-Latn' },
{ label: 'Shona', value: 'sn' },
{ label: 'Sindhi', value: 'sd' },
{ label: 'Sinhala', value: 'si' },
{ label: 'Slovak', value: 'sk' },
{ label: 'Slovenian', value: 'sl' },
{ label: 'Somali', value: 'so' },
{ label: 'Southern Sotho', value: 'st' },
{ label: 'Spanish', value: 'es' },
{ label: 'Spanish (Latin America)', value: 'es-419' },
{ label: 'Spanish (Mexico)', value: 'es-MX' },
{ label: 'Spanish (Spain)', value: 'es-ES' },
{ label: 'Sundanese', value: 'su' },
{ label: 'Swahili', value: 'sw' },
{ label: 'Swedish', value: 'sv' },
{ label: 'Tagalog', value: 'tl' },
{ label: 'Tahitian', value: 'ty' },
{ label: 'Tajik', value: 'tg' },
{ label: 'Tamil', value: 'ta' },
{ label: 'Tatar', value: 'tt' },
{ label: 'Telugu', value: 'te' },
{ label: 'Thai', value: 'th' },
{ label: 'Tibetan', value: 'bo' },
{ label: 'Tigrinya', value: 'ti' },
{ label: 'Tonga', value: 'to' },
{ label: 'Turkish', value: 'tr' },
{ label: 'Turkmen', value: 'tk' },
{ label: 'Uighur', value: 'ug' },
{ label: 'Ukrainian', value: 'uk' },
{ label: 'Upper Sorbian', value: 'hsb' },
{ label: 'Urdu', value: 'ur' },
{ label: 'Uzbek', value: 'uz' },
{ label: 'Vietnamese', value: 'vi' },
{ label: 'Welsh', value: 'cy' },
{ label: 'Western Frisian', value: 'fy' },
{ label: 'Xhosa', value: 'xh' },
{ label: 'Yiddish', value: 'yi' },
{ label: 'Yoruba', value: 'yo' },
{ label: 'Yucateco', value: 'yua' },
{ label: 'Yue Chinese', value: 'yue' },
{ label: 'Zulu', value: 'zu' },
];
function normalizeTranslationResponse(provider: string, response: any) {
const providerResult = response[provider];
if (!providerResult) {
return { provider, text: '', status: 'fail', raw: response };
}
return {
provider,
text: providerResult.text || '',
status: providerResult.status || 'success',
original_response: providerResult.original_response || null,
raw: response,
};
}
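// Illustrative sketch of the raw Eden AI payload this helper normalizes; the shape is
// assumed from the fields read above, shown here under the "deepl" provider key:
// { "deepl": { "text": "Texte traduit", "status": "success" } }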
export const translateTextAction = createAction({
name: 'translate_text',
auth: edenAiAuth,
displayName: 'Translate Text',
description: 'Translate text into different languages using Eden AI. Supports multiple providers, languages, and models.',
props: {
provider: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Provider',
description: 'The AI provider to use for text translation.',
required: true,
refreshers: [],
options: createStaticDropdown(TRANSLATION_PROVIDERS),
}),
text: Property.LongText({
displayName: 'Text to Translate',
description: 'The text to translate.',
required: true,
}),
source_language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Source Language',
description: 'The language of the input text. Choose "Auto Detection" to automatically detect the language.',
required: false,
refreshers: [],
options: createStaticDropdown(TRANSLATION_LANGUAGES),
defaultValue: 'auto-detect',
}),
target_language: Property.Dropdown({
auth: edenAiAuth,
displayName: 'Target Language',
description: 'The language to translate the text into.',
required: true,
refreshers: [],
options: createStaticDropdown(TRANSLATION_LANGUAGES.filter(lang => lang.value !== 'auto-detect')),
}),
model: Property.ShortText({
displayName: 'Specific Model',
description: 'Specific model to use (e.g., gpt-4o, grok-2-latest). Leave empty for default.',
required: false,
}),
fallback_providers: Property.MultiSelectDropdown({
auth: edenAiAuth,
displayName: 'Fallback Providers',
description: 'Alternative providers to try if the main provider fails (up to 5).',
required: false,
refreshers: [],
options: createStaticDropdown(TRANSLATION_PROVIDERS),
}),
show_original_response: Property.Checkbox({
displayName: 'Include Original Response',
description: 'Include the raw provider response in the output for debugging.',
required: false,
defaultValue: false,
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
provider: z.string().min(1, 'Provider is required'),
text: z.string().min(1, 'Text is required'),
source_language: z.string().nullish(),
target_language: z.string().min(1, 'Target language is required'),
model: z.string().nullish(),
fallback_providers: z.array(z.string()).max(5).nullish(),
show_original_response: z.boolean().nullish(),
});
const {
provider,
text,
source_language,
target_language,
model,
fallback_providers,
show_original_response
} = propsValue;
const body: Record<string, any> = {
providers: provider,
text,
target_language,
};
if (source_language && source_language !== 'auto-detect') {
body['source_language'] = source_language;
}
if (show_original_response) body['show_original_response'] = true;
if (fallback_providers && fallback_providers.length > 0) {
body['fallback_providers'] = fallback_providers.slice(0, 5);
}
if (model) {
// Pass the specific model as a per-provider setting, e.g. settings: { "openai": "gpt-4o" }.
body['settings'] = { [provider]: model };
}
try {
const response = await edenAiApiCall({
apiKey: auth.secret_text,
method: HttpMethod.POST,
resourceUri: '/translation/automatic_translation',
body,
});
if (!response || typeof response !== 'object') {
throw new Error('Invalid response from Eden AI API.');
}
return normalizeTranslationResponse(provider, response);
} catch (err: any) {
if (err.response?.body?.error) {
const apiError = err.response.body.error;
// The error field can be a string or an object with per-field messages; stringify objects for readability.
throw new Error(`Eden AI API error: ${typeof apiError === 'string' ? apiError : JSON.stringify(apiError)}`);
}
if (err.response?.status === 429) {
throw new Error('Rate limit exceeded. Please try again later.');
}
if (err.response?.status === 401) {
throw new Error('Invalid API key. Please check your Eden AI credentials.');
}
if (err.response?.status === 400) {
throw new Error('Invalid request. Please check your input text and parameters.');
}
throw new Error(`Failed to translate text: ${err.message || err}`);
}
},
});

View File

@@ -0,0 +1,95 @@
import { HttpMethod, httpClient } from '@activepieces/pieces-common';
const BASE_URL = 'https://api.edenai.run/v2';
const DEFAULT_TIMEOUT = 20000;
const MAX_RETRIES = 3;
const RETRY_BACKOFF = 1000;
function logEdenAiError(error: any, context: Record<string, any>) {
const log = {
error: error?.message || error,
status: error?.response?.status,
body: error?.response?.body,
context,
};
if (typeof console !== 'undefined' && console.error) {
console.error('Eden AI API Error:', JSON.stringify(log));
}
}
function parseEdenAiError(error: any): string {
// Eden AI may return the error as a plain string or as a structured object;
// stringify objects so callers never see "[object Object]".
const apiError = error?.response?.body?.error ?? error?.response?.body?.message;
if (apiError) {
return typeof apiError === 'string' ? apiError : JSON.stringify(apiError);
}
if (error?.message) return error.message;
return String(error);
}
export async function edenAiApiCall<T = any>({
apiKey,
method,
resourceUri,
body,
query,
headers: customHeaders = {},
timeout = DEFAULT_TIMEOUT,
maxRetries = MAX_RETRIES,
}: {
apiKey: string;
method: HttpMethod;
resourceUri: string;
body?: unknown;
query?: Record<string, string | number | boolean>;
headers?: Record<string, string>;
timeout?: number;
maxRetries?: number;
}): Promise<T> {
if (!apiKey || typeof apiKey !== 'string' || apiKey.trim().length < 10) {
throw new Error('Missing or invalid Eden AI API key. Please check your credentials.');
}
const headers: Record<string, string> = {
'Authorization': `Bearer ${apiKey}`,
'Content-Type': 'application/json',
...customHeaders,
};
const url = `${BASE_URL}${resourceUri}`;
let lastError;
const stringQueryParams = query
? Object.fromEntries(Object.entries(query).map(([k, v]) => [k, String(v)]))
: undefined;
for (let attempt = 0; attempt < maxRetries; ++attempt) {
try {
const response = await httpClient.sendRequest<T>({
method,
url,
headers,
body,
queryParams: stringQueryParams,
timeout,
});
if (response.status < 200 || response.status >= 300) {
const err = new Error(`Eden AI API error: ${response.status} ${JSON.stringify(response.body)}`);
(err as any).response = response;
throw err;
}
return response.body;
} catch (err: any) {
lastError = err;
logEdenAiError(err, { url, method, body, query, attempt });
const status = err?.response?.status;
// Retry only rate limits (429) and server errors (5xx), backing off exponentially (1s, 2s, 4s, ...).
if (status === 429 || (status && status >= 500 && status < 600)) {
if (attempt < maxRetries - 1) {
const delay = RETRY_BACKOFF * Math.pow(2, attempt);
await new Promise(res => setTimeout(res, delay));
continue;
}
}
// Non-retryable error, or retries exhausted: stop and surface the parsed message below.
break;
}
}
throw new Error(parseEdenAiError(lastError));
}
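// Minimal usage sketch (assumes a valid Eden AI API key; the endpoint, body, and
// 'google' provider value are hypothetical examples mirroring the actions above):
//
// const result = await edenAiApiCall({
//   apiKey: auth.secret_text,
//   method: HttpMethod.POST,
//   resourceUri: '/audio/text_to_speech/',
//   body: { providers: 'google', text: 'Hello', language: 'en-US', option: 'FEMALE' },
// });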

View File

@@ -0,0 +1,5 @@
type Option = { label: string; value: string };
export function createStaticDropdown(options: Option[]) {
return async () => ({ options });
}
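// Usage sketch: wraps a static list into the async options loader that
// Property.Dropdown / Property.MultiSelectDropdown expect, e.g.
// options: createStaticDropdown([{ label: 'Google', value: 'google' }])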

View File

@@ -0,0 +1,20 @@
{
"extends": "../../../../tsconfig.base.json",
"compilerOptions": {
"module": "commonjs",
"forceConsistentCasingInFileNames": true,
"strict": true,
"importHelpers": true,
"noImplicitOverride": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noPropertyAccessFromIndexSignature": true
},
"files": [],
"include": [],
"references": [
{
"path": "./tsconfig.lib.json"
}
]
}

View File

@@ -0,0 +1,9 @@
{
"extends": "./tsconfig.json",
"compilerOptions": {
"outDir": "../../../../dist/out-tsc",
"declaration": true,
"types": ["node"]
},
"include": ["src/**/*.ts"]
}