Add Activepieces integration for workflow automation
- Add Activepieces fork with SmoothSchedule custom piece - Create integrations app with Activepieces service layer - Add embed token endpoint for iframe integration - Create Automations page with embedded workflow builder - Add sidebar visibility fix for embed mode - Add list inactive customers endpoint to Public API - Include SmoothSchedule triggers: event created/updated/cancelled - Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"extends": [
|
||||
"../../../../.eslintrc.base.json"
|
||||
],
|
||||
"ignorePatterns": [
|
||||
"!**/*"
|
||||
],
|
||||
"overrides": [
|
||||
{
|
||||
"files": [
|
||||
"*.ts",
|
||||
"*.tsx",
|
||||
"*.js",
|
||||
"*.jsx"
|
||||
],
|
||||
"rules": {}
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"*.ts",
|
||||
"*.tsx"
|
||||
],
|
||||
"rules": {}
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"*.js",
|
||||
"*.jsx"
|
||||
],
|
||||
"rules": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
# pieces-dumpling-ai
|
||||
|
||||
This library was generated with [Nx](https://nx.dev).
|
||||
|
||||
## Building
|
||||
|
||||
Run `nx build pieces-dumpling-ai` to build the library.
|
||||
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"name": "@activepieces/piece-dumpling-ai",
|
||||
"version": "0.0.9"
|
||||
}
|
||||
@@ -0,0 +1,65 @@
|
||||
{
|
||||
"name": "pieces-dumpling-ai",
|
||||
"$schema": "../../../../node_modules/nx/schemas/project-schema.json",
|
||||
"sourceRoot": "packages/pieces/community/dumpling-ai/src",
|
||||
"projectType": "library",
|
||||
"release": {
|
||||
"version": {
|
||||
"currentVersionResolver": "git-tag",
|
||||
"preserveLocalDependencyProtocols": false,
|
||||
"manifestRootsToUpdate": [
|
||||
"dist/{projectRoot}"
|
||||
]
|
||||
}
|
||||
},
|
||||
"tags": [],
|
||||
"targets": {
|
||||
"build": {
|
||||
"executor": "@nx/js:tsc",
|
||||
"outputs": [
|
||||
"{options.outputPath}"
|
||||
],
|
||||
"options": {
|
||||
"outputPath": "dist/packages/pieces/community/dumpling-ai",
|
||||
"tsConfig": "packages/pieces/community/dumpling-ai/tsconfig.lib.json",
|
||||
"packageJson": "packages/pieces/community/dumpling-ai/package.json",
|
||||
"main": "packages/pieces/community/dumpling-ai/src/index.ts",
|
||||
"assets": [
|
||||
"packages/pieces/community/dumpling-ai/*.md",
|
||||
{
|
||||
"input": "packages/pieces/community/dumpling-ai/src/i18n",
|
||||
"output": "./src/i18n",
|
||||
"glob": "**/!(i18n.json)"
|
||||
}
|
||||
],
|
||||
"buildableProjectDepsInPackageJsonType": "dependencies",
|
||||
"updateBuildableProjectDepsInPackageJson": true
|
||||
},
|
||||
"dependsOn": [
|
||||
"^build",
|
||||
"prebuild"
|
||||
]
|
||||
},
|
||||
"nx-release-publish": {
|
||||
"options": {
|
||||
"packageRoot": "dist/{projectRoot}"
|
||||
}
|
||||
},
|
||||
"lint": {
|
||||
"executor": "@nx/eslint:lint",
|
||||
"outputs": [
|
||||
"{options.outputFile}"
|
||||
]
|
||||
},
|
||||
"prebuild": {
|
||||
"executor": "nx:run-commands",
|
||||
"options": {
|
||||
"cwd": "packages/pieces/community/dumpling-ai",
|
||||
"command": "bun install --no-save --silent"
|
||||
},
|
||||
"dependsOn": [
|
||||
"^build"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"Transform unstructured website content into clean, AI-ready data": "Umwandeln Sie unstrukturierte Website-Inhalte in saubere, AI-bereitgestellte Daten",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n Du kannst API-Schlüssel unter [API Section](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Websuche",
|
||||
"Search News": "Nachrichten suchen",
|
||||
"Generate Image": "Bild generieren",
|
||||
"Scrape Website": "Scrape Webseite",
|
||||
"Crawl Website": "Crawl-Website",
|
||||
"Extract Document Data": "Dokumentendaten extrahieren",
|
||||
"Custom API Call": "Eigener API-Aufruf",
|
||||
"Search the web and optionally retrieve content from top results.": "Durchsuchen Sie das Web und holen Sie optional Inhalte von Top-Ergebnissen.",
|
||||
"Search for news articles using Google News.": "Suchen Sie nach News-Artikeln mit Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Erstellen Sie Bilder basierend auf einer Text-Eingabeaufforderung mit Hilfe von AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Schrägt Daten von einer bestimmten URL ab und formatiert das Ergebnis.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Crawlen Sie eine Website und geben Sie strukturierte Inhalte von mehreren Seiten zurück.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Extrahieren Sie strukturierte Daten aus Dokumenten mit einer visionsfähigen KI.",
|
||||
"Make a custom API call to a specific endpoint": "Einen benutzerdefinierten API-Aufruf an einen bestimmten Endpunkt machen",
|
||||
"Search Query": "Suchanfrage",
|
||||
"Country": "Land",
|
||||
"Location": "Standort",
|
||||
"Language": "Sprache",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Seitennummer",
|
||||
"Scrape Results": "Scrape-Ergebnisse",
|
||||
"Number of Results to Scrape": "Anzahl der zu schroffen Ergebnisse",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Ausgabe bereinigen",
|
||||
"Model": "Modell",
|
||||
"Prompt": "Prompt",
|
||||
"Aspect Ratio": "Seitenverhältnis",
|
||||
"Number of Images": "Anzahl der Bilder",
|
||||
"Seed": "Samen",
|
||||
"Output Format": "Ausgabeformat",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "Ausgabe bereinigen?",
|
||||
"Render JavaScript ?": "JavaScript rendern ?",
|
||||
"Page Limit": "Seitenlimit",
|
||||
"Crawl Depth": "Crawl-Tiefe",
|
||||
"File": "Datei",
|
||||
"Extraction Prompt": "Prompt für Extraktion",
|
||||
"JSON Mode": "JSON-Modus",
|
||||
"Method": "Methode",
|
||||
"Headers": "Kopfzeilen",
|
||||
"Query Parameters": "Abfrageparameter",
|
||||
"Body": "Körper",
|
||||
"Response is Binary ?": "Antwort ist binär?",
|
||||
"No Error on Failure": "Kein Fehler bei Fehler",
|
||||
"Timeout (in seconds)": "Timeout (in Sekunden)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Ländercode mit zwei Buchstaben für Standortbias (z.B. \"US\" für die Vereinigten Staaten).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Spezifische Position, um die Suche zu fokussieren (z.B. \"New York, NY\").",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Language code for the search results (e.g., \"de for german).",
|
||||
"Filter results by date.": "Ergebnisse nach Datum filtern.",
|
||||
"Page number for paginated results.": "Seitenzahl für paginierte Ergebnisse.",
|
||||
"Whether to scrape top search results.": "Gibt an, ob die Suchergebnisse beschnitten werden sollen.",
|
||||
"Number of top results to scrape (max: 10).": "Anzahl der besten Ergebnisse (max: 10).",
|
||||
"Format of scraped content": "Format des verschrotteten Inhalts",
|
||||
"Whether the scraped output should be cleaned.": "Ob der verschrottete Ausgang gereinigt werden soll.",
|
||||
"The search query for Google News.": "Die Suchanfrage nach Google News.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Ländervorwahl für Standortvoreinstellung (z.B. \"US\" für Vereinigte Staaten).",
|
||||
"The model to use for image generation": "Das Modell, das für die Bildgenerierung verwendet wird",
|
||||
"The text prompt for image generation": "Die Textanfrage zur Bilderzeugung",
|
||||
"Aspect ratio of the generated image": "Seitenverhältnis des generierten Bildes",
|
||||
"Number of images to generate (1-4)": "Anzahl der zu erstellenden Bilder (1-4)",
|
||||
"Seed for reproducible results": "Seed für reproduzierbare Ergebnisse",
|
||||
"Format of the generated image": "Format des generierten Bildes",
|
||||
"The format of the output": "Das Format der Ausgabe",
|
||||
"Whether the output should be cleaned.": "Ob die Ausgabe bereinigt werden soll.",
|
||||
"Whether to render JavaScript before scraping.": "Gibt an, ob JavaScript vor dem Scraping dargestellt werden soll.",
|
||||
"The website URL to crawl.": "Die zu crawlende Webseite.",
|
||||
"Maximum number of pages to crawl.": "Maximale Anzahl der zu durchsuchenden Seiten.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Tiefe des Crawlings (Abstand zwischen Basis-URL-Pfad und Unterpfaden).",
|
||||
"Format of the output content.": "Format des Ausgabeinhalts.",
|
||||
"File URL or base64-encoded file.": "Datei-URL oder base64-kodierte Datei.",
|
||||
"The prompt describing what data to extract from the document.": "Die Eingabeaufforderung, die beschreibt, welche Daten aus dem Dokument extrahiert werden sollen.",
|
||||
"Whether to return the result in JSON format.": "Gibt das Ergebnis im JSON-Format zurück.",
|
||||
"Authorization headers are injected automatically from your connection.": "Autorisierungs-Header werden automatisch von Ihrer Verbindung injiziert.",
|
||||
"Enable for files like PDFs, images, etc..": "Aktivieren für Dateien wie PDFs, Bilder, etc..",
|
||||
"Any Time": "Jederzeit",
|
||||
"Past Hour": "Letzte Stunde",
|
||||
"Past Day": "Letzter Tag",
|
||||
"Past Week": "Letzte Woche",
|
||||
"Past Month": "Letzten Monat",
|
||||
"Past Year": "Letztes Jahr",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Screenshot",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Quadrat (1:1)",
|
||||
"Landscape 16:9": "Querformat 16:9",
|
||||
"Landscape 21:9": "Querformat 21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "Hochformat 2:3",
|
||||
"Portrait 3:4": "Hochformat 3:4",
|
||||
"Portrait 4:5": "Hochformat 4:5",
|
||||
"Portrait 9:16": "Hochformat 9:16",
|
||||
"Portrait 9:21": "Hochformat 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "Text",
|
||||
"Raw": "Rohe",
|
||||
"GET": "ERHALTEN",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "LÖSCHEN",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"Transform unstructured website content into clean, AI-ready data": "Transforma el contenido no estructurado del sitio web en datos limpios y listos para la IA",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n Puedes obtener la clave API de [API Section](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Búsqueda Web",
|
||||
"Search News": "Buscar noticias",
|
||||
"Generate Image": "Generar imagen",
|
||||
"Scrape Website": "Sitio web Scrape",
|
||||
"Crawl Website": "Sitio web de rastreo",
|
||||
"Extract Document Data": "Extraer datos de documento",
|
||||
"Custom API Call": "Llamada API personalizada",
|
||||
"Search the web and optionally retrieve content from top results.": "Busca en la web y opcionalmente recupera el contenido de los mejores resultados.",
|
||||
"Search for news articles using Google News.": "Buscar artículos de noticias usando Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Genera imágenes basadas en un mensaje de texto usando AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Rasca los datos de una URL especificada y formatea el resultado.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Arrastre un sitio web y devuelva contenido estructurado de varias páginas.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Extraer datos estructurados de documentos usando IA con visión.",
|
||||
"Make a custom API call to a specific endpoint": "Hacer una llamada API personalizada a un extremo específico",
|
||||
"Search Query": "Buscar consulta",
|
||||
"Country": "País",
|
||||
"Location": "Ubicación",
|
||||
"Language": "Idioma",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Número de página",
|
||||
"Scrape Results": "Resultados del raspado",
|
||||
"Number of Results to Scrape": "Número de resultados a raspar",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Limpiar salida",
|
||||
"Model": "Modelo",
|
||||
"Prompt": "Petición",
|
||||
"Aspect Ratio": "Relación de aspecto",
|
||||
"Number of Images": "Número de imágenes",
|
||||
"Seed": "Semilla",
|
||||
"Output Format": "Formato de salida",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "¿Limpiar salida?",
|
||||
"Render JavaScript ?": "¿Procesar Javascript ?",
|
||||
"Page Limit": "Límite de página",
|
||||
"Crawl Depth": "Profundidad del rastreo",
|
||||
"File": "Archivo",
|
||||
"Extraction Prompt": "Prompt de extracción",
|
||||
"JSON Mode": "Modo JSON",
|
||||
"Method": "Método",
|
||||
"Headers": "Encabezados",
|
||||
"Query Parameters": "Parámetros de consulta",
|
||||
"Body": "Cuerpo",
|
||||
"Response is Binary ?": "¿Respuesta es binaria?",
|
||||
"No Error on Failure": "No hay ningún error en fallo",
|
||||
"Timeout (in seconds)": "Tiempo de espera (en segundos)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Código de país de dos letras para el sesgo de ubicación (por ejemplo, \"US\" para Estados Unidos).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Ubicación específica para enfocar la búsqueda (por ejemplo, \"Nueva York, NY\").",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Language code for the search results (e.g., \"es\" for spanish).",
|
||||
"Filter results by date.": "Filtrar resultados por fecha.",
|
||||
"Page number for paginated results.": "Número de página para los resultados paginados.",
|
||||
"Whether to scrape top search results.": "Si raspar los mejores resultados de búsqueda.",
|
||||
"Number of top results to scrape (max: 10).": "Número de resultados superiores a raspar (máx. 10).",
|
||||
"Format of scraped content": "Formato de contenido scraped",
|
||||
"Whether the scraped output should be cleaned.": "Si la salida borrada debe ser limpiada.",
|
||||
"The search query for Google News.": "La consulta de búsqueda de Google News.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Código de país para el sesgo de ubicación (por ejemplo, \"US\" para Estados Unidos).",
|
||||
"The model to use for image generation": "El modelo a usar para la generación de imágenes",
|
||||
"The text prompt for image generation": "El mensaje de texto para generar imágenes",
|
||||
"Aspect ratio of the generated image": "Relación de aspecto de la imagen generada",
|
||||
"Number of images to generate (1-4)": "Número de imágenes a generar (1-4)",
|
||||
"Seed for reproducible results": "Semilla para resultados reproducibles",
|
||||
"Format of the generated image": "Formato de la imagen generada",
|
||||
"The format of the output": "El formato de la salida",
|
||||
"Whether the output should be cleaned.": "Si la salida debe ser limpiada.",
|
||||
"Whether to render JavaScript before scraping.": "Si desea renderizar JavaScript antes de rascar.",
|
||||
"The website URL to crawl.": "La URL del sitio web para rascar.",
|
||||
"Maximum number of pages to crawl.": "Número máximo de páginas a rascar.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Profundidad de rastreo (distancia entre la ruta URL base y las subrutas).",
|
||||
"Format of the output content.": "Formato del contenido de salida.",
|
||||
"File URL or base64-encoded file.": "URL del archivo o archivo codificado en base64.",
|
||||
"The prompt describing what data to extract from the document.": "El prompt describe qué datos extraer del documento.",
|
||||
"Whether to return the result in JSON format.": "Devolver el resultado en formato JSON.",
|
||||
"Authorization headers are injected automatically from your connection.": "Las cabeceras de autorización se inyectan automáticamente desde tu conexión.",
|
||||
"Enable for files like PDFs, images, etc..": "Activar para archivos como PDFs, imágenes, etc.",
|
||||
"Any Time": "En cualquier momento",
|
||||
"Past Hour": "Última hora",
|
||||
"Past Day": "Último día",
|
||||
"Past Week": "Última semana",
|
||||
"Past Month": "Último mes",
|
||||
"Past Year": "Último año",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Captura de pantalla",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Cuadrado (1:1)",
|
||||
"Landscape 16:9": "Paisaje 16:9",
|
||||
"Landscape 21:9": "Paisaje 21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "Retrato 2:3",
|
||||
"Portrait 3:4": "Retrato 3:4",
|
||||
"Portrait 4:5": "Retrato 4:5",
|
||||
"Portrait 9:16": "Retrato 9:16",
|
||||
"Portrait 9:21": "Retrato 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "Texto",
|
||||
"Raw": "Rápido",
|
||||
"GET": "RECOGER",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "BORRAR",
|
||||
"HEAD": "LIMPIO"
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"Transform unstructured website content into clean, AI-ready data": "Transformez le contenu du site web non structuré en données propres et prêtes à l'IA",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Recherche Web",
|
||||
"Search News": "Rechercher des actualités",
|
||||
"Generate Image": "Générer une image",
|
||||
"Scrape Website": "Site Web de Scrape",
|
||||
"Crawl Website": "Site Web de Crawl",
|
||||
"Extract Document Data": "Extraire les données du document",
|
||||
"Custom API Call": "Appel API personnalisé",
|
||||
"Search the web and optionally retrieve content from top results.": "Recherchez sur le Web et éventuellement récupérez le contenu des meilleurs résultats.",
|
||||
"Search for news articles using Google News.": "Rechercher des articles d'actualités en utilisant Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Générer des images basées sur une invite de texte à l'aide d'AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Scrappe les données à partir d'une URL spécifiée et formate le résultat.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Crawl un site web et retourne du contenu structuré à partir de plusieurs pages.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Extraire des données structurées à partir de documents à l'aide de l'IA capable de la vision.",
|
||||
"Make a custom API call to a specific endpoint": "Passez un appel API personnalisé à un point de terminaison spécifique",
|
||||
"Search Query": "Requête de recherche",
|
||||
"Country": "Pays",
|
||||
"Location": "Localisation",
|
||||
"Language": "Langue",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Numéro de page",
|
||||
"Scrape Results": "Résultats de Scrape",
|
||||
"Number of Results to Scrape": "Nombre de résultats à copier",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Nettoyer la sortie",
|
||||
"Model": "Modélisation",
|
||||
"Prompt": "Prompt",
|
||||
"Aspect Ratio": "Ratio d'aspect",
|
||||
"Number of Images": "Nombre d'images",
|
||||
"Seed": "Graine",
|
||||
"Output Format": "Format de sortie",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "Nettoyer la sortie ?",
|
||||
"Render JavaScript ?": "Rendre JavaScript ?",
|
||||
"Page Limit": "Limite de page",
|
||||
"Crawl Depth": "Profondeur de Rampes",
|
||||
"File": "Ficher",
|
||||
"Extraction Prompt": "Proposition d'extraction",
|
||||
"JSON Mode": "Mode JSON",
|
||||
"Method": "Méthode",
|
||||
"Headers": "En-têtes",
|
||||
"Query Parameters": "Paramètres de requête",
|
||||
"Body": "Corps",
|
||||
"Response is Binary ?": "La réponse est Binaire ?",
|
||||
"No Error on Failure": "Aucune erreur en cas d'échec",
|
||||
"Timeout (in seconds)": "Délai d'attente (en secondes)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Code de pays à deux lettres pour le biais de localisation (par exemple, \"US\" pour les États-Unis).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Localisation spécifique de la recherche (par exemple, \"New York, New York\").",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Language code for the search results (e.g., \"fr\" for English).",
|
||||
"Filter results by date.": "Filtrer les résultats par date.",
|
||||
"Page number for paginated results.": "Numéro de page pour les résultats paginés.",
|
||||
"Whether to scrape top search results.": "Si vous voulez ramasser les meilleurs résultats de recherche.",
|
||||
"Number of top results to scrape (max: 10).": "Nombre de meilleurs résultats à griller (max: 10).",
|
||||
"Format of scraped content": "Format du contenu raclé",
|
||||
"Whether the scraped output should be cleaned.": "Si la sortie escroquée doit être nettoyée.",
|
||||
"The search query for Google News.": "La recherche de Google News.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Code de pays pour le biais de localisation (par exemple, \"US\" pour les États-Unis).",
|
||||
"The model to use for image generation": "Le modèle à utiliser pour la génération d'images",
|
||||
"The text prompt for image generation": "L'invite de texte pour la génération d'image",
|
||||
"Aspect ratio of the generated image": "Ratio d'aspect de l'image générée",
|
||||
"Number of images to generate (1-4)": "Nombre d'images à générer (1-4)",
|
||||
"Seed for reproducible results": "Graine pour résultats reproductibles",
|
||||
"Format of the generated image": "Format de l'image générée",
|
||||
"The format of the output": "Le format de sortie",
|
||||
"Whether the output should be cleaned.": "Si la sortie doit être nettoyée.",
|
||||
"Whether to render JavaScript before scraping.": "Renvoie ou non du JavaScript avant de gratter.",
|
||||
"The website URL to crawl.": "L'URL du site web à explorer.",
|
||||
"Maximum number of pages to crawl.": "Nombre maximum de pages à explorer.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Profondeur des explorations (distance entre le chemin de l'URL de base et les sous-chemins).",
|
||||
"Format of the output content.": "Format du contenu de sortie.",
|
||||
"File URL or base64-encoded file.": "URL du fichier ou fichier encodé en base64.",
|
||||
"The prompt describing what data to extract from the document.": "L'invite décrivant quelles données extraire du document.",
|
||||
"Whether to return the result in JSON format.": "Renvoie ou non le résultat au format JSON.",
|
||||
"Authorization headers are injected automatically from your connection.": "Les en-têtes d'autorisation sont injectés automatiquement à partir de votre connexion.",
|
||||
"Enable for files like PDFs, images, etc..": "Activer pour les fichiers comme les PDFs, les images, etc.",
|
||||
"Any Time": "À tout moment",
|
||||
"Past Hour": "Dernière heure",
|
||||
"Past Day": "Dernier jour",
|
||||
"Past Week": "Dernière semaine",
|
||||
"Past Month": "Mois dernier",
|
||||
"Past Year": "Année passée",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Capture d'écran",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Carré (1:1)",
|
||||
"Landscape 16:9": "Paysage 16:9",
|
||||
"Landscape 21:9": "Paysage 21:9",
|
||||
"Landscape 3:2": "Paysage 3:2",
|
||||
"Landscape 4:3": "Paysage 4:3",
|
||||
"Portrait 2:3": "Portrait 2:3",
|
||||
"Portrait 3:4": "Portrait 3:4",
|
||||
"Portrait 4:5": "Portrait 4:5",
|
||||
"Portrait 9:16": "Portrait 9:16",
|
||||
"Portrait 9:21": "Portrait 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "Texte du texte",
|
||||
"Raw": "Brute",
|
||||
"GET": "OBTENIR",
|
||||
"POST": "POSTER",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "EFFACER",
|
||||
"DELETE": "SUPPRIMER",
|
||||
"HEAD": "TÊTE"
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"Transform unstructured website content into clean, AI-ready data": "非構造化ウェブサイトのコンテンツを、AI対応のクリーンなデータに変換します",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n [API Section](https://app.dumplingai.com/api-keys)からAPIキーを取得できます。",
|
||||
"Web Search": "ウェブ検索",
|
||||
"Search News": "ニュースを検索",
|
||||
"Generate Image": "画像を生成する",
|
||||
"Scrape Website": "ウェブサイトをスクレイプ",
|
||||
"Crawl Website": "クロールのウェブサイト",
|
||||
"Extract Document Data": "ドキュメントデータの抽出",
|
||||
"Custom API Call": "カスタムAPI通話",
|
||||
"Search the web and optionally retrieve content from top results.": "Webを検索し、必要に応じてトップの結果からコンテンツを取得します。",
|
||||
"Search for news articles using Google News.": "Googleニュースを使用してニュース記事を検索します。",
|
||||
"Generate images based on a text prompt using AI.": "AI を使用してテキストプロンプトに基づいて画像を生成します。",
|
||||
"Scrapes data from a specified URL and format the result.": "指定された URL からデータをスクラップし、結果をフォーマットします。",
|
||||
"Crawl a website and return structured content from multiple pages.": "ウェブサイトをクロールし、構造化されたコンテンツを複数ページから返します。",
|
||||
"Extract structured data from documents using vision-capable AI.": "視覚対応AIを使用してドキュメントから構造化データを抽出する。",
|
||||
"Make a custom API call to a specific endpoint": "特定のエンドポイントへのカスタム API コールを実行します。",
|
||||
"Search Query": "検索クエリ",
|
||||
"Country": "国",
|
||||
"Location": "場所",
|
||||
"Language": "言語",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "ページ番号",
|
||||
"Scrape Results": "スクレイプ結果",
|
||||
"Number of Results to Scrape": "スクラップする結果の数",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "クリーン出力",
|
||||
"Model": "モデル",
|
||||
"Prompt": "Prompt",
|
||||
"Aspect Ratio": "アスペクト比",
|
||||
"Number of Images": "画像の数",
|
||||
"Seed": "Seed",
|
||||
"Output Format": "出力形式",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "クリーンアウトプット?",
|
||||
"Render JavaScript ?": "JavaScript をレンダリングしますか?",
|
||||
"Page Limit": "ページ制限",
|
||||
"Crawl Depth": "クロールの深さ",
|
||||
"File": "ファイル",
|
||||
"Extraction Prompt": "Extraction Prompt",
|
||||
"JSON Mode": "JSONモード",
|
||||
"Method": "方法",
|
||||
"Headers": "ヘッダー",
|
||||
"Query Parameters": "クエリパラメータ",
|
||||
"Body": "本文",
|
||||
"Response is Binary ?": "応答はバイナリですか?",
|
||||
"No Error on Failure": "失敗時にエラーはありません",
|
||||
"Timeout (in seconds)": "タイムアウト(秒)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "位置バイアスのための2文字の国コード(例:米国の場合は「米国」)。",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "検索にフォーカスする特定の場所(例:「ニューヨーク」、「NY」)。",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "検索結果の言語コード(例: \"en\" for English)。",
|
||||
"Filter results by date.": "日付で結果を絞り込みます。",
|
||||
"Page number for paginated results.": "ページ化された結果のページ番号。",
|
||||
"Whether to scrape top search results.": "トップ検索結果をスクレイプするかどうか。",
|
||||
"Number of top results to scrape (max: 10).": "スクレイプする上位の結果の数(最大: 10)。",
|
||||
"Format of scraped content": "削除されたコンテンツのフォーマット",
|
||||
"Whether the scraped output should be cleaned.": "スクレイプ出力を清掃するかどうか。",
|
||||
"The search query for Google News.": "Google News の検索クエリ",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "位置バイアスのための国コード(例:米国の場合は「米国」)。",
|
||||
"The model to use for image generation": "画像生成に使用するモデル",
|
||||
"The text prompt for image generation": "画像を生成するためのテキストプロンプトを表示します",
|
||||
"Aspect ratio of the generated image": "生成された画像のアスペクト比",
|
||||
"Number of images to generate (1-4)": "生成する画像の数 (1-4)",
|
||||
"Seed for reproducible results": "再現性のある結果のためのシードです",
|
||||
"Format of the generated image": "生成された画像の形式",
|
||||
"The format of the output": "出力の形式",
|
||||
"Whether the output should be cleaned.": "出力をクリーニングするかどうか。",
|
||||
"Whether to render JavaScript before scraping.": "スクレイピングの前にJavaScriptをレンダリングするかどうか。",
|
||||
"The website URL to crawl.": "クロールするウェブサイトのURL。",
|
||||
"Maximum number of pages to crawl.": "這うページの最大数。",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "クロールの深さ (ベース URL パスとサブパスの間の距離)。",
|
||||
"Format of the output content.": "出力コンテンツのフォーマット。",
|
||||
"File URL or base64-encoded file.": "ファイルURLまたはbase64エンコードファイル。",
|
||||
"The prompt describing what data to extract from the document.": "ドキュメントから抽出するデータを説明するプロンプト。",
|
||||
"Whether to return the result in JSON format.": "結果を JSON 形式で返すかどうかを指定します。",
|
||||
"Authorization headers are injected automatically from your connection.": "認証ヘッダは接続から自動的に注入されます。",
|
||||
"Enable for files like PDFs, images, etc..": "PDF、画像などのファイルを有効にします。",
|
||||
"Any Time": "いつでも",
|
||||
"Past Hour": "過去 1 時間",
|
||||
"Past Day": "過去 1 日",
|
||||
"Past Week": "過去週",
|
||||
"Past Month": "過去 1 ヶ月",
|
||||
"Past Year": "過去 1 年間",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "スクリーンショット",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "正方形(1:1)",
|
||||
"Landscape 16:9": "横向き16:9",
|
||||
"Landscape 21:9": "横向き時21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "ポートレート2:3",
|
||||
"Portrait 3:4": "ポートレート3:4",
|
||||
"Portrait 4:5": "ポートレート4:5",
|
||||
"Portrait 9:16": "ポートレート9:16",
|
||||
"Portrait 9:21": "ポートレート9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "テキスト",
|
||||
"Raw": "Raw",
|
||||
"GET": "取得",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "削除",
|
||||
"HEAD": "頭"
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"Transform unstructured website content into clean, AI-ready data": "Niet-gestructureerde website-inhoud omzetten in schone, AI-ready data",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n U kunt API-sleutel verkrijgen van [API section](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Zoeken op internet",
|
||||
"Search News": "Zoek Nieuws",
|
||||
"Generate Image": "Afbeelding genereren",
|
||||
"Scrape Website": "Scrape Website",
|
||||
"Crawl Website": "Crawl website",
|
||||
"Extract Document Data": "Documentgegevens uitpakken",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Search the web and optionally retrieve content from top results.": "Zoek op het web en haal optioneel inhoud op van de bovenste resultaten.",
|
||||
"Search for news articles using Google News.": "Zoek naar nieuwsartikelen met behulp van Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Genereer afbeeldingen op basis van een tekst prompt met behulp van AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Scrapt de gegevens van een opgegeven URL en formatteer het resultaat.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Crawl een website en retourneer gestructureerde inhoud van meerdere pagina's.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Pak gestructureerde gegevens uit documenten met visionaire AI uit.",
|
||||
"Make a custom API call to a specific endpoint": "Maak een aangepaste API call naar een specifiek eindpunt",
|
||||
"Search Query": "Zoek query",
|
||||
"Country": "Land:",
|
||||
"Location": "Locatie",
|
||||
"Language": "Taal",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Pagina Nummer",
|
||||
"Scrape Results": "Scrape resultaten",
|
||||
"Number of Results to Scrape": "Aantal resultaten voor scrape",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Schoon Uitvoer",
|
||||
"Model": "Model",
|
||||
"Prompt": "Prompt",
|
||||
"Aspect Ratio": "Verhouding beeldverhouding",
|
||||
"Number of Images": "Aantal afbeeldingen",
|
||||
"Seed": "Seed",
|
||||
"Output Format": "Uitvoer formaat",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "Uitvoer opschonen ?",
|
||||
"Render JavaScript ?": "JavaScript renderen?",
|
||||
"Page Limit": "Pagina limiet",
|
||||
"Crawl Depth": "Crawl diepte",
|
||||
"File": "Bestand",
|
||||
"Extraction Prompt": "Extractie Prompt",
|
||||
"JSON Mode": "JSON Modus",
|
||||
"Method": "Methode",
|
||||
"Headers": "Kopteksten",
|
||||
"Query Parameters": "Query parameters",
|
||||
"Body": "Lichaam",
|
||||
"Response is Binary ?": "Antwoord is binair?",
|
||||
"No Error on Failure": "Geen fout bij fout",
|
||||
"Timeout (in seconds)": "Time-out (in seconden)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Tweeletterige landcode voor locatievooroordelen (bijv. \"US\" voor Verenigde Staten).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Specifieke locatie om de zoekopdracht te focussen (bijv. \"New York, NY\").",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Taalcode voor de zoekresultaten (bijv. \"en\" voor Engels).",
|
||||
"Filter results by date.": "Filter resultaten op datum.",
|
||||
"Page number for paginated results.": "Paginanummer voor gepagineerde resultaten.",
|
||||
"Whether to scrape top search results.": "Of de bovenste zoekresultaten moeten worden gescrapt.",
|
||||
"Number of top results to scrape (max: 10).": "Aantal beste resultaten voor scrape (max: 10).",
|
||||
"Format of scraped content": "Formaat van gescrapde inhoud",
|
||||
"Whether the scraped output should be cleaned.": "Of de schraapte uitvoer moet worden opgeruimd.",
|
||||
"The search query for Google News.": "De zoekopdracht voor Google News.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Landcode voor locatievooroordelen (bijv. \"US\" voor Verenigde Staten).",
|
||||
"The model to use for image generation": "Het te gebruiken model voor het genereren van afbeeldingen",
|
||||
"The text prompt for image generation": "De tekst vraagt om het genereren van afbeeldingen",
|
||||
"Aspect ratio of the generated image": "Aspect verhouding van de gegenereerde afbeelding",
|
||||
"Number of images to generate (1-4)": "Aantal te genereren afbeeldingen (1-4)",
|
||||
"Seed for reproducible results": "Seed voor reproduceerbare resultaten",
|
||||
"Format of the generated image": "Formaat van de gegenereerde afbeelding",
|
||||
"The format of the output": "Het formaat van de uitvoer",
|
||||
"Whether the output should be cleaned.": "Of de uitvoer moet worden schoongemaakt.",
|
||||
"Whether to render JavaScript before scraping.": "Wel of niet JavaScript renderen voor het scrapen.",
|
||||
"The website URL to crawl.": "De website URL om te crawl.",
|
||||
"Maximum number of pages to crawl.": "Maximum aantal te crawl pagina's.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Diepte van crawling (afstand tussen basis-URL-pad en subpaden).",
|
||||
"Format of the output content.": "Formaat van de inhoud van de uitvoer.",
|
||||
"File URL or base64-encoded file.": "Bestands URL of base64-gecodeerd bestand.",
|
||||
"The prompt describing what data to extract from the document.": "De prompt die beschrijft welke gegevens uit het document moeten worden geëxtraheerd.",
|
||||
"Whether to return the result in JSON format.": "Geef het resultaat in JSON formaat.",
|
||||
"Authorization headers are injected automatically from your connection.": "Autorisatie headers worden automatisch geïnjecteerd vanuit uw verbinding.",
|
||||
"Enable for files like PDFs, images, etc..": "Inschakelen voor bestanden zoals PDF's, afbeeldingen etc..",
|
||||
"Any Time": "Elk moment",
|
||||
"Past Hour": "Afgelopen Uur",
|
||||
"Past Day": "Afgelopen dag",
|
||||
"Past Week": "Afgelopen Week",
|
||||
"Past Month": "Afgelopen Maand",
|
||||
"Past Year": "Vorig jaar",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Schermafbeelding",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Vierkant (1:1)",
|
||||
"Landscape 16:9": "Liggend 16:9",
|
||||
"Landscape 21:9": "Liggend 21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "Staand 2:3",
|
||||
"Portrait 3:4": "Staand 3:4",
|
||||
"Portrait 4:5": "Staand 4:5",
|
||||
"Portrait 9:16": "Staand 9:16",
|
||||
"Portrait 9:21": "Staand 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "Tekstveld",
|
||||
"Raw": "Onbewerkte",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"Transform unstructured website content into clean, AI-ready data": "Transforme conteúdo do site não estruturado em dados limpos e com IA.",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n Você pode obter chave de API de [Seção API](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Pesquisar na Web",
|
||||
"Search News": "Buscar Notícias",
|
||||
"Generate Image": "Gerar Imagem",
|
||||
"Scrape Website": "Site Scrape",
|
||||
"Crawl Website": "Site Rastejante",
|
||||
"Extract Document Data": "Extrair dados do documento",
|
||||
"Custom API Call": "Chamada de API personalizada",
|
||||
"Search the web and optionally retrieve content from top results.": "Pesquisar na web e, opcionalmente, recuperar conteúdo dos resultados superiores.",
|
||||
"Search for news articles using Google News.": "Pesquisar artigos de notícias usando o Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Gerar imagens baseado em um prompt de texto usando AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Extrai dados de uma URL especificada e formata o resultado.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Envolva um site e retorna conteúdo estruturado de várias páginas.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Extraia dados estruturados de documentos usando IA com capacidade de visão.",
|
||||
"Make a custom API call to a specific endpoint": "Faça uma chamada de API personalizada para um ponto de extremidade específico",
|
||||
"Search Query": "Consulta de Pesquisa",
|
||||
"Country": "País/região",
|
||||
"Location": "Local:",
|
||||
"Language": "IDIOMA",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Número da página",
|
||||
"Scrape Results": "Resultados Scrape",
|
||||
"Number of Results to Scrape": "Número de resultados para Scrape",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Limpar Saída",
|
||||
"Model": "Modelo",
|
||||
"Prompt": "Aviso",
|
||||
"Aspect Ratio": "Proporção da proporção",
|
||||
"Number of Images": "Número de imagens",
|
||||
"Seed": "Semente",
|
||||
"Output Format": "Formato de saída",
|
||||
"URL": "URL:",
|
||||
"Clean Output ?": "Limpar a Saída ?",
|
||||
"Render JavaScript ?": "Renderizar o JavaScript ?",
|
||||
"Page Limit": "Limite de Página",
|
||||
"Crawl Depth": "Profundidade do Rastejo",
|
||||
"File": "Arquivo",
|
||||
"Extraction Prompt": "Solicitação de extração",
|
||||
"JSON Mode": "Modo JSON",
|
||||
"Method": "Método",
|
||||
"Headers": "Cabeçalhos",
|
||||
"Query Parameters": "Parâmetros da consulta",
|
||||
"Body": "Conteúdo",
|
||||
"Response is Binary ?": "A resposta é binária ?",
|
||||
"No Error on Failure": "Nenhum erro no Failure",
|
||||
"Timeout (in seconds)": "Tempo limite (em segundos)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Código de país de duas letras para o viés de localização (por exemplo, \"EUA\" para Estados Unidos).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Localização específica para focar a pesquisa (por exemplo, \"Nova Iorque, NY\").",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Código de idioma para os resultados da pesquisa (por exemplo, \"en\" para inglês).",
|
||||
"Filter results by date.": "Filtrar resultados por data.",
|
||||
"Page number for paginated results.": "Número da página para resultados paginados.",
|
||||
"Whether to scrape top search results.": "Se deve scrape resultados de busca mais altos.",
|
||||
"Number of top results to scrape (max: 10).": "Número de melhores resultados para extrair (máx: 10).",
|
||||
"Format of scraped content": "Formato de conteúdo pesquisado",
|
||||
"Whether the scraped output should be cleaned.": "Se a saída encontrada deve ser limpa.",
|
||||
"The search query for Google News.": "A consulta de pesquisa do Google News.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Código do país para viés de localização (por exemplo, \"EUA\" para Estados Unidos).",
|
||||
"The model to use for image generation": "O modelo a ser usado para a geração de imagem",
|
||||
"The text prompt for image generation": "O prompt de texto para geração de imagens",
|
||||
"Aspect ratio of the generated image": "Proporção da imagem gerada",
|
||||
"Number of images to generate (1-4)": "Número de imagens a gerar (1-4)",
|
||||
"Seed for reproducible results": "Semente para resultados reprodutíveis",
|
||||
"Format of the generated image": "Formato da imagem gerada",
|
||||
"The format of the output": "O formato da saída",
|
||||
"Whether the output should be cleaned.": "Se a saída deve ser limpa.",
|
||||
"Whether to render JavaScript before scraping.": "Se deve renderizar JavaScript antes de remover.",
|
||||
"The website URL to crawl.": "A URL do site para rastrear.",
|
||||
"Maximum number of pages to crawl.": "Número máximo de páginas para rastrear.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Profundidade dos rastreadores (distância entre o caminho da URL base e os subcaminhos).",
|
||||
"Format of the output content.": "Formato do conteúdo de saída.",
|
||||
"File URL or base64-encoded file.": "URL do arquivo ou arquivo codificado em base64.",
|
||||
"The prompt describing what data to extract from the document.": "O prompt descrevendo quais dados extrair do documento.",
|
||||
"Whether to return the result in JSON format.": "Se deseja retornar o resultado no formato JSON.",
|
||||
"Authorization headers are injected automatically from your connection.": "Os cabeçalhos de autorização são inseridos automaticamente a partir da sua conexão.",
|
||||
"Enable for files like PDFs, images, etc..": "Habilitar para arquivos como PDFs, imagens, etc..",
|
||||
"Any Time": "Qualquer hora",
|
||||
"Past Hour": "Última Hora",
|
||||
"Past Day": "Dia passado",
|
||||
"Past Week": "Na última semana",
|
||||
"Past Month": "Mês passado",
|
||||
"Past Year": "No último ano",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Pré-visualizar",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Quadrado (1:1)",
|
||||
"Landscape 16:9": "Paisagem 16:9",
|
||||
"Landscape 21:9": "Paisagem 21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "Retrato 2:3",
|
||||
"Portrait 3:4": "Retrato 3:4",
|
||||
"Portrait 4:5": "Retrato 4:5",
|
||||
"Portrait 9:16": "Retrato 9:16",
|
||||
"Portrait 9:21": "Retrato 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "texto",
|
||||
"Raw": "RAW",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,112 @@
|
||||
{
|
||||
"Dumpling AI": "Dumpling AI",
|
||||
"Transform unstructured website content into clean, AI-ready data": "Преобразовать неструктурированный контент веб-сайта в чистые, готовые к использованию данные",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n Вы можете получить ключ API из [API Section](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Веб-поиск",
|
||||
"Search News": "Поиск новостей",
|
||||
"Generate Image": "Сгенерировать изображение",
|
||||
"Scrape Website": "Сайт Scrape",
|
||||
"Crawl Website": "Вебсайт Crawl",
|
||||
"Extract Document Data": "Извлечь данные документа",
|
||||
"Custom API Call": "Пользовательский вызов API",
|
||||
"Search the web and optionally retrieve content from top results.": "Поиск в интернете и получение по желанию топ-результатов.",
|
||||
"Search for news articles using Google News.": "Поиск новостей, используя Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Генерировать изображения на основе текстовой подсказки с помощью AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Заполняет данные по указанному URL и отформатирует результат.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Перетаскивайте сайт и возвращайте структурированный контент с нескольких страниц.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Извлечение структурированных данных из документов с помощью ИИ, способной к видимости.",
|
||||
"Make a custom API call to a specific endpoint": "Сделать пользовательский API вызов к определенной конечной точке",
|
||||
"Search Query": "Поисковый запрос",
|
||||
"Country": "Страна",
|
||||
"Location": "Местоположение",
|
||||
"Language": "Язык",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Номер страницы",
|
||||
"Scrape Results": "Результаты Scrape",
|
||||
"Number of Results to Scrape": "Количество результатов для Scrape",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Очистить вывод",
|
||||
"Model": "Модель",
|
||||
"Prompt": "Prompt",
|
||||
"Aspect Ratio": "Соотношение сторон",
|
||||
"Number of Images": "Количество изображений",
|
||||
"Seed": "Сид",
|
||||
"Output Format": "Формат вывода",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "Очистить вывод?",
|
||||
"Render JavaScript ?": "Визуализировать JavaScript ?",
|
||||
"Page Limit": "Лимит страницы",
|
||||
"Crawl Depth": "Глубина полёта",
|
||||
"File": "Файл",
|
||||
"Extraction Prompt": "Подсказка к извлечению",
|
||||
"JSON Mode": "JSON режим",
|
||||
"Method": "Метод",
|
||||
"Headers": "Заголовки",
|
||||
"Query Parameters": "Параметры запроса",
|
||||
"Body": "Тело",
|
||||
"No Error on Failure": "Нет ошибок при ошибке",
|
||||
"Timeout (in seconds)": "Таймаут (в секундах)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Двухбуквенный код страны для смещения по местоположению (например, \"US\" для США).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Конкретное местоположение для фокусировки поиска (например, «Нью-Йорк, Нью-Йорк»).",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Код языка для результатов поиска (например, \"en\" для английского).",
|
||||
"Filter results by date.": "Фильтр результатов по дате.",
|
||||
"Page number for paginated results.": "Номер страницы для пагинальных результатов.",
|
||||
"Whether to scrape top search results.": "Удалять ли топ результатов поиска.",
|
||||
"Number of top results to scrape (max: 10).": "Количество топ-результатов до скрапления (макс: 10).",
|
||||
"Format of scraped content": "Формат содержимого",
|
||||
"Whether the scraped output should be cleaned.": "Следует ли очищать сломанный выход.",
|
||||
"The search query for Google News.": "Поиск по Новостям Google.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Код страны для смещения по местоположению (например, \"US\" для США).",
|
||||
"The model to use for image generation": "Модель для генерации изображений",
|
||||
"The text prompt for image generation": "Текстовый запрос для генерации изображений",
|
||||
"Aspect ratio of the generated image": "Соотношение сторон сгенерированного изображения",
|
||||
"Number of images to generate (1-4)": "Количество изображений для генерации (1-4)",
|
||||
"Seed for reproducible results": "Сид для воспроизводимых результатов",
|
||||
"Format of the generated image": "Формат сгенерированного изображения",
|
||||
"The format of the output": "Формат вывода",
|
||||
"Whether the output should be cleaned.": "Должен ли вывод быть очищен.",
|
||||
"Whether to render JavaScript before scraping.": "Нужно ли рендерить JavaScript перед скрапингом.",
|
||||
"The website URL to crawl.": "URL сайта для сканирования.",
|
||||
"Maximum number of pages to crawl.": "Максимальное количество страниц для сканирования.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Глубина сканирования (расстояние между базовым URL-адресом и под-путями).",
|
||||
"Format of the output content.": "Формат контента вывода.",
|
||||
"File URL or base64-encoded file.": "URL файла или base64 закодированный файл.",
|
||||
"The prompt describing what data to extract from the document.": "Заявка, описывающая данные для извлечения из документа.",
|
||||
"Whether to return the result in JSON format.": "Возвращает ли результат результат в формате JSON.",
|
||||
"Authorization headers are injected automatically from your connection.": "Заголовки авторизации включаются автоматически из вашего соединения.",
|
||||
"Any Time": "В любое время",
|
||||
"Past Hour": "Последний час",
|
||||
"Past Day": "Прошлый день",
|
||||
"Past Week": "Прошлая неделя",
|
||||
"Past Month": "Прошлый месяц",
|
||||
"Past Year": "Прошлый год",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Скриншот",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Квадрат (1:1)",
|
||||
"Landscape 16:9": "Горизонтальный 16:9",
|
||||
"Landscape 21:9": "Горизонтальный 21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "Портретная 2:3",
|
||||
"Portrait 3:4": "Портретная 3:4",
|
||||
"Portrait 4:5": "Портретная 4:5",
|
||||
"Portrait 9:16": "Портретная 9:16",
|
||||
"Portrait 9:21": "Портретная 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "Текст",
|
||||
"Raw": "Сырье",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"Transform unstructured website content into clean, AI-ready data": "Transform unstructured website content into clean, AI-ready data",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Web Search",
|
||||
"Search News": "Search News",
|
||||
"Generate Image": "Generate Image",
|
||||
"Scrape Website": "Scrape Website",
|
||||
"Crawl Website": "Crawl Website",
|
||||
"Extract Document Data": "Extract Document Data",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Search the web and optionally retrieve content from top results.": "Search the web and optionally retrieve content from top results.",
|
||||
"Search for news articles using Google News.": "Search for news articles using Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Generate images based on a text prompt using AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Scrapes data from a specified URL and format the result.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Crawl a website and return structured content from multiple pages.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Extract structured data from documents using vision-capable AI.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Search Query": "Search Query",
|
||||
"Country": "Country",
|
||||
"Location": "Location",
|
||||
"Language": "Language",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Page Number",
|
||||
"Scrape Results": "Scrape Results",
|
||||
"Number of Results to Scrape": "Number of Results to Scrape",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Clean Output",
|
||||
"Model": "Model",
|
||||
"Prompt": "Prompt",
|
||||
"Aspect Ratio": "Aspect Ratio",
|
||||
"Number of Images": "Number of Images",
|
||||
"Seed": "Seed",
|
||||
"Output Format": "Output Format",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "Clean Output ?",
|
||||
"Render JavaScript ?": "Render JavaScript ?",
|
||||
"Page Limit": "Page Limit",
|
||||
"Crawl Depth": "Crawl Depth",
|
||||
"File": "File",
|
||||
"Extraction Prompt": "Extraction Prompt",
|
||||
"JSON Mode": "JSON Mode",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"Response is Binary ?": "Response is Binary ?",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Two-letter country code for location bias (e.g., \"US\" for United States).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Specific location to focus the search (e.g., \"New York, NY\").",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Language code for the search results (e.g., \"en\" for English).",
|
||||
"Filter results by date.": "Filter results by date.",
|
||||
"Page number for paginated results.": "Page number for paginated results.",
|
||||
"Whether to scrape top search results.": "Whether to scrape top search results.",
|
||||
"Number of top results to scrape (max: 10).": "Number of top results to scrape (max: 10).",
|
||||
"Format of scraped content": "Format of scraped content",
|
||||
"Whether the scraped output should be cleaned.": "Whether the scraped output should be cleaned.",
|
||||
"The search query for Google News.": "The search query for Google News.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Country code for location bias (e.g., \"US\" for United States).",
|
||||
"The model to use for image generation": "The model to use for image generation",
|
||||
"The text prompt for image generation": "The text prompt for image generation",
|
||||
"Aspect ratio of the generated image": "Aspect ratio of the generated image",
|
||||
"Number of images to generate (1-4)": "Number of images to generate (1-4)",
|
||||
"Seed for reproducible results": "Seed for reproducible results",
|
||||
"Format of the generated image": "Format of the generated image",
|
||||
"The format of the output": "The format of the output",
|
||||
"Whether the output should be cleaned.": "Whether the output should be cleaned.",
|
||||
"Whether to render JavaScript before scraping.": "Whether to render JavaScript before scraping.",
|
||||
"The website URL to crawl.": "The website URL to crawl.",
|
||||
"Maximum number of pages to crawl.": "Maximum number of pages to crawl.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Depth of crawling (distance between base URL path and sub paths).",
|
||||
"Format of the output content.": "Format of the output content.",
|
||||
"File URL or base64-encoded file.": "File URL or base64-encoded file.",
|
||||
"The prompt describing what data to extract from the document.": "The prompt describing what data to extract from the document.",
|
||||
"Whether to return the result in JSON format.": "Whether to return the result in JSON format.",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
|
||||
"Any Time": "Any Time",
|
||||
"Past Hour": "Past Hour",
|
||||
"Past Day": "Past Day",
|
||||
"Past Week": "Past Week",
|
||||
"Past Month": "Past Month",
|
||||
"Past Year": "Past Year",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Screenshot",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Square (1:1)",
|
||||
"Landscape 16:9": "Landscape 16:9",
|
||||
"Landscape 21:9": "Landscape 21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "Portrait 2:3",
|
||||
"Portrait 3:4": "Portrait 3:4",
|
||||
"Portrait 4:5": "Portrait 4:5",
|
||||
"Portrait 9:16": "Portrait 9:16",
|
||||
"Portrait 9:21": "Portrait 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "Text",
|
||||
"Raw": "Raw",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,112 @@
|
||||
{
|
||||
"Dumpling AI": "Dumpling AI",
|
||||
"Transform unstructured website content into clean, AI-ready data": "Transform unstructured website content into clean, AI-ready data",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Web Search",
|
||||
"Search News": "Search News",
|
||||
"Generate Image": "Generate Image",
|
||||
"Scrape Website": "Scrape Website",
|
||||
"Crawl Website": "Crawl Website",
|
||||
"Extract Document Data": "Extract Document Data",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Search the web and optionally retrieve content from top results.": "Search the web and optionally retrieve content from top results.",
|
||||
"Search for news articles using Google News.": "Search for news articles using Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Generate images based on a text prompt using AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Scrapes data from a specified URL and format the result.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Crawl a website and return structured content from multiple pages.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Extract structured data from documents using vision-capable AI.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Search Query": "Search Query",
|
||||
"Country": "Country",
|
||||
"Location": "Location",
|
||||
"Language": "Language",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Page Number",
|
||||
"Scrape Results": "Scrape Results",
|
||||
"Number of Results to Scrape": "Number of Results to Scrape",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Clean Output",
|
||||
"Model": "Model",
|
||||
"Prompt": "Prompt",
|
||||
"Aspect Ratio": "Aspect Ratio",
|
||||
"Number of Images": "Number of Images",
|
||||
"Seed": "Seed",
|
||||
"Output Format": "Output Format",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "Clean Output ?",
|
||||
"Render JavaScript ?": "Render JavaScript ?",
|
||||
"Page Limit": "Page Limit",
|
||||
"Crawl Depth": "Crawl Depth",
|
||||
"File": "File",
|
||||
"Extraction Prompt": "Extraction Prompt",
|
||||
"JSON Mode": "JSON Mode",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Two-letter country code for location bias (e.g., \"US\" for United States).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Specific location to focus the search (e.g., \"New York, NY\").",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Language code for the search results (e.g., \"en\" for English).",
|
||||
"Filter results by date.": "Filter results by date.",
|
||||
"Page number for paginated results.": "Page number for paginated results.",
|
||||
"Whether to scrape top search results.": "Whether to scrape top search results.",
|
||||
"Number of top results to scrape (max: 10).": "Number of top results to scrape (max: 10).",
|
||||
"Format of scraped content": "Format of scraped content",
|
||||
"Whether the scraped output should be cleaned.": "Whether the scraped output should be cleaned.",
|
||||
"The search query for Google News.": "The search query for Google News.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Country code for location bias (e.g., \"US\" for United States).",
|
||||
"The model to use for image generation": "The model to use for image generation",
|
||||
"The text prompt for image generation": "The text prompt for image generation",
|
||||
"Aspect ratio of the generated image": "Aspect ratio of the generated image",
|
||||
"Number of images to generate (1-4)": "Number of images to generate (1-4)",
|
||||
"Seed for reproducible results": "Seed for reproducible results",
|
||||
"Format of the generated image": "Format of the generated image",
|
||||
"The format of the output": "The format of the output",
|
||||
"Whether the output should be cleaned.": "Whether the output should be cleaned.",
|
||||
"Whether to render JavaScript before scraping.": "Whether to render JavaScript before scraping.",
|
||||
"The website URL to crawl.": "The website URL to crawl.",
|
||||
"Maximum number of pages to crawl.": "Maximum number of pages to crawl.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Depth of crawling (distance between base URL path and sub paths).",
|
||||
"Format of the output content.": "Format of the output content.",
|
||||
"File URL or base64-encoded file.": "File URL or base64-encoded file.",
|
||||
"The prompt describing what data to extract from the document.": "The prompt describing what data to extract from the document.",
|
||||
"Whether to return the result in JSON format.": "Whether to return the result in JSON format.",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"Any Time": "Any Time",
|
||||
"Past Hour": "Past Hour",
|
||||
"Past Day": "Past Day",
|
||||
"Past Week": "Past Week",
|
||||
"Past Month": "Past Month",
|
||||
"Past Year": "Past Year",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Screenshot",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Square (1:1)",
|
||||
"Landscape 16:9": "Landscape 16:9",
|
||||
"Landscape 21:9": "Landscape 21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "Portrait 2:3",
|
||||
"Portrait 3:4": "Portrait 3:4",
|
||||
"Portrait 4:5": "Portrait 4:5",
|
||||
"Portrait 9:16": "Portrait 9:16",
|
||||
"Portrait 9:21": "Portrait 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "Text",
|
||||
"Raw": "Raw",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"Transform unstructured website content into clean, AI-ready data": "Transform unstructured website content into clean, AI-ready data",
|
||||
"\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).": "\n You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).",
|
||||
"Web Search": "Web Search",
|
||||
"Search News": "Search News",
|
||||
"Generate Image": "Generate Image",
|
||||
"Scrape Website": "Scrape Website",
|
||||
"Crawl Website": "Crawl Website",
|
||||
"Extract Document Data": "Extract Document Data",
|
||||
"Custom API Call": "自定义 API 呼叫",
|
||||
"Search the web and optionally retrieve content from top results.": "Search the web and optionally retrieve content from top results.",
|
||||
"Search for news articles using Google News.": "Search for news articles using Google News.",
|
||||
"Generate images based on a text prompt using AI.": "Generate images based on a text prompt using AI.",
|
||||
"Scrapes data from a specified URL and format the result.": "Scrapes data from a specified URL and format the result.",
|
||||
"Crawl a website and return structured content from multiple pages.": "Crawl a website and return structured content from multiple pages.",
|
||||
"Extract structured data from documents using vision-capable AI.": "Extract structured data from documents using vision-capable AI.",
|
||||
"Make a custom API call to a specific endpoint": "将一个自定义 API 调用到一个特定的终点",
|
||||
"Search Query": "Search Query",
|
||||
"Country": "Country",
|
||||
"Location": "Location",
|
||||
"Language": "Language",
|
||||
"Date Range": "Date Range",
|
||||
"Page Number": "Page Number",
|
||||
"Scrape Results": "Scrape Results",
|
||||
"Number of Results to Scrape": "Number of Results to Scrape",
|
||||
"Scrape Format": "Scrape Format",
|
||||
"Clean Output": "Clean Output",
|
||||
"Model": "Model",
|
||||
"Prompt": "Prompt",
|
||||
"Aspect Ratio": "Aspect Ratio",
|
||||
"Number of Images": "Number of Images",
|
||||
"Seed": "Seed",
|
||||
"Output Format": "Output Format",
|
||||
"URL": "URL",
|
||||
"Clean Output ?": "Clean Output ?",
|
||||
"Render JavaScript ?": "Render JavaScript ?",
|
||||
"Page Limit": "Page Limit",
|
||||
"Crawl Depth": "Crawl Depth",
|
||||
"File": "文件",
|
||||
"Extraction Prompt": "Extraction Prompt",
|
||||
"JSON Mode": "JSON Mode",
|
||||
"Method": "方法",
|
||||
"Headers": "信头",
|
||||
"Query Parameters": "查询参数",
|
||||
"Body": "正文内容",
|
||||
"Response is Binary ?": "Response is Binary ?",
|
||||
"No Error on Failure": "失败时没有错误",
|
||||
"Timeout (in seconds)": "超时(秒)",
|
||||
"Two-letter country code for location bias (e.g., \"US\" for United States).": "Two-letter country code for location bias (e.g., \"US\" for United States).",
|
||||
"Specific location to focus the search (e.g., \"New York, NY\").": "Specific location to focus the search (e.g., \"New York, NY\").",
|
||||
"Language code for the search results (e.g., \"en\" for English).": "Language code for the search results (e.g., \"en\" for English).",
|
||||
"Filter results by date.": "Filter results by date.",
|
||||
"Page number for paginated results.": "Page number for paginated results.",
|
||||
"Whether to scrape top search results.": "Whether to scrape top search results.",
|
||||
"Number of top results to scrape (max: 10).": "Number of top results to scrape (max: 10).",
|
||||
"Format of scraped content": "Format of scraped content",
|
||||
"Whether the scraped output should be cleaned.": "Whether the scraped output should be cleaned.",
|
||||
"The search query for Google News.": "The search query for Google News.",
|
||||
"Country code for location bias (e.g., \"US\" for United States).": "Country code for location bias (e.g., \"US\" for United States).",
|
||||
"The model to use for image generation": "The model to use for image generation",
|
||||
"The text prompt for image generation": "The text prompt for image generation",
|
||||
"Aspect ratio of the generated image": "Aspect ratio of the generated image",
|
||||
"Number of images to generate (1-4)": "Number of images to generate (1-4)",
|
||||
"Seed for reproducible results": "Seed for reproducible results",
|
||||
"Format of the generated image": "Format of the generated image",
|
||||
"The format of the output": "The format of the output",
|
||||
"Whether the output should be cleaned.": "Whether the output should be cleaned.",
|
||||
"Whether to render JavaScript before scraping.": "Whether to render JavaScript before scraping.",
|
||||
"The website URL to crawl.": "The website URL to crawl.",
|
||||
"Maximum number of pages to crawl.": "Maximum number of pages to crawl.",
|
||||
"Depth of crawling (distance between base URL path and sub paths).": "Depth of crawling (distance between base URL path and sub paths).",
|
||||
"Format of the output content.": "Format of the output content.",
|
||||
"File URL or base64-encoded file.": "File URL or base64-encoded file.",
|
||||
"The prompt describing what data to extract from the document.": "The prompt describing what data to extract from the document.",
|
||||
"Whether to return the result in JSON format.": "Whether to return the result in JSON format.",
|
||||
"Authorization headers are injected automatically from your connection.": "授权头自动从您的连接中注入。",
|
||||
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
|
||||
"Any Time": "Any Time",
|
||||
"Past Hour": "Past Hour",
|
||||
"Past Day": "Past Day",
|
||||
"Past Week": "Past Week",
|
||||
"Past Month": "Past Month",
|
||||
"Past Year": "Past Year",
|
||||
"Markdown": "Markdown",
|
||||
"HTML": "HTML",
|
||||
"Screenshot": "Screenshot",
|
||||
"FLUX.1-schnell": "FLUX.1-schnell",
|
||||
"FLUX.1-dev": "FLUX.1-dev",
|
||||
"FLUX.1-pro": "FLUX.1-pro",
|
||||
"FLUX.1.1-pro": "FLUX.1.1-pro",
|
||||
"recraft-v3": "recraft-v3",
|
||||
"Square (1:1)": "Square (1:1)",
|
||||
"Landscape 16:9": "Landscape 16:9",
|
||||
"Landscape 21:9": "Landscape 21:9",
|
||||
"Landscape 3:2": "Landscape 3:2",
|
||||
"Landscape 4:3": "Landscape 4:3",
|
||||
"Portrait 2:3": "Portrait 2:3",
|
||||
"Portrait 3:4": "Portrait 3:4",
|
||||
"Portrait 4:5": "Portrait 4:5",
|
||||
"Portrait 9:16": "Portrait 9:16",
|
||||
"Portrait 9:21": "Portrait 9:21",
|
||||
"WebP": "WebP",
|
||||
"JPG": "JPG",
|
||||
"PNG": "PNG",
|
||||
"Text": "文本",
|
||||
"Raw": "原始文件",
|
||||
"GET": "获取",
|
||||
"POST": "帖子",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "弹出",
|
||||
"DELETE": "删除",
|
||||
"HEAD": "黑色"
|
||||
}
|
||||
@@ -0,0 +1,70 @@
|
||||
import { createPiece, PieceAuth } from '@activepieces/pieces-framework';
|
||||
import {
|
||||
webSearch,
|
||||
searchNews,
|
||||
generateImage,
|
||||
scrapeWebsite,
|
||||
crawlWebsite,
|
||||
extractDocument,
|
||||
} from './lib/actions';
|
||||
import { PieceCategory } from '@activepieces/shared';
|
||||
import {
|
||||
AuthenticationType,
|
||||
createCustomApiCallAction,
|
||||
httpClient,
|
||||
HttpMethod,
|
||||
} from '@activepieces/pieces-common';
|
||||
|
||||
export const dumplingAuth = PieceAuth.SecretText({
|
||||
displayName: 'API Key',
|
||||
required: true,
|
||||
description: `
|
||||
You can obtain API key from [API Section](https://app.dumplingai.com/api-keys).`,
|
||||
validate: async ({ auth }) => {
|
||||
try {
|
||||
await httpClient.sendRequest({
|
||||
url: 'https://app.dumplingai.com/api/v1/search',
|
||||
method: HttpMethod.POST,
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth,
|
||||
},
|
||||
body: {
|
||||
query: 'Activepieces',
|
||||
},
|
||||
});
|
||||
|
||||
return {
|
||||
valid: true,
|
||||
};
|
||||
} catch (e) {
|
||||
return { valid: false, error: 'Invalid API Key.' };
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
export const dumplingAi = createPiece({
|
||||
displayName: 'Dumpling AI',
|
||||
description:'Transform unstructured website content into clean, AI-ready data',
|
||||
auth: dumplingAuth,
|
||||
minimumSupportedRelease: '0.36.1',
|
||||
logoUrl: 'https://cdn.activepieces.com/pieces/dumpling-ai.png',
|
||||
authors: ['neo773'],
|
||||
categories: [PieceCategory.ARTIFICIAL_INTELLIGENCE, PieceCategory.PRODUCTIVITY],
|
||||
actions: [
|
||||
webSearch,
|
||||
searchNews,
|
||||
generateImage,
|
||||
scrapeWebsite,
|
||||
crawlWebsite,
|
||||
extractDocument,
|
||||
createCustomApiCallAction({
|
||||
baseUrl: () => 'https://app.dumplingai.com/api/v1',
|
||||
auth: dumplingAuth,
|
||||
authMapping: async (auth) => ({
|
||||
Authorization: `Bearer ${auth}`,
|
||||
}),
|
||||
}),
|
||||
],
|
||||
triggers: [],
|
||||
});
|
||||
@@ -0,0 +1,66 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { httpClient, HttpMethod } from '@activepieces/pieces-common';
|
||||
import { dumplingAuth } from '../../index';
|
||||
|
||||
export const crawlWebsite = createAction({
|
||||
name: 'crawl_website',
|
||||
auth: dumplingAuth,
|
||||
displayName: 'Crawl Website',
|
||||
description: 'Crawl a website and return structured content from multiple pages.',
|
||||
props: {
|
||||
url: Property.ShortText({
|
||||
displayName: 'URL',
|
||||
required: true,
|
||||
description: 'The website URL to crawl.',
|
||||
}),
|
||||
limit: Property.Number({
|
||||
displayName: 'Page Limit',
|
||||
required: false,
|
||||
defaultValue: 5,
|
||||
description: 'Maximum number of pages to crawl.',
|
||||
}),
|
||||
depth: Property.Number({
|
||||
displayName: 'Crawl Depth',
|
||||
required: false,
|
||||
defaultValue: 2,
|
||||
description: 'Depth of crawling (distance between base URL path and sub paths).',
|
||||
}),
|
||||
format: Property.StaticDropdown({
|
||||
displayName: 'Output Format',
|
||||
required: false,
|
||||
defaultValue: 'markdown',
|
||||
options: {
|
||||
options: [
|
||||
{ label: 'Markdown', value: 'markdown' },
|
||||
{ label: 'Text', value: 'text' },
|
||||
{ label: 'Raw', value: 'raw' },
|
||||
],
|
||||
},
|
||||
description: 'Format of the output content.',
|
||||
}),
|
||||
},
|
||||
async run(context) {
|
||||
const { url, limit, depth, format } = context.propsValue;
|
||||
|
||||
const requestBody: Record<string, any> = {
|
||||
url,
|
||||
};
|
||||
|
||||
// Add optional parameters if provided
|
||||
if (limit !== undefined) requestBody['limit'] = limit;
|
||||
if (depth !== undefined) requestBody['depth'] = depth;
|
||||
if (format) requestBody['format'] = format;
|
||||
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://app.dumplingai.com/api/v1/crawl',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${context.auth.secret_text}`,
|
||||
},
|
||||
body: requestBody,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,52 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { httpClient, HttpMethod } from '@activepieces/pieces-common';
|
||||
import { dumplingAuth } from '../../index';
|
||||
|
||||
export const extractDocument = createAction({
|
||||
name: 'extract_document',
|
||||
auth: dumplingAuth,
|
||||
displayName: 'Extract Document Data',
|
||||
description: 'Extract structured data from documents using vision-capable AI.',
|
||||
props: {
|
||||
file: Property.File({
|
||||
displayName: 'File',
|
||||
required: true,
|
||||
description: 'File URL or base64-encoded file.',
|
||||
}),
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Extraction Prompt',
|
||||
required: true,
|
||||
description: 'The prompt describing what data to extract from the document.',
|
||||
}),
|
||||
jsonMode: Property.Checkbox({
|
||||
displayName: 'JSON Mode',
|
||||
required: false,
|
||||
defaultValue: false,
|
||||
description: 'Whether to return the result in JSON format.',
|
||||
}),
|
||||
},
|
||||
async run(context) {
|
||||
const { file, prompt, jsonMode } = context.propsValue;
|
||||
|
||||
const requestBody: Record<string, any> = {
|
||||
inputMethod: 'base64',
|
||||
files: [file.base64],
|
||||
prompt,
|
||||
};
|
||||
|
||||
// Add optional parameters if provided
|
||||
if (jsonMode !== undefined) requestBody['jsonMode'] = jsonMode;
|
||||
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://app.dumplingai.com/api/v1/extract-document',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${context.auth.secret_text}`,
|
||||
},
|
||||
body: requestBody,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,107 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { httpClient, HttpMethod } from '@activepieces/pieces-common';
|
||||
import { dumplingAuth } from '../../index';
|
||||
|
||||
export const generateImage = createAction({
|
||||
name: 'generate_image',
|
||||
auth: dumplingAuth,
|
||||
displayName: 'Generate Image',
|
||||
description: 'Generate images based on a text prompt using AI.',
|
||||
props: {
|
||||
model: Property.StaticDropdown({
|
||||
displayName: 'Model',
|
||||
required: true,
|
||||
description: 'The model to use for image generation',
|
||||
defaultValue: 'FLUX.1-schnell',
|
||||
options: {
|
||||
options: [
|
||||
{ label: 'FLUX.1-schnell', value: 'FLUX.1-schnell' },
|
||||
{ label: 'FLUX.1-dev', value: 'FLUX.1-dev' },
|
||||
{ label: 'FLUX.1-pro', value: 'FLUX.1-pro' },
|
||||
{ label: 'FLUX.1.1-pro', value: 'FLUX.1.1-pro' },
|
||||
{ label: 'recraft-v3', value: 'recraft-v3' },
|
||||
],
|
||||
},
|
||||
}),
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Prompt',
|
||||
required: true,
|
||||
description: 'The text prompt for image generation',
|
||||
}),
|
||||
aspect_ratio: Property.StaticDropdown({
|
||||
displayName: 'Aspect Ratio',
|
||||
required: false,
|
||||
description: 'Aspect ratio of the generated image',
|
||||
defaultValue: '1:1',
|
||||
options: {
|
||||
options: [
|
||||
{ label: 'Square (1:1)', value: '1:1' },
|
||||
{ label: 'Landscape 16:9', value: '16:9' },
|
||||
{ label: 'Landscape 21:9', value: '21:9' },
|
||||
{ label: 'Landscape 3:2', value: '3:2' },
|
||||
{ label: 'Landscape 4:3', value: '4:3' },
|
||||
{ label: 'Portrait 2:3', value: '2:3' },
|
||||
{ label: 'Portrait 3:4', value: '3:4' },
|
||||
{ label: 'Portrait 4:5', value: '4:5' },
|
||||
{ label: 'Portrait 9:16', value: '9:16' },
|
||||
{ label: 'Portrait 9:21', value: '9:21' },
|
||||
],
|
||||
},
|
||||
}),
|
||||
num_outputs: Property.Number({
|
||||
displayName: 'Number of Images',
|
||||
required: false,
|
||||
defaultValue: 1,
|
||||
description: 'Number of images to generate (1-4)',
|
||||
}),
|
||||
seed: Property.Number({
|
||||
displayName: 'Seed',
|
||||
required: false,
|
||||
description: 'Seed for reproducible results',
|
||||
}),
|
||||
output_format: Property.StaticDropdown({
|
||||
displayName: 'Output Format',
|
||||
required: false,
|
||||
description: 'Format of the generated image',
|
||||
defaultValue: 'webp',
|
||||
options: {
|
||||
options: [
|
||||
{ label: 'WebP', value: 'webp' },
|
||||
{ label: 'JPG', value: 'jpg' },
|
||||
{ label: 'PNG', value: 'png' },
|
||||
],
|
||||
},
|
||||
}),
|
||||
},
|
||||
async run(context) {
|
||||
const { model, prompt, aspect_ratio, num_outputs, seed, output_format } = context.propsValue;
|
||||
|
||||
// Prepare the input object based on the selected model
|
||||
const input: Record<string, any> = {
|
||||
prompt: prompt,
|
||||
};
|
||||
|
||||
// Add common optional parameters
|
||||
if (seed !== undefined) input['seed'] = seed;
|
||||
if (aspect_ratio) input['aspect_ratio'] = aspect_ratio;
|
||||
if (num_outputs) input['num_outputs'] = num_outputs;
|
||||
if (output_format) input['output_format'] = output_format;
|
||||
|
||||
const requestBody = {
|
||||
model: model,
|
||||
input: input,
|
||||
};
|
||||
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://app.dumplingai.com/api/v1/generate-ai-image',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${context.auth.secret_text}`,
|
||||
},
|
||||
body: requestBody,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,8 @@
|
||||
import { webSearch } from './web-search';
|
||||
import { searchNews } from './search-news';
|
||||
import { generateImage } from './generate-image';
|
||||
import { scrapeWebsite } from './scrape-website';
|
||||
import { crawlWebsite } from './crawl-website';
|
||||
import { extractDocument } from './extract-document';
|
||||
|
||||
export { webSearch, searchNews, generateImage, scrapeWebsite, crawlWebsite, extractDocument };
|
||||
@@ -0,0 +1,65 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { httpClient, HttpMethod } from '@activepieces/pieces-common';
|
||||
import { dumplingAuth } from '../../index';
|
||||
|
||||
export const scrapeWebsite = createAction({
|
||||
name: 'scrape_website',
|
||||
auth: dumplingAuth,
|
||||
displayName: 'Scrape Website',
|
||||
description: 'Scrapes data from a specified URL and format the result.',
|
||||
props: {
|
||||
url: Property.ShortText({
|
||||
displayName: 'URL',
|
||||
required: true,
|
||||
}),
|
||||
format: Property.StaticDropdown({
|
||||
displayName: 'Output Format',
|
||||
required: false,
|
||||
defaultValue: 'markdown',
|
||||
options: {
|
||||
options: [
|
||||
{ label: 'Markdown', value: 'markdown' },
|
||||
{ label: 'HTML', value: 'html' },
|
||||
{ label: 'Screenshot', value: 'screenshot' },
|
||||
],
|
||||
},
|
||||
description: 'The format of the output',
|
||||
}),
|
||||
cleaned: Property.Checkbox({
|
||||
displayName: 'Clean Output ?',
|
||||
required: false,
|
||||
defaultValue: true,
|
||||
description: 'Whether the output should be cleaned.',
|
||||
}),
|
||||
renderJs: Property.Checkbox({
|
||||
displayName: 'Render JavaScript ?',
|
||||
required: false,
|
||||
defaultValue: true,
|
||||
description: 'Whether to render JavaScript before scraping.',
|
||||
}),
|
||||
},
|
||||
async run(context) {
|
||||
const { url, format, cleaned, renderJs } = context.propsValue;
|
||||
|
||||
const requestBody: Record<string, any> = {
|
||||
url,
|
||||
};
|
||||
|
||||
// Add optional parameters if provided
|
||||
if (format) requestBody['format'] = format;
|
||||
if (cleaned !== undefined) requestBody['cleaned'] = cleaned;
|
||||
if (renderJs !== undefined) requestBody['renderJs'] = renderJs;
|
||||
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://app.dumplingai.com/api/v1/scrape',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${context.auth.secret_text}`,
|
||||
},
|
||||
body: requestBody,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,78 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { httpClient, HttpMethod } from '@activepieces/pieces-common';
|
||||
import { dumplingAuth } from '../../index';
|
||||
|
||||
export const searchNews = createAction({
|
||||
name: 'search_news',
|
||||
auth: dumplingAuth,
|
||||
displayName: 'Search News',
|
||||
description: 'Search for news articles using Google News.',
|
||||
props: {
|
||||
query: Property.ShortText({
|
||||
displayName: 'Search Query',
|
||||
required: true,
|
||||
description: 'The search query for Google News.',
|
||||
}),
|
||||
country: Property.ShortText({
|
||||
displayName: 'Country',
|
||||
required: false,
|
||||
description: 'Country code for location bias (e.g., "US" for United States).',
|
||||
}),
|
||||
location: Property.ShortText({
|
||||
displayName: 'Location',
|
||||
required: false,
|
||||
description: 'Specific location to focus the search (e.g., "New York, NY").',
|
||||
}),
|
||||
language: Property.ShortText({
|
||||
displayName: 'Language',
|
||||
required: false,
|
||||
description: 'Language code for the search results (e.g., "en" for English).',
|
||||
}),
|
||||
dateRange: Property.StaticDropdown({
|
||||
displayName: 'Date Range',
|
||||
required: false,
|
||||
options: {
|
||||
options: [
|
||||
{ label: 'Any Time', value: 'anyTime' },
|
||||
{ label: 'Past Hour', value: 'pastHour' },
|
||||
{ label: 'Past Day', value: 'pastDay' },
|
||||
{ label: 'Past Week', value: 'pastWeek' },
|
||||
{ label: 'Past Month', value: 'pastMonth' },
|
||||
{ label: 'Past Year', value: 'pastYear' },
|
||||
],
|
||||
},
|
||||
description: 'Filter results by date.',
|
||||
}),
|
||||
page: Property.Number({
|
||||
displayName: 'Page Number',
|
||||
required: false,
|
||||
description: 'Page number for paginated results.',
|
||||
}),
|
||||
},
|
||||
async run(context) {
|
||||
const { query, country, location, language, dateRange, page } = context.propsValue;
|
||||
|
||||
const requestBody: Record<string, any> = {
|
||||
query,
|
||||
};
|
||||
|
||||
// Add optional parameters if provided
|
||||
if (country) requestBody['country'] = country;
|
||||
if (location) requestBody['location'] = location;
|
||||
if (language) requestBody['language'] = language;
|
||||
if (dateRange) requestBody['dateRange'] = dateRange;
|
||||
if (page) requestBody['page'] = page;
|
||||
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://app.dumplingai.com/api/v1/search-news',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${context.auth.secret_text}`,
|
||||
},
|
||||
body: requestBody,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,128 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { httpClient, HttpMethod } from '@activepieces/pieces-common';
|
||||
import { dumplingAuth } from '../../index';
|
||||
|
||||
export const webSearch = createAction({
|
||||
name: 'web_search',
|
||||
auth: dumplingAuth,
|
||||
displayName: 'Web Search',
|
||||
description: 'Search the web and optionally retrieve content from top results.',
|
||||
props: {
|
||||
query: Property.ShortText({
|
||||
displayName: 'Search Query',
|
||||
required: true,
|
||||
}),
|
||||
country: Property.ShortText({
|
||||
displayName: 'Country',
|
||||
required: false,
|
||||
description: 'Two-letter country code for location bias (e.g., "US" for United States).',
|
||||
}),
|
||||
location: Property.ShortText({
|
||||
displayName: 'Location',
|
||||
required: false,
|
||||
description: 'Specific location to focus the search (e.g., "New York, NY").',
|
||||
}),
|
||||
language: Property.ShortText({
|
||||
displayName: 'Language',
|
||||
required: false,
|
||||
description: 'Language code for the search results (e.g., "en" for English).',
|
||||
}),
|
||||
dateRange: Property.StaticDropdown({
|
||||
displayName: 'Date Range',
|
||||
required: false,
|
||||
options: {
|
||||
options: [
|
||||
{ label: 'Any Time', value: 'anyTime' },
|
||||
{ label: 'Past Hour', value: 'pastHour' },
|
||||
{ label: 'Past Day', value: 'pastDay' },
|
||||
{ label: 'Past Week', value: 'pastWeek' },
|
||||
{ label: 'Past Month', value: 'pastMonth' },
|
||||
{ label: 'Past Year', value: 'pastYear' },
|
||||
],
|
||||
},
|
||||
description: 'Filter results by date.',
|
||||
}),
|
||||
page: Property.Number({
|
||||
displayName: 'Page Number',
|
||||
required: false,
|
||||
description: 'Page number for paginated results.',
|
||||
}),
|
||||
scrapeResults: Property.Checkbox({
|
||||
displayName: 'Scrape Results',
|
||||
required: false,
|
||||
defaultValue: false,
|
||||
description: 'Whether to scrape top search results.',
|
||||
}),
|
||||
numResultsToScrape: Property.Number({
|
||||
displayName: 'Number of Results to Scrape',
|
||||
required: false,
|
||||
defaultValue: 3,
|
||||
description: 'Number of top results to scrape (max: 10).',
|
||||
}),
|
||||
scrapeFormat: Property.StaticDropdown({
|
||||
displayName: 'Scrape Format',
|
||||
required: false,
|
||||
defaultValue: 'markdown',
|
||||
options: {
|
||||
options: [
|
||||
{ label: 'Markdown', value: 'markdown' },
|
||||
{ label: 'HTML', value: 'html' },
|
||||
{ label: 'Screenshot', value: 'screenshot' },
|
||||
],
|
||||
},
|
||||
description: 'Format of scraped content',
|
||||
}),
|
||||
cleanedOutput: Property.Checkbox({
|
||||
displayName: 'Clean Output',
|
||||
required: false,
|
||||
defaultValue: true,
|
||||
description: 'Whether the scraped output should be cleaned.',
|
||||
}),
|
||||
},
|
||||
async run(context) {
|
||||
const {
|
||||
query,
|
||||
country,
|
||||
location,
|
||||
language,
|
||||
dateRange,
|
||||
page,
|
||||
scrapeResults,
|
||||
numResultsToScrape,
|
||||
scrapeFormat,
|
||||
cleanedOutput,
|
||||
} = context.propsValue;
|
||||
|
||||
const requestBody: Record<string, any> = {
|
||||
query,
|
||||
};
|
||||
|
||||
// Add optional parameters if provided
|
||||
if (country) requestBody['country'] = country;
|
||||
if (location) requestBody['location'] = location;
|
||||
if (language) requestBody['language'] = language;
|
||||
if (dateRange) requestBody['dateRange'] = dateRange;
|
||||
if (page) requestBody['page'] = page;
|
||||
if (scrapeResults !== undefined) requestBody['scrapeResults'] = scrapeResults;
|
||||
if (numResultsToScrape) requestBody['numResultsToScrape'] = numResultsToScrape;
|
||||
|
||||
// Add scrape options if scraping is enabled
|
||||
if (scrapeResults) {
|
||||
requestBody['scrapeOptions'] = {};
|
||||
if (scrapeFormat) requestBody['scrapeOptions']['format'] = scrapeFormat;
|
||||
if (cleanedOutput !== undefined) requestBody['scrapeOptions']['cleaned'] = cleanedOutput;
|
||||
}
|
||||
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://app.dumplingai.com/api/v1/search',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${context.auth.secret_text}`,
|
||||
},
|
||||
body: requestBody,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"extends": "../../../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"module": "commonjs",
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"strict": true,
|
||||
"noImplicitOverride": true,
|
||||
"noImplicitReturns": true,
|
||||
"noFallthroughCasesInSwitch": true,
|
||||
"noPropertyAccessFromIndexSignature": true
|
||||
},
|
||||
"files": [],
|
||||
"include": [],
|
||||
"references": [
|
||||
{
|
||||
"path": "./tsconfig.lib.json"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"extends": "./tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"module": "commonjs",
|
||||
"outDir": "../../../../dist/out-tsc",
|
||||
"declaration": true,
|
||||
"types": ["node"]
|
||||
},
|
||||
"exclude": ["jest.config.ts", "src/**/*.spec.ts", "src/**/*.test.ts"],
|
||||
"include": ["src/**/*.ts"]
|
||||
}
|
||||
Reference in New Issue
Block a user