Add Activepieces integration for workflow automation
- Add Activepieces fork with SmoothSchedule custom piece - Create integrations app with Activepieces service layer - Add embed token endpoint for iframe integration - Create Automations page with embedded workflow builder - Add sidebar visibility fix for embed mode - Add list inactive customers endpoint to Public API - Include SmoothSchedule triggers: event created/updated/cancelled - Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"extends": [
|
||||
"../../../../.eslintrc.base.json"
|
||||
],
|
||||
"ignorePatterns": [
|
||||
"!**/*"
|
||||
],
|
||||
"overrides": [
|
||||
{
|
||||
"files": [
|
||||
"*.ts",
|
||||
"*.tsx",
|
||||
"*.js",
|
||||
"*.jsx"
|
||||
],
|
||||
"rules": {}
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"*.ts",
|
||||
"*.tsx"
|
||||
],
|
||||
"rules": {}
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"*.js",
|
||||
"*.jsx"
|
||||
],
|
||||
"rules": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
# pieces-mistral-ai
|
||||
|
||||
This library was generated with [Nx](https://nx.dev).
|
||||
|
||||
## Building
|
||||
|
||||
Run `nx build pieces-mistral-ai` to build the library.
|
||||
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"name": "@activepieces/piece-mistral-ai",
|
||||
"version": "0.0.9"
|
||||
}
|
||||
@@ -0,0 +1,65 @@
|
||||
{
|
||||
"name": "pieces-mistral-ai",
|
||||
"$schema": "../../../../node_modules/nx/schemas/project-schema.json",
|
||||
"sourceRoot": "packages/pieces/community/mistral-ai/src",
|
||||
"projectType": "library",
|
||||
"release": {
|
||||
"version": {
|
||||
"manifestRootsToUpdate": [
|
||||
"dist/{projectRoot}"
|
||||
],
|
||||
"currentVersionResolver": "git-tag",
|
||||
"fallbackCurrentVersionResolver": "disk"
|
||||
}
|
||||
},
|
||||
"tags": [],
|
||||
"targets": {
|
||||
"build": {
|
||||
"executor": "@nx/js:tsc",
|
||||
"outputs": [
|
||||
"{options.outputPath}"
|
||||
],
|
||||
"options": {
|
||||
"outputPath": "dist/packages/pieces/community/mistral-ai",
|
||||
"tsConfig": "packages/pieces/community/mistral-ai/tsconfig.lib.json",
|
||||
"packageJson": "packages/pieces/community/mistral-ai/package.json",
|
||||
"main": "packages/pieces/community/mistral-ai/src/index.ts",
|
||||
"assets": [
|
||||
"packages/pieces/community/mistral-ai/*.md",
|
||||
{
|
||||
"input": "packages/pieces/community/mistral-ai/src/i18n",
|
||||
"output": "./src/i18n",
|
||||
"glob": "**/!(i18n.json)"
|
||||
}
|
||||
],
|
||||
"buildableProjectDepsInPackageJsonType": "dependencies",
|
||||
"updateBuildableProjectDepsInPackageJson": true
|
||||
},
|
||||
"dependsOn": [
|
||||
"^build",
|
||||
"prebuild"
|
||||
]
|
||||
},
|
||||
"nx-release-publish": {
|
||||
"options": {
|
||||
"packageRoot": "dist/{projectRoot}"
|
||||
}
|
||||
},
|
||||
"lint": {
|
||||
"executor": "@nx/eslint:lint",
|
||||
"outputs": [
|
||||
"{options.outputFile}"
|
||||
]
|
||||
},
|
||||
"prebuild": {
|
||||
"executor": "nx:run-commands",
|
||||
"options": {
|
||||
"cwd": "packages/pieces/community/mistral-ai",
|
||||
"command": "bun install --no-save --silent"
|
||||
},
|
||||
"dependsOn": [
|
||||
"^build"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI bietet hochmoderne offene und gehostete Sprachmodelle für Textgeneration, Einbettung und Argumentationsaufgaben.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "Du kannst deinen API-Schlüssel über das Mistral KI-Dashboard erhalten. Gehe zu https://console.mistral.ai, generiere einen API-Schlüssel und füge ihn hier ein.",
|
||||
"Ask Mistral": "Mistral fragen",
|
||||
"Create Embeddings": "Einbettungen erstellen",
|
||||
"Upload File": "Datei hochladen",
|
||||
"List Models": "Listenmodelle",
|
||||
"Custom API Call": "Eigener API-Aufruf",
|
||||
"Ask Mistral anything you want!": "Fragen Sie Mistral was Sie wollen!",
|
||||
"Creates new embedding in Mistral AI.": "Erstellt neue Einbettung in Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Laden Sie eine Datei auf Mistral AI hoch (z.B. für Feineinstellungen oder Kontextspeicher).",
|
||||
"Retrieves a list of available Mistral AI models.": "Ruft eine Liste der verfügbaren Mistral AI Modelle ab.",
|
||||
"Make a custom API call to a specific endpoint": "Einen benutzerdefinierten API-Aufruf an einen bestimmten Endpunkt machen",
|
||||
"Model": "Modell",
|
||||
"Question": "Frage",
|
||||
"Temperature": "Temperatur",
|
||||
"Top P": "Oben P",
|
||||
"Max Tokens": "Max. Token",
|
||||
"Random Seed": "Zufälliger Seed",
|
||||
"Timeout (ms)": "Timeout (ms)",
|
||||
"Input": "Eingabe",
|
||||
"File": "Datei",
|
||||
"Purpose": "Zweck",
|
||||
"Method": "Methode",
|
||||
"Headers": "Kopfzeilen",
|
||||
"Query Parameters": "Abfrageparameter",
|
||||
"Body": "Körper",
|
||||
"Response is Binary ?": "Antwort ist binär?",
|
||||
"No Error on Failure": "Kein Fehler bei Fehler",
|
||||
"Timeout (in seconds)": "Timeout (in Sekunden)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Wählen Sie ein Mistral Modell. Die Liste wird live von Ihrem Konto geladen.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Kontrolliert Zufallszufälligkeit: Die Verringerung führt zu weniger zufälligen Vervollständigungen. Je näher die Temperatur Null rückt, desto deterministischer und sich wiederholender wird.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Eine Alternative zur Probenahme mit Temperatur, genannt Nucleus Probenahme, bei der das Modell die Ergebnisse der Tokens mit der Top_p Wahrscheinlichkeitsmasse berücksichtigt. 0,1 bedeutet also nur die Token, die die obersten 10% Wahrscheinlichkeitsmasse ausmachen.",
|
||||
"The input text for which to create an embedding.": "Der Eingabetext, für den eine Einbettung erstellt werden soll.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "Die hochzuladende Datei (max. 512MB).Für Feintuning stellt die Datei .jsonl zur Verfügung.",
|
||||
"Purpose of the file.": "Zweck der Datei.",
|
||||
"Authorization headers are injected automatically from your connection.": "Autorisierungs-Header werden automatisch von Ihrer Verbindung injiziert.",
|
||||
"Enable for files like PDFs, images, etc..": "Aktivieren für Dateien wie PDFs, Bilder, etc..",
|
||||
"fine-tune": "feintune",
|
||||
"batch": "stapeln",
|
||||
"ocr": "ocr",
|
||||
"GET": "ERHALTEN",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "LÖSCHEN",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI proporciona modernos modelos de lenguaje de peso abierto y hospedado para la generación de texto, incrustaciones y tareas de razonamiento.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "Puedes obtener tu clave API del panel de control de IA Mistral. Ve a https://console.mistral.ai, genera una clave API y pégala aquí.",
|
||||
"Ask Mistral": "Preguntar Mistral",
|
||||
"Create Embeddings": "Crear incrustaciones",
|
||||
"Upload File": "Subir archivo",
|
||||
"List Models": "Lista de modelos",
|
||||
"Custom API Call": "Llamada API personalizada",
|
||||
"Ask Mistral anything you want!": "Pregúntele a Mistral lo que quieras!",
|
||||
"Creates new embedding in Mistral AI.": "Crea una nueva incrustación en Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Subir un archivo a Mistral AI (por ejemplo, para ajustar o almacenar el contexto).",
|
||||
"Retrieves a list of available Mistral AI models.": "Recupere una lista de modelos disponibles de IA Mistral.",
|
||||
"Make a custom API call to a specific endpoint": "Hacer una llamada API personalizada a un extremo específico",
|
||||
"Model": "Modelo",
|
||||
"Question": "Pregunta",
|
||||
"Temperature": "Temperatura",
|
||||
"Top P": "Top P",
|
||||
"Max Tokens": "Tokens máximos",
|
||||
"Random Seed": "Semilla aleatoria",
|
||||
"Timeout (ms)": "Tiempo agotado (ms)",
|
||||
"Input": "Input",
|
||||
"File": "Archivo",
|
||||
"Purpose": "Propósito",
|
||||
"Method": "Método",
|
||||
"Headers": "Encabezados",
|
||||
"Query Parameters": "Parámetros de consulta",
|
||||
"Body": "Cuerpo",
|
||||
"Response is Binary ?": "¿Respuesta es binaria?",
|
||||
"No Error on Failure": "No hay ningún error en fallo",
|
||||
"Timeout (in seconds)": "Tiempo de espera (en segundos)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Seleccione un modelo Mistral. La lista se obtiene en vivo desde su cuenta.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controles aleatorios: La reducción de resultados en terminaciones menos aleatorias. A medida que la temperatura se acerca a cero, el modelo se volverá determinista y repetitivo.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Una alternativa al muestreo con temperatura, llamado muestreo de núcleos, donde el modelo considera los resultados de los tokens con masa de probabilidad superior_p. Por lo tanto, 0,1 significa que sólo se consideran las fichas que componen la masa superior del 10% de probabilidad.",
|
||||
"The input text for which to create an embedding.": "El texto de entrada para el que crear una incrustación.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "El archivo a cargar (máx. 512MB).Para fina sintonización a efectos proporcionar archivo .jsonl.",
|
||||
"Purpose of the file.": "Propósito del archivo.",
|
||||
"Authorization headers are injected automatically from your connection.": "Las cabeceras de autorización se inyectan automáticamente desde tu conexión.",
|
||||
"Enable for files like PDFs, images, etc..": "Activar para archivos como PDFs, imágenes, etc.",
|
||||
"fine-tune": "afinar",
|
||||
"batch": "lote",
|
||||
"ocr": "ocr",
|
||||
"GET": "RECOGER",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "BORRAR",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI fournit des modèles de langage à poids ouvert et hébergé à la fine pointe de la technologie pour la génération de texte, l'intégration et les tâches de raisonnement.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "Vous pouvez obtenir votre clé API depuis le tableau de bord Mistral IA. Allez sur https://console.mistral.ai, générez une clé API et collez-la ici.",
|
||||
"Ask Mistral": "Demander Mistral",
|
||||
"Create Embeddings": "Créer des incorporations",
|
||||
"Upload File": "Charger un fichier",
|
||||
"List Models": "Lister les modèles",
|
||||
"Custom API Call": "Appel API personnalisé",
|
||||
"Ask Mistral anything you want!": "Demandez à Mistral ce que vous voulez!",
|
||||
"Creates new embedding in Mistral AI.": "Crée une nouvelle intégration dans Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Télécharger un fichier sur Mistral AI (par exemple, pour affiner ou stocker le contexte).",
|
||||
"Retrieves a list of available Mistral AI models.": "Récupère une liste des modèles Mistral AI disponibles.",
|
||||
"Make a custom API call to a specific endpoint": "Passez un appel API personnalisé à un point de terminaison spécifique",
|
||||
"Model": "Modélisation",
|
||||
"Question": "Question",
|
||||
"Temperature": "Température",
|
||||
"Top P": "Top P",
|
||||
"Max Tokens": "Tokens max",
|
||||
"Random Seed": "Graine aléatoire",
|
||||
"Timeout (ms)": "Délai d'attente (ms)",
|
||||
"Input": "Entrée",
|
||||
"File": "Ficher",
|
||||
"Purpose": "Objectif",
|
||||
"Method": "Méthode",
|
||||
"Headers": "En-têtes",
|
||||
"Query Parameters": "Paramètres de requête",
|
||||
"Body": "Corps",
|
||||
"Response is Binary ?": "La réponse est Binaire ?",
|
||||
"No Error on Failure": "Aucune erreur en cas d'échec",
|
||||
"Timeout (in seconds)": "Délai d'attente (en secondes)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Sélectionnez un modèle Mistral. La liste est récupérée en direct depuis votre compte.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Contrôle aléatoirement : La baisse des résultats est moins aléatoire, alors que la température approche de zéro, le modèle devient déterministe et répétitif.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Une alternative à l'échantillonnage à la température, appelée l'échantillonnage du noyau, où le modèle considère les résultats des jetons avec la masse de probabilité top_p. Ainsi, 0,1 signifie que seuls les jetons constituant la masse de probabilité la plus élevée de 10% sont pris en compte.",
|
||||
"The input text for which to create an embedding.": "Le texte d'entrée pour lequel il faut créer une incorporation.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "Le fichier à télécharger (max 512MB).Pour un réglage fin purspose fournir fichier .jsonl.",
|
||||
"Purpose of the file.": "Objectif du fichier.",
|
||||
"Authorization headers are injected automatically from your connection.": "Les en-têtes d'autorisation sont injectés automatiquement à partir de votre connexion.",
|
||||
"Enable for files like PDFs, images, etc..": "Activer pour les fichiers comme les PDFs, les images, etc.",
|
||||
"fine-tune": "fine-tune",
|
||||
"batch": "lot",
|
||||
"ocr": "ocr",
|
||||
"GET": "OBTENIR",
|
||||
"POST": "POSTER",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "SUPPRIMER",
|
||||
"HEAD": "TÊTE"
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "ミストラルAIは、テキスト生成、埋め込み、推論タスクのための最先端のオープンウェイトおよびホスト言語モデルを提供します。",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "Mistral AIダッシュボードからAPIキーを取得することができます。https://console.mistral.aiに移動し、APIキーを生成し、ここに貼り付けてください。",
|
||||
"Ask Mistral": "ミストラルに聞く",
|
||||
"Create Embeddings": "埋め込みを作成",
|
||||
"Upload File": "ファイルをアップロード",
|
||||
"List Models": "モデル一覧",
|
||||
"Custom API Call": "カスタムAPI通話",
|
||||
"Ask Mistral anything you want!": "あなたが望むものは何でもミストラルに聞いてください!",
|
||||
"Creates new embedding in Mistral AI.": "Mistral AI に新しい埋め込みを作成します。",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Mistral AI にファイルをアップロードします(例えば、微調整やコンテキストストレージなど)。",
|
||||
"Retrieves a list of available Mistral AI models.": "利用可能なミストラルAIモデルのリストを取得します。",
|
||||
"Make a custom API call to a specific endpoint": "特定のエンドポイントへのカスタム API コールを実行します。",
|
||||
"Model": "モデル",
|
||||
"Question": "質問",
|
||||
"Temperature": "温度",
|
||||
"Top P": "トップ P",
|
||||
"Max Tokens": "最大トークン",
|
||||
"Random Seed": "ランダムな種",
|
||||
"Timeout (ms)": "タイムアウト (ミリ秒)",
|
||||
"Input": "Input",
|
||||
"File": "ファイル",
|
||||
"Purpose": "目的",
|
||||
"Method": "方法",
|
||||
"Headers": "ヘッダー",
|
||||
"Query Parameters": "クエリパラメータ",
|
||||
"Body": "本文",
|
||||
"Response is Binary ?": "応答はバイナリですか?",
|
||||
"No Error on Failure": "失敗時にエラーはありません",
|
||||
"Timeout (in seconds)": "タイムアウト(秒)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "ミストラルモデルを選択してください。リストはあなたのアカウントからライブで取得されます。",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "ランダム性を制御します: 値を下げると、より決定的な補完になります。温度がゼロに近づくにつれて、モデルは決定論的で反復的になります。",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "核サンプリングと呼ばれる温度でのサンプリングの代わりに、モデルはtop_p確率質量を持つトークンの結果を考慮します。 つまり、0.1は上位10%の確率質量からなるトークンのみを考慮することになります。",
|
||||
"The input text for which to create an embedding.": "埋め込みを作成する入力テキスト。",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "アップロードするファイル(最大 512MB)。パープルを微調整するには、.jsonl ファイルを提供します。",
|
||||
"Purpose of the file.": "ファイルの目的。",
|
||||
"Authorization headers are injected automatically from your connection.": "認証ヘッダは接続から自動的に注入されます。",
|
||||
"Enable for files like PDFs, images, etc..": "PDF、画像などのファイルを有効にします。",
|
||||
"fine-tune": "微調整",
|
||||
"batch": "一括処理",
|
||||
"ocr": "ocr",
|
||||
"GET": "取得",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "削除",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI biedt de meest geavanceerde vormen van open gewicht en gehoste talenmodellen voor het genereren van teksten, embedden en met redenen omkleden.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "Je kunt je API-sleutel verkrijgen op het dashboard van Mistral AI. Ga naar https://console.mistral.ai, genereer een API-sleutel en plak deze hier.",
|
||||
"Ask Mistral": "Vraag Mistral",
|
||||
"Create Embeddings": "Maak inbeddingen",
|
||||
"Upload File": "Bestand uploaden",
|
||||
"List Models": "Lijst modellen",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask Mistral anything you want!": "Vraag Mistral wat je maar wil!",
|
||||
"Creates new embedding in Mistral AI.": "Maakt een nieuwe embedding in Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Upload een bestand naar Mistral AI (bijv. voor fine-tuning of context opslag).",
|
||||
"Retrieves a list of available Mistral AI models.": "Haal een lijst op met beschikbare Mistral AI modellen.",
|
||||
"Make a custom API call to a specific endpoint": "Maak een aangepaste API call naar een specifiek eindpunt",
|
||||
"Model": "Model",
|
||||
"Question": "Vraag",
|
||||
"Temperature": "Temperatuur",
|
||||
"Top P": "Boven P",
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Random Seed": "Willekeurige plaatsen",
|
||||
"Timeout (ms)": "Time-out (ms)",
|
||||
"Input": "Input",
|
||||
"File": "Bestand",
|
||||
"Purpose": "Doel",
|
||||
"Method": "Methode",
|
||||
"Headers": "Kopteksten",
|
||||
"Query Parameters": "Query parameters",
|
||||
"Body": "Lichaam",
|
||||
"Response is Binary ?": "Antwoord is binair?",
|
||||
"No Error on Failure": "Geen fout bij fout",
|
||||
"Timeout (in seconds)": "Time-out (in seconden)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Selecteer een Mistral-model. Lijst wordt op uw rekening live opgehaald.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Bestuurt willekeurigheid: Het verlagen van de temperatuur resulteert in minder willekeurige aanvullingen. Zodra de temperatuur nul nadert, zal het model deterministisch en herhalend worden.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Een alternatief voor bemonstering met de temperatuur, genaamd nucleus sampling, waarbij het model de resultaten van de tokens met top_p waarschijnlijkheid ziet. 0.1 betekent dus dat alleen de tokens die de grootste massa van 10 procent vormen, worden overwogen.",
|
||||
"The input text for which to create an embedding.": "De invoertekst waarvoor u een embedding wilt maken.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "Het te uploaden bestand (max 512MB). Voor het afstemmen van purspose geeft u .jsonl bestand op.",
|
||||
"Purpose of the file.": "Doel van het bestand.",
|
||||
"Authorization headers are injected automatically from your connection.": "Autorisatie headers worden automatisch geïnjecteerd vanuit uw verbinding.",
|
||||
"Enable for files like PDFs, images, etc..": "Inschakelen voor bestanden zoals PDF's, afbeeldingen etc..",
|
||||
"fine-tune": "fine-tune",
|
||||
"batch": "batch",
|
||||
"ocr": "ocr",
|
||||
"GET": "KRIJG",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "VERWIJDEREN",
|
||||
"HEAD": "HOOFD"
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI fornece modelos de linguagem com peso aberto e hospedados para a geração de texto, incorporação e tarefas de raciocínio.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "Você pode obter sua chave de API a partir do painel de controle de IA do Mistral. Vá para https://console.mistral.ai, gere uma chave de API e cole-a aqui.",
|
||||
"Ask Mistral": "Pergunte ao Mistral",
|
||||
"Create Embeddings": "Criar incorporações",
|
||||
"Upload File": "Enviar Arquivo",
|
||||
"List Models": "Listar Modelos",
|
||||
"Custom API Call": "Chamada de API personalizada",
|
||||
"Ask Mistral anything you want!": "Pergunte ao Mistral o que você quiser!",
|
||||
"Creates new embedding in Mistral AI.": "Cria uma nova inserção no Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Envie um arquivo para Mistral AI (por exemplo, para ajuste fino ou armazenamento contextual).",
|
||||
"Retrieves a list of available Mistral AI models.": "Recupera uma lista de modelos Mistral IA disponíveis.",
|
||||
"Make a custom API call to a specific endpoint": "Faça uma chamada de API personalizada para um ponto de extremidade específico",
|
||||
"Model": "Modelo",
|
||||
"Question": "Questão",
|
||||
"Temperature": "Temperatura",
|
||||
"Top P": "Superior P",
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Random Seed": "Geração aleatória",
|
||||
"Timeout (ms)": "Tempo limite (ms)",
|
||||
"Input": "Entrada",
|
||||
"File": "Arquivo",
|
||||
"Purpose": "Objetivo",
|
||||
"Method": "Método",
|
||||
"Headers": "Cabeçalhos",
|
||||
"Query Parameters": "Parâmetros da consulta",
|
||||
"Body": "Conteúdo",
|
||||
"Response is Binary ?": "A resposta é binária ?",
|
||||
"No Error on Failure": "Nenhum erro no Failure",
|
||||
"Timeout (in seconds)": "Tempo limite (em segundos)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Selecione um modelo de Mistral. Lista é obtida ao vivo da sua conta.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controla aleatoriedade: Diminuir resulta em menos complementos aleatórios. À medida que a temperatura se aproxima de zero, o modelo se tornará determinístico e repetitivo.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Uma alternativa à amostragem com temperatura, chamada amostragem núcleo, onde o modelo considera os resultados dos tokens com massa de probabilidade superior (P). Portanto, 0,1 significa que apenas os tokens que incluem a massa de probabilidade superior de 10% são considerados.",
|
||||
"The input text for which to create an embedding.": "O texto de entrada para o qual criar uma incorporação.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "O arquivo a ser carregado (máx. 512MB).Para purspose de ajuste precisa fornecer arquivo .jsonl.",
|
||||
"Purpose of the file.": "Objetivo do arquivo.",
|
||||
"Authorization headers are injected automatically from your connection.": "Os cabeçalhos de autorização são inseridos automaticamente a partir da sua conexão.",
|
||||
"Enable for files like PDFs, images, etc..": "Habilitar para arquivos como PDFs, imagens, etc..",
|
||||
"fine-tune": "afino",
|
||||
"batch": "lote",
|
||||
"ocr": "ocr",
|
||||
"GET": "OBTER",
|
||||
"POST": "POSTAR",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "COLOCAR",
|
||||
"DELETE": "EXCLUIR",
|
||||
"HEAD": "CABEÇA"
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
{
|
||||
"Mistral AI": "Mistral AI",
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI предоставляет современные модели языков с открытым весом и размером для генерации текста, встраивания и рассуждений.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "Вы можете получить ваш ключ API с панели Mistral AI. Перейдите на https://console.mistral.ai, сгенерируйте ключ API и вставьте его здесь.",
|
||||
"Ask Mistral": "Спросить Мистрала",
|
||||
"Create Embeddings": "Создать встраивание",
|
||||
"Upload File": "Загрузить файл",
|
||||
"List Models": "Список моделей",
|
||||
"Custom API Call": "Пользовательский вызов API",
|
||||
"Ask Mistral anything you want!": "Спросите Мистрала все, что хотите!",
|
||||
"Creates new embedding in Mistral AI.": "Создает новое встраивание в Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Загрузите файл на Mistral AI (например, для тонкой настройки или хранения контекста).",
|
||||
"Retrieves a list of available Mistral AI models.": "Возвращает список доступных моделей Mistral AI.",
|
||||
"Make a custom API call to a specific endpoint": "Сделать пользовательский API вызов к определенной конечной точке",
|
||||
"Model": "Модель",
|
||||
"Question": "Вопрос",
|
||||
"Temperature": "Температура",
|
||||
"Top P": "Верхний П",
|
||||
"Max Tokens": "Макс. токенов",
|
||||
"Random Seed": "Случайное семя",
|
||||
"Timeout (ms)": "Таймаут (мс)",
|
||||
"Input": "Input",
|
||||
"File": "Файл",
|
||||
"Purpose": "Цель",
|
||||
"Method": "Метод",
|
||||
"Headers": "Заголовки",
|
||||
"Query Parameters": "Параметры запроса",
|
||||
"Body": "Тело",
|
||||
"No Error on Failure": "Нет ошибок при ошибке",
|
||||
"Timeout (in seconds)": "Таймаут (в секундах)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Выберите модель Mistral. Список загружен в реальном времени с вашего аккаунта.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Контролирует случайность: понижение результатов в менее случайном завершении. По мере нулевого температурного приближения модель становится детерминированной и повторяющей.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Альтернатива отоплению с температурой, называемой ядерным отбором, где модель рассматривает результаты жетонов с вероятностью top_p. Таким образом, 0.1 означает, что учитываются только жетоны, состоящие из массы 10% наивысшего уровня.",
|
||||
"The input text for which to create an embedding.": "Входной текст, для которого будет создан встраиваемый текст.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "Файл для загрузки (не более 512Мб). Для тонкой настройки purspose предоставьте .jsonl файл.",
|
||||
"Purpose of the file.": "Цель файла.",
|
||||
"Authorization headers are injected automatically from your connection.": "Заголовки авторизации включаются автоматически из вашего соединения.",
|
||||
"fine-tune": "тонкая настройка",
|
||||
"batch": "партия",
|
||||
"ocr": "окр",
|
||||
"GET": "ПОЛУЧИТЬ",
|
||||
"POST": "ПОСТ",
|
||||
"PATCH": "ПАТЧ",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "УДАЛИТЬ",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.",
|
||||
"Ask Mistral": "Ask Mistral",
|
||||
"Create Embeddings": "Create Embeddings",
|
||||
"Upload File": "Upload File",
|
||||
"List Models": "List Models",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask Mistral anything you want!": "Ask Mistral anything you want!",
|
||||
"Creates new embedding in Mistral AI.": "Creates new embedding in Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Upload a file to Mistral AI (e.g., for fine-tuning or context storage).",
|
||||
"Retrieves a list of available Mistral AI models.": "Retrieves a list of available Mistral AI models.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Top P": "Top P",
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Random Seed": "Random Seed",
|
||||
"Timeout (ms)": "Timeout (ms)",
|
||||
"Input": "Input",
|
||||
"File": "File",
|
||||
"Purpose": "Purpose",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"Response is Binary ?": "Response is Binary ?",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Select a Mistral model. List is fetched live from your account.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"The input text for which to create an embedding.": "The input text for which to create an embedding.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.",
|
||||
"Purpose of the file.": "Purpose of the file.",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
|
||||
"fine-tune": "fine-tune",
|
||||
"batch": "batch",
|
||||
"ocr": "ocr",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
{
|
||||
"Mistral AI": "Mistral AI",
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.",
|
||||
"Ask Mistral": "Ask Mistral",
|
||||
"Create Embeddings": "Create Embeddings",
|
||||
"Upload File": "Upload File",
|
||||
"List Models": "List Models",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask Mistral anything you want!": "Ask Mistral anything you want!",
|
||||
"Creates new embedding in Mistral AI.": "Creates new embedding in Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Upload a file to Mistral AI (e.g., for fine-tuning or context storage).",
|
||||
"Retrieves a list of available Mistral AI models.": "Retrieves a list of available Mistral AI models.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Top P": "Top P",
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Random Seed": "Random Seed",
|
||||
"Timeout (ms)": "Timeout (ms)",
|
||||
"Input": "Input",
|
||||
"File": "File",
|
||||
"Purpose": "Purpose",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Select a Mistral model. List is fetched live from your account.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"The input text for which to create an embedding.": "The input text for which to create an embedding.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.",
|
||||
"Purpose of the file.": "Purpose of the file.",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"fine-tune": "fine-tune",
|
||||
"batch": "batch",
|
||||
"ocr": "ocr",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.": "Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.",
|
||||
"You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.": "You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.",
|
||||
"Ask Mistral": "Ask Mistral",
|
||||
"Create Embeddings": "Create Embeddings",
|
||||
"Upload File": "Upload File",
|
||||
"List Models": "List Models",
|
||||
"Custom API Call": "自定义 API 呼叫",
|
||||
"Ask Mistral anything you want!": "Ask Mistral anything you want!",
|
||||
"Creates new embedding in Mistral AI.": "Creates new embedding in Mistral AI.",
|
||||
"Upload a file to Mistral AI (e.g., for fine-tuning or context storage).": "Upload a file to Mistral AI (e.g., for fine-tuning or context storage).",
|
||||
"Retrieves a list of available Mistral AI models.": "Retrieves a list of available Mistral AI models.",
|
||||
"Make a custom API call to a specific endpoint": "将一个自定义 API 调用到一个特定的终点",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Top P": "Top P",
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Random Seed": "Random Seed",
|
||||
"Timeout (ms)": "Timeout (ms)",
|
||||
"Input": "Input",
|
||||
"File": "文件",
|
||||
"Purpose": "Purpose",
|
||||
"Method": "方法",
|
||||
"Headers": "信头",
|
||||
"Query Parameters": "查询参数",
|
||||
"Body": "正文内容",
|
||||
"Response is Binary ?": "Response is Binary ?",
|
||||
"No Error on Failure": "失败时没有错误",
|
||||
"Timeout (in seconds)": "超时(秒)",
|
||||
"Select a Mistral model. List is fetched live from your account.": "Select a Mistral model. List is fetched live from your account.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"The input text for which to create an embedding.": "The input text for which to create an embedding.",
|
||||
"The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.": "The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.",
|
||||
"Purpose of the file.": "Purpose of the file.",
|
||||
"Authorization headers are injected automatically from your connection.": "授权头自动从您的连接中注入。",
|
||||
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
|
||||
"fine-tune": "fine-tune",
|
||||
"batch": "batch",
|
||||
"ocr": "ocr",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,35 @@
|
||||
import { createPiece } from "@activepieces/pieces-framework";
|
||||
import { PieceCategory } from "@activepieces/shared";
|
||||
import { createChatCompletion } from "./lib/actions/create-chat-completion";
|
||||
import { createEmbeddings } from "./lib/actions/create-embeddings";
|
||||
import { uploadFile } from "./lib/actions/upload-file";
|
||||
import { listModels } from "./lib/actions/list-models";
|
||||
import { mistralAuth } from "./lib/common/auth";
|
||||
import { createCustomApiCallAction } from "@activepieces/pieces-common";
|
||||
|
||||
export const mistralAi = createPiece({
|
||||
displayName: "Mistral AI",
|
||||
description: "Mistral AI provides state-of-the-art open-weight and hosted language models for text generation, embeddings, and reasoning tasks.",
|
||||
auth: mistralAuth,
|
||||
minimumSupportedRelease: "0.36.1",
|
||||
logoUrl: "https://cdn.activepieces.com/pieces/mistral-ai.png",
|
||||
authors: ["sparkybug"],
|
||||
categories: [PieceCategory.ARTIFICIAL_INTELLIGENCE],
|
||||
actions: [
|
||||
createChatCompletion,
|
||||
createEmbeddings,
|
||||
uploadFile,
|
||||
listModels,
|
||||
createCustomApiCallAction({
|
||||
auth:mistralAuth,
|
||||
baseUrl:()=>'https://api.mistral.ai/v1',
|
||||
authMapping:async (auth)=>{
|
||||
return{
|
||||
Authorization:`Bearer ${auth.secret_text}`
|
||||
}
|
||||
}
|
||||
})
|
||||
],
|
||||
triggers: [],
|
||||
});
|
||||
|
||||
@@ -0,0 +1,82 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { HttpMethod, httpClient, AuthenticationType } from '@activepieces/pieces-common';
|
||||
import { mistralAuth } from '../common/auth';
|
||||
import { modelDropdown, parseMistralError } from '../common/props';
|
||||
|
||||
export const createChatCompletion = createAction({
|
||||
auth: mistralAuth,
|
||||
name: 'create_chat_completion',
|
||||
displayName: 'Ask Mistral',
|
||||
description: 'Ask Mistral anything you want!',
|
||||
props: {
|
||||
model: modelDropdown,
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Question',
|
||||
required: true,
|
||||
}),
|
||||
temperature: Property.Number({
|
||||
displayName: 'Temperature',
|
||||
required: false,
|
||||
defaultValue: 1,
|
||||
description:
|
||||
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
|
||||
}),
|
||||
top_p: Property.Number({
|
||||
displayName: 'Top P',
|
||||
required: false,
|
||||
defaultValue: 1,
|
||||
description:
|
||||
'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
|
||||
}),
|
||||
max_tokens: Property.Number({ displayName: 'Max Tokens', required: false }),
|
||||
random_seed: Property.Number({ displayName: 'Random Seed', required: false }),
|
||||
timeout: Property.Number({ displayName: 'Timeout (ms)', required: false, defaultValue: 30000 }),
|
||||
},
|
||||
async run(context) {
|
||||
const { model, temperature, top_p, max_tokens, random_seed, timeout, prompt } = context.propsValue;
|
||||
|
||||
const body: Record<string, any> = {
|
||||
model,
|
||||
messages: [
|
||||
{
|
||||
content: prompt,
|
||||
role: 'user',
|
||||
},
|
||||
],
|
||||
temperature,
|
||||
top_p,
|
||||
max_tokens,
|
||||
random_seed
|
||||
};
|
||||
let lastErr;
|
||||
for (let attempt = 0; attempt <= 3; ++attempt) {
|
||||
try {
|
||||
const response = await httpClient.sendRequest<{choices:{message:{content:string}}[]}>({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://api.mistral.ai/v1/chat/completions',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: context.auth.secret_text,
|
||||
},
|
||||
body,
|
||||
timeout: timeout ?? 30000,
|
||||
});
|
||||
|
||||
const answer = response.body.choices[0].message.content;
|
||||
|
||||
return answer;
|
||||
} catch (e: any) {
|
||||
lastErr = e;
|
||||
const status = e.response?.status;
|
||||
if (status === 429 || (status && status >= 500 && status < 600)) {
|
||||
if (attempt < 3) {
|
||||
await new Promise((r) => setTimeout(r, 1000 * (attempt + 1)));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
throw new Error(parseMistralError(e));
|
||||
}
|
||||
}
|
||||
throw new Error(parseMistralError(lastErr));
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,67 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { HttpMethod, httpClient, AuthenticationType } from '@activepieces/pieces-common';
|
||||
import { mistralAuth } from '../common/auth';
|
||||
import { parseMistralError } from '../common/props';
|
||||
|
||||
export const createEmbeddings = createAction({
|
||||
auth: mistralAuth,
|
||||
name: 'create_embeddings',
|
||||
displayName: 'Create Embeddings',
|
||||
description: 'Creates new embedding in Mistral AI.',
|
||||
props: {
|
||||
input: Property.Array({
|
||||
displayName: 'Input',
|
||||
description: 'The input text for which to create an embedding.',
|
||||
required: true,
|
||||
}),
|
||||
timeout: Property.Number({ displayName: 'Timeout (ms)', required: false, defaultValue: 30000 }),
|
||||
},
|
||||
async run(context) {
|
||||
const { input, timeout } = context.propsValue;
|
||||
let inputArr: string[] = [];
|
||||
try {
|
||||
if (typeof input === 'string') {
|
||||
try {
|
||||
inputArr = JSON.parse(input);
|
||||
} catch {
|
||||
inputArr = input;
|
||||
}
|
||||
}
|
||||
inputArr = input as string[];
|
||||
} catch {
|
||||
throw new Error('Input must be a non-empty string or a JSON array of non-empty strings');
|
||||
}
|
||||
const body = {
|
||||
model: 'mistral-embed',
|
||||
input: inputArr,
|
||||
};
|
||||
let lastErr;
|
||||
for (let attempt = 0; attempt <= 3; ++attempt) {
|
||||
try {
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://api.mistral.ai/v1/embeddings',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: context.auth.secret_text,
|
||||
},
|
||||
body,
|
||||
timeout: timeout ?? 30000,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
} catch (e: any) {
|
||||
lastErr = e;
|
||||
const status = e.response?.status;
|
||||
if (status === 429 || (status && status >= 500 && status < 600)) {
|
||||
if (attempt < 3) {
|
||||
await new Promise((r) => setTimeout(r, 1000 * (attempt + 1)));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
throw new Error(parseMistralError(e));
|
||||
}
|
||||
}
|
||||
throw new Error(parseMistralError(lastErr));
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,28 @@
|
||||
import { createAction } from '@activepieces/pieces-framework';
|
||||
import { HttpMethod, httpClient, AuthenticationType } from '@activepieces/pieces-common';
|
||||
import { mistralAuth } from '../common/auth';
|
||||
import { parseMistralError } from '../common/props';
|
||||
|
||||
export const listModels = createAction({
|
||||
auth: mistralAuth,
|
||||
name: 'list_models',
|
||||
displayName: 'List Models',
|
||||
description: 'Retrieves a list of available Mistral AI models.',
|
||||
props: {},
|
||||
async run({ auth }) {
|
||||
try {
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.GET,
|
||||
url: 'https://api.mistral.ai/v1/models',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.secret_text,
|
||||
},
|
||||
});
|
||||
|
||||
return response.body;
|
||||
} catch (e: any) {
|
||||
throw new Error(parseMistralError(e));
|
||||
}
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,82 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { HttpMethod, httpClient, AuthenticationType } from '@activepieces/pieces-common';
|
||||
import { mistralAuth } from '../common/auth';
|
||||
import FormData from 'form-data';
|
||||
import { parseMistralError } from '../common/props';
|
||||
|
||||
const SUPPORTED_PURPOSES = ['fine-tune', 'batch', 'ocr'];
|
||||
const MAX_FILE_SIZE_BYTES = 512 * 1024 * 1024;
|
||||
const ALLOWED_EXTENSIONS = [
|
||||
'jsonl',
|
||||
'txt',
|
||||
'csv',
|
||||
'pdf',
|
||||
'docx',
|
||||
'png',
|
||||
'jpg',
|
||||
'jpeg',
|
||||
'mp3',
|
||||
'mp4',
|
||||
];
|
||||
|
||||
function getFileExtension(filename: string): string {
|
||||
const parts = filename.split('.');
|
||||
return parts.length > 1 ? parts.pop()!.toLowerCase() : '';
|
||||
}
|
||||
|
||||
export const uploadFile = createAction({
|
||||
auth: mistralAuth,
|
||||
name: 'upload_file',
|
||||
displayName: 'Upload File',
|
||||
description: 'Upload a file to Mistral AI (e.g., for fine-tuning or context storage).',
|
||||
props: {
|
||||
file: Property.File({
|
||||
displayName: 'File',
|
||||
description: 'The file to upload (max 512MB).For fine tuning purspose provide .jsonl file.',
|
||||
required: true,
|
||||
}),
|
||||
purpose: Property.StaticDropdown({
|
||||
displayName: 'Purpose',
|
||||
description: 'Purpose of the file.',
|
||||
required: true,
|
||||
options: {
|
||||
options: SUPPORTED_PURPOSES.map((p) => ({ label: p, value: p })),
|
||||
},
|
||||
}),
|
||||
},
|
||||
async run(context) {
|
||||
const { file, purpose } = context.propsValue;
|
||||
|
||||
if (!file) throw new Error('File is required');
|
||||
|
||||
if (!SUPPORTED_PURPOSES.includes(purpose)) throw new Error('Invalid purpose');
|
||||
|
||||
if (file.data.byteLength > MAX_FILE_SIZE_BYTES) throw new Error('File exceeds 512MB limit');
|
||||
|
||||
const ext = getFileExtension(file.filename);
|
||||
|
||||
if (!ALLOWED_EXTENSIONS.includes(ext)) throw new Error(`File extension .${ext} is not allowed`);
|
||||
|
||||
const form = new FormData();
|
||||
form.append('file', Buffer.from(file.data), file.filename);
|
||||
form.append('purpose', purpose);
|
||||
|
||||
try {
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://api.mistral.ai/v1/files',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: context.auth.secret_text,
|
||||
},
|
||||
headers:{
|
||||
...form.getHeaders()
|
||||
},
|
||||
body: form,
|
||||
});
|
||||
return response.body;
|
||||
} catch (e: any) {
|
||||
throw new Error(parseMistralError(e));
|
||||
}
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,32 @@
|
||||
import { PieceAuth } from '@activepieces/pieces-framework';
|
||||
import { httpClient, HttpMethod, AuthenticationType } from '@activepieces/pieces-common';
|
||||
|
||||
export const mistralAuth = PieceAuth.SecretText({
|
||||
displayName: 'API Key',
|
||||
description: `You can obtain your API key from the Mistral AI dashboard. Go to https://console.mistral.ai, generate an API key, and paste it here.`,
|
||||
required: true,
|
||||
validate: async ({ auth }) => {
|
||||
try {
|
||||
await httpClient.sendRequest({
|
||||
method: HttpMethod.GET,
|
||||
url: 'https://api.mistral.ai/v1/models',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth
|
||||
},
|
||||
});
|
||||
return { valid: true };
|
||||
} catch (e: any) {
|
||||
if (e.response?.status === 401) {
|
||||
return { valid: false, error: 'Invalid API key. Please check your API key and try again.' };
|
||||
}
|
||||
if (e.response?.status === 429) {
|
||||
return { valid: false, error: 'Rate limit exceeded. Please wait and try again.' };
|
||||
}
|
||||
if (e.message?.toLowerCase().includes('network')) {
|
||||
return { valid: false, error: 'Network error. Please check your internet connection.' };
|
||||
}
|
||||
return { valid: false, error: 'Authentication failed: ' + (e.message || 'Unknown error') };
|
||||
}
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,59 @@
|
||||
import { Property, DropdownOption } from '@activepieces/pieces-framework';
|
||||
import { HttpMethod, httpClient, AuthenticationType } from '@activepieces/pieces-common';
|
||||
import { mistralAuth } from './auth';
|
||||
|
||||
export const modelDropdown = Property.Dropdown({
|
||||
displayName: 'Model',
|
||||
description: 'Select a Mistral model. List is fetched live from your account.',
|
||||
required: true,
|
||||
refreshers: [],
|
||||
auth: mistralAuth,
|
||||
options: async ({ auth }) => {
|
||||
if (!auth) {
|
||||
return {
|
||||
disabled: true,
|
||||
placeholder: 'Please connect your account first.',
|
||||
options: [],
|
||||
};
|
||||
}
|
||||
try {
|
||||
const response = await httpClient.sendRequest<{data:{id:string,name:string}[]}>({
|
||||
method: HttpMethod.GET,
|
||||
url: 'https://api.mistral.ai/v1/models',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.secret_text
|
||||
},
|
||||
});
|
||||
const models = response.body.data || [];
|
||||
const options: DropdownOption<string>[] = models.map((model) => ({
|
||||
label: model.name || model.id,
|
||||
value: model.id,
|
||||
}));
|
||||
if (options.length === 0) {
|
||||
return {
|
||||
disabled: true,
|
||||
placeholder: 'No models found for your account.',
|
||||
options: [],
|
||||
};
|
||||
}
|
||||
return {
|
||||
disabled: false,
|
||||
options,
|
||||
};
|
||||
} catch {
|
||||
return {
|
||||
disabled: true,
|
||||
placeholder: 'Failed to load models. Check your API key and network.',
|
||||
options: [],
|
||||
};
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
export function parseMistralError(e: any): string {
|
||||
if (e.response?.data?.error) return e.response.data.error;
|
||||
if (e.response?.data?.message) return e.response.data.message;
|
||||
if (e.message) return e.message;
|
||||
return 'Unknown error';
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"extends": "../../../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"module": "commonjs",
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"strict": true,
|
||||
"importHelpers": true,
|
||||
"noImplicitOverride": true,
|
||||
"noImplicitReturns": true,
|
||||
"noFallthroughCasesInSwitch": true,
|
||||
"noPropertyAccessFromIndexSignature": true
|
||||
},
|
||||
"files": [],
|
||||
"include": [],
|
||||
"references": [
|
||||
{
|
||||
"path": "./tsconfig.lib.json"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"extends": "./tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "../../../../dist/out-tsc",
|
||||
"declaration": true,
|
||||
"types": ["node"]
|
||||
},
|
||||
"include": ["src/**/*.ts"]
|
||||
}
|
||||
Reference in New Issue
Block a user