Add Activepieces integration for workflow automation
- Add Activepieces fork with SmoothSchedule custom piece - Create integrations app with Activepieces service layer - Add embed token endpoint for iframe integration - Create Automations page with embedded workflow builder - Add sidebar visibility fix for embed mode - Add list inactive customers endpoint to Public API - Include SmoothSchedule triggers: event created/updated/cancelled - Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"presets": [["@nx/js/babel", { "useBuiltIns": "usage" }]]
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"extends": ["../../../../.eslintrc.json"],
|
||||
"ignorePatterns": ["!**/*"],
|
||||
"overrides": [
|
||||
{
|
||||
"files": ["*.ts", "*.tsx", "*.js", "*.jsx"],
|
||||
"rules": {}
|
||||
},
|
||||
{
|
||||
"files": ["*.ts", "*.tsx"],
|
||||
"rules": {}
|
||||
},
|
||||
{
|
||||
"files": ["*.js", "*.jsx"],
|
||||
"rules": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
# pieces-openai
|
||||
|
||||
This library was generated with [Nx](https://nx.dev).
|
||||
|
||||
## Running lint
|
||||
|
||||
Run `nx lint pieces-openai` to execute the lint via [ESLint](https://eslint.org/).
|
||||
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "@activepieces/piece-openai",
|
||||
"version": "0.6.7",
|
||||
"dependencies": {
|
||||
"tiktoken": "1.0.11",
|
||||
"openai": "4.67.1"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"name": "pieces-openai",
|
||||
"$schema": "../../../../node_modules/nx/schemas/project-schema.json",
|
||||
"sourceRoot": "packages/pieces/community/openai/src",
|
||||
"projectType": "library",
|
||||
"targets": {
|
||||
"build": {
|
||||
"executor": "@nx/js:tsc",
|
||||
"outputs": [
|
||||
"{options.outputPath}"
|
||||
],
|
||||
"options": {
|
||||
"outputPath": "dist/packages/pieces/community/openai",
|
||||
"tsConfig": "packages/pieces/community/openai/tsconfig.lib.json",
|
||||
"packageJson": "packages/pieces/community/openai/package.json",
|
||||
"main": "packages/pieces/community/openai/src/index.ts",
|
||||
"assets": [
|
||||
"packages/pieces/community/openai/*.md",
|
||||
{
|
||||
"input": "packages/pieces/community/openai/src/i18n",
|
||||
"output": "./src/i18n",
|
||||
"glob": "**/!(i18n.json)"
|
||||
}
|
||||
],
|
||||
"buildableProjectDepsInPackageJsonType": "dependencies",
|
||||
"updateBuildableProjectDepsInPackageJson": true
|
||||
},
|
||||
"dependsOn": [
|
||||
"prebuild",
|
||||
"^build"
|
||||
]
|
||||
},
|
||||
"lint": {
|
||||
"executor": "@nx/eslint:lint",
|
||||
"outputs": [
|
||||
"{options.outputFile}"
|
||||
]
|
||||
},
|
||||
"prebuild": {
|
||||
"executor": "nx:run-commands",
|
||||
"options": {
|
||||
"cwd": "packages/pieces/community/openai",
|
||||
"command": "bun install --no-save --silent"
|
||||
},
|
||||
"dependsOn": [
|
||||
"^build"
|
||||
]
|
||||
}
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"OpenAI": "OpenAI",
|
||||
"Use the many tools ChatGPT has to offer.": "Use the many tools ChatGPT has to offer.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "Ask ChatGPT",
|
||||
"Ask Assistant": "Ask Assistant",
|
||||
"Generate Image": "Generate Image",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Text-to-Speech",
|
||||
"Transcribe Audio": "Transcribe Audio",
|
||||
"Translate Audio": "Translate Audio",
|
||||
"Extract Structured Data from Text": "Extract Structured Data from Text",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask ChatGPT anything you want!": "Ask ChatGPT anything you want!",
|
||||
"Ask a GPT assistant anything you want!": "Ask a GPT assistant anything you want!",
|
||||
"Generate an image using text-to-image models": "Generate an image using text-to-image models",
|
||||
"Ask GPT a question about an image": "Ask GPT a question about an image",
|
||||
"Generate an audio recording from text": "Generate an audio recording from text",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcribe audio to text using whisper-1 model",
|
||||
"Translate audio to text using whisper-1 model": "Translate audio to text using whisper-1 model",
|
||||
"Returns structured data from provided unstructured text.": "Returns structured data from provided unstructured text.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Maximum Tokens": "Maximum Tokens",
|
||||
"Top P": "Top P",
|
||||
"Frequency penalty": "Frequency penalty",
|
||||
"Presence penalty": "Presence penalty",
|
||||
"Memory Key": "Memory Key",
|
||||
"Roles": "Roles",
|
||||
"Assistant": "Assistant",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Resolution",
|
||||
"Quality": "Quality",
|
||||
"Image": "Image",
|
||||
"Detail": "Detail",
|
||||
"Text": "Text",
|
||||
"Speed": "Speed",
|
||||
"Voice": "Voice",
|
||||
"Output Format": "Output Format",
|
||||
"File Name": "File Name",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Language of the Audio",
|
||||
"Unstructured Text": "Unstructured Text",
|
||||
"Data Definition": "Data Definition",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.",
|
||||
"Array of roles to specify more accurate response": "Array of roles to specify more accurate response",
|
||||
"The assistant which will generate the completion.": "The assistant which will generate the completion.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.",
|
||||
"The model which will generate the image.": "The model which will generate the image.",
|
||||
"The resolution to generate the image in.": "The resolution to generate the image in.",
|
||||
"Standard is faster, HD has better details.": "Standard is faster, HD has better details.",
|
||||
"The image URL or file you want GPT's vision to read.": "The image URL or file you want GPT's vision to read.",
|
||||
"What do you want ChatGPT to tell you about the image?": "What do you want ChatGPT to tell you about the image?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Control how the model processes the image and generates textual understanding.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)",
|
||||
"The text you want to hear.": "The text you want to hear.",
|
||||
"The model which will generate the audio.": "The model which will generate the audio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "The speed of the audio. Minimum is 0.25 and maximum is 4.00.",
|
||||
"The voice to generate the audio in.": "The voice to generate the audio in.",
|
||||
"The format you want the audio file in.": "The format you want the audio file in.",
|
||||
"The name of the output audio file (without extension).": "The name of the output audio file (without extension).",
|
||||
"Audio file to transcribe": "Audio file to transcribe",
|
||||
"Language of the audio file the default is en (English).": "Language of the audio file the default is en (English).",
|
||||
"Audio file to translate": "Audio file to translate",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fable",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "shimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "Spanish",
|
||||
"Italian": "Italian",
|
||||
"English": "English",
|
||||
"Portuguese": "Portuguese",
|
||||
"German": "German",
|
||||
"Japanese": "Japanese",
|
||||
"Polish": "Polish",
|
||||
"Arabic": "Arabic",
|
||||
"Afrikaans": "Afrikaans",
|
||||
"Azerbaijani": "Azerbaijani",
|
||||
"Bulgarian": "Bulgarian",
|
||||
"Bosnian": "Bosnian",
|
||||
"Catalan": "Catalan",
|
||||
"Czech": "Czech",
|
||||
"Danish": "Danish",
|
||||
"Greek": "Greek",
|
||||
"Estonian": "Estonian",
|
||||
"Persian": "Persian",
|
||||
"Finnish": "Finnish",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "French",
|
||||
"Galician": "Galician",
|
||||
"Hebrew": "Hebrew",
|
||||
"Hindi": "Hindi",
|
||||
"Croatian": "Croatian",
|
||||
"Hungarian": "Hungarian",
|
||||
"Armenian": "Armenian",
|
||||
"Indonesian": "Indonesian",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Korean",
|
||||
"Lithuanian": "Lithuanian",
|
||||
"Latvian": "Latvian",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macedonian",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Malay",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Dutch",
|
||||
"Norwegian": "Norwegian",
|
||||
"Romanian": "Romanian",
|
||||
"Russian": "Russian",
|
||||
"Slovak": "Slovak",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Serbian",
|
||||
"Swedish": "Swedish",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Thai",
|
||||
"Turkish": "Turkish",
|
||||
"Ukrainian": "Ukrainian",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinese (Simplified)",
|
||||
"Welsh": "Welsh",
|
||||
"Belarusian": "Belarusian",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"Use the many tools ChatGPT has to offer.": "Nutzen Sie die vielen Tools, die ChatGPT zu bieten hat.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Folgen Sie diesen Anweisungen, um Ihren OpenAI-API-Schlüssel zu erhalten:\n\n1. Besuchen Sie die folgende Webseite: https://platform.openai. om/account/api-keys.\n2. Sobald Sie auf der Website sind, suchen und klicken Sie auf die Option, um Ihren OpenAI API Key zu erhalten.\n\nEs wird dringend empfohlen, dass Sie Ihre Kreditkarteninformationen zu Ihrem OpenAI-Konto hinzufügen und **vorher** zum bezahlten Tarif upgraden und den API-Schlüssel generieren. Dies hilft Ihnen, 429 Fehler zu vermeiden.\n",
|
||||
"Ask ChatGPT": "ChatGPT fragen",
|
||||
"Ask Assistant": "Assistent fragen",
|
||||
"Generate Image": "Bild generieren",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Text-zu-Sprache",
|
||||
"Transcribe Audio": "Audio umschreiben",
|
||||
"Translate Audio": "Audio übersetzen",
|
||||
"Extract Structured Data from Text": "Strukturierte Daten aus Text extrahieren",
|
||||
"Custom API Call": "Eigener API-Aufruf",
|
||||
"Ask ChatGPT anything you want!": "Fragen Sie ChatGPT was Sie wollen!",
|
||||
"Ask a GPT assistant anything you want!": "Fragen Sie einen GPT-Assistenten was Sie wollen!",
|
||||
"Generate an image using text-to-image models": "Ein Bild mit Text-zu-Bild-Modellen generieren",
|
||||
"Ask GPT a question about an image": "Fragen Sie GPT nach einem Bild",
|
||||
"Generate an audio recording from text": "Eine Audioaufnahme aus Text generieren",
|
||||
"Transcribe audio to text using whisper-1 model": "Audio in Text mit Whisper-1-Modell umschreiben",
|
||||
"Translate audio to text using whisper-1 model": "Übersetzen Sie Audio in Text mit dem Whisper-1-Modell",
|
||||
"Returns structured data from provided unstructured text.": "Gibt strukturierte Daten aus unstrukturiertem Text zurück.",
|
||||
"Make a custom API call to a specific endpoint": "Einen benutzerdefinierten API-Aufruf an einen bestimmten Endpunkt machen",
|
||||
"Model": "Modell",
|
||||
"Question": "Frage",
|
||||
"Temperature": "Temperatur",
|
||||
"Maximum Tokens": "Maximale Token",
|
||||
"Top P": "Oben P",
|
||||
"Frequency penalty": "Frequenz Strafe",
|
||||
"Presence penalty": "Präsenzstrafe",
|
||||
"Memory Key": "Speicherschlüssel",
|
||||
"Roles": "Rollen",
|
||||
"Assistant": "Assistent",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Auflösung",
|
||||
"Quality": "Qualität",
|
||||
"Image": "Bild",
|
||||
"Detail": "Details",
|
||||
"Text": "Text",
|
||||
"Speed": "Geschwindigkeit",
|
||||
"Voice": "Stimme",
|
||||
"Output Format": "Ausgabeformat",
|
||||
"File Name": "Dateiname",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Sprache des Audio",
|
||||
"Unstructured Text": "Unstrukturierter Text",
|
||||
"Data Definition": "Datendefinition",
|
||||
"Method": "Methode",
|
||||
"Headers": "Kopfzeilen",
|
||||
"Query Parameters": "Abfrageparameter",
|
||||
"Body": "Körper",
|
||||
"Response is Binary ?": "Antwort ist binär?",
|
||||
"No Error on Failure": "Kein Fehler bei Fehler",
|
||||
"Timeout (in seconds)": "Timeout (in Sekunden)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "Das Modell, das die Vervollständigung generiert. Einige Modelle eignen sich für Aufgaben der natürlichen Sprache, andere sind auf Code spezialisiert.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Kontrolliert Zufallszufälligkeit: Die Verringerung führt zu weniger zufälligen Vervollständigungen. Je näher die Temperatur Null rückt, desto deterministischer und sich wiederholender wird.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "Die maximale Anzahl zu generierender Token. Anfragen können je nach Modell bis zu 2.048 oder 4.096 Tokens verwenden, die zwischen der Eingabeaufforderung und der Fertigstellung geteilt werden. Legen Sie den Wert nicht auf maximal und lassen Sie einige Token für die Eingabe. (Ein Token ist ungefähr 4 Zeichen für den normalen englischen Text)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Eine Alternative zur Probenahme mit Temperatur, genannt Nucleus Probenahme, bei der das Modell die Ergebnisse der Tokens mit der Top_p Wahrscheinlichkeitsmasse berücksichtigt. 0,1 bedeutet also nur die Token, die die obersten 10% Wahrscheinlichkeitsmasse ausmachen.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Nummer zwischen -2.0 und 2.0. Positive Werte bestrafen neue Tokens aufgrund ihrer bisherigen Häufigkeit im Text, wodurch sich die Wahrscheinlichkeit verringert, dass das Modell dieselbe Zeile wörtlich wiederholt.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Nummer zwischen -2.0 und 2.0. Positive Werte bestrafen neue Tokens je nachdem, ob sie bisher im Text erscheinen, was die Wahrscheinlichkeit erhöht, über neue Themen zu sprechen.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "Ein Memory-Schlüssel, der den Chat-Verlauf über alle Abläufe und Ströme hinweg weitergibt. Leer lassen um ChatGPT ohne Speicher früherer Nachrichten zu verlassen.",
|
||||
"Array of roles to specify more accurate response": "Rollenzuordnung, um eine genauere Antwort anzugeben",
|
||||
"The assistant which will generate the completion.": "Der Assistent, der die Fertigstellung generiert.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "Ein Memory-Schlüssel, der den Chatverlauf über Runen und Ströme hinweg teilt. Lassen Sie es leer, um Ihren Assistenten ohne Speicher der vorherigen Nachrichten zu lassen.",
|
||||
"The model which will generate the image.": "Das Modell, das das Bild generiert.",
|
||||
"The resolution to generate the image in.": "Die Auflösung, in der das Bild erzeugt wird.",
|
||||
"Standard is faster, HD has better details.": "Standard ist schneller, HD hat bessere Details.",
|
||||
"The image URL or file you want GPT's vision to read.": "Die Bild-URL oder Datei, die GPT Vision lesen soll.",
|
||||
"What do you want ChatGPT to tell you about the image?": "Was soll ChatGPT dir über das Bild sagen?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Legen Sie fest, wie das Modell das Bild verarbeitet und wie es textuelles Verständnis erzeugt.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "Die maximale Anzahl der zu generierenden Token. Anfragen können bis zu 2.048 oder 4.096 Token verwenden, die zwischen Prompt und Fertigstellung geteilt werden, setzen Sie den Wert nicht auf maximal und lassen Sie einige Token für die Eingabe. Das genaue Limit variiert je nach Modell. (Ein Token ist ungefähr 4 Zeichen für den normalen englischen Text)",
|
||||
"The text you want to hear.": "Der Text, den Sie hören möchten.",
|
||||
"The model which will generate the audio.": "Das Modell, das die Audiodatei generiert.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "Die Geschwindigkeit des Audios. Minimum ist 0.25 und maximal 4.00.",
|
||||
"The voice to generate the audio in.": "Die Stimme, in der die Audio generiert wird.",
|
||||
"The format you want the audio file in.": "Das Format, in dem die Audiodatei eingegeben werden soll.",
|
||||
"The name of the output audio file (without extension).": "Der Name der Ausgabe-Audiodatei (ohne Erweiterung).",
|
||||
"Audio file to transcribe": "Zu übertragende Audiodatei",
|
||||
"Language of the audio file the default is en (English).": "Language of the audio file the default is de (german).",
|
||||
"Audio file to translate": "Zu übersetzende Audiodatei",
|
||||
"Authorization headers are injected automatically from your connection.": "Autorisierungs-Header werden automatisch von Ihrer Verbindung injiziert.",
|
||||
"Enable for files like PDFs, images, etc..": "Aktivieren für Dateien wie PDFs, Bilder, etc..",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fabelhaft",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "leuchten",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flach",
|
||||
"Spanish": "Spanisch",
|
||||
"Italian": "Italienisch",
|
||||
"English": "Englisch",
|
||||
"Portuguese": "Portugiesisch",
|
||||
"German": "Deutsch",
|
||||
"Japanese": "Japanisch",
|
||||
"Polish": "Polnisch",
|
||||
"Arabic": "Arabisch",
|
||||
"Afrikaans": "Afrikaner",
|
||||
"Azerbaijani": "Aserbaidschan",
|
||||
"Bulgarian": "Bulgarisch",
|
||||
"Bosnian": "Bosnisch",
|
||||
"Catalan": "Katalanisch",
|
||||
"Czech": "Tschechisch",
|
||||
"Danish": "Dänisch",
|
||||
"Greek": "Griechisch",
|
||||
"Estonian": "Estnisch",
|
||||
"Persian": "Persisch",
|
||||
"Finnish": "Finnisch",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "Französisch",
|
||||
"Galician": "Galizisch",
|
||||
"Hebrew": "Hebräisch",
|
||||
"Hindi": "Hannah",
|
||||
"Croatian": "Kroatisch",
|
||||
"Hungarian": "Ungarisch",
|
||||
"Armenian": "Armenisch",
|
||||
"Indonesian": "Indonesisch",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Koreanisch",
|
||||
"Lithuanian": "Litauisch",
|
||||
"Latvian": "Lettisch",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Makedonisch",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Malaiisch",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Niederländisch",
|
||||
"Norwegian": "Norwegisch",
|
||||
"Romanian": "Rumänisch",
|
||||
"Russian": "Russisch",
|
||||
"Slovak": "Slowakisch",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Serbisch",
|
||||
"Swedish": "Schwedisch",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Thailändisch",
|
||||
"Turkish": "Türkisch",
|
||||
"Ukrainian": "Ukrainische",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinesisch (vereinfacht)",
|
||||
"Welsh": "Walisisch",
|
||||
"Belarusian": "Belarussisch",
|
||||
"GET": "ERHALTEN",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "LÖSCHEN",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"Use the many tools ChatGPT has to offer.": "Utilice las muchas herramientas que ChatGPT tiene para ofrecer.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "Preguntar a ChatGPT",
|
||||
"Ask Assistant": "Preguntar al asistente",
|
||||
"Generate Image": "Generar imagen",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Texto a voz",
|
||||
"Transcribe Audio": "Transcribir audio",
|
||||
"Translate Audio": "Traducir audio",
|
||||
"Extract Structured Data from Text": "Extraer datos estructurados del texto",
|
||||
"Custom API Call": "Llamada API personalizada",
|
||||
"Ask ChatGPT anything you want!": "¡Pregúntale lo que quieras!",
|
||||
"Ask a GPT assistant anything you want!": "Pídele a un asistente GPT lo que quieras!",
|
||||
"Generate an image using text-to-image models": "Generar una imagen usando modelos de texto a imagen",
|
||||
"Ask GPT a question about an image": "Hacer GPT una pregunta sobre una imagen",
|
||||
"Generate an audio recording from text": "Generar una grabación de audio desde texto",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcribir audio al texto usando el modelo susurro 1",
|
||||
"Translate audio to text using whisper-1 model": "Traducir audio al texto usando el modelo susurro 1",
|
||||
"Returns structured data from provided unstructured text.": "Devuelve datos estructurados del texto no estructurado.",
|
||||
"Make a custom API call to a specific endpoint": "Hacer una llamada API personalizada a un extremo específico",
|
||||
"Model": "Modelo",
|
||||
"Question": "Pregunta",
|
||||
"Temperature": "Temperatura",
|
||||
"Maximum Tokens": "Tokens máximos",
|
||||
"Top P": "Top P",
|
||||
"Frequency penalty": "Puntuación de frecuencia",
|
||||
"Presence penalty": "Penalización de presencia",
|
||||
"Memory Key": "Clave de memoria",
|
||||
"Roles": "Roles",
|
||||
"Assistant": "Asistente",
|
||||
"Prompt": "Petición",
|
||||
"Resolution": "Resolución",
|
||||
"Quality": "Calidad",
|
||||
"Image": "Imagen",
|
||||
"Detail": "Detalle",
|
||||
"Text": "Texto",
|
||||
"Speed": "Velocidad",
|
||||
"Voice": "Voz",
|
||||
"Output Format": "Formato de salida",
|
||||
"File Name": "Nombre del archivo",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Idioma del audio",
|
||||
"Unstructured Text": "Texto no estructurado",
|
||||
"Data Definition": "Definición de datos",
|
||||
"Method": "Método",
|
||||
"Headers": "Encabezados",
|
||||
"Query Parameters": "Parámetros de consulta",
|
||||
"Body": "Cuerpo",
|
||||
"Response is Binary ?": "¿Respuesta es binaria?",
|
||||
"No Error on Failure": "No hay ningún error en fallo",
|
||||
"Timeout (in seconds)": "Tiempo de espera (en segundos)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "El modelo que generará la terminación. Algunos modelos son adecuados para tareas de lenguaje natural, otros se especializan en código.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controles aleatorios: La reducción de resultados en terminaciones menos aleatorias. A medida que la temperatura se acerca a cero, el modelo se volverá determinista y repetitivo.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "El número máximo de tokens a generar. Las solicitudes pueden usar hasta 2,048 o 4,096 tokens compartidos entre prompt y terminación dependiendo del modelo. No establecer el valor máximo y dejar algunas fichas para la entrada. (Una ficha es aproximadamente 4 caracteres para el texto normal en inglés)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Una alternativa al muestreo con temperatura, llamado muestreo de núcleos, donde el modelo considera los resultados de los tokens con masa de probabilidad superior_p. Por lo tanto, 0,1 significa que sólo se consideran las fichas que componen la masa superior del 10% de probabilidad.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Número entre -2.0 y 2.0. Los valores positivos penalizan nuevos tokens basados en su frecuencia existente en el texto hasta ahora, lo que reduce la probabilidad del modelo de repetir la misma línea literalmente.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Número entre -2.0 y 2.0. Los valores positivos penalizan las nuevas fichas basándose en si aparecen en el texto hasta ahora, aumentando la probabilidad de que el modo hable de nuevos temas.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "Una clave de memoria que mantendrá el historial de chat compartido a través de ejecuciones y flujos. Manténgalo vacío para dejar ChatGPT sin memoria de mensajes anteriores.",
|
||||
"Array of roles to specify more accurate response": "Matriz de roles para especificar una respuesta más precisa",
|
||||
"The assistant which will generate the completion.": "El asistente que generará la terminación.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "Una clave de memoria que mantendrá el historial de chat compartido entre ejecuciones y flujos. Manténgalo vacío para dejar a tu asistente sin memoria de mensajes anteriores.",
|
||||
"The model which will generate the image.": "El modelo que generará la imagen.",
|
||||
"The resolution to generate the image in.": "La resolución en la que generar la imagen.",
|
||||
"Standard is faster, HD has better details.": "El estándar es más rápido, HD tiene mejores detalles.",
|
||||
"The image URL or file you want GPT's vision to read.": "La URL o archivo de la imagen que desea que la visión de GPT lea.",
|
||||
"What do you want ChatGPT to tell you about the image?": "¿Qué quieres que ChatGPT te cuente sobre la imagen?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Controlar cómo el modelo procesa la imagen y genera comprensión textual.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "El número máximo de tokens a generar. Las solicitudes pueden usar hasta 2,048 o 4,096 tokens compartidos entre prompt y completation, no establezca el valor máximo y deje algunas fichas para la entrada. El límite exacto varía según el modelo. (Un token es de aproximadamente 4 caracteres para el texto normal en inglés)",
|
||||
"The text you want to hear.": "El texto que quiere oír.",
|
||||
"The model which will generate the audio.": "El modelo que generará el audio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "La velocidad del audio. Mínimo es 0.25 y máximo 4.00.",
|
||||
"The voice to generate the audio in.": "La voz en la que generar el audio.",
|
||||
"The format you want the audio file in.": "El formato en el que desea el archivo de audio.",
|
||||
"The name of the output audio file (without extension).": "El nombre del archivo de salida de audio (sin extensión).",
|
||||
"Audio file to transcribe": "Archivo de audio a transcribir",
|
||||
"Language of the audio file the default is en (English).": "Idioma del archivo de audio que el predeterminado es (inglés).",
|
||||
"Audio file to translate": "Archivo de audio a traducir",
|
||||
"Authorization headers are injected automatically from your connection.": "Las cabeceras de autorización se inyectan automáticamente desde tu conexión.",
|
||||
"Enable for files like PDFs, images, etc..": "Activar para archivos como PDFs, imágenes, etc.",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fable",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "shimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "Español",
|
||||
"Italian": "Italiano",
|
||||
"English": "Inglés",
|
||||
"Portuguese": "Portugués",
|
||||
"German": "Alemán",
|
||||
"Japanese": "Japonés",
|
||||
"Polish": "Polaco",
|
||||
"Arabic": "Árabe",
|
||||
"Afrikaans": "Afrikaans",
|
||||
"Azerbaijani": "Azerbaiyano",
|
||||
"Bulgarian": "Búlgaro",
|
||||
"Bosnian": "Bosnio",
|
||||
"Catalan": "Catalán",
|
||||
"Czech": "Checo",
|
||||
"Danish": "Danés",
|
||||
"Greek": "Griego",
|
||||
"Estonian": "Estonio",
|
||||
"Persian": "Persa",
|
||||
"Finnish": "Finlandés",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "Francés",
|
||||
"Galician": "Gallego",
|
||||
"Hebrew": "Hebreo",
|
||||
"Hindi": "Hindi",
|
||||
"Croatian": "Croata",
|
||||
"Hungarian": "Húngaro",
|
||||
"Armenian": "Armenio",
|
||||
"Indonesian": "Indonesio",
|
||||
"Icelandic": "Islandés",
|
||||
"Kazakh": "Kazajo",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Coreano",
|
||||
"Lithuanian": "Lituano",
|
||||
"Latvian": "Letón",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macedonio",
|
||||
"Marathi": "Maratí",
|
||||
"Malay": "Malayo",
|
||||
"Nepali": "Nepalí",
|
||||
"Dutch": "Holandés",
|
||||
"Norwegian": "Noruego",
|
||||
"Romanian": "Rumano",
|
||||
"Russian": "Ruso",
|
||||
"Slovak": "Eslovaco",
|
||||
"Slovenian": "Esloveno",
|
||||
"Serbian": "Serbio",
|
||||
"Swedish": "Sueco",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Tailandés",
|
||||
"Turkish": "Turco",
|
||||
"Ukrainian": "Ucraniano",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamita",
|
||||
"Chinese (Simplified)": "Chino (simplificado)",
|
||||
"Welsh": "Galés",
|
||||
"Belarusian": "Bielorruso",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"Use the many tools ChatGPT has to offer.": "Utilisez les nombreux outils que ChatGPT a à offrir.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "Demander au chat GPT",
|
||||
"Ask Assistant": "Demander à l'assistant",
|
||||
"Generate Image": "Générer une image",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Texte en synthèse vocale",
|
||||
"Transcribe Audio": "Transcrire l'audio",
|
||||
"Translate Audio": "Traduire l'audio",
|
||||
"Extract Structured Data from Text": "Extraire les données structurées du texte",
|
||||
"Custom API Call": "Appel API personnalisé",
|
||||
"Ask ChatGPT anything you want!": "Demandez à ChatGPT ce que vous voulez !",
|
||||
"Ask a GPT assistant anything you want!": "Demandez à un assistant GPT ce que vous voulez!",
|
||||
"Generate an image using text-to-image models": "Générer une image en utilisant des modèles text-to-image",
|
||||
"Ask GPT a question about an image": "Poser une question à propos d'une image à GPT",
|
||||
"Generate an audio recording from text": "Générer un enregistrement audio à partir de texte",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcrire l'audio en texte en utilisant le modèle chuchoter 1",
|
||||
"Translate audio to text using whisper-1 model": "Traduire l'audio en texte en utilisant le modèle chuchoter - 1",
|
||||
"Returns structured data from provided unstructured text.": "Retourne les données structurées à partir du texte non structuré.",
|
||||
"Make a custom API call to a specific endpoint": "Passez un appel API personnalisé à un point de terminaison spécifique",
|
||||
"Model": "Modélisation",
|
||||
"Question": "Question",
|
||||
"Temperature": "Température",
|
||||
"Maximum Tokens": "Maximum de jetons",
|
||||
"Top P": "Top P",
|
||||
"Frequency penalty": "Malus de fréquence",
|
||||
"Presence penalty": "Malus de présence",
|
||||
"Memory Key": "Clé de mémoire",
|
||||
"Roles": "Rôles",
|
||||
"Assistant": "Assistant",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Résolution",
|
||||
"Quality": "Qualité",
|
||||
"Image": "Image",
|
||||
"Detail": "Détail",
|
||||
"Text": "Texte du texte",
|
||||
"Speed": "Rapidité",
|
||||
"Voice": "Voix",
|
||||
"Output Format": "Format de sortie",
|
||||
"File Name": "Nom du fichier",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Langue de l'Audio",
|
||||
"Unstructured Text": "Texte non structuré",
|
||||
"Data Definition": "Définition des données",
|
||||
"Method": "Méthode",
|
||||
"Headers": "En-têtes",
|
||||
"Query Parameters": "Paramètres de requête",
|
||||
"Body": "Corps",
|
||||
"Response is Binary ?": "La réponse est Binaire ?",
|
||||
"No Error on Failure": "Aucune erreur en cas d'échec",
|
||||
"Timeout (in seconds)": "Délai d'attente (en secondes)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "Le modèle qui va générer la complétion. Certains modèles sont adaptés aux tâches de langage naturel, d'autres se spécialisent dans le code.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Contrôle aléatoirement : La baisse des résultats est moins aléatoire, alors que la température approche de zéro, le modèle devient déterministe et répétitif.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "Le nombre maximum de jetons à générer. Les requêtes peuvent utiliser jusqu'à 2 048 ou 4 096 jetons partagés entre l'invite et la complétion selon le modèle. Ne pas définir la valeur au maximum et laisser des jetons pour l'entrée. (un jeton est d'environ 4 caractères pour le texte anglais normal)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Une alternative à l'échantillonnage à la température, appelée l'échantillonnage du noyau, où le modèle considère les résultats des jetons avec la masse de probabilité top_p. Ainsi, 0,1 signifie que seuls les jetons constituant la masse de probabilité la plus élevée de 10% sont pris en compte.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Numéroter entre -2.0 et 2.0. Les valeurs positives pénalisent les nouveaux jetons en fonction de leur fréquence existante dans le texte jusqu'à présent, diminuant la probabilité du modèle de répéter le verbatim de la même ligne.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Numéroter entre -2.0 et 2.0. Les valeurs positives pénalisent les nouveaux jetons en fonction du fait qu'ils apparaissent dans le texte jusqu'à présent, ce qui augmente la probabilité du mode de parler de nouveaux sujets.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "Une clé de mémoire qui conservera l'historique des discussions partagées à travers les exécutions et les flux. Gardez-la vide pour laisser ChatGPT sans mémoire pour les messages précédents.",
|
||||
"Array of roles to specify more accurate response": "Tableau de rôles pour spécifier une réponse plus précise",
|
||||
"The assistant which will generate the completion.": "L'assistant qui va générer la complétion.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "Une clé de mémoire qui conservera l'historique des discussions partagées entre les exécutions et les flux. Laissez vide pour laisser votre assistant sans mémoire pour les messages précédents.",
|
||||
"The model which will generate the image.": "Le modèle qui va générer l'image.",
|
||||
"The resolution to generate the image in.": "La résolution pour générer l'image.",
|
||||
"Standard is faster, HD has better details.": "Standard est plus rapide, la HD a de meilleurs détails.",
|
||||
"The image URL or file you want GPT's vision to read.": "L'URL ou le fichier de l'image que vous voulez que la vision GPT soit lue.",
|
||||
"What do you want ChatGPT to tell you about the image?": "Que voulez-vous que ChatGPT vous parle de l'image ?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Contrôler comment le modèle traite l'image et génère la compréhension textuelle.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "Le nombre maximum de jetons à générer. Les requêtes peuvent utiliser jusqu'à 2 048 ou 4 096 jetons partagés entre l'invite et la complétion, ne pas définir la valeur au maximum et laisser des jetons pour l'entrée. La limite exacte varie selon le modèle. (un jeton est d'environ 4 caractères pour le texte anglais normal)",
|
||||
"The text you want to hear.": "Le texte que vous voulez entendre.",
|
||||
"The model which will generate the audio.": "Le modèle qui va générer l'audio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "La vitesse de l'audio. Le minimum est de 0,25 et le maximum est de 4,00.",
|
||||
"The voice to generate the audio in.": "La voix pour générer l'audio.",
|
||||
"The format you want the audio file in.": "Le format dans lequel vous voulez le fichier audio.",
|
||||
"The name of the output audio file (without extension).": "Le nom du fichier audio de sortie (sans extension).",
|
||||
"Audio file to transcribe": "Fichier audio à transcrire",
|
||||
"Language of the audio file the default is en (English).": "Langue du fichier audio, par défaut en (anglais).",
|
||||
"Audio file to translate": "Fichier audio à traduire",
|
||||
"Authorization headers are injected automatically from your connection.": "Les en-têtes d'autorisation sont injectés automatiquement à partir de votre connexion.",
|
||||
"Enable for files like PDFs, images, etc..": "Activer pour les fichiers comme les PDFs, les images, etc.",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fable",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "shimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "Espagnol",
|
||||
"Italian": "Italien",
|
||||
"English": "Anglais",
|
||||
"Portuguese": "Portugais",
|
||||
"German": "Allemand",
|
||||
"Japanese": "Japonais",
|
||||
"Polish": "Polonais",
|
||||
"Arabic": "Arabe",
|
||||
"Afrikaans": "afrikaans",
|
||||
"Azerbaijani": "Azerbaïdjanais",
|
||||
"Bulgarian": "Bulgare",
|
||||
"Bosnian": "Bosniaque",
|
||||
"Catalan": "Catalan",
|
||||
"Czech": "Tchèque",
|
||||
"Danish": "Danois",
|
||||
"Greek": "Grec",
|
||||
"Estonian": "estonien",
|
||||
"Persian": "Perse",
|
||||
"Finnish": "Finlandais",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "Français",
|
||||
"Galician": "Galicien",
|
||||
"Hebrew": "Hébreu",
|
||||
"Hindi": "Hindi",
|
||||
"Croatian": "Croate",
|
||||
"Hungarian": "Hongrois",
|
||||
"Armenian": "Arménien",
|
||||
"Indonesian": "Indonésien",
|
||||
"Icelandic": "Islandais",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Coréen",
|
||||
"Lithuanian": "lituanien",
|
||||
"Latvian": "Letton",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macédonien",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Malais",
|
||||
"Nepali": "Népalais",
|
||||
"Dutch": "Néerlandais",
|
||||
"Norwegian": "Norvégien",
|
||||
"Romanian": "Roumain",
|
||||
"Russian": "Russe",
|
||||
"Slovak": "Slovaque",
|
||||
"Slovenian": "Slovène",
|
||||
"Serbian": "Serbe",
|
||||
"Swedish": "Suédois",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamoul",
|
||||
"Thai": "Thaï",
|
||||
"Turkish": "Turc",
|
||||
"Ukrainian": "Ukrainien",
|
||||
"Urdu": "Ourdou",
|
||||
"Vietnamese": "Vietnamien",
|
||||
"Chinese (Simplified)": "Chinois (simplifié)",
|
||||
"Welsh": "Gallois",
|
||||
"Belarusian": "Biélorusse",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"OpenAI": "OpenAI",
|
||||
"Use the many tools ChatGPT has to offer.": "Use the many tools ChatGPT has to offer.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "Ask ChatGPT",
|
||||
"Ask Assistant": "Ask Assistant",
|
||||
"Generate Image": "Generate Image",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Text-to-Speech",
|
||||
"Transcribe Audio": "Transcribe Audio",
|
||||
"Translate Audio": "Translate Audio",
|
||||
"Extract Structured Data from Text": "Extract Structured Data from Text",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask ChatGPT anything you want!": "Ask ChatGPT anything you want!",
|
||||
"Ask a GPT assistant anything you want!": "Ask a GPT assistant anything you want!",
|
||||
"Generate an image using text-to-image models": "Generate an image using text-to-image models",
|
||||
"Ask GPT a question about an image": "Ask GPT a question about an image",
|
||||
"Generate an audio recording from text": "Generate an audio recording from text",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcribe audio to text using whisper-1 model",
|
||||
"Translate audio to text using whisper-1 model": "Translate audio to text using whisper-1 model",
|
||||
"Returns structured data from provided unstructured text.": "Returns structured data from provided unstructured text.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Maximum Tokens": "Maximum Tokens",
|
||||
"Top P": "Top P",
|
||||
"Frequency penalty": "Frequency penalty",
|
||||
"Presence penalty": "Presence penalty",
|
||||
"Memory Key": "Memory Key",
|
||||
"Roles": "Roles",
|
||||
"Assistant": "Assistant",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Resolution",
|
||||
"Quality": "Quality",
|
||||
"Image": "Image",
|
||||
"Detail": "Detail",
|
||||
"Text": "Text",
|
||||
"Speed": "Speed",
|
||||
"Voice": "Voice",
|
||||
"Output Format": "Output Format",
|
||||
"File Name": "File Name",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Language of the Audio",
|
||||
"Unstructured Text": "Unstructured Text",
|
||||
"Data Definition": "Data Definition",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.",
|
||||
"Array of roles to specify more accurate response": "Array of roles to specify more accurate response",
|
||||
"The assistant which will generate the completion.": "The assistant which will generate the completion.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.",
|
||||
"The model which will generate the image.": "The model which will generate the image.",
|
||||
"The resolution to generate the image in.": "The resolution to generate the image in.",
|
||||
"Standard is faster, HD has better details.": "Standard is faster, HD has better details.",
|
||||
"The image URL or file you want GPT's vision to read.": "The image URL or file you want GPT's vision to read.",
|
||||
"What do you want ChatGPT to tell you about the image?": "What do you want ChatGPT to tell you about the image?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Control how the model processes the image and generates textual understanding.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)",
|
||||
"The text you want to hear.": "The text you want to hear.",
|
||||
"The model which will generate the audio.": "The model which will generate the audio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "The speed of the audio. Minimum is 0.25 and maximum is 4.00.",
|
||||
"The voice to generate the audio in.": "The voice to generate the audio in.",
|
||||
"The format you want the audio file in.": "The format you want the audio file in.",
|
||||
"The name of the output audio file (without extension).": "The name of the output audio file (without extension).",
|
||||
"Audio file to transcribe": "Audio file to transcribe",
|
||||
"Language of the audio file the default is en (English).": "Language of the audio file the default is en (English).",
|
||||
"Audio file to translate": "Audio file to translate",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fable",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "shimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "Spanish",
|
||||
"Italian": "Italian",
|
||||
"English": "English",
|
||||
"Portuguese": "Portuguese",
|
||||
"German": "German",
|
||||
"Japanese": "Japanese",
|
||||
"Polish": "Polish",
|
||||
"Arabic": "Arabic",
|
||||
"Afrikaans": "Afrikaans",
|
||||
"Azerbaijani": "Azerbaijani",
|
||||
"Bulgarian": "Bulgarian",
|
||||
"Bosnian": "Bosnian",
|
||||
"Catalan": "Catalan",
|
||||
"Czech": "Czech",
|
||||
"Danish": "Danish",
|
||||
"Greek": "Greek",
|
||||
"Estonian": "Estonian",
|
||||
"Persian": "Persian",
|
||||
"Finnish": "Finnish",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "French",
|
||||
"Galician": "Galician",
|
||||
"Hebrew": "Hebrew",
|
||||
"Hindi": "Hindi",
|
||||
"Croatian": "Croatian",
|
||||
"Hungarian": "Hungarian",
|
||||
"Armenian": "Armenian",
|
||||
"Indonesian": "Indonesian",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Korean",
|
||||
"Lithuanian": "Lithuanian",
|
||||
"Latvian": "Latvian",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macedonian",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Malay",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Dutch",
|
||||
"Norwegian": "Norwegian",
|
||||
"Romanian": "Romanian",
|
||||
"Russian": "Russian",
|
||||
"Slovak": "Slovak",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Serbian",
|
||||
"Swedish": "Swedish",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Thai",
|
||||
"Turkish": "Turkish",
|
||||
"Ukrainian": "Ukrainian",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinese (Simplified)",
|
||||
"Welsh": "Welsh",
|
||||
"Belarusian": "Belarusian",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"OpenAI": "OpenAI",
|
||||
"Use the many tools ChatGPT has to offer.": "Use the many tools ChatGPT has to offer.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "Ask ChatGPT",
|
||||
"Ask Assistant": "Ask Assistant",
|
||||
"Generate Image": "Generate Image",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Text-to-Speech",
|
||||
"Transcribe Audio": "Transcribe Audio",
|
||||
"Translate Audio": "Translate Audio",
|
||||
"Extract Structured Data from Text": "Extract Structured Data from Text",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask ChatGPT anything you want!": "Ask ChatGPT anything you want!",
|
||||
"Ask a GPT assistant anything you want!": "Ask a GPT assistant anything you want!",
|
||||
"Generate an image using text-to-image models": "Generate an image using text-to-image models",
|
||||
"Ask GPT a question about an image": "Ask GPT a question about an image",
|
||||
"Generate an audio recording from text": "Generate an audio recording from text",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcribe audio to text using whisper-1 model",
|
||||
"Translate audio to text using whisper-1 model": "Translate audio to text using whisper-1 model",
|
||||
"Returns structured data from provided unstructured text.": "Returns structured data from provided unstructured text.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Maximum Tokens": "Maximum Tokens",
|
||||
"Top P": "Top P",
|
||||
"Frequency penalty": "Frequency penalty",
|
||||
"Presence penalty": "Presence penalty",
|
||||
"Memory Key": "Memory Key",
|
||||
"Roles": "Roles",
|
||||
"Assistant": "Assistant",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Resolution",
|
||||
"Quality": "Quality",
|
||||
"Image": "Image",
|
||||
"Detail": "Detail",
|
||||
"Text": "Text",
|
||||
"Speed": "Speed",
|
||||
"Voice": "Voice",
|
||||
"Output Format": "Output Format",
|
||||
"File Name": "File Name",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Language of the Audio",
|
||||
"Unstructured Text": "Unstructured Text",
|
||||
"Data Definition": "Data Definition",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.",
|
||||
"Array of roles to specify more accurate response": "Array of roles to specify more accurate response",
|
||||
"The assistant which will generate the completion.": "The assistant which will generate the completion.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.",
|
||||
"The model which will generate the image.": "The model which will generate the image.",
|
||||
"The resolution to generate the image in.": "The resolution to generate the image in.",
|
||||
"Standard is faster, HD has better details.": "Standard is faster, HD has better details.",
|
||||
"The image URL or file you want GPT's vision to read.": "The image URL or file you want GPT's vision to read.",
|
||||
"What do you want ChatGPT to tell you about the image?": "What do you want ChatGPT to tell you about the image?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Control how the model processes the image and generates textual understanding.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)",
|
||||
"The text you want to hear.": "The text you want to hear.",
|
||||
"The model which will generate the audio.": "The model which will generate the audio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "The speed of the audio. Minimum is 0.25 and maximum is 4.00.",
|
||||
"The voice to generate the audio in.": "The voice to generate the audio in.",
|
||||
"The format you want the audio file in.": "The format you want the audio file in.",
|
||||
"The name of the output audio file (without extension).": "The name of the output audio file (without extension).",
|
||||
"Audio file to transcribe": "Audio file to transcribe",
|
||||
"Language of the audio file the default is en (English).": "Language of the audio file the default is en (English).",
|
||||
"Audio file to translate": "Audio file to translate",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fable",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "shimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "Spanish",
|
||||
"Italian": "Italian",
|
||||
"English": "English",
|
||||
"Portuguese": "Portuguese",
|
||||
"German": "German",
|
||||
"Japanese": "Japanese",
|
||||
"Polish": "Polish",
|
||||
"Arabic": "Arabic",
|
||||
"Afrikaans": "Afrikaans",
|
||||
"Azerbaijani": "Azerbaijani",
|
||||
"Bulgarian": "Bulgarian",
|
||||
"Bosnian": "Bosnian",
|
||||
"Catalan": "Catalan",
|
||||
"Czech": "Czech",
|
||||
"Danish": "Danish",
|
||||
"Greek": "Greek",
|
||||
"Estonian": "Estonian",
|
||||
"Persian": "Persian",
|
||||
"Finnish": "Finnish",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "French",
|
||||
"Galician": "Galician",
|
||||
"Hebrew": "Hebrew",
|
||||
"Hindi": "Hindi",
|
||||
"Croatian": "Croatian",
|
||||
"Hungarian": "Hungarian",
|
||||
"Armenian": "Armenian",
|
||||
"Indonesian": "Indonesian",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Korean",
|
||||
"Lithuanian": "Lithuanian",
|
||||
"Latvian": "Latvian",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macedonian",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Malay",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Dutch",
|
||||
"Norwegian": "Norwegian",
|
||||
"Romanian": "Romanian",
|
||||
"Russian": "Russian",
|
||||
"Slovak": "Slovak",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Serbian",
|
||||
"Swedish": "Swedish",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Thai",
|
||||
"Turkish": "Turkish",
|
||||
"Ukrainian": "Ukrainian",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinese (Simplified)",
|
||||
"Welsh": "Welsh",
|
||||
"Belarusian": "Belarusian",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"Use the many tools ChatGPT has to offer.": "ChatGPTが提供する多くのツールを使用してください。",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "チャットGPTに聞く",
|
||||
"Ask Assistant": "アシスタントに聞く",
|
||||
"Generate Image": "画像を生成する",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "テキスト読み上げ",
|
||||
"Transcribe Audio": "オーディオを変換する",
|
||||
"Translate Audio": "音声を翻訳",
|
||||
"Extract Structured Data from Text": "テキストから構造化されたデータを抽出",
|
||||
"Custom API Call": "カスタムAPI通話",
|
||||
"Ask ChatGPT anything you want!": "あなたが望むものは何でもChatGPTに聞いてください!",
|
||||
"Ask a GPT assistant anything you want!": "あなたが望むGPTアシスタントを何でも頼みましょう!",
|
||||
"Generate an image using text-to-image models": "テキストto-image モデルを使用して画像を生成します",
|
||||
"Ask GPT a question about an image": "画像についてGPTに質問する",
|
||||
"Generate an audio recording from text": "テキストからオーディオ録音を生成する",
|
||||
"Transcribe audio to text using whisper-1 model": "whisper-1 モデルを使用して音声をテキストに変換",
|
||||
"Translate audio to text using whisper-1 model": "ささやき1モデルを使用して音声をテキストに翻訳する",
|
||||
"Returns structured data from provided unstructured text.": "提供された非構造化テキストから構造化データを返します。",
|
||||
"Make a custom API call to a specific endpoint": "特定のエンドポイントへのカスタム API コールを実行します。",
|
||||
"Model": "モデル",
|
||||
"Question": "質問",
|
||||
"Temperature": "温度",
|
||||
"Maximum Tokens": "最大トークン",
|
||||
"Top P": "トップ P",
|
||||
"Frequency penalty": "頻度ペナルティ",
|
||||
"Presence penalty": "プレゼンスペナルティ",
|
||||
"Memory Key": "メモリーキー",
|
||||
"Roles": "ロール",
|
||||
"Assistant": "アシスタント",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "解像度",
|
||||
"Quality": "品質",
|
||||
"Image": "画像",
|
||||
"Detail": "詳細",
|
||||
"Text": "テキスト",
|
||||
"Speed": "速度",
|
||||
"Voice": "音声",
|
||||
"Output Format": "出力形式",
|
||||
"File Name": "ファイル名",
|
||||
"Audio": "オーディオ",
|
||||
"Language of the Audio": "オーディオの言語",
|
||||
"Unstructured Text": "構造化されていないテキスト",
|
||||
"Data Definition": "データ定義",
|
||||
"Method": "方法",
|
||||
"Headers": "ヘッダー",
|
||||
"Query Parameters": "クエリパラメータ",
|
||||
"Body": "本文",
|
||||
"Response is Binary ?": "応答はバイナリですか?",
|
||||
"No Error on Failure": "失敗時にエラーはありません",
|
||||
"Timeout (in seconds)": "タイムアウト(秒)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "補完を生成するモデル。自然言語タスクに適しているモデルもあれば、コードに特化したモデルもあります。",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "温度がゼロに近づくにつれて、モデルは決定論的で反復的になります。",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "生成するトークンの最大数。リクエストは、モデルに応じてプロンプトと補完の間で共有される最大2,048または4,096トークンを使用できます。 値を最大値に設定せず、トークンを残して入力します。(通常の英語テキストでは1つのトークンは約4文字です)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "核サンプリングと呼ばれる温度でのサンプリングの代わりに、モデルはtop_p確率質量を持つトークンの結果を考慮します。 つまり、0.1は上位10%の確率質量からなるトークンのみを考慮することになります。",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "-2.0 から 2.0 までの数字。 正の値は、これまでのテキスト内の既存の頻度に基づいて新しいトークンを罰するため、モデルが同じ行を元に繰り返す可能性が低下します。",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "-2.0 から 2.0 までの数字。 肯定的な値は、これまでのところテキストに表示されるかどうかに基づいて新しいトークンを罰し、モードが新しいトピックについて話す可能性を高めます。",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "実行とフロー間で共有されるチャット履歴を保持するメモリキー。以前のメッセージのメモリなしでChatGPTを残すには空白のままにしてください。",
|
||||
"Array of roles to specify more accurate response": "より正確な応答を指定するロールの配列",
|
||||
"The assistant which will generate the completion.": "補完を生成するアシスタント。",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "実行とフロー間で共有されるチャット履歴を保持するメモリカード。 前のメッセージを記憶せずにアシスタントを残すには空にしておいてください。",
|
||||
"The model which will generate the image.": "画像を生成するモデル。",
|
||||
"The resolution to generate the image in.": "画像を生成する解像度。",
|
||||
"Standard is faster, HD has better details.": "標準は速く、HDは詳細を持っています。",
|
||||
"The image URL or file you want GPT's vision to read.": "GPTのビジョンを読むためのイメージ URLまたはファイル。",
|
||||
"What do you want ChatGPT to tell you about the image?": "ChatGPTに画像について教えてほしいことはありますか?",
|
||||
"Control how the model processes the image and generates textual understanding.": "モデルがどのように画像を処理し、テキスト理解を生成するかを制御します。",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "生成するトークンの最大数。 リクエストは、プロンプトと完了の間で共有された最大2,048または4,096トークンを使用できます。 値を最大値に設定しないで、トークンを入力してください。 正確な制限はモデルによって異なります。(通常の英文では1つのトークンが約4文字です)",
|
||||
"The text you want to hear.": "聴きたいテキスト。",
|
||||
"The model which will generate the audio.": "オーディオを生成するモデル。",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "オーディオの速度。最小値は 0.25 、最大値は 4.00 です。",
|
||||
"The voice to generate the audio in.": "音声を生成します。",
|
||||
"The format you want the audio file in.": "オーディオファイルの形式を指定します。",
|
||||
"The name of the output audio file (without extension).": "出力オーディオファイルの名前 (拡張子なし)。",
|
||||
"Audio file to transcribe": "変換するオーディオファイル",
|
||||
"Language of the audio file the default is en (English).": "音声ファイルの言語は、デフォルトは en (英語) です。",
|
||||
"Audio file to translate": "翻訳するオーディオファイル",
|
||||
"Authorization headers are injected automatically from your connection.": "認証ヘッダは接続から自動的に注入されます。",
|
||||
"Enable for files like PDFs, images, etc..": "PDF、画像などのファイルを有効にします。",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "寓話的",
|
||||
"onyx": "オニクス",
|
||||
"nova": "nova",
|
||||
"shimmer": "シマー",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "スペイン語",
|
||||
"Italian": "イタリア語",
|
||||
"English": "英語",
|
||||
"Portuguese": "ポルトガル語",
|
||||
"German": "ドイツ語",
|
||||
"Japanese": "日本語",
|
||||
"Polish": "ポーランド語",
|
||||
"Arabic": "アラビア文字",
|
||||
"Afrikaans": "アフリカーンス語",
|
||||
"Azerbaijani": "アゼルバイジャン語",
|
||||
"Bulgarian": "ブルガリア語",
|
||||
"Bosnian": "ボスニア語",
|
||||
"Catalan": "カタロニア語",
|
||||
"Czech": "チェコ語",
|
||||
"Danish": "デンマーク語",
|
||||
"Greek": "ギリシア語",
|
||||
"Estonian": "エストニア語",
|
||||
"Persian": "ペルシャ語",
|
||||
"Finnish": "フィンランド語",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "フランス語",
|
||||
"Galician": "ガリシア語",
|
||||
"Hebrew": "ヘブライ文字",
|
||||
"Hindi": "ヒンディー語",
|
||||
"Croatian": "クロアチア語",
|
||||
"Hungarian": "ハンガリー語",
|
||||
"Armenian": "アルメニア語",
|
||||
"Indonesian": "インドネシア語",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Korean",
|
||||
"Lithuanian": "リトアニア語",
|
||||
"Latvian": "ラトビア語",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "マケドニア語",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "マレー語",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "オランダ語",
|
||||
"Norwegian": "ノルウェー語",
|
||||
"Romanian": "ルーマニア語",
|
||||
"Russian": "ロシア語",
|
||||
"Slovak": "スロバキア語",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "セルビア語",
|
||||
"Swedish": "スウェーデン語",
|
||||
"Swahili": "スワヒリ語",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "タイ語",
|
||||
"Turkish": "トルコ語",
|
||||
"Ukrainian": "ウクライナ語",
|
||||
"Urdu": "ウルドゥー語",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "中国語 (簡体字)",
|
||||
"Welsh": "ウェールズ語",
|
||||
"Belarusian": "ベラルーシ語",
|
||||
"GET": "取得",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "削除",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"Use the many tools ChatGPT has to offer.": "Gebruik de vele tools die ChatGPT te bieden heeft.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Volg deze instructies om je OpenAI API Key te verkrijgen:\n\n1. Bezoek de volgende website: https://platform.openai. On/account/api-keys.\n2. Eenmaal op de website, lokaliseer en klik op de optie om je OpenAI API-sleutel te verkrijgen.\n\nHet wordt ten zeerste aanbevolen dat u uw creditcardgegevens aan uw OpenAI-account toevoegt en dat u een upgrade naar het betaalde abonnement **voordat** van de API-sleutel uitvoert. Dit helpt u 429 fouten te voorkomen.\n",
|
||||
"Ask ChatGPT": "ChatGPT vragen",
|
||||
"Ask Assistant": "Vraag assistent",
|
||||
"Generate Image": "Afbeelding genereren",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Tekst-naar-spraak",
|
||||
"Transcribe Audio": "Audio Transcrimeren",
|
||||
"Translate Audio": "Audio vertalen",
|
||||
"Extract Structured Data from Text": "Verwijder gestructureerde gegevens uit tekst",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask ChatGPT anything you want!": "Vraag ChatGPT alles wat je maar wilt!",
|
||||
"Ask a GPT assistant anything you want!": "Vraag een GPT-assistent alles wat je maar wilt!",
|
||||
"Generate an image using text-to-image models": "Een afbeelding genereren met behulp van text-to-image modellen",
|
||||
"Ask GPT a question about an image": "GPT een vraag stellen over een afbeelding",
|
||||
"Generate an audio recording from text": "Een audio-opname vanuit de tekst genereren",
|
||||
"Transcribe audio to text using whisper-1 model": "Audio naar tekst schrijven met behulp van fluister-1 model",
|
||||
"Translate audio to text using whisper-1 model": "Vertaal audio naar tekst met fluister-1 model",
|
||||
"Returns structured data from provided unstructured text.": "Retourneert gestructureerde gegevens uit de verstrekte ongestructureerde tekst.",
|
||||
"Make a custom API call to a specific endpoint": "Maak een aangepaste API call naar een specifiek eindpunt",
|
||||
"Model": "Model",
|
||||
"Question": "Vraag",
|
||||
"Temperature": "Temperatuur",
|
||||
"Maximum Tokens": "Maximaal aantal tokens",
|
||||
"Top P": "Boven P",
|
||||
"Frequency penalty": "Frequentie boete",
|
||||
"Presence penalty": "Presence boete",
|
||||
"Memory Key": "Geheugen Sleutel",
|
||||
"Roles": "Rollen",
|
||||
"Assistant": "Assistent",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Resolutie",
|
||||
"Quality": "Kwaliteit",
|
||||
"Image": "Afbeelding",
|
||||
"Detail": "Detail",
|
||||
"Text": "Tekstveld",
|
||||
"Speed": "Snelheid",
|
||||
"Voice": "Stem",
|
||||
"Output Format": "Uitvoer formaat",
|
||||
"File Name": "File Name",
|
||||
"Audio": "Geluid",
|
||||
"Language of the Audio": "Taal van de audio",
|
||||
"Unstructured Text": "Ongestructureerde tekst",
|
||||
"Data Definition": "Datadefinitie",
|
||||
"Method": "Methode",
|
||||
"Headers": "Kopteksten",
|
||||
"Query Parameters": "Query parameters",
|
||||
"Body": "Lichaam",
|
||||
"Response is Binary ?": "Antwoord is binair?",
|
||||
"No Error on Failure": "Geen fout bij fout",
|
||||
"Timeout (in seconds)": "Time-out (in seconden)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "Het model dat de voltooiing zal genereren. Sommige modellen zijn geschikt voor natuurlijke taaltaken, andere zijn gespecialiseerd in code.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Bestuurt willekeurigheid: Het verlagen van de temperatuur resulteert in minder willekeurige aanvullingen. Zodra de temperatuur nul nadert, zal het model deterministisch en herhalend worden.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "Het maximale aantal te genereren tokens kan gebruikt worden tot 2.048 of 4.096 tokens gedeeld tussen de prompt en de voltooiing, afhankelijk van het model. Stel de waarde niet in op een maximum en laat sommige tokens voor de invoer. (Eén token is ongeveer 4 tekens voor normale Engelse tekst)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Een alternatief voor bemonstering met de temperatuur, genaamd nucleus sampling, waarbij het model de resultaten van de tokens met top_p waarschijnlijkheid ziet. 0.1 betekent dus dat alleen de tokens die de grootste massa van 10 procent vormen, worden overwogen.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Nummer tussen -2.0 en 2.0. Positieve waarden bestraffen nieuwe tokens op basis van hun bestaande frequentie in de tekst tot nu toe, waardoor de waarschijnlijkheid van het model om dezelfde lijn te herhalen afneemt.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Nummer tussen -2.0 en 2.0. Positieve waarden bestraffen nieuwe tokens op basis van de vraag of ze tot nu toe in de tekst staan, waardoor de modus meer kans maakt om over nieuwe onderwerpen te praten.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "Een geheugensleutel waarmee de chatgeschiedenis gedeeld blijft over uitvoeringen en stromen. Houd het leeg om ChatGPT te verlaten zonder het geheugen van vorige berichten.",
|
||||
"Array of roles to specify more accurate response": "Array of roles om een nauwkeuriger antwoord te geven",
|
||||
"The assistant which will generate the completion.": "De assistent die de voltooiing zal genereren.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "Een geheugensleutel die de chatgeschiedenis gedeeld houdt over uitvoeringen en stromen. Houd het leeg om uw assistent zonder geheugen van vorige berichten te verlaten.",
|
||||
"The model which will generate the image.": "Het model dat de afbeelding zal genereren.",
|
||||
"The resolution to generate the image in.": "De resolutie voor het genereren van de afbeelding.",
|
||||
"Standard is faster, HD has better details.": "De standaard is sneller, HD heeft betere details.",
|
||||
"The image URL or file you want GPT's vision to read.": "De URL van de afbeelding of het bestand dat u GPT's visie wilt lezen.",
|
||||
"What do you want ChatGPT to tell you about the image?": "Wat wil je dat ChatGPT je vertelt over de afbeelding?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Bepaal hoe het model het beeld verwerkt en genereert een tekstbegrip.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "Het maximum aantal te genereren tokens Verzoeken kunnen maximaal 2.048 of 4.096 tokens gedeeld worden tussen prompt en voltooiing, stel de waarde niet in op maximum en laat sommige tokens voor de invoer. De exacte limiet varieert per model. (Eén token is grofweg 4 tekens voor normale Engelse tekst)",
|
||||
"The text you want to hear.": "De tekst die je wilt horen.",
|
||||
"The model which will generate the audio.": "Het model dat de audio genereert.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "De snelheid van audio. Minimum is 0.25 en het maximum is 4.00.",
|
||||
"The voice to generate the audio in.": "De stem voor het genereren van audio in.",
|
||||
"The format you want the audio file in.": "Het formaat waarin u het audiobestand wilt hebben.",
|
||||
"The name of the output audio file (without extension).": "De naam van het audiobestand voor het resultaat (zonder extensie).",
|
||||
"Audio file to transcribe": "Audio-bestand om te overschrijven",
|
||||
"Language of the audio file the default is en (English).": "Taal van het audiobestand waar de standaard in staat (Engels)",
|
||||
"Audio file to translate": "Audio-bestand om te vertalen",
|
||||
"Authorization headers are injected automatically from your connection.": "Autorisatie headers worden automatisch geïnjecteerd vanuit uw verbinding.",
|
||||
"Enable for files like PDFs, images, etc..": "Inschakelen voor bestanden zoals PDF's, afbeeldingen etc..",
|
||||
"tts-1": "tikken-1",
|
||||
"tts-1-hd": "t-1-uur",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "vloeibaar",
|
||||
"onyx": "onbetaald",
|
||||
"nova": "nota",
|
||||
"shimmer": "schimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "vlam",
|
||||
"Spanish": "Spaans",
|
||||
"Italian": "Italiaans",
|
||||
"English": "Nederlands",
|
||||
"Portuguese": "Portugees",
|
||||
"German": "Duits",
|
||||
"Japanese": "Afrikaans",
|
||||
"Polish": "Pools",
|
||||
"Arabic": "Arabisch",
|
||||
"Afrikaans": "Afrikaanse",
|
||||
"Azerbaijani": "Azerbeidzjaans",
|
||||
"Bulgarian": "Bulgaars",
|
||||
"Bosnian": "Bosnisch",
|
||||
"Catalan": "Catalaans",
|
||||
"Czech": "Tsjechisch",
|
||||
"Danish": "Deens",
|
||||
"Greek": "Grieks",
|
||||
"Estonian": "Estlands",
|
||||
"Persian": "Perzisch",
|
||||
"Finnish": "Fins",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "Frans",
|
||||
"Galician": "Galicisch",
|
||||
"Hebrew": "Hebreeuws",
|
||||
"Hindi": "Hindoestani",
|
||||
"Croatian": "Kroatisch",
|
||||
"Hungarian": "Hongaars",
|
||||
"Armenian": "Armeens",
|
||||
"Indonesian": "Indonesisch",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Koreaans",
|
||||
"Lithuanian": "Litouws",
|
||||
"Latvian": "Lets",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macedonisch",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Maleisisch",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Nederlands",
|
||||
"Norwegian": "Noors",
|
||||
"Romanian": "Roemeens",
|
||||
"Russian": "Russisch",
|
||||
"Slovak": "Slowaaks",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Servisch",
|
||||
"Swedish": "Zweeds",
|
||||
"Swahili": "Moeilijk",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Thaise",
|
||||
"Turkish": "Turks",
|
||||
"Ukrainian": "Oekraïens",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinees (vereenvoudigd)",
|
||||
"Welsh": "Welsh",
|
||||
"Belarusian": "Wit-Russisch",
|
||||
"GET": "KRIJG",
|
||||
"POST": "POSTE",
|
||||
"PATCH": "BEKIJK",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "VERWIJDEREN",
|
||||
"HEAD": "HOOFD"
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"Use the many tools ChatGPT has to offer.": "Use as muitas ferramentas que o ChatGPT tem para oferecer.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Siga estas instruções para obter sua chave de API OpenAI:\n\n1. Visite o seguinte site: https://platform.openai. Um/conta/api-keys.\n2. Uma vez no site, localize e clique na opção de obter a sua chave de API OpenAI.\n\nÉ altamente recomendável que você adicione as informações do seu cartão de crédito à sua conta OpenAI e atualize para o plano pago **antes** gerando a chave API. Isso irá ajudá-lo a evitar 429 erros.\n",
|
||||
"Ask ChatGPT": "Perguntar ChatGPT",
|
||||
"Ask Assistant": "Solicitar Assistente",
|
||||
"Generate Image": "Gerar Imagem",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Texto-para-voz",
|
||||
"Transcribe Audio": "Transcrever Áudio",
|
||||
"Translate Audio": "Traduzir Áudio",
|
||||
"Extract Structured Data from Text": "Extrair Dados Estruturados do Texto",
|
||||
"Custom API Call": "Chamada de API personalizada",
|
||||
"Ask ChatGPT anything you want!": "Pergunte ao ChatGPT o que você quiser!",
|
||||
"Ask a GPT assistant anything you want!": "Pergunte a um assistente de GPT o que você quiser!",
|
||||
"Generate an image using text-to-image models": "Gerar uma imagem usando modelos de texto-para-imagem",
|
||||
"Ask GPT a question about an image": "Faça uma pergunta à GPT sobre uma imagem",
|
||||
"Generate an audio recording from text": "Gerar uma gravação de áudio a partir de texto",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcrever áudio em texto usando o modelo whisper-1",
|
||||
"Translate audio to text using whisper-1 model": "Traduza o áudio para o texto usando o modelo whisper-1",
|
||||
"Returns structured data from provided unstructured text.": "Retorna dados estruturados do texto não estruturado fornecido.",
|
||||
"Make a custom API call to a specific endpoint": "Faça uma chamada de API personalizada para um ponto de extremidade específico",
|
||||
"Model": "Modelo",
|
||||
"Question": "Questão",
|
||||
"Temperature": "Temperatura",
|
||||
"Maximum Tokens": "Máximo de Tokens",
|
||||
"Top P": "Superior P",
|
||||
"Frequency penalty": "Penalidade de frequência",
|
||||
"Presence penalty": "Penalidade de presença",
|
||||
"Memory Key": "Chave de memória",
|
||||
"Roles": "Papéis",
|
||||
"Assistant": "Assistente",
|
||||
"Prompt": "Aviso",
|
||||
"Resolution": "Resolução:",
|
||||
"Quality": "Qualidade",
|
||||
"Image": "Imagem:",
|
||||
"Detail": "Detalhe",
|
||||
"Text": "texto",
|
||||
"Speed": "Velocidade",
|
||||
"Voice": "Voz",
|
||||
"Output Format": "Formato de saída",
|
||||
"File Name": "Nome do arquivo",
|
||||
"Audio": "Áudio",
|
||||
"Language of the Audio": "Idioma do áudio",
|
||||
"Unstructured Text": "Texto Desestruturado",
|
||||
"Data Definition": "Definição de dados",
|
||||
"Method": "Método",
|
||||
"Headers": "Cabeçalhos",
|
||||
"Query Parameters": "Parâmetros da consulta",
|
||||
"Body": "Conteúdo",
|
||||
"Response is Binary ?": "A resposta é binária ?",
|
||||
"No Error on Failure": "Nenhum erro no Failure",
|
||||
"Timeout (in seconds)": "Tempo limite (em segundos)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "O modelo que irá gerar a conclusão. Alguns modelos são adequados para tarefas de linguagem natural, outros são especializados no código.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controla aleatoriedade: Diminuir resulta em menos complementos aleatórios. À medida que a temperatura se aproxima de zero, o modelo se tornará determinístico e repetitivo.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "O número máximo de tokens a gerar. Solicitações podem usar até 2.048 ou 4,096 tokens compartilhados entre prompt e conclusão dependendo do modelo. Não defina o valor como máximo e deixe alguns tokens para o input. (Um token é aproximadamente 4 caracteres para o texto normal em inglês)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Uma alternativa à amostragem com temperatura, chamada amostragem núcleo, onde o modelo considera os resultados dos tokens com massa de probabilidade superior (P). Portanto, 0,1 significa que apenas os tokens que incluem a massa de probabilidade superior de 10% são considerados.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Número entre -2.0 e 2.0. Valores positivos penalizam novos tokens baseados em sua frequência existente no texto até agora, diminuindo a probabilidade do modelo repetir o verbal da mesma linha.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Número entre -2.0 e 2.0. Valores positivos penalizam novos tokens baseado no fato de eles aparecerem no texto até agora, aumentando a probabilidade de o modo falar sobre novos tópicos.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "Uma chave de memória que manterá o histórico de bate-papo compartilhado entre execuções e fluxos. Deixe em branco para deixar o ChatGPT sem memória das mensagens anteriores.",
|
||||
"Array of roles to specify more accurate response": "Array de papéis para especificar uma resposta mais precisa",
|
||||
"The assistant which will generate the completion.": "O assistente que irá gerar a conclusão.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "Uma chave de memória que manterá o histórico do bate-papo compartilhado entre execuções e fluxos. Deixe vazio para deixar o seu assistente sem memória das mensagens anteriores.",
|
||||
"The model which will generate the image.": "O modelo que irá gerar a imagem.",
|
||||
"The resolution to generate the image in.": "A resolução para gerar a imagem.",
|
||||
"Standard is faster, HD has better details.": "O padrão é mais rápido, HD tem melhores detalhes.",
|
||||
"The image URL or file you want GPT's vision to read.": "O URL da imagem ou arquivo que você deseja que a visão do GPT seja lida.",
|
||||
"What do you want ChatGPT to tell you about the image?": "O que você quer que o ChatGPT lhe conte sobre a imagem?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Controle como o modelo processa a imagem e gera compreensão textual.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "O número máximo de tokens a gerar. Solicitações podem usar até 2.048 ou 4.096 tokens compartilhados entre prompt e conclusão, não defina o valor como máximo e deixe algumas fichas para a entrada. O limite exato varia por modelo. (Um token é aproximadamente 4 caracteres para o texto normal em inglês)",
|
||||
"The text you want to hear.": "O texto que você quer ouvir.",
|
||||
"The model which will generate the audio.": "O modelo que irá gerar o áudio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "A velocidade do áudio. O mínimo é 0,25 e o máximo é 4,00.",
|
||||
"The voice to generate the audio in.": "A voz para gerar o áudio.",
|
||||
"The format you want the audio file in.": "O formato no qual você deseja o arquivo de áudio.",
|
||||
"The name of the output audio file (without extension).": "O nome do arquivo de áudio de saída (sem extensão).",
|
||||
"Audio file to transcribe": "Arquivo de áudio para transcrever",
|
||||
"Language of the audio file the default is en (English).": "Idioma do arquivo de áudio que o padrão é (em inglês).",
|
||||
"Audio file to translate": "Arquivo de áudio para traduzir",
|
||||
"Authorization headers are injected automatically from your connection.": "Os cabeçalhos de autorização são inseridos automaticamente a partir da sua conexão.",
|
||||
"Enable for files like PDFs, images, etc..": "Habilitar para arquivos como PDFs, imagens, etc..",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "eco",
|
||||
"fable": "fábula",
|
||||
"onyx": "anónimo",
|
||||
"nova": "Novato",
|
||||
"shimmer": "cintilante",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "espanhol",
|
||||
"Italian": "italiano",
|
||||
"English": "Portuguese-Brazil",
|
||||
"Portuguese": "Português",
|
||||
"German": "alemão",
|
||||
"Japanese": "japonês",
|
||||
"Polish": "Polonês",
|
||||
"Arabic": "Arábico",
|
||||
"Afrikaans": "africâner",
|
||||
"Azerbaijani": "azerbaijano",
|
||||
"Bulgarian": "búlgaro",
|
||||
"Bosnian": "bósnio",
|
||||
"Catalan": "catalão",
|
||||
"Czech": "tcheco",
|
||||
"Danish": "Dinamarquês",
|
||||
"Greek": "Grego",
|
||||
"Estonian": "Estônio",
|
||||
"Persian": "persa",
|
||||
"Finnish": "Finlandês",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "francês",
|
||||
"Galician": "galego",
|
||||
"Hebrew": "Hebraico",
|
||||
"Hindi": "hindi",
|
||||
"Croatian": "croata",
|
||||
"Hungarian": "Húngaro",
|
||||
"Armenian": "Armênio",
|
||||
"Indonesian": "indonésio",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "coreano",
|
||||
"Lithuanian": "lituano",
|
||||
"Latvian": "Letã",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "macedônio",
|
||||
"Marathi": "marata",
|
||||
"Malay": "malaio",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Neerlandês",
|
||||
"Norwegian": "norueguês",
|
||||
"Romanian": "romeno",
|
||||
"Russian": "Russo",
|
||||
"Slovak": "Eslovaco",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Sérvio",
|
||||
"Swedish": "sueco",
|
||||
"Swahili": "Suaíli",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Tailandês",
|
||||
"Turkish": "Turco",
|
||||
"Ukrainian": "ucraniano",
|
||||
"Urdu": "urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinês (simplificado)",
|
||||
"Welsh": "galês",
|
||||
"Belarusian": "Bielorrusso",
|
||||
"GET": "OBTER",
|
||||
"POST": "POSTAR",
|
||||
"PATCH": "COMPRAR",
|
||||
"PUT": "COLOCAR",
|
||||
"DELETE": "EXCLUIR",
|
||||
"HEAD": "CABEÇA"
|
||||
}
|
||||
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"OpenAI": "OpenAI",
|
||||
"Use the many tools ChatGPT has to offer.": "Используйте много инструментов ChatGPT может предложить.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Следуйте этим инструкциям, чтобы получить ваш OpenAI API ключ:\n\n1. Посетите следующий веб-сайт: https://platform.openai. om/account/api-keys.\n2. Когда на веб-сайте, найдите и нажмите на опцию, чтобы получить ваш OpenAI API ключ.\n\nНастоятельно рекомендуется добавить данные вашей кредитной карты в аккаунт OpenAI и перейти на платный план **ранее** при генерации ключа API. Это поможет вам предотвратить ошибки 429.\n",
|
||||
"Ask ChatGPT": "Спросить чат-GPT",
|
||||
"Ask Assistant": "Спросить помощника",
|
||||
"Generate Image": "Сгенерировать изображение",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Синтез речи",
|
||||
"Transcribe Audio": "Переписать аудио",
|
||||
"Translate Audio": "Перевести аудио",
|
||||
"Extract Structured Data from Text": "Извлечь структурированные данные из текста",
|
||||
"Custom API Call": "Пользовательский вызов API",
|
||||
"Ask ChatGPT anything you want!": "Спросите ChatGPT все, что хотите!",
|
||||
"Ask a GPT assistant anything you want!": "Спросите помощника GPT что бы вы хотели!",
|
||||
"Generate an image using text-to-image models": "Генерировать изображение с помощью текстовых моделей изображений",
|
||||
"Ask GPT a question about an image": "Задайте GPT вопрос о изображении",
|
||||
"Generate an audio recording from text": "Создать аудиозапись из текста",
|
||||
"Transcribe audio to text using whisper-1 model": "Преобразовать аудио в текст с помощью модели whisper-1",
|
||||
"Translate audio to text using whisper-1 model": "Перевести аудио в текст, используя модель whisper-1",
|
||||
"Returns structured data from provided unstructured text.": "Возвращает структурированные данные из предоставленного неструктурированного текста.",
|
||||
"Make a custom API call to a specific endpoint": "Сделать пользовательский API вызов к определенной конечной точке",
|
||||
"Model": "Модель",
|
||||
"Question": "Вопрос",
|
||||
"Temperature": "Температура",
|
||||
"Maximum Tokens": "Максимум жетонов",
|
||||
"Top P": "Верхний П",
|
||||
"Frequency penalty": "Периодичность штрафа",
|
||||
"Presence penalty": "Штраф присутствия",
|
||||
"Memory Key": "Ключ памяти",
|
||||
"Roles": "Роли",
|
||||
"Assistant": "Помощник",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Разрешение",
|
||||
"Quality": "Качество",
|
||||
"Image": "Изображение",
|
||||
"Detail": "Детали",
|
||||
"Text": "Текст",
|
||||
"Speed": "Скорость",
|
||||
"Voice": "Голос",
|
||||
"Output Format": "Формат вывода",
|
||||
"File Name": "Имя файла",
|
||||
"Audio": "Аудио",
|
||||
"Language of the Audio": "Язык аудио",
|
||||
"Unstructured Text": "Неструктурированный текст",
|
||||
"Data Definition": "Определение данных",
|
||||
"Method": "Метод",
|
||||
"Headers": "Заголовки",
|
||||
"Query Parameters": "Параметры запроса",
|
||||
"Body": "Тело",
|
||||
"No Error on Failure": "Нет ошибок при ошибке",
|
||||
"Timeout (in seconds)": "Таймаут (в секундах)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "Модель, которая будет генерировать дополнение, некоторые модели подходят для естественных языковых задач, другие специализируются на программировании.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Контролирует случайность: понижение результатов в менее случайном завершении. По мере нулевого температурного приближения модель становится детерминированной и повторяющей.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "Максимальное количество жетонов для генерации. Запросы могут использовать до 2,048 или 4096 жетонов, которыми обменивались между подсказками и дополнениями, в зависимости от модели. Не устанавливайте максимальное значение и оставляйте некоторые токены для ввода. (Один токен примерно 4 символа для обычного английского текста)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "Альтернатива отоплению с температурой, называемой ядерным отбором, где модель рассматривает результаты жетонов с вероятностью top_p. Таким образом, 0.1 означает, что учитываются только жетоны, состоящие из массы 10% наивысшего уровня.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Номер между -2.0 и 2.0. Положительные значения наказывают новые токены на основе их существующей частоты в тексте до сих пор, уменьшая вероятность повторения одной и той же строки.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Номер между -2.0 и 2.0. Положительные значения наказывают новые токены на основании того, появляются ли они в тексте до сих пор, что повышает вероятность того, что режим будет обсуждать новые темы.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "Ключ памяти, который хранит историю чата в разных запусках и потоках. Оставьте пустым, чтобы оставить ChatGPT без памяти предыдущих сообщений.",
|
||||
"Array of roles to specify more accurate response": "Массив ролей для более точного ответа",
|
||||
"The assistant which will generate the completion.": "Ассистент, который будет способствовать завершению.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "Ключ памяти, который будет держать историю чата общими между запусками и потоками. Оставьте пустым чтобы оставить своего помощника без памяти о предыдущих сообщениях.",
|
||||
"The model which will generate the image.": "Модель, которая создаст изображение.",
|
||||
"The resolution to generate the image in.": "Разрешение для создания изображения.",
|
||||
"Standard is faster, HD has better details.": "Стандарт быстрее, HD имеет лучшие детали.",
|
||||
"The image URL or file you want GPT's vision to read.": "URL изображения или файл, который вы хотите, чтобы видение GPT читалось.",
|
||||
"What do you want ChatGPT to tell you about the image?": "Что вы хотите, чтобы ChatGPT рассказал вам об изображении?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Контролируйте как модель обрабатывает изображение и генерирует текстовое понимание.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "Максимальное количество фишек для генерации Запросы могут использовать до 2 048 или 4 096 токенов, которыми обменивались между оперативными и завершенными действиями, не устанавливайте максимальное значение и оставляйте некоторые токены для ввода. In the twentieth century, Russian is widely taught in the schools of the members of the old Warsaw Pact and in other countries of the former Soviet Union.",
|
||||
"The text you want to hear.": "Текст, который вы хотите услышать.",
|
||||
"The model which will generate the audio.": "Модель, которая будет генерировать аудио.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "Минимальная скорость звука - 0,25 и максимальная - 4,00.",
|
||||
"The voice to generate the audio in.": "Голос, чтобы сгенерировать звук.",
|
||||
"The format you want the audio file in.": "Формат аудио файла.",
|
||||
"The name of the output audio file (without extension).": "Имя выходного аудиофайла (без расширения).",
|
||||
"Audio file to transcribe": "Аудио файл для трансляции",
|
||||
"Language of the audio file the default is en (English).": "Язык аудиофайла по умолчанию - ru (английский).",
|
||||
"Audio file to translate": "Аудио файл для перевода",
|
||||
"Authorization headers are injected automatically from your connection.": "Заголовки авторизации включаются автоматически из вашего соединения.",
|
||||
"tts-1": "тс-1",
|
||||
"tts-1-hd": "Ттс-1-ч",
|
||||
"alloy": "alloy",
|
||||
"echo": "эхо",
|
||||
"fable": "фибель",
|
||||
"onyx": "оникс",
|
||||
"nova": "нова",
|
||||
"shimmer": "шиммер",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "флак",
|
||||
"Spanish": "Испанский",
|
||||
"Italian": "Итальянский",
|
||||
"English": "Russian",
|
||||
"Portuguese": "Португальский",
|
||||
"German": "Немецкий",
|
||||
"Japanese": "Японский",
|
||||
"Polish": "Польский",
|
||||
"Arabic": "Арабский",
|
||||
"Afrikaans": "Африкаанс",
|
||||
"Azerbaijani": "Азербайджан",
|
||||
"Bulgarian": "Болгарский",
|
||||
"Bosnian": "Боснийский",
|
||||
"Catalan": "Каталанский",
|
||||
"Czech": "Чешский",
|
||||
"Danish": "Датский",
|
||||
"Greek": "Греческий",
|
||||
"Estonian": "Эстонский",
|
||||
"Persian": "Персидский",
|
||||
"Finnish": "Финский",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "Французский",
|
||||
"Galician": "Галисийский",
|
||||
"Hebrew": "Иврит",
|
||||
"Hindi": "Хинди",
|
||||
"Croatian": "Хорватский",
|
||||
"Hungarian": "Венгерский",
|
||||
"Armenian": "Армянский",
|
||||
"Indonesian": "Индонезийский",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Корейский",
|
||||
"Lithuanian": "Литовский",
|
||||
"Latvian": "Латышский",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Македонский",
|
||||
"Marathi": "Маратти",
|
||||
"Malay": "Малай",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Голландский",
|
||||
"Norwegian": "Норвежский",
|
||||
"Romanian": "Румынский",
|
||||
"Russian": "Русский",
|
||||
"Slovak": "Словацкий",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Сербский",
|
||||
"Swedish": "Шведский",
|
||||
"Swahili": "Суахили",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Тайский",
|
||||
"Turkish": "Турецкий",
|
||||
"Ukrainian": "Украинский",
|
||||
"Urdu": "Урду",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Китайский (упрощенный)",
|
||||
"Welsh": "Уэльш",
|
||||
"Belarusian": "Белорусский",
|
||||
"GET": "ПОЛУЧИТЬ",
|
||||
"POST": "ПОСТ",
|
||||
"PATCH": "ПАТЧ",
|
||||
"PUT": "ПОКУПИТЬ",
|
||||
"DELETE": "УДАЛИТЬ",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"Use the many tools ChatGPT has to offer.": "Use the many tools ChatGPT has to offer.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "Ask ChatGPT",
|
||||
"Ask Assistant": "Ask Assistant",
|
||||
"Generate Image": "Generate Image",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Text-to-Speech",
|
||||
"Transcribe Audio": "Transcribe Audio",
|
||||
"Translate Audio": "Translate Audio",
|
||||
"Extract Structured Data from Text": "Extract Structured Data from Text",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask ChatGPT anything you want!": "Ask ChatGPT anything you want!",
|
||||
"Ask a GPT assistant anything you want!": "Ask a GPT assistant anything you want!",
|
||||
"Generate an image using text-to-image models": "Generate an image using text-to-image models",
|
||||
"Ask GPT a question about an image": "Ask GPT a question about an image",
|
||||
"Generate an audio recording from text": "Generate an audio recording from text",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcribe audio to text using whisper-1 model",
|
||||
"Translate audio to text using whisper-1 model": "Translate audio to text using whisper-1 model",
|
||||
"Returns structured data from provided unstructured text.": "Returns structured data from provided unstructured text.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Maximum Tokens": "Maximum Tokens",
|
||||
"Top P": "Top P",
|
||||
"Frequency penalty": "Frequency penalty",
|
||||
"Presence penalty": "Presence penalty",
|
||||
"Memory Key": "Memory Key",
|
||||
"Roles": "Roles",
|
||||
"Assistant": "Assistant",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Resolution",
|
||||
"Quality": "Quality",
|
||||
"Image": "Image",
|
||||
"Detail": "Detail",
|
||||
"Text": "Text",
|
||||
"Speed": "Speed",
|
||||
"Voice": "Voice",
|
||||
"Output Format": "Output Format",
|
||||
"File Name": "File Name",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Language of the Audio",
|
||||
"Unstructured Text": "Unstructured Text",
|
||||
"Data Definition": "Data Definition",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"Response is Binary ?": "Response is Binary ?",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.",
|
||||
"Array of roles to specify more accurate response": "Array of roles to specify more accurate response",
|
||||
"The assistant which will generate the completion.": "The assistant which will generate the completion.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.",
|
||||
"The model which will generate the image.": "The model which will generate the image.",
|
||||
"The resolution to generate the image in.": "The resolution to generate the image in.",
|
||||
"Standard is faster, HD has better details.": "Standard is faster, HD has better details.",
|
||||
"The image URL or file you want GPT's vision to read.": "The image URL or file you want GPT's vision to read.",
|
||||
"What do you want ChatGPT to tell you about the image?": "What do you want ChatGPT to tell you about the image?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Control how the model processes the image and generates textual understanding.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)",
|
||||
"The text you want to hear.": "The text you want to hear.",
|
||||
"The model which will generate the audio.": "The model which will generate the audio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "The speed of the audio. Minimum is 0.25 and maximum is 4.00.",
|
||||
"The voice to generate the audio in.": "The voice to generate the audio in.",
|
||||
"The format you want the audio file in.": "The format you want the audio file in.",
|
||||
"The name of the output audio file (without extension).": "The name of the output audio file (without extension).",
|
||||
"Audio file to transcribe": "Audio file to transcribe",
|
||||
"Language of the audio file the default is en (English).": "Language of the audio file the default is en (English).",
|
||||
"Audio file to translate": "Audio file to translate",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fable",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "shimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "Spanish",
|
||||
"Italian": "Italian",
|
||||
"English": "English",
|
||||
"Portuguese": "Portuguese",
|
||||
"German": "German",
|
||||
"Japanese": "Japanese",
|
||||
"Polish": "Polish",
|
||||
"Arabic": "Arabic",
|
||||
"Afrikaans": "Afrikaans",
|
||||
"Azerbaijani": "Azerbaijani",
|
||||
"Bulgarian": "Bulgarian",
|
||||
"Bosnian": "Bosnian",
|
||||
"Catalan": "Catalan",
|
||||
"Czech": "Czech",
|
||||
"Danish": "Danish",
|
||||
"Greek": "Greek",
|
||||
"Estonian": "Estonian",
|
||||
"Persian": "Persian",
|
||||
"Finnish": "Finnish",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "French",
|
||||
"Galician": "Galician",
|
||||
"Hebrew": "Hebrew",
|
||||
"Hindi": "Hindi",
|
||||
"Croatian": "Croatian",
|
||||
"Hungarian": "Hungarian",
|
||||
"Armenian": "Armenian",
|
||||
"Indonesian": "Indonesian",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Korean",
|
||||
"Lithuanian": "Lithuanian",
|
||||
"Latvian": "Latvian",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macedonian",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Malay",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Dutch",
|
||||
"Norwegian": "Norwegian",
|
||||
"Romanian": "Romanian",
|
||||
"Russian": "Russian",
|
||||
"Slovak": "Slovak",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Serbian",
|
||||
"Swedish": "Swedish",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Thai",
|
||||
"Turkish": "Turkish",
|
||||
"Ukrainian": "Ukrainian",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinese (Simplified)",
|
||||
"Welsh": "Welsh",
|
||||
"Belarusian": "Belarusian",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"OpenAI": "OpenAI",
|
||||
"Use the many tools ChatGPT has to offer.": "Use the many tools ChatGPT has to offer.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "Ask ChatGPT",
|
||||
"Ask Assistant": "Ask Assistant",
|
||||
"Generate Image": "Generate Image",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Text-to-Speech",
|
||||
"Transcribe Audio": "Transcribe Audio",
|
||||
"Translate Audio": "Translate Audio",
|
||||
"Extract Structured Data from Text": "Extract Structured Data from Text",
|
||||
"Custom API Call": "Custom API Call",
|
||||
"Ask ChatGPT anything you want!": "Ask ChatGPT anything you want!",
|
||||
"Ask a GPT assistant anything you want!": "Ask a GPT assistant anything you want!",
|
||||
"Generate an image using text-to-image models": "Generate an image using text-to-image models",
|
||||
"Ask GPT a question about an image": "Ask GPT a question about an image",
|
||||
"Generate an audio recording from text": "Generate an audio recording from text",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcribe audio to text using whisper-1 model",
|
||||
"Translate audio to text using whisper-1 model": "Translate audio to text using whisper-1 model",
|
||||
"Returns structured data from provided unstructured text.": "Returns structured data from provided unstructured text.",
|
||||
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Maximum Tokens": "Maximum Tokens",
|
||||
"Top P": "Top P",
|
||||
"Frequency penalty": "Frequency penalty",
|
||||
"Presence penalty": "Presence penalty",
|
||||
"Memory Key": "Memory Key",
|
||||
"Roles": "Roles",
|
||||
"Assistant": "Assistant",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Resolution",
|
||||
"Quality": "Quality",
|
||||
"Image": "Image",
|
||||
"Detail": "Detail",
|
||||
"Text": "Text",
|
||||
"Speed": "Speed",
|
||||
"Voice": "Voice",
|
||||
"Output Format": "Output Format",
|
||||
"File Name": "File Name",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Language of the Audio",
|
||||
"Unstructured Text": "Unstructured Text",
|
||||
"Data Definition": "Data Definition",
|
||||
"Method": "Method",
|
||||
"Headers": "Headers",
|
||||
"Query Parameters": "Query Parameters",
|
||||
"Body": "Body",
|
||||
"No Error on Failure": "No Error on Failure",
|
||||
"Timeout (in seconds)": "Timeout (in seconds)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.",
|
||||
"Array of roles to specify more accurate response": "Array of roles to specify more accurate response",
|
||||
"The assistant which will generate the completion.": "The assistant which will generate the completion.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.",
|
||||
"The model which will generate the image.": "The model which will generate the image.",
|
||||
"The resolution to generate the image in.": "The resolution to generate the image in.",
|
||||
"Standard is faster, HD has better details.": "Standard is faster, HD has better details.",
|
||||
"The image URL or file you want GPT's vision to read.": "The image URL or file you want GPT's vision to read.",
|
||||
"What do you want ChatGPT to tell you about the image?": "What do you want ChatGPT to tell you about the image?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Control how the model processes the image and generates textual understanding.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)",
|
||||
"The text you want to hear.": "The text you want to hear.",
|
||||
"The model which will generate the audio.": "The model which will generate the audio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "The speed of the audio. Minimum is 0.25 and maximum is 4.00.",
|
||||
"The voice to generate the audio in.": "The voice to generate the audio in.",
|
||||
"The format you want the audio file in.": "The format you want the audio file in.",
|
||||
"The name of the output audio file (without extension).": "The name of the output audio file (without extension).",
|
||||
"Audio file to transcribe": "Audio file to transcribe",
|
||||
"Language of the audio file the default is en (English).": "Language of the audio file the default is en (English).",
|
||||
"Audio file to translate": "Audio file to translate",
|
||||
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fable",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "shimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "Spanish",
|
||||
"Italian": "Italian",
|
||||
"English": "English",
|
||||
"Portuguese": "Portuguese",
|
||||
"German": "German",
|
||||
"Japanese": "Japanese",
|
||||
"Polish": "Polish",
|
||||
"Arabic": "Arabic",
|
||||
"Afrikaans": "Afrikaans",
|
||||
"Azerbaijani": "Azerbaijani",
|
||||
"Bulgarian": "Bulgarian",
|
||||
"Bosnian": "Bosnian",
|
||||
"Catalan": "Catalan",
|
||||
"Czech": "Czech",
|
||||
"Danish": "Danish",
|
||||
"Greek": "Greek",
|
||||
"Estonian": "Estonian",
|
||||
"Persian": "Persian",
|
||||
"Finnish": "Finnish",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "French",
|
||||
"Galician": "Galician",
|
||||
"Hebrew": "Hebrew",
|
||||
"Hindi": "Hindi",
|
||||
"Croatian": "Croatian",
|
||||
"Hungarian": "Hungarian",
|
||||
"Armenian": "Armenian",
|
||||
"Indonesian": "Indonesian",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Korean",
|
||||
"Lithuanian": "Lithuanian",
|
||||
"Latvian": "Latvian",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macedonian",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Malay",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Dutch",
|
||||
"Norwegian": "Norwegian",
|
||||
"Romanian": "Romanian",
|
||||
"Russian": "Russian",
|
||||
"Slovak": "Slovak",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Serbian",
|
||||
"Swedish": "Swedish",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Thai",
|
||||
"Turkish": "Turkish",
|
||||
"Ukrainian": "Ukrainian",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinese (Simplified)",
|
||||
"Welsh": "Welsh",
|
||||
"Belarusian": "Belarusian",
|
||||
"GET": "GET",
|
||||
"POST": "POST",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "PUT",
|
||||
"DELETE": "DELETE",
|
||||
"HEAD": "HEAD"
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"Use the many tools ChatGPT has to offer.": "Use the many tools ChatGPT has to offer.",
|
||||
"Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n": "Follow these instructions to get your OpenAI API Key:\n\n1. Visit the following website: https://platform.openai.com/account/api-keys.\n2. Once on the website, locate and click on the option to obtain your OpenAI API Key.\n\nIt is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.\n",
|
||||
"Ask ChatGPT": "Ask ChatGPT",
|
||||
"Ask Assistant": "Ask Assistant",
|
||||
"Generate Image": "Generate Image",
|
||||
"Vision Prompt": "Vision Prompt",
|
||||
"Text-to-Speech": "Text-to-Speech",
|
||||
"Transcribe Audio": "Transcribe Audio",
|
||||
"Translate Audio": "Translate Audio",
|
||||
"Extract Structured Data from Text": "Extract Structured Data from Text",
|
||||
"Custom API Call": "自定义 API 呼叫",
|
||||
"Ask ChatGPT anything you want!": "Ask ChatGPT anything you want!",
|
||||
"Ask a GPT assistant anything you want!": "Ask a GPT assistant anything you want!",
|
||||
"Generate an image using text-to-image models": "Generate an image using text-to-image models",
|
||||
"Ask GPT a question about an image": "Ask GPT a question about an image",
|
||||
"Generate an audio recording from text": "Generate an audio recording from text",
|
||||
"Transcribe audio to text using whisper-1 model": "Transcribe audio to text using whisper-1 model",
|
||||
"Translate audio to text using whisper-1 model": "Translate audio to text using whisper-1 model",
|
||||
"Returns structured data from provided unstructured text.": "Returns structured data from provided unstructured text.",
|
||||
"Make a custom API call to a specific endpoint": "将一个自定义 API 调用到一个特定的终点",
|
||||
"Model": "Model",
|
||||
"Question": "Question",
|
||||
"Temperature": "Temperature",
|
||||
"Maximum Tokens": "Maximum Tokens",
|
||||
"Top P": "Top P",
|
||||
"Frequency penalty": "Frequency penalty",
|
||||
"Presence penalty": "Presence penalty",
|
||||
"Memory Key": "内存键",
|
||||
"Roles": "角色",
|
||||
"Assistant": "Assistant",
|
||||
"Prompt": "Prompt",
|
||||
"Resolution": "Resolution",
|
||||
"Quality": "Quality",
|
||||
"Image": "Image",
|
||||
"Detail": "Detail",
|
||||
"Text": "文本",
|
||||
"Speed": "Speed",
|
||||
"Voice": "Voice",
|
||||
"Output Format": "Output Format",
|
||||
"File Name": "File Name",
|
||||
"Audio": "Audio",
|
||||
"Language of the Audio": "Language of the Audio",
|
||||
"Unstructured Text": "Unstructured Text",
|
||||
"Data Definition": "Data Definition",
|
||||
"Method": "方法",
|
||||
"Headers": "信头",
|
||||
"Query Parameters": "查询参数",
|
||||
"Body": "正文内容",
|
||||
"Response is Binary ?": "Response is Binary ?",
|
||||
"No Error on Failure": "失败时没有错误",
|
||||
"Timeout (in seconds)": "超时(秒)",
|
||||
"The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.": "The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.",
|
||||
"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)",
|
||||
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.",
|
||||
"Array of roles to specify more accurate response": "Array of roles to specify more accurate response",
|
||||
"The assistant which will generate the completion.": "The assistant which will generate the completion.",
|
||||
"A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.": "A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.",
|
||||
"The model which will generate the image.": "The model which will generate the image.",
|
||||
"The resolution to generate the image in.": "The resolution to generate the image in.",
|
||||
"Standard is faster, HD has better details.": "Standard is faster, HD has better details.",
|
||||
"The image URL or file you want GPT's vision to read.": "The image URL or file you want GPT's vision to read.",
|
||||
"What do you want ChatGPT to tell you about the image?": "What do you want ChatGPT to tell you about the image?",
|
||||
"Control how the model processes the image and generates textual understanding.": "Control how the model processes the image and generates textual understanding.",
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)": "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)",
|
||||
"The text you want to hear.": "The text you want to hear.",
|
||||
"The model which will generate the audio.": "The model which will generate the audio.",
|
||||
"The speed of the audio. Minimum is 0.25 and maximum is 4.00.": "The speed of the audio. Minimum is 0.25 and maximum is 4.00.",
|
||||
"The voice to generate the audio in.": "The voice to generate the audio in.",
|
||||
"The format you want the audio file in.": "The format you want the audio file in.",
|
||||
"The name of the output audio file (without extension).": "The name of the output audio file (without extension).",
|
||||
"Audio file to transcribe": "Audio file to transcribe",
|
||||
"Language of the audio file the default is en (English).": "Language of the audio file the default is en (English).",
|
||||
"Audio file to translate": "Audio file to translate",
|
||||
"Authorization headers are injected automatically from your connection.": "授权头自动从您的连接中注入。",
|
||||
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
|
||||
"tts-1": "tts-1",
|
||||
"tts-1-hd": "tts-1-hd",
|
||||
"alloy": "alloy",
|
||||
"echo": "echo",
|
||||
"fable": "fable",
|
||||
"onyx": "onyx",
|
||||
"nova": "nova",
|
||||
"shimmer": "shimmer",
|
||||
"mp3": "mp3",
|
||||
"opus": "opus",
|
||||
"aac": "aac",
|
||||
"flac": "flac",
|
||||
"Spanish": "Spanish",
|
||||
"Italian": "Italian",
|
||||
"English": "English",
|
||||
"Portuguese": "Portuguese",
|
||||
"German": "German",
|
||||
"Japanese": "Japanese",
|
||||
"Polish": "Polish",
|
||||
"Arabic": "Arabic",
|
||||
"Afrikaans": "Afrikaans",
|
||||
"Azerbaijani": "Azerbaijani",
|
||||
"Bulgarian": "Bulgarian",
|
||||
"Bosnian": "Bosnian",
|
||||
"Catalan": "Catalan",
|
||||
"Czech": "Czech",
|
||||
"Danish": "Danish",
|
||||
"Greek": "Greek",
|
||||
"Estonian": "Estonian",
|
||||
"Persian": "Persian",
|
||||
"Finnish": "Finnish",
|
||||
"Tagalog": "Tagalog",
|
||||
"French": "French",
|
||||
"Galician": "Galician",
|
||||
"Hebrew": "Hebrew",
|
||||
"Hindi": "Hindi",
|
||||
"Croatian": "Croatian",
|
||||
"Hungarian": "Hungarian",
|
||||
"Armenian": "Armenian",
|
||||
"Indonesian": "Indonesian",
|
||||
"Icelandic": "Icelandic",
|
||||
"Kazakh": "Kazakh",
|
||||
"Kannada": "Kannada",
|
||||
"Korean": "Korean",
|
||||
"Lithuanian": "Lithuanian",
|
||||
"Latvian": "Latvian",
|
||||
"Maori": "Maori",
|
||||
"Macedonian": "Macedonian",
|
||||
"Marathi": "Marathi",
|
||||
"Malay": "Malay",
|
||||
"Nepali": "Nepali",
|
||||
"Dutch": "Dutch",
|
||||
"Norwegian": "Norwegian",
|
||||
"Romanian": "Romanian",
|
||||
"Russian": "Russian",
|
||||
"Slovak": "Slovak",
|
||||
"Slovenian": "Slovenian",
|
||||
"Serbian": "Serbian",
|
||||
"Swedish": "Swedish",
|
||||
"Swahili": "Swahili",
|
||||
"Tamil": "Tamil",
|
||||
"Thai": "Thai",
|
||||
"Turkish": "Turkish",
|
||||
"Ukrainian": "Ukrainian",
|
||||
"Urdu": "Urdu",
|
||||
"Vietnamese": "Vietnamese",
|
||||
"Chinese (Simplified)": "Chinese (Simplified)",
|
||||
"Welsh": "Welsh",
|
||||
"Belarusian": "Belarusian",
|
||||
"GET": "获取",
|
||||
"POST": "帖子",
|
||||
"PATCH": "PATCH",
|
||||
"PUT": "弹出",
|
||||
"DELETE": "删除",
|
||||
"HEAD": "黑色"
|
||||
}
|
||||
@@ -0,0 +1,92 @@
|
||||
import {
|
||||
AuthenticationType,
|
||||
HttpMethod,
|
||||
createCustomApiCallAction,
|
||||
httpClient,
|
||||
} from '@activepieces/pieces-common';
|
||||
import { PieceAuth, createPiece } from '@activepieces/pieces-framework';
|
||||
import { PieceCategory, } from '@activepieces/shared';
|
||||
import { askAssistant } from './lib/actions/ask-assistant';
|
||||
import { generateImage } from './lib/actions/generate-image';
|
||||
import { askOpenAI } from './lib/actions/send-prompt';
|
||||
import { textToSpeech } from './lib/actions/text-to-speech';
|
||||
import { transcribeAction } from './lib/actions/transcriptions';
|
||||
import { translateAction } from './lib/actions/translation';
|
||||
import { visionPrompt } from './lib/actions/vision-prompt';
|
||||
import { baseUrl } from './lib/common/common';
|
||||
import { extractStructuredDataAction } from './lib/actions/extract-structure-data.action';
|
||||
|
||||
export const openaiAuth = PieceAuth.SecretText({
|
||||
description: `Follow these instructions to get your OpenAI API Key:
|
||||
|
||||
1. Visit the following website: https://platform.openai.com/account/api-keys.
|
||||
2. Once on the website, locate and click on the option to obtain your OpenAI API Key.
|
||||
|
||||
It is strongly recommended that you add your credit card information to your OpenAI account and upgrade to the paid plan **before** generating the API Key. This will help you prevent 429 errors.
|
||||
`,
|
||||
displayName: 'API Key',
|
||||
required: true,
|
||||
validate: async (auth) => {
|
||||
try {
|
||||
await httpClient.sendRequest<{
|
||||
data: { id: string }[];
|
||||
}>({
|
||||
url: `${baseUrl}/models`,
|
||||
method: HttpMethod.GET,
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.auth,
|
||||
},
|
||||
});
|
||||
return {
|
||||
valid: true,
|
||||
};
|
||||
} catch (e) {
|
||||
return {
|
||||
valid: false,
|
||||
error: 'Invalid API key',
|
||||
};
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
// Piece definition: registers the OpenAI integration with Activepieces,
// wiring the shared credential to all actions exported by this module.
export const openai = createPiece({
  displayName: 'OpenAI',
  description: 'Use the many tools ChatGPT has to offer.',
  minimumSupportedRelease: '0.63.0',
  logoUrl: 'https://cdn.activepieces.com/pieces/openai.png',
  categories: [PieceCategory.ARTIFICIAL_INTELLIGENCE],
  auth: openaiAuth,
  actions: [
    askOpenAI,
    askAssistant,
    generateImage,
    visionPrompt,
    textToSpeech,
    transcribeAction,
    translateAction,
    extractStructuredDataAction,
    // Escape hatch: lets users call any OpenAI endpoint directly while
    // reusing this piece's bearer-token credential.
    createCustomApiCallAction({
      auth: openaiAuth,
      baseUrl: () => baseUrl,
      authMapping: async (auth) => {
        return {
          Authorization: `Bearer ${auth}`,
        };
      },
    }),
  ],
  authors: [
    'aboudzein',
    'astorozhevsky',
    'Willianwg',
    'Nilesh',
    'Salem-Alaa',
    'kishanprmr',
    'MoShizzle',
    'khaledmashaly',
    'abuaboud',
    'amrdb',
  ],
  // This piece currently exposes no triggers, only actions.
  triggers: [],
});
|
||||
@@ -0,0 +1,123 @@
|
||||
import {
|
||||
createAction,
|
||||
Property,
|
||||
StoreScope,
|
||||
} from '@activepieces/pieces-framework';
|
||||
import OpenAI from 'openai';
|
||||
import { openaiAuth } from '../..';
|
||||
import { sleep } from '../common/common';
|
||||
import { z } from 'zod';
|
||||
import { propsValidation } from '@activepieces/pieces-common';
|
||||
|
||||
export const askAssistant = createAction({
|
||||
auth: openaiAuth,
|
||||
name: 'ask_assistant',
|
||||
displayName: 'Ask Assistant',
|
||||
description: 'Ask a GPT assistant anything you want!',
|
||||
props: {
|
||||
assistant: Property.Dropdown({
|
||||
auth: openaiAuth,
|
||||
displayName: 'Assistant',
|
||||
required: true,
|
||||
description: 'The assistant which will generate the completion.',
|
||||
refreshers: [],
|
||||
options: async ({ auth }) => {
|
||||
if (!auth) {
|
||||
return {
|
||||
disabled: true,
|
||||
placeholder: 'Enter your API key first',
|
||||
options: [],
|
||||
};
|
||||
}
|
||||
try {
|
||||
const openai = new OpenAI({
|
||||
apiKey: auth.secret_text,
|
||||
});
|
||||
const assistants = await openai.beta.assistants.list();
|
||||
|
||||
return {
|
||||
disabled: false,
|
||||
options: assistants.data.map((assistant: any) => {
|
||||
return {
|
||||
label: assistant.name,
|
||||
value: assistant.id,
|
||||
};
|
||||
}),
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
disabled: true,
|
||||
options: [],
|
||||
placeholder: "Couldn't load assistants, API key is invalid",
|
||||
};
|
||||
}
|
||||
},
|
||||
}),
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Question',
|
||||
required: true,
|
||||
}),
|
||||
memoryKey: Property.ShortText({
|
||||
displayName: 'Memory Key',
|
||||
description:
|
||||
'A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.',
|
||||
required: false,
|
||||
}),
|
||||
},
|
||||
async run({ auth, propsValue, store }) {
|
||||
await propsValidation.validateZod(propsValue, {
|
||||
memoryKey: z.string().max(128).optional(),
|
||||
});
|
||||
|
||||
const openai = new OpenAI({
|
||||
apiKey: auth.secret_text,
|
||||
});
|
||||
const { assistant, prompt, memoryKey } = propsValue;
|
||||
const runCheckDelay = 1000;
|
||||
let response: any;
|
||||
let thread: any;
|
||||
|
||||
if (memoryKey) {
|
||||
// Get existing thread ID or create a new thread for this memory key
|
||||
thread = await store.get(memoryKey, StoreScope.PROJECT);
|
||||
if (!thread) {
|
||||
thread = await openai.beta.threads.create();
|
||||
|
||||
store.put(memoryKey, thread, StoreScope.PROJECT);
|
||||
}
|
||||
} else {
|
||||
thread = await openai.beta.threads.create();
|
||||
}
|
||||
|
||||
const message = await openai.beta.threads.messages.create(thread.id, {
|
||||
role: 'user',
|
||||
content: prompt,
|
||||
});
|
||||
|
||||
const run = await openai.beta.threads.runs.create(thread.id, {
|
||||
assistant_id: assistant,
|
||||
});
|
||||
// Wait at least 400ms for inference to finish before checking to save requests
|
||||
await sleep(400);
|
||||
|
||||
while (!response) {
|
||||
const runCheck = await openai.beta.threads.runs.retrieve(
|
||||
thread.id,
|
||||
run.id
|
||||
);
|
||||
if (runCheck.status == 'completed') {
|
||||
const messages = await openai.beta.threads.messages.list(thread.id);
|
||||
// Return only messages that are newer than the user's latest message
|
||||
response = messages.data.splice(
|
||||
0,
|
||||
messages.data.findIndex((m) => m.id == message.id)
|
||||
);
|
||||
break;
|
||||
}
|
||||
|
||||
await sleep(runCheckDelay);
|
||||
}
|
||||
|
||||
return response;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,147 @@
|
||||
import { openaiAuth } from '../../';
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import OpenAI from 'openai';
|
||||
import { notLLMs } from '../common/common';
|
||||
|
||||
export const extractStructuredDataAction = createAction({
|
||||
auth: openaiAuth,
|
||||
name: 'extract-structured-data',
|
||||
displayName: 'Extract Structured Data from Text',
|
||||
description: 'Returns structured data from provided unstructured text.',
|
||||
props: {
|
||||
model: Property.Dropdown({
|
||||
auth: openaiAuth,
|
||||
displayName: 'Model',
|
||||
required: true,
|
||||
refreshers: [],
|
||||
defaultValue: 'gpt-3.5-turbo',
|
||||
options: async ({ auth }) => {
|
||||
if (!auth) {
|
||||
return {
|
||||
disabled: true,
|
||||
placeholder: 'Enter your API key first',
|
||||
options: [],
|
||||
};
|
||||
}
|
||||
try {
|
||||
const openai = new OpenAI({
|
||||
apiKey: auth.secret_text,
|
||||
});
|
||||
const response = await openai.models.list();
|
||||
// We need to get only LLM models
|
||||
const models = response.data.filter((model) => !notLLMs.includes(model.id));
|
||||
return {
|
||||
disabled: false,
|
||||
options: models.map((model) => {
|
||||
return {
|
||||
label: model.id,
|
||||
value: model.id,
|
||||
};
|
||||
}),
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
disabled: true,
|
||||
options: [],
|
||||
placeholder: "Couldn't load models, API key is invalid",
|
||||
};
|
||||
}
|
||||
},
|
||||
}),
|
||||
text: Property.LongText({
|
||||
displayName: 'Unstructured Text',
|
||||
required: true,
|
||||
}),
|
||||
params: Property.Array({
|
||||
displayName: 'Data Definition',
|
||||
required: true,
|
||||
properties: {
|
||||
propName: Property.ShortText({
|
||||
displayName: 'Name',
|
||||
description:
|
||||
'Provide the name of the value you want to extract from the unstructured text. The name should be unique and short. ',
|
||||
required: true,
|
||||
}),
|
||||
propDescription: Property.LongText({
|
||||
displayName: 'Description',
|
||||
description:
|
||||
'Brief description of the data, this hints for the AI on what to look for',
|
||||
required: false,
|
||||
}),
|
||||
propDataType: Property.StaticDropdown({
|
||||
displayName: 'Data Type',
|
||||
description: 'Type of parameter.',
|
||||
required: true,
|
||||
defaultValue: 'string',
|
||||
options: {
|
||||
disabled: false,
|
||||
options: [
|
||||
{ label: 'Text', value: 'string' },
|
||||
{ label: 'Number', value: 'number' },
|
||||
{ label: 'Boolean', value: 'boolean' },
|
||||
],
|
||||
},
|
||||
}),
|
||||
propIsRequired: Property.Checkbox({
|
||||
displayName: 'Fail if Not present?',
|
||||
required: true,
|
||||
defaultValue: false,
|
||||
}),
|
||||
},
|
||||
}),
|
||||
},
|
||||
async run(context) {
|
||||
const { model, text } = context.propsValue;
|
||||
const paramInputArray = context.propsValue.params as ParamInput[];
|
||||
const functionParams: Record<string, unknown> = {};
|
||||
const requiredFunctionParams: string[] = [];
|
||||
for (const param of paramInputArray) {
|
||||
functionParams[param.propName] = {
|
||||
type: param.propDataType,
|
||||
description: param.propDescription ?? param.propName,
|
||||
};
|
||||
if (param.propIsRequired) {
|
||||
requiredFunctionParams.push(param.propName);
|
||||
}
|
||||
}
|
||||
const prompt = 'Extract the following data from the provided text'
|
||||
const openai = new OpenAI({
|
||||
apiKey: context.auth.secret_text,
|
||||
});
|
||||
|
||||
const response = await openai.chat.completions.create({
|
||||
model: model,
|
||||
messages: [{ role: 'user', content: text }],
|
||||
tools: [
|
||||
{
|
||||
type: 'function',
|
||||
function: {
|
||||
name: 'extract_structured_data',
|
||||
description: prompt,
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: functionParams,
|
||||
required: requiredFunctionParams,
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
const toolCallsResponse = response.choices[0].message.tool_calls;
|
||||
if (toolCallsResponse) {
|
||||
return JSON.parse(toolCallsResponse[0].function.arguments);
|
||||
} else {
|
||||
throw new Error(JSON.stringify({
|
||||
message: "OpenAI couldn't extract the fields from the above text."
|
||||
}));
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
interface ParamInput {
|
||||
propName: string;
|
||||
propDescription: string;
|
||||
propDataType: string;
|
||||
propIsRequired: boolean;
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import OpenAI from 'openai';
|
||||
import { openaiAuth } from '../..';
|
||||
|
||||
export const generateImage = createAction({
|
||||
auth: openaiAuth,
|
||||
name: 'generate_image',
|
||||
displayName: 'Generate Image',
|
||||
description: 'Generate an image using text-to-image models',
|
||||
props: {
|
||||
model: Property.Dropdown({
|
||||
auth: openaiAuth,
|
||||
displayName: 'Model',
|
||||
required: true,
|
||||
description: 'The model which will generate the image.',
|
||||
defaultValue: 'dall-e-3',
|
||||
refreshers: [],
|
||||
options: async () => {
|
||||
return {
|
||||
options: [
|
||||
{
|
||||
label: 'dall-e-3',
|
||||
value: 'dall-e-3',
|
||||
},
|
||||
{
|
||||
label: 'dall-e-2',
|
||||
value: 'dall-e-2',
|
||||
},
|
||||
],
|
||||
};
|
||||
},
|
||||
}),
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Prompt',
|
||||
required: true,
|
||||
}),
|
||||
resolution: Property.Dropdown({
|
||||
auth: openaiAuth,
|
||||
displayName: 'Resolution',
|
||||
description: 'The resolution to generate the image in.',
|
||||
required: false,
|
||||
refreshers: ['model'],
|
||||
defaultValue: '1024x1024',
|
||||
options: async ({ model }) => {
|
||||
let options = [
|
||||
{
|
||||
label: '1024x1024',
|
||||
value: '1024x1024',
|
||||
},
|
||||
{
|
||||
label: '512x512',
|
||||
value: '512x512',
|
||||
},
|
||||
{
|
||||
label: '256x256',
|
||||
value: '256x256',
|
||||
},
|
||||
];
|
||||
if (model == 'dall-e-3')
|
||||
options = [
|
||||
{
|
||||
label: '1024x1024',
|
||||
value: '1024x1024',
|
||||
},
|
||||
{
|
||||
label: '1024x1792',
|
||||
value: '1024x1792',
|
||||
},
|
||||
{
|
||||
label: '1792x1024',
|
||||
value: '1792x1024',
|
||||
},
|
||||
];
|
||||
|
||||
return {
|
||||
options: options,
|
||||
};
|
||||
},
|
||||
}),
|
||||
quality: Property.Dropdown({
|
||||
auth: openaiAuth,
|
||||
displayName: 'Quality',
|
||||
required: false,
|
||||
description: 'Standard is faster, HD has better details.',
|
||||
defaultValue: 'standard',
|
||||
refreshers: [],
|
||||
options: async () => {
|
||||
return {
|
||||
options: [
|
||||
{
|
||||
label: 'standard',
|
||||
value: 'standard',
|
||||
},
|
||||
{
|
||||
label: 'hd',
|
||||
value: 'hd',
|
||||
},
|
||||
],
|
||||
};
|
||||
},
|
||||
}),
|
||||
},
|
||||
async run({ auth, propsValue }) {
|
||||
const openai = new OpenAI({
|
||||
apiKey: auth.secret_text,
|
||||
});
|
||||
|
||||
const { quality, resolution, model, prompt } = propsValue;
|
||||
|
||||
const image = await openai.images.generate({
|
||||
model: model,
|
||||
prompt: prompt,
|
||||
quality: quality as any,
|
||||
size: resolution as any,
|
||||
});
|
||||
|
||||
return image;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,198 @@
|
||||
import {
|
||||
createAction,
|
||||
Property,
|
||||
StoreScope,
|
||||
} from '@activepieces/pieces-framework';
|
||||
import OpenAI from 'openai';
|
||||
import { openaiAuth } from '../..';
|
||||
import {
|
||||
calculateMessagesTokenSize,
|
||||
exceedsHistoryLimit,
|
||||
notLLMs,
|
||||
reduceContextSize,
|
||||
} from '../common/common';
|
||||
import { z } from 'zod';
|
||||
import { propsValidation } from '@activepieces/pieces-common';
|
||||
|
||||
/**
 * Action: send a single prompt to an OpenAI chat model, optionally keeping
 * conversation history in the project store under a user-chosen memory key.
 */
export const askOpenAI = createAction({
  auth: openaiAuth,
  name: 'ask_chatgpt',
  displayName: 'Ask ChatGPT',
  description: 'Ask ChatGPT anything you want!',
  props: {
    model: Property.Dropdown({
      auth: openaiAuth,
      displayName: 'Model',
      required: true,
      description:
        'The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.',
      refreshers: [],
      defaultValue: 'gpt-3.5-turbo',
      options: async ({ auth }) => {
        if (!auth) {
          return {
            disabled: true,
            placeholder: 'Enter your API key first',
            options: [],
          };
        }
        try {
          const openai = new OpenAI({
            apiKey: auth.secret_text,
          });
          const response = await openai.models.list();
          // We need to get only LLM models
          // (notLLMs is a denylist of non-chat model ids from common.ts).
          const models = response.data.filter(
            (model) => !notLLMs.includes(model.id)
          );
          return {
            disabled: false,
            options: models.map((model) => {
              return {
                label: model.id,
                value: model.id,
              };
            }),
          };
        } catch (error) {
          // Listing models failed, most likely due to a bad key.
          return {
            disabled: true,
            options: [],
            placeholder: "Couldn't load models, API key is invalid",
          };
        }
      },
    }),
    prompt: Property.LongText({
      displayName: 'Question',
      required: true,
    }),
    temperature: Property.Number({
      displayName: 'Temperature',
      required: false,
      description:
        'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
      // NOTE(review): zod below restricts this to [0, 1] even though the
      // OpenAI API accepts up to 2 — confirm the narrower range is intended.
      defaultValue: 1,
    }),
    maxTokens: Property.Number({
      displayName: 'Maximum Tokens',
      required: true,
      description:
        "The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)",
      defaultValue: 2048,
    }),
    topP: Property.Number({
      displayName: 'Top P',
      required: false,
      description:
        'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
      defaultValue: 1,
    }),
    frequencyPenalty: Property.Number({
      displayName: 'Frequency penalty',
      required: false,
      description:
        "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      defaultValue: 0,
    }),
    presencePenalty: Property.Number({
      displayName: 'Presence penalty',
      required: false,
      description:
        "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
    }),
    memoryKey: Property.ShortText({
      displayName: 'Memory Key',
      description:
        'A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.',
      required: false,
    }),
    roles: Property.Json({
      displayName: 'Roles',
      required: false,
      description: 'Array of roles to specify more accurate response',
      defaultValue: [
        { role: 'system', content: 'You are a helpful assistant.' },
      ],
    }),
  },
  async run({ auth, propsValue, store }) {
    // Validate the user-supplied numeric/string props before any API call.
    await propsValidation.validateZod(propsValue, {
      temperature: z.number().min(0).max(1).optional(),
      memoryKey: z.string().max(128).optional(),
    });
    const openai = new OpenAI({
      apiKey: auth.secret_text,
    });
    const {
      model,
      temperature,
      maxTokens,
      topP,
      frequencyPenalty,
      presencePenalty,
      prompt,
      memoryKey,
    } = propsValue;

    let messageHistory: any[] | null = [];
    // If memory key is set, retrieve messages stored in history
    if (memoryKey) {
      messageHistory = (await store.get(memoryKey, StoreScope.PROJECT)) ?? [];
    }

    // Add user prompt to message history
    messageHistory.push({
      role: 'user',
      content: prompt,
    });

    // Add system instructions if set by user
    const rolesArray = propsValue.roles ? (propsValue.roles as any) : [];
    const roles = rolesArray.map((item: any) => {
      const rolesEnum = ['system', 'user', 'assistant'];
      if (!rolesEnum.includes(item.role)) {
        throw new Error(
          'The only available roles are: [system, user, assistant]'
        );
      }

      // Keep only the fields the chat API expects from each role entry.
      return {
        role: item.role,
        content: item.content,
      };
    });

    // Send prompt
    // Role/system messages are prepended but deliberately NOT persisted
    // in the stored history below.
    const completion = await openai.chat.completions.create({
      model: model,
      messages: [...roles, ...messageHistory],
      temperature: temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty ?? undefined,
      max_completion_tokens: maxTokens,
    });

    // Add response to message history
    messageHistory = [...messageHistory, completion.choices[0].message];

    // Check message history token size
    // System limit is 32K tokens, we can probably make it bigger but this is a safe spot
    const tokenLength = await calculateMessagesTokenSize(messageHistory, model);
    if (memoryKey) {
      // If tokens exceed 90% system limit or 90% of model limit - maxTokens, reduce history token size
      if (exceedsHistoryLimit(tokenLength, model, maxTokens)) {
        messageHistory = await reduceContextSize(
          messageHistory,
          model,
          maxTokens
        );
      }
      // Store history
      await store.put(memoryKey, messageHistory, StoreScope.PROJECT);
    }

    // Only the assistant's text content is returned to the flow.
    return completion.choices[0].message.content;
  },
});
|
||||
@@ -0,0 +1,106 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
import OpenAI from 'openai';
import { openaiAuth } from '../..';
import { streamToBuffer } from '../common/common';

// Narrowed literal types for the OpenAI text-to-speech request parameters.
type Voice = 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
// NOTE(review): 'wav' and 'pcm' are declared here but are not offered in the
// `format` dropdown below — confirm whether they should be selectable.
type ResponseFormat = 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';
type Model = 'tts-1' | 'tts-1-hd';

// Action: generate spoken audio from input text via OpenAI's TTS endpoint
// and persist the result through the Activepieces file service.
export const textToSpeech = createAction({
  auth: openaiAuth,
  name: 'text_to_speech',
  displayName: 'Text-to-Speech',
  description: 'Generate an audio recording from text',
  props: {
    text: Property.LongText({
      displayName: 'Text',
      description: 'The text you want to hear.',
      required: true,
    }),
    model: Property.StaticDropdown({
      displayName: 'Model',
      required: true,
      description: 'The model which will generate the audio.',
      defaultValue: 'tts-1',
      options: {
        disabled: false,
        options: [
          {
            label: 'tts-1',
            value: 'tts-1',
          },
          {
            label: 'tts-1-hd',
            value: 'tts-1-hd',
          },
        ],
      },
    }),
    // NOTE(review): the stated 0.25–4.00 range is not validated here;
    // out-of-range values are rejected by the OpenAI API instead.
    speed: Property.Number({
      displayName: 'Speed',
      description: 'The speed of the audio. Minimum is 0.25 and maximum is 4.00.',
      defaultValue: 1.0,
      required: false,
    }),
    voice: Property.StaticDropdown({
      displayName: 'Voice',
      description: 'The voice to generate the audio in.',
      required: true,
      defaultValue: 'alloy',
      options: {
        disabled: false,
        options: [
          { label: 'alloy', value: 'alloy' },
          { label: 'echo', value: 'echo' },
          { label: 'fable', value: 'fable' },
          { label: 'onyx', value: 'onyx' },
          { label: 'nova', value: 'nova' },
          { label: 'shimmer', value: 'shimmer' },
        ],
      },
    }),
    format: Property.StaticDropdown({
      displayName: 'Output Format',
      required: true,
      description: 'The format you want the audio file in.',
      defaultValue: 'mp3',
      options: {
        disabled: false,
        options: [
          { label: 'mp3', value: 'mp3' },
          { label: 'opus', value: 'opus' },
          { label: 'aac', value: 'aac' },
          { label: 'flac', value: 'flac' },
        ],
      },
    }),
    fileName: Property.ShortText({
      displayName: 'File Name',
      description: 'The name of the output audio file (without extension).',
      required: false,
      defaultValue: 'audio',
    }),
  },
  async run({ auth, propsValue, files }) {
    const openai = new OpenAI({
      apiKey: auth.secret_text,
    });

    const { voice, format, model, text, speed, fileName } = propsValue;

    // Request the audio; the SDK returns a streamed response body.
    const audio = await openai.audio.speech.create({
      model: model as Model,
      input: text,
      response_format: format as ResponseFormat,
      voice: voice as Voice,
      speed: speed,
    });
    // Collect the stream into a single Buffer before writing it out.
    const result = await streamToBuffer(audio.body);

    // `fileName || 'audio'` also covers an explicitly empty string.
    return files.write({
      fileName: `${fileName || 'audio'}.${format}`,
      data: result as Buffer,
    });
  },
});
|
||||
@@ -0,0 +1,70 @@
|
||||
import {
|
||||
HttpRequest,
|
||||
HttpMethod,
|
||||
httpClient,
|
||||
} from '@activepieces/pieces-common';
|
||||
import { Property, createAction } from '@activepieces/pieces-framework';
|
||||
import { openaiAuth } from '../..';
|
||||
import FormData from 'form-data';
|
||||
import mime from 'mime-types';
|
||||
import { Languages, baseUrl } from '../common/common';
|
||||
|
||||
export const transcribeAction = createAction({
|
||||
name: 'transcribe',
|
||||
displayName: 'Transcribe Audio',
|
||||
description: 'Transcribe audio to text using whisper-1 model',
|
||||
auth: openaiAuth,
|
||||
props: {
|
||||
audio: Property.File({
|
||||
displayName: 'Audio',
|
||||
required: true,
|
||||
description: 'Audio file to transcribe',
|
||||
}),
|
||||
language: Property.StaticDropdown({
|
||||
displayName: 'Language of the Audio',
|
||||
description: 'Language of the audio file the default is en (English).',
|
||||
required: false,
|
||||
options: {
|
||||
options: Languages,
|
||||
},
|
||||
defaultValue: 'en',
|
||||
}),
|
||||
},
|
||||
run: async (context) => {
|
||||
const fileData = context.propsValue.audio;
|
||||
const mimeType = mime.lookup(fileData.extension ? fileData.extension : '');
|
||||
let language = context.propsValue.language;
|
||||
// if language is not in languages list, default to english
|
||||
if (!Languages.some((l) => l.value === language)) {
|
||||
language = 'en';
|
||||
}
|
||||
|
||||
const form = new FormData();
|
||||
form.append('file', fileData.data, {
|
||||
filename: fileData.filename,
|
||||
contentType: mimeType as string,
|
||||
});
|
||||
form.append('model', 'whisper-1');
|
||||
form.append('language', language);
|
||||
|
||||
const headers = {
|
||||
Authorization: `Bearer ${context.auth.secret_text}`,
|
||||
};
|
||||
|
||||
const request: HttpRequest = {
|
||||
method: HttpMethod.POST,
|
||||
url: `${baseUrl}/audio/transcriptions`,
|
||||
body: form,
|
||||
headers: {
|
||||
...form.getHeaders(),
|
||||
...headers,
|
||||
},
|
||||
};
|
||||
try {
|
||||
const response = await httpClient.sendRequest(request);
|
||||
return response.body;
|
||||
} catch (e) {
|
||||
throw new Error(`Error while execution:\n${e}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,54 @@
|
||||
import {
|
||||
HttpRequest,
|
||||
HttpMethod,
|
||||
httpClient,
|
||||
} from '@activepieces/pieces-common';
|
||||
import { Property, createAction } from '@activepieces/pieces-framework';
|
||||
import { openaiAuth } from '../..';
|
||||
import FormData from 'form-data';
|
||||
import mime from 'mime-types';
|
||||
import { baseUrl } from '../common/common';
|
||||
|
||||
export const translateAction = createAction({
|
||||
name: 'translate',
|
||||
displayName: 'Translate Audio',
|
||||
description: 'Translate audio to text using whisper-1 model',
|
||||
auth: openaiAuth,
|
||||
props: {
|
||||
audio: Property.File({
|
||||
displayName: 'Audio',
|
||||
required: true,
|
||||
description: 'Audio file to translate',
|
||||
}),
|
||||
},
|
||||
run: async (context) => {
|
||||
const fileData = context.propsValue.audio;
|
||||
const mimeType = mime.lookup(fileData.extension ? fileData.extension : '');
|
||||
const form = new FormData();
|
||||
form.append('file', fileData.data, {
|
||||
filename: fileData.filename,
|
||||
contentType: mimeType as string,
|
||||
});
|
||||
form.append('model', 'whisper-1');
|
||||
|
||||
const headers = {
|
||||
Authorization: `Bearer ${context.auth.secret_text}`,
|
||||
};
|
||||
|
||||
const request: HttpRequest = {
|
||||
method: HttpMethod.POST,
|
||||
url: `${baseUrl}/audio/translations`,
|
||||
body: form,
|
||||
headers: {
|
||||
...form.getHeaders(),
|
||||
...headers,
|
||||
},
|
||||
};
|
||||
try {
|
||||
const response = await httpClient.sendRequest(request);
|
||||
return response.body;
|
||||
} catch (e) {
|
||||
throw new Error(`Error while execution:\n${e}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,152 @@
|
||||
import {
|
||||
createAction,
|
||||
Property,
|
||||
} from '@activepieces/pieces-framework';
|
||||
import OpenAI from 'openai';
|
||||
import { openaiAuth } from '../..';
|
||||
import { z } from 'zod';
|
||||
import { propsValidation } from '@activepieces/pieces-common';
|
||||
|
||||
export const visionPrompt = createAction({
|
||||
auth: openaiAuth,
|
||||
name: 'vision_prompt',
|
||||
displayName: 'Vision Prompt',
|
||||
description: 'Ask GPT a question about an image',
|
||||
props: {
|
||||
image: Property.File({
|
||||
displayName: 'Image',
|
||||
description: "The image URL or file you want GPT's vision to read.",
|
||||
required: true,
|
||||
}),
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Question',
|
||||
description: 'What do you want ChatGPT to tell you about the image?',
|
||||
required: true,
|
||||
}),
|
||||
detail: Property.Dropdown({
|
||||
auth: openaiAuth,
|
||||
displayName: 'Detail',
|
||||
required: false,
|
||||
description:
|
||||
'Control how the model processes the image and generates textual understanding.',
|
||||
defaultValue: 'auto',
|
||||
refreshers: [],
|
||||
options: async () => {
|
||||
return {
|
||||
options: [
|
||||
{
|
||||
label: 'low',
|
||||
value: 'low',
|
||||
},
|
||||
{
|
||||
label: 'high',
|
||||
value: 'high',
|
||||
},
|
||||
{
|
||||
label: 'auto',
|
||||
value: 'auto',
|
||||
},
|
||||
],
|
||||
};
|
||||
},
|
||||
}),
|
||||
temperature: Property.Number({
|
||||
displayName: 'Temperature',
|
||||
required: false,
|
||||
description:
|
||||
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
|
||||
defaultValue: 0.9,
|
||||
}),
|
||||
maxTokens: Property.Number({
|
||||
displayName: 'Maximum Tokens',
|
||||
required: false,
|
||||
description:
|
||||
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)",
|
||||
defaultValue: 2048,
|
||||
}),
|
||||
topP: Property.Number({
|
||||
displayName: 'Top P',
|
||||
required: false,
|
||||
description:
|
||||
'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
|
||||
defaultValue: 1,
|
||||
}),
|
||||
frequencyPenalty: Property.Number({
|
||||
displayName: 'Frequency penalty',
|
||||
required: false,
|
||||
description:
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
defaultValue: 0,
|
||||
}),
|
||||
presencePenalty: Property.Number({
|
||||
displayName: 'Presence penalty',
|
||||
required: false,
|
||||
description:
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
|
||||
defaultValue: 0.6,
|
||||
}),
|
||||
roles: Property.Json({
|
||||
displayName: 'Roles',
|
||||
required: false,
|
||||
description: 'Array of roles to specify more accurate response',
|
||||
defaultValue: [
|
||||
{ role: 'system', content: 'You are a helpful assistant.' },
|
||||
],
|
||||
}),
|
||||
},
|
||||
async run({ auth, propsValue }) {
|
||||
await propsValidation.validateZod(propsValue, {
|
||||
temperature: z.number().min(0).max(1),
|
||||
});
|
||||
|
||||
const openai = new OpenAI({
|
||||
apiKey: auth.secret_text,
|
||||
});
|
||||
const { temperature, maxTokens, topP, frequencyPenalty, presencePenalty } =
|
||||
propsValue;
|
||||
|
||||
const rolesArray = propsValue.roles ? (propsValue.roles as any) : [];
|
||||
const roles = rolesArray.map((item: any) => {
|
||||
const rolesEnum = ['system', 'user', 'assistant'];
|
||||
if (!rolesEnum.includes(item.role)) {
|
||||
throw new Error(
|
||||
'The only available roles are: [system, user, assistant]'
|
||||
);
|
||||
}
|
||||
|
||||
return {
|
||||
role: item.role,
|
||||
content: item.content,
|
||||
};
|
||||
});
|
||||
|
||||
const completion = await openai.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [
|
||||
...roles,
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: propsValue['prompt'],
|
||||
},
|
||||
{
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: `data:image/${propsValue.image.extension};base64,${propsValue.image.base64}`,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
temperature: temperature,
|
||||
max_tokens: maxTokens,
|
||||
top_p: topP,
|
||||
frequency_penalty: frequencyPenalty,
|
||||
presence_penalty: presencePenalty,
|
||||
});
|
||||
|
||||
return completion.choices[0].message.content;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,223 @@
|
||||
import { encoding_for_model } from 'tiktoken';
|
||||
|
||||
// Root URL for all direct OpenAI REST calls made by this piece.
export const baseUrl = 'https://api.openai.com/v1';

// ISO-639-1 language options offered in the Whisper transcription dropdown.
export const Languages = [
  { value: 'es', label: 'Spanish' },
  { value: 'it', label: 'Italian' },
  { value: 'en', label: 'English' },
  { value: 'pt', label: 'Portuguese' },
  { value: 'de', label: 'German' },
  { value: 'ja', label: 'Japanese' },
  { value: 'pl', label: 'Polish' },
  { value: 'ar', label: 'Arabic' },
  { value: 'af', label: 'Afrikaans' },
  { value: 'az', label: 'Azerbaijani' },
  { value: 'bg', label: 'Bulgarian' },
  { value: 'bs', label: 'Bosnian' },
  { value: 'ca', label: 'Catalan' },
  { value: 'cs', label: 'Czech' },
  { value: 'da', label: 'Danish' },
  { value: 'el', label: 'Greek' },
  { value: 'et', label: 'Estonian' },
  { value: 'fa', label: 'Persian' },
  { value: 'fi', label: 'Finnish' },
  { value: 'tl', label: 'Tagalog' },
  { value: 'fr', label: 'French' },
  { value: 'gl', label: 'Galician' },
  { value: 'he', label: 'Hebrew' },
  { value: 'hi', label: 'Hindi' },
  { value: 'hr', label: 'Croatian' },
  { value: 'hu', label: 'Hungarian' },
  { value: 'hy', label: 'Armenian' },
  { value: 'id', label: 'Indonesian' },
  { value: 'is', label: 'Icelandic' },
  { value: 'kk', label: 'Kazakh' },
  { value: 'kn', label: 'Kannada' },
  { value: 'ko', label: 'Korean' },
  { value: 'lt', label: 'Lithuanian' },
  { value: 'lv', label: 'Latvian' },
  { value: 'ma', label: 'Maori' },
  { value: 'mk', label: 'Macedonian' },
  { value: 'mr', label: 'Marathi' },
  { value: 'ms', label: 'Malay' },
  { value: 'ne', label: 'Nepali' },
  { value: 'nl', label: 'Dutch' },
  { value: 'no', label: 'Norwegian' },
  { value: 'ro', label: 'Romanian' },
  { value: 'ru', label: 'Russian' },
  { value: 'sk', label: 'Slovak' },
  { value: 'sl', label: 'Slovenian' },
  { value: 'sr', label: 'Serbian' },
  { value: 'sv', label: 'Swedish' },
  { value: 'sw', label: 'Swahili' },
  { value: 'ta', label: 'Tamil' },
  { value: 'th', label: 'Thai' },
  { value: 'tr', label: 'Turkish' },
  { value: 'uk', label: 'Ukrainian' },
  { value: 'ur', label: 'Urdu' },
  { value: 'vi', label: 'Vietnamese' },
  { value: 'zh', label: 'Chinese (Simplified)' },
  { value: 'cy', label: 'Welsh' },
  { value: 'be', label: 'Belarusian' },
];

// User-facing guidance surfaced when OpenAI answers with HTTP 429.
export const billingIssueMessage = `Error Occurred: 429 \n
1. Ensure that billing is enabled on your OpenAI platform. \n
2. Generate a new API key. \n
3. Attempt the process again. \n
For guidance, visit: https://beta.openai.com/account/billing`;

// User-facing guidance surfaced when OpenAI answers with HTTP 401.
export const unauthorizedMessage = `Error Occurred: 401 \n
Ensure that your API key is valid. \n`;
|
||||
|
||||
export const sleep = (ms: number) => {
|
||||
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||
};
|
||||
|
||||
export const streamToBuffer = (stream: any) => {
|
||||
const chunks: any[] = [];
|
||||
return new Promise((resolve, reject) => {
|
||||
stream.on('data', (chunk: any) => chunks.push(Buffer.from(chunk)));
|
||||
stream.on('error', (err: any) => reject(err));
|
||||
stream.on('end', () => resolve(Buffer.concat(chunks)));
|
||||
});
|
||||
};
|
||||
|
||||
export const calculateTokensFromString = (string: string, model: string) => {
|
||||
try {
|
||||
const encoder = encoding_for_model(model as any);
|
||||
const tokens = encoder.encode(string);
|
||||
encoder.free();
|
||||
|
||||
return tokens.length;
|
||||
} catch (e) {
|
||||
// Model not supported by tiktoken, every 4 chars is a token
|
||||
return Math.round(string.length / 4);
|
||||
}
|
||||
};
|
||||
|
||||
export const calculateMessagesTokenSize = async (
|
||||
messages: any[],
|
||||
model: string
|
||||
) => {
|
||||
let tokenLength = 0;
|
||||
await Promise.all(
|
||||
messages.map((message: any) => {
|
||||
return new Promise((resolve) => {
|
||||
tokenLength += calculateTokensFromString(message.content, model);
|
||||
resolve(tokenLength);
|
||||
});
|
||||
})
|
||||
);
|
||||
|
||||
return tokenLength;
|
||||
};
|
||||
|
||||
export const reduceContextSize = async (
|
||||
messages: any[],
|
||||
model: string,
|
||||
maxTokens: number
|
||||
) => {
|
||||
// TODO: Summarize context instead of cutoff
|
||||
const cutoffSize = Math.round(messages.length * 0.1);
|
||||
const cutoffMessages = messages.splice(cutoffSize, messages.length - 1);
|
||||
|
||||
if (
|
||||
(await calculateMessagesTokenSize(cutoffMessages, model)) >
|
||||
maxTokens / 1.5
|
||||
) {
|
||||
reduceContextSize(cutoffMessages, model, maxTokens);
|
||||
}
|
||||
|
||||
return cutoffMessages;
|
||||
};
|
||||
|
||||
export const exceedsHistoryLimit = (
|
||||
tokenLength: number,
|
||||
model: string,
|
||||
maxTokens: number
|
||||
) => {
|
||||
if (
|
||||
tokenLength >= tokenLimit / 1.1 ||
|
||||
tokenLength >= (modelTokenLimit(model) - maxTokens) / 1.1
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
|
||||
export const tokenLimit = 32000;
|
||||
|
||||
export const modelTokenLimit = (model: string) => {
|
||||
switch (model) {
|
||||
case 'gpt-4-1106-preview':
|
||||
return 128000;
|
||||
case 'gpt-4-vision-preview':
|
||||
return 128000;
|
||||
case 'gpt-4':
|
||||
return 8192;
|
||||
case 'gpt-4-32k':
|
||||
return 32768;
|
||||
case 'gpt-4-0613':
|
||||
return 8192;
|
||||
case 'gpt-4-32k-0613':
|
||||
return 32768;
|
||||
case 'gpt-4-0314':
|
||||
return 8192;
|
||||
case 'gpt-4-32k-0314':
|
||||
return 32768;
|
||||
case 'gpt-3.5-turbo-1106':
|
||||
return 16385;
|
||||
case 'gpt-3.5-turbo':
|
||||
return 4096;
|
||||
case 'gpt-3.5-turbo-16k':
|
||||
return 16385;
|
||||
case 'gpt-3.5-turbo-instruct':
|
||||
return 4096;
|
||||
case 'gpt-3.5-turbo-0613':
|
||||
return 4096;
|
||||
case 'gpt-3.5-turbo-16k-0613':
|
||||
return 16385;
|
||||
case 'gpt-3.5-turbo-0301':
|
||||
return 4096;
|
||||
case 'text-davinci-003':
|
||||
return 4096;
|
||||
case 'text-davinci-002':
|
||||
return 4096;
|
||||
case 'code-davinci-002':
|
||||
return 8001;
|
||||
case 'text-moderation-latest':
|
||||
return 32768;
|
||||
case 'text-moderation-stable':
|
||||
return 32768;
|
||||
case 'gpt-5':
|
||||
return 400000;
|
||||
case 'gpt-5-chat-latest':
|
||||
return 400000;
|
||||
case 'gpt-5-mini':
|
||||
return 400000;
|
||||
case 'gpt-5-nano':
|
||||
return 400000;
|
||||
default:
|
||||
return 2048;
|
||||
}
|
||||
};
|
||||
|
||||
// List of non-text models to filter out in Ask GPT action
// NOTE(review): maintained by hand — add new non-chat model ids here as
// OpenAI releases them, or they will appear in the model dropdown.
export const notLLMs = [
  'gpt-4o-realtime-preview-2024-10-01',
  'gpt-4o-realtime-preview',
  'babbage-002',
  'davinci-002',
  'tts-1-hd-1106',
  'whisper-1',
  'canary-whisper',
  'canary-tts',
  'tts-1',
  'tts-1-hd',
  'tts-1-1106',
  'dall-e-3',
  'dall-e-2',
];
|
||||
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"extends": "../../../../tsconfig.base.json",
|
||||
"files": [],
|
||||
"include": [],
|
||||
"references": [
|
||||
{
|
||||
"path": "./tsconfig.lib.json"
|
||||
}
|
||||
],
|
||||
"compilerOptions": {
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"strict": true,
|
||||
"noImplicitReturns": true,
|
||||
"noFallthroughCasesInSwitch": true
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"extends": "./tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"module": "commonjs",
|
||||
"outDir": "../../../../dist/out-tsc",
|
||||
"declaration": true,
|
||||
"types": ["node"]
|
||||
},
|
||||
"exclude": ["jest.config.ts", "src/**/*.spec.ts", "src/**/*.test.ts"],
|
||||
"include": ["src/**/*.ts"]
|
||||
}
|
||||
Reference in New Issue
Block a user