Add Activepieces integration for workflow automation

- Add Activepieces fork with SmoothSchedule custom piece
- Create integrations app with Activepieces service layer
- Add embed token endpoint for iframe integration
- Create Automations page with embedded workflow builder
- Add sidebar visibility fix for embed mode
- Add list inactive customers endpoint to Public API
- Include SmoothSchedule triggers: event created/updated/cancelled
- Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
poduck committed 2025-12-18 22:59:37 -05:00
parent 9848268d34
commit 3aa7199503
16292 changed files with 1284892 additions and 4708 deletions
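
The SmoothSchedule piece itself is not shown in the excerpts below (they cover the bundled OpenAI piece), but a trigger from the list above could be wired up roughly like this with the pieces-framework API — a minimal sketch, assuming a hypothetical smoothscheduleAuth and a webhook-based Public API:

import {
createTrigger,
TriggerStrategy,
PieceAuth,
} from '@activepieces/pieces-framework';
// Hypothetical auth for illustration; the real piece's auth is not shown in this diff.
const smoothscheduleAuth = PieceAuth.SecretText({
displayName: 'API Key',
required: true,
});
export const eventCreated = createTrigger({
auth: smoothscheduleAuth,
name: 'event_created',
displayName: 'Event Created',
description: 'Fires when a new event is created in SmoothSchedule.',
type: TriggerStrategy.WEBHOOK,
props: {},
sampleData: {},
async onEnable(context) {
// Register context.webhookUrl with the (assumed) SmoothSchedule Public API,
// e.g. POST /webhooks { url: context.webhookUrl, event: 'event.created' }
},
async onDisable(context) {
// Deregister the webhook created in onEnable
},
async run(context) {
// Each webhook delivery becomes one trigger payload
return [context.payload.body];
},
});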

View File

@@ -0,0 +1,123 @@
import {
createAction,
Property,
StoreScope,
} from '@activepieces/pieces-framework';
import OpenAI from 'openai';
import { openaiAuth } from '../..';
import { sleep } from '../common/common';
import { z } from 'zod';
import { propsValidation } from '@activepieces/pieces-common';
export const askAssistant = createAction({
auth: openaiAuth,
name: 'ask_assistant',
displayName: 'Ask Assistant',
description: 'Ask a GPT assistant anything you want!',
props: {
assistant: Property.Dropdown({
auth: openaiAuth,
displayName: 'Assistant',
required: true,
description: 'The assistant which will generate the completion.',
refreshers: [],
options: async ({ auth }) => {
if (!auth) {
return {
disabled: true,
placeholder: 'Enter your API key first',
options: [],
};
}
try {
const openai = new OpenAI({
apiKey: auth.secret_text,
});
const assistants = await openai.beta.assistants.list();
return {
disabled: false,
options: assistants.data.map((assistant: any) => {
return {
label: assistant.name,
value: assistant.id,
};
}),
};
} catch (error) {
return {
disabled: true,
options: [],
placeholder: "Couldn't load assistants, API key is invalid",
};
}
},
}),
prompt: Property.LongText({
displayName: 'Question',
required: true,
}),
memoryKey: Property.ShortText({
displayName: 'Memory Key',
description:
'A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave your assistant without memory of previous messages.',
required: false,
}),
},
async run({ auth, propsValue, store }) {
await propsValidation.validateZod(propsValue, {
memoryKey: z.string().max(128).optional(),
});
const openai = new OpenAI({
apiKey: auth.secret_text,
});
const { assistant, prompt, memoryKey } = propsValue;
const runCheckDelay = 1000;
let response: any;
let thread: any;
if (memoryKey) {
// Get existing thread ID or create a new thread for this memory key
thread = await store.get(memoryKey, StoreScope.PROJECT);
if (!thread) {
thread = await openai.beta.threads.create();
await store.put(memoryKey, thread, StoreScope.PROJECT);
}
} else {
thread = await openai.beta.threads.create();
}
const message = await openai.beta.threads.messages.create(thread.id, {
role: 'user',
content: prompt,
});
const run = await openai.beta.threads.runs.create(thread.id, {
assistant_id: assistant,
});
// Wait at least 400ms for inference to finish before the first status check, to save requests
await sleep(400);
while (!response) {
const runCheck = await openai.beta.threads.runs.retrieve(
thread.id,
run.id
);
if (runCheck.status == 'completed') {
const messages = await openai.beta.threads.messages.list(thread.id);
// Return only messages that are newer than the user's latest message
response = messages.data.splice(
0,
messages.data.findIndex((m) => m.id == message.id)
);
break;
}
// Bail out instead of polling forever if the run ends in a terminal failure state
if (['failed', 'cancelled', 'expired'].includes(runCheck.status)) {
throw new Error(`Assistant run ended with status: ${runCheck.status}`);
}
await sleep(runCheckDelay);
}
return response;
},
});

View File

@@ -0,0 +1,147 @@
import { openaiAuth } from '../../';
import { createAction, Property } from '@activepieces/pieces-framework';
import OpenAI from 'openai';
import { notLLMs } from '../common/common';
export const extractStructuredDataAction = createAction({
auth: openaiAuth,
name: 'extract-structured-data',
displayName: 'Extract Structured Data from Text',
description: 'Returns structured data from provided unstructured text.',
props: {
model: Property.Dropdown({
auth: openaiAuth,
displayName: 'Model',
required: true,
refreshers: [],
defaultValue: 'gpt-3.5-turbo',
options: async ({ auth }) => {
if (!auth) {
return {
disabled: true,
placeholder: 'Enter your API key first',
options: [],
};
}
try {
const openai = new OpenAI({
apiKey: auth.secret_text,
});
const response = await openai.models.list();
// We need to get only LLM models
const models = response.data.filter((model) => !notLLMs.includes(model.id));
return {
disabled: false,
options: models.map((model) => {
return {
label: model.id,
value: model.id,
};
}),
};
} catch (error) {
return {
disabled: true,
options: [],
placeholder: "Couldn't load models, API key is invalid",
};
}
},
}),
text: Property.LongText({
displayName: 'Unstructured Text',
required: true,
}),
params: Property.Array({
displayName: 'Data Definition',
required: true,
properties: {
propName: Property.ShortText({
displayName: 'Name',
description:
'Provide the name of the value you want to extract from the unstructured text. The name should be unique and short. ',
required: true,
}),
propDescription: Property.LongText({
displayName: 'Description',
description:
'Brief description of the data; this hints to the AI what to look for',
required: false,
}),
propDataType: Property.StaticDropdown({
displayName: 'Data Type',
description: 'Type of parameter.',
required: true,
defaultValue: 'string',
options: {
disabled: false,
options: [
{ label: 'Text', value: 'string' },
{ label: 'Number', value: 'number' },
{ label: 'Boolean', value: 'boolean' },
],
},
}),
propIsRequired: Property.Checkbox({
displayName: 'Fail if Not present?',
required: true,
defaultValue: false,
}),
},
}),
},
async run(context) {
const { model, text } = context.propsValue;
const paramInputArray = context.propsValue.params as ParamInput[];
const functionParams: Record<string, unknown> = {};
const requiredFunctionParams: string[] = [];
for (const param of paramInputArray) {
functionParams[param.propName] = {
type: param.propDataType,
description: param.propDescription ?? param.propName,
};
if (param.propIsRequired) {
requiredFunctionParams.push(param.propName);
}
}
const prompt = 'Extract the following data from the provided text';
const openai = new OpenAI({
apiKey: context.auth.secret_text,
});
const response = await openai.chat.completions.create({
model: model,
messages: [{ role: 'user', content: text }],
tools: [
{
type: 'function',
function: {
name: 'extract_structured_data',
description: prompt,
parameters: {
type: 'object',
properties: functionParams,
required: requiredFunctionParams,
},
},
},
],
});
const toolCallsResponse = response.choices[0].message.tool_calls;
if (toolCallsResponse) {
return JSON.parse(toolCallsResponse[0].function.arguments);
} else {
throw new Error(JSON.stringify({
message: "OpenAI couldn't extract the fields from the above text."
}));
}
},
});
interface ParamInput {
propName: string;
propDescription?: string;
propDataType: string;
propIsRequired: boolean;
}
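
As a concrete illustration of what the loop above builds: a Data Definition with a required name (Text) and an optional age (Number) would produce the following parameters object for the extract_structured_data tool (the two rows here are hypothetical inputs, not part of the diff):

// Hypothetical example: what functionParams/requiredFunctionParams produce for
// two Data Definition rows (name: string, required; age: number, optional).
const exampleParameters = {
type: 'object',
properties: {
// description falls back to propName when propDescription is empty
name: { type: 'string', description: 'name' },
age: { type: 'number', description: 'age' },
},
required: ['name'],
};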

View File

@@ -0,0 +1,119 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import OpenAI from 'openai';
import { openaiAuth } from '../..';
export const generateImage = createAction({
auth: openaiAuth,
name: 'generate_image',
displayName: 'Generate Image',
description: 'Generate an image using text-to-image models',
props: {
model: Property.Dropdown({
auth: openaiAuth,
displayName: 'Model',
required: true,
description: 'The model which will generate the image.',
defaultValue: 'dall-e-3',
refreshers: [],
options: async () => {
return {
options: [
{
label: 'dall-e-3',
value: 'dall-e-3',
},
{
label: 'dall-e-2',
value: 'dall-e-2',
},
],
};
},
}),
prompt: Property.LongText({
displayName: 'Prompt',
required: true,
}),
resolution: Property.Dropdown({
auth: openaiAuth,
displayName: 'Resolution',
description: 'The resolution to generate the image in.',
required: false,
refreshers: ['model'],
defaultValue: '1024x1024',
options: async ({ model }) => {
let options = [
{
label: '1024x1024',
value: '1024x1024',
},
{
label: '512x512',
value: '512x512',
},
{
label: '256x256',
value: '256x256',
},
];
if (model == 'dall-e-3')
options = [
{
label: '1024x1024',
value: '1024x1024',
},
{
label: '1024x1792',
value: '1024x1792',
},
{
label: '1792x1024',
value: '1792x1024',
},
];
return {
options: options,
};
},
}),
quality: Property.Dropdown({
auth: openaiAuth,
displayName: 'Quality',
required: false,
description: 'Standard is faster; HD has better detail.',
defaultValue: 'standard',
refreshers: [],
options: async () => {
return {
options: [
{
label: 'standard',
value: 'standard',
},
{
label: 'hd',
value: 'hd',
},
],
};
},
}),
},
async run({ auth, propsValue }) {
const openai = new OpenAI({
apiKey: auth.secret_text,
});
const { quality, resolution, model, prompt } = propsValue;
const image = await openai.images.generate({
model: model,
prompt: prompt,
quality: quality as any,
size: resolution as any,
});
return image;
},
});

View File

@@ -0,0 +1,198 @@
import {
createAction,
Property,
StoreScope,
} from '@activepieces/pieces-framework';
import OpenAI from 'openai';
import { openaiAuth } from '../..';
import {
calculateMessagesTokenSize,
exceedsHistoryLimit,
notLLMs,
reduceContextSize,
} from '../common/common';
import { z } from 'zod';
import { propsValidation } from '@activepieces/pieces-common';
export const askOpenAI = createAction({
auth: openaiAuth,
name: 'ask_chatgpt',
displayName: 'Ask ChatGPT',
description: 'Ask ChatGPT anything you want!',
props: {
model: Property.Dropdown({
auth: openaiAuth,
displayName: 'Model',
required: true,
description:
'The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.',
refreshers: [],
defaultValue: 'gpt-3.5-turbo',
options: async ({ auth }) => {
if (!auth) {
return {
disabled: true,
placeholder: 'Enter your API key first',
options: [],
};
}
try {
const openai = new OpenAI({
apiKey: auth.secret_text,
});
const response = await openai.models.list();
// We need to get only LLM models
const models = response.data.filter(
(model) => !notLLMs.includes(model.id)
);
return {
disabled: false,
options: models.map((model) => {
return {
label: model.id,
value: model.id,
};
}),
};
} catch (error) {
return {
disabled: true,
options: [],
placeholder: "Couldn't load models, API key is invalid",
};
}
},
}),
prompt: Property.LongText({
displayName: 'Question',
required: true,
}),
temperature: Property.Number({
displayName: 'Temperature',
required: false,
description:
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
defaultValue: 1,
}),
maxTokens: Property.Number({
displayName: 'Maximum Tokens',
required: true,
description:
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion depending on the model. Don't set the value to maximum and leave some tokens for the input. (One token is roughly 4 characters for normal English text)",
defaultValue: 2048,
}),
topP: Property.Number({
displayName: 'Top P',
required: false,
description:
'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
defaultValue: 1,
}),
frequencyPenalty: Property.Number({
displayName: 'Frequency penalty',
required: false,
description:
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
defaultValue: 0,
}),
presencePenalty: Property.Number({
displayName: 'Presence penalty',
required: false,
description:
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
}),
memoryKey: Property.ShortText({
displayName: 'Memory Key',
description:
'A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave ChatGPT without memory of previous messages.',
required: false,
}),
roles: Property.Json({
displayName: 'Roles',
required: false,
description: 'Array of role messages to guide the model toward a more accurate response',
defaultValue: [
{ role: 'system', content: 'You are a helpful assistant.' },
],
}),
},
async run({ auth, propsValue, store }) {
await propsValidation.validateZod(propsValue, {
temperature: z.number().min(0).max(1).optional(),
memoryKey: z.string().max(128).optional(),
});
const openai = new OpenAI({
apiKey: auth.secret_text,
});
const {
model,
temperature,
maxTokens,
topP,
frequencyPenalty,
presencePenalty,
prompt,
memoryKey,
} = propsValue;
let messageHistory: any[] | null = [];
// If memory key is set, retrieve messages stored in history
if (memoryKey) {
messageHistory = (await store.get(memoryKey, StoreScope.PROJECT)) ?? [];
}
// Add user prompt to message history
messageHistory.push({
role: 'user',
content: prompt,
});
// Add system instructions if set by user
const rolesArray = propsValue.roles ? (propsValue.roles as any) : [];
const roles = rolesArray.map((item: any) => {
const rolesEnum = ['system', 'user', 'assistant'];
if (!rolesEnum.includes(item.role)) {
throw new Error(
'The only available roles are: [system, user, assistant]'
);
}
return {
role: item.role,
content: item.content,
};
});
// Send prompt
const completion = await openai.chat.completions.create({
model: model,
messages: [...roles, ...messageHistory],
temperature: temperature,
top_p: topP,
frequency_penalty: frequencyPenalty,
presence_penalty: presencePenalty ?? undefined,
max_completion_tokens: maxTokens,
});
// Add response to message history
messageHistory = [...messageHistory, completion.choices[0].message];
// Check message history token size
// System limit is 32K tokens; we could probably raise it, but this is a safe margin
const tokenLength = await calculateMessagesTokenSize(messageHistory, model);
if (memoryKey) {
// If tokens exceed 90% system limit or 90% of model limit - maxTokens, reduce history token size
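// e.g. for gpt-4 (8192-token limit) with maxTokens = 2048, history is trimmed once it
// exceeds (8192 - 2048) / 1.1 ≈ 5585 tokens, or the 32000 / 1.1 ≈ 29090 global cap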
if (exceedsHistoryLimit(tokenLength, model, maxTokens)) {
messageHistory = await reduceContextSize(
messageHistory,
model,
maxTokens
);
}
// Store history
await store.put(memoryKey, messageHistory, StoreScope.PROJECT);
}
return completion.choices[0].message.content;
},
});

View File

@@ -0,0 +1,106 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import OpenAI from 'openai';
import { openaiAuth } from '../..';
import { streamToBuffer } from '../common/common';
type Voice = 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
type ResponseFormat = 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';
type Model = 'tts-1' | 'tts-1-hd';
export const textToSpeech = createAction({
auth: openaiAuth,
name: 'text_to_speech',
displayName: 'Text-to-Speech',
description: 'Generate an audio recording from text',
props: {
text: Property.LongText({
displayName: 'Text',
description: 'The text you want to hear.',
required: true,
}),
model: Property.StaticDropdown({
displayName: 'Model',
required: true,
description: 'The model which will generate the audio.',
defaultValue: 'tts-1',
options: {
disabled: false,
options: [
{
label: 'tts-1',
value: 'tts-1',
},
{
label: 'tts-1-hd',
value: 'tts-1-hd',
},
],
},
}),
speed: Property.Number({
displayName: 'Speed',
description: 'The speed of the audio. Minimum is 0.25 and maximum is 4.00.',
defaultValue: 1.0,
required: false,
}),
voice: Property.StaticDropdown({
displayName: 'Voice',
description: 'The voice to generate the audio in.',
required: true,
defaultValue: 'alloy',
options: {
disabled: false,
options: [
{ label: 'alloy', value: 'alloy' },
{ label: 'echo', value: 'echo' },
{ label: 'fable', value: 'fable' },
{ label: 'onyx', value: 'onyx' },
{ label: 'nova', value: 'nova' },
{ label: 'shimmer', value: 'shimmer' },
],
},
}),
format: Property.StaticDropdown({
displayName: 'Output Format',
required: true,
description: 'The format you want the audio file in.',
defaultValue: 'mp3',
options: {
disabled: false,
options: [
{ label: 'mp3', value: 'mp3' },
{ label: 'opus', value: 'opus' },
{ label: 'aac', value: 'aac' },
{ label: 'flac', value: 'flac' },
],
},
}),
fileName: Property.ShortText({
displayName: 'File Name',
description: 'The name of the output audio file (without extension).',
required: false,
defaultValue: 'audio',
}),
},
async run({ auth, propsValue, files }) {
const openai = new OpenAI({
apiKey: auth.secret_text,
});
const { voice, format, model, text, speed, fileName } = propsValue;
const audio = await openai.audio.speech.create({
model: model as Model,
input: text,
response_format: format as ResponseFormat,
voice: voice as Voice,
speed: speed,
});
const result = await streamToBuffer(audio.body);
return files.write({
fileName: `${fileName || 'audio'}.${format}`,
data: result as Buffer,
});
},
});

View File

@@ -0,0 +1,70 @@
import {
HttpRequest,
HttpMethod,
httpClient,
} from '@activepieces/pieces-common';
import { Property, createAction } from '@activepieces/pieces-framework';
import { openaiAuth } from '../..';
import FormData from 'form-data';
import mime from 'mime-types';
import { Languages, baseUrl } from '../common/common';
export const transcribeAction = createAction({
name: 'transcribe',
displayName: 'Transcribe Audio',
description: 'Transcribe audio to text using the whisper-1 model',
auth: openaiAuth,
props: {
audio: Property.File({
displayName: 'Audio',
required: true,
description: 'Audio file to transcribe',
}),
language: Property.StaticDropdown({
displayName: 'Language of the Audio',
description: 'Language of the audio file; the default is en (English).',
required: false,
options: {
options: Languages,
},
defaultValue: 'en',
}),
},
run: async (context) => {
const fileData = context.propsValue.audio;
const mimeType = mime.lookup(fileData.extension ? fileData.extension : '');
let language = context.propsValue.language;
// if language is not in languages list, default to english
if (!Languages.some((l) => l.value === language)) {
language = 'en';
}
const form = new FormData();
form.append('file', fileData.data, {
filename: fileData.filename,
contentType: mimeType as string,
});
form.append('model', 'whisper-1');
form.append('language', language);
const headers = {
Authorization: `Bearer ${context.auth.secret_text}`,
};
const request: HttpRequest = {
method: HttpMethod.POST,
url: `${baseUrl}/audio/transcriptions`,
body: form,
headers: {
...form.getHeaders(),
...headers,
},
};
try {
const response = await httpClient.sendRequest(request);
return response.body;
} catch (e) {
throw new Error(`Error during execution:\n${e}`);
}
},
});

View File

@@ -0,0 +1,54 @@
import {
HttpRequest,
HttpMethod,
httpClient,
} from '@activepieces/pieces-common';
import { Property, createAction } from '@activepieces/pieces-framework';
import { openaiAuth } from '../..';
import FormData from 'form-data';
import mime from 'mime-types';
import { baseUrl } from '../common/common';
export const translateAction = createAction({
name: 'translate',
displayName: 'Translate Audio',
description: 'Translate audio to text using the whisper-1 model',
auth: openaiAuth,
props: {
audio: Property.File({
displayName: 'Audio',
required: true,
description: 'Audio file to translate',
}),
},
run: async (context) => {
const fileData = context.propsValue.audio;
const mimeType = mime.lookup(fileData.extension ? fileData.extension : '');
const form = new FormData();
form.append('file', fileData.data, {
filename: fileData.filename,
contentType: mimeType as string,
});
form.append('model', 'whisper-1');
const headers = {
Authorization: `Bearer ${context.auth.secret_text}`,
};
const request: HttpRequest = {
method: HttpMethod.POST,
url: `${baseUrl}/audio/translations`,
body: form,
headers: {
...form.getHeaders(),
...headers,
},
};
try {
const response = await httpClient.sendRequest(request);
return response.body;
} catch (e) {
throw new Error(`Error during execution:\n${e}`);
}
},
});

View File

@@ -0,0 +1,152 @@
import {
createAction,
Property,
} from '@activepieces/pieces-framework';
import OpenAI from 'openai';
import { openaiAuth } from '../..';
import { z } from 'zod';
import { propsValidation } from '@activepieces/pieces-common';
export const visionPrompt = createAction({
auth: openaiAuth,
name: 'vision_prompt',
displayName: 'Vision Prompt',
description: 'Ask GPT a question about an image',
props: {
image: Property.File({
displayName: 'Image',
description: "The image URL or file you want GPT's vision to read.",
required: true,
}),
prompt: Property.LongText({
displayName: 'Question',
description: 'What do you want ChatGPT to tell you about the image?',
required: true,
}),
detail: Property.Dropdown({
auth: openaiAuth,
displayName: 'Detail',
required: false,
description:
'Control how the model processes the image and generates textual understanding.',
defaultValue: 'auto',
refreshers: [],
options: async () => {
return {
options: [
{
label: 'low',
value: 'low',
},
{
label: 'high',
value: 'high',
},
{
label: 'auto',
value: 'auto',
},
],
};
},
}),
temperature: Property.Number({
displayName: 'Temperature',
required: false,
description:
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
defaultValue: 0.9,
}),
maxTokens: Property.Number({
displayName: 'Maximum Tokens',
required: false,
description:
"The maximum number of tokens to generate. Requests can use up to 2,048 or 4,096 tokens shared between prompt and completion, don't set the value to maximum and leave some tokens for the input. The exact limit varies by model. (One token is roughly 4 characters for normal English text)",
defaultValue: 2048,
}),
topP: Property.Number({
displayName: 'Top P',
required: false,
description:
'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
defaultValue: 1,
}),
frequencyPenalty: Property.Number({
displayName: 'Frequency penalty',
required: false,
description:
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
defaultValue: 0,
}),
presencePenalty: Property.Number({
displayName: 'Presence penalty',
required: false,
description:
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the mode's likelihood to talk about new topics.",
defaultValue: 0.6,
}),
roles: Property.Json({
displayName: 'Roles',
required: false,
description: 'Array of role messages to guide the model toward a more accurate response',
defaultValue: [
{ role: 'system', content: 'You are a helpful assistant.' },
],
}),
},
async run({ auth, propsValue }) {
await propsValidation.validateZod(propsValue, {
temperature: z.number().min(0).max(1).optional(),
});
const openai = new OpenAI({
apiKey: auth.secret_text,
});
const { temperature, maxTokens, topP, frequencyPenalty, presencePenalty } =
propsValue;
const rolesArray = propsValue.roles ? (propsValue.roles as any) : [];
const roles = rolesArray.map((item: any) => {
const rolesEnum = ['system', 'user', 'assistant'];
if (!rolesEnum.includes(item.role)) {
throw new Error(
'The only available roles are: [system, user, assistant]'
);
}
return {
role: item.role,
content: item.content,
};
});
const completion = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [
...roles,
{
role: 'user',
content: [
{
type: 'text',
text: propsValue['prompt'],
},
{
type: 'image_url',
image_url: {
url: `data:image/${propsValue.image.extension};base64,${propsValue.image.base64}`,
},
},
],
},
],
temperature: temperature,
max_tokens: maxTokens,
top_p: topP,
frequency_penalty: frequencyPenalty,
presence_penalty: presencePenalty,
});
return completion.choices[0].message.content;
},
});

View File

@@ -0,0 +1,223 @@
import { encoding_for_model } from 'tiktoken';
export const baseUrl = 'https://api.openai.com/v1';
export const Languages = [
{ value: 'es', label: 'Spanish' },
{ value: 'it', label: 'Italian' },
{ value: 'en', label: 'English' },
{ value: 'pt', label: 'Portuguese' },
{ value: 'de', label: 'German' },
{ value: 'ja', label: 'Japanese' },
{ value: 'pl', label: 'Polish' },
{ value: 'ar', label: 'Arabic' },
{ value: 'af', label: 'Afrikaans' },
{ value: 'az', label: 'Azerbaijani' },
{ value: 'bg', label: 'Bulgarian' },
{ value: 'bs', label: 'Bosnian' },
{ value: 'ca', label: 'Catalan' },
{ value: 'cs', label: 'Czech' },
{ value: 'da', label: 'Danish' },
{ value: 'el', label: 'Greek' },
{ value: 'et', label: 'Estonian' },
{ value: 'fa', label: 'Persian' },
{ value: 'fi', label: 'Finnish' },
{ value: 'tl', label: 'Tagalog' },
{ value: 'fr', label: 'French' },
{ value: 'gl', label: 'Galician' },
{ value: 'he', label: 'Hebrew' },
{ value: 'hi', label: 'Hindi' },
{ value: 'hr', label: 'Croatian' },
{ value: 'hu', label: 'Hungarian' },
{ value: 'hy', label: 'Armenian' },
{ value: 'id', label: 'Indonesian' },
{ value: 'is', label: 'Icelandic' },
{ value: 'kk', label: 'Kazakh' },
{ value: 'kn', label: 'Kannada' },
{ value: 'ko', label: 'Korean' },
{ value: 'lt', label: 'Lithuanian' },
{ value: 'lv', label: 'Latvian' },
{ value: 'mi', label: 'Maori' },
{ value: 'mk', label: 'Macedonian' },
{ value: 'mr', label: 'Marathi' },
{ value: 'ms', label: 'Malay' },
{ value: 'ne', label: 'Nepali' },
{ value: 'nl', label: 'Dutch' },
{ value: 'no', label: 'Norwegian' },
{ value: 'ro', label: 'Romanian' },
{ value: 'ru', label: 'Russian' },
{ value: 'sk', label: 'Slovak' },
{ value: 'sl', label: 'Slovenian' },
{ value: 'sr', label: 'Serbian' },
{ value: 'sv', label: 'Swedish' },
{ value: 'sw', label: 'Swahili' },
{ value: 'ta', label: 'Tamil' },
{ value: 'th', label: 'Thai' },
{ value: 'tr', label: 'Turkish' },
{ value: 'uk', label: 'Ukrainian' },
{ value: 'ur', label: 'Urdu' },
{ value: 'vi', label: 'Vietnamese' },
{ value: 'zh', label: 'Chinese (Simplified)' },
{ value: 'cy', label: 'Welsh' },
{ value: 'be', label: 'Belarusian' },
];
export const billingIssueMessage = `Error Occurred: 429 \n
1. Ensure that billing is enabled on your OpenAI platform. \n
2. Generate a new API key. \n
3. Attempt the process again. \n
For guidance, visit: https://beta.openai.com/account/billing`;
export const unauthorizedMessage = `Error Occurred: 401 \n
Ensure that your API key is valid. \n`;
export const sleep = (ms: number) => {
return new Promise((resolve) => setTimeout(resolve, ms));
};
export const streamToBuffer = (stream: any) => {
const chunks: any[] = [];
return new Promise((resolve, reject) => {
stream.on('data', (chunk: any) => chunks.push(Buffer.from(chunk)));
stream.on('error', (err: any) => reject(err));
stream.on('end', () => resolve(Buffer.concat(chunks)));
});
};
export const calculateTokensFromString = (string: string, model: string) => {
try {
const encoder = encoding_for_model(model as any);
const tokens = encoder.encode(string);
encoder.free();
return tokens.length;
} catch (e) {
// Model not supported by tiktoken; approximate one token per 4 characters
return Math.round(string.length / 4);
}
};
export const calculateMessagesTokenSize = async (
messages: any[],
model: string
) => {
// Sum token counts across all messages (token encoding is synchronous)
return messages.reduce(
(total: number, message: any) =>
total + calculateTokensFromString(message.content, model),
0
);
};
export const reduceContextSize = async (
messages: any[],
model: string,
maxTokens: number
) => {
// TODO: Summarize context instead of cutoff
const cutoffSize = Math.round(messages.length * 0.1);
const cutoffMessages = messages.splice(cutoffSize, messages.length - 1);
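// Example: with 40 messages, cutoffSize = 4; splice removes everything from
// index 4 onward and returns it, so cutoffMessages keeps the newest 36 messages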
if (
(await calculateMessagesTokenSize(cutoffMessages, model)) >
maxTokens / 1.5
) {
return reduceContextSize(cutoffMessages, model, maxTokens);
}
return cutoffMessages;
};
export const exceedsHistoryLimit = (
tokenLength: number,
model: string,
maxTokens: number
) => {
return (
tokenLength >= tokenLimit / 1.1 ||
tokenLength >= (modelTokenLimit(model) - maxTokens) / 1.1
);
};
export const tokenLimit = 32000;
export const modelTokenLimit = (model: string) => {
switch (model) {
case 'gpt-4-1106-preview':
case 'gpt-4-vision-preview':
return 128000;
case 'gpt-4':
case 'gpt-4-0613':
case 'gpt-4-0314':
return 8192;
case 'gpt-4-32k':
case 'gpt-4-32k-0613':
case 'gpt-4-32k-0314':
case 'text-moderation-latest':
case 'text-moderation-stable':
return 32768;
case 'gpt-3.5-turbo-1106':
case 'gpt-3.5-turbo-16k':
case 'gpt-3.5-turbo-16k-0613':
return 16385;
case 'gpt-3.5-turbo':
case 'gpt-3.5-turbo-instruct':
case 'gpt-3.5-turbo-0613':
case 'gpt-3.5-turbo-0301':
case 'text-davinci-003':
case 'text-davinci-002':
return 4096;
case 'code-davinci-002':
return 8001;
case 'gpt-5':
case 'gpt-5-chat-latest':
case 'gpt-5-mini':
case 'gpt-5-nano':
return 400000;
default:
return 2048;
}
};
// List of non-text models to filter out in Ask GPT action
export const notLLMs = [
'gpt-4o-realtime-preview-2024-10-01',
'gpt-4o-realtime-preview',
'babbage-002',
'davinci-002',
'tts-1-hd-1106',
'whisper-1',
'canary-whisper',
'canary-tts',
'tts-1',
'tts-1-hd',
'tts-1-1106',
'dall-e-3',
'dall-e-2',
];