Add Activepieces integration for workflow automation
- Add Activepieces fork with SmoothSchedule custom piece
- Create integrations app with Activepieces service layer
- Add embed token endpoint for iframe integration
- Create Automations page with embedded workflow builder
- Add sidebar visibility fix for embed mode
- Add list inactive customers endpoint to Public API
- Include SmoothSchedule triggers: event created/updated/cancelled
- Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,206 @@
|
||||
import { createAction, Property, StoreScope } from '@activepieces/pieces-framework';
|
||||
import { groqAuth } from '../..';
|
||||
import { httpClient, HttpMethod, AuthenticationType } from '@activepieces/pieces-common';
|
||||
|
||||
export const askGroq = createAction({
|
||||
auth: groqAuth,
|
||||
name: 'ask-ai',
|
||||
displayName: 'Ask AI',
|
||||
description: 'Ask Groq anything using fast language models.',
|
||||
props: {
|
||||
model: Property.Dropdown({
|
||||
auth: groqAuth,
|
||||
displayName: 'Model',
|
||||
required: true,
|
||||
description: 'The model which will generate the completion.',
|
||||
refreshers: [],
|
||||
defaultValue: 'llama-3.1-70b-versatile',
|
||||
options: async ({ auth }) => {
|
||||
if (!auth) {
|
||||
return {
|
||||
disabled: true,
|
||||
placeholder: 'Please connect your Groq account first.',
|
||||
options: [],
|
||||
};
|
||||
}
|
||||
try {
|
||||
const response = await httpClient.sendRequest({
|
||||
url: 'https://api.groq.com/openai/v1/models',
|
||||
method: HttpMethod.GET,
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.secret_text,
|
||||
},
|
||||
});
|
||||
// Filter out audio models
|
||||
const models = (response.body.data as Array<{ id: string }>).filter(
|
||||
(model) => !model.id.toLowerCase().includes('whisper'),
|
||||
);
|
||||
return {
|
||||
disabled: false,
|
||||
options: models.map((model) => {
|
||||
return {
|
||||
label: model.id,
|
||||
value: model.id,
|
||||
};
|
||||
}),
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
disabled: true,
|
||||
options: [],
|
||||
placeholder: "Couldn't load models, API key is invalid",
|
||||
};
|
||||
}
|
||||
},
|
||||
}),
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Question',
|
||||
required: true,
|
||||
}),
|
||||
temperature: Property.Number({
|
||||
displayName: 'Temperature',
|
||||
required: false,
|
||||
description:
|
||||
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
|
||||
defaultValue: 0.9,
|
||||
}),
|
||||
maxTokens: Property.Number({
|
||||
displayName: 'Maximum Tokens',
|
||||
required: true,
|
||||
description:
|
||||
"The maximum number of tokens to generate. The total length of input tokens and generated tokens is limited by the model's context length.",
|
||||
defaultValue: 2048,
|
||||
}),
|
||||
topP: Property.Number({
|
||||
displayName: 'Top P',
|
||||
required: false,
|
||||
description:
|
||||
'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
|
||||
defaultValue: 1,
|
||||
}),
|
||||
frequencyPenalty: Property.Number({
|
||||
displayName: 'Frequency penalty',
|
||||
required: false,
|
||||
description:
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
defaultValue: 0,
|
||||
}),
|
||||
presencePenalty: Property.Number({
|
||||
displayName: 'Presence penalty',
|
||||
required: false,
|
||||
description:
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
|
||||
defaultValue: 0.6,
|
||||
}),
|
||||
memoryKey: Property.ShortText({
|
||||
displayName: 'Memory Key',
|
||||
description:
|
||||
'A memory key that will keep the chat history shared across runs and flows. Keep it empty to leave Groq without memory of previous messages.',
|
||||
required: false,
|
||||
}),
|
||||
roles: Property.Json({
|
||||
displayName: 'Roles',
|
||||
required: false,
|
||||
description: 'Array of roles to specify more accurate response',
|
||||
defaultValue: [{ role: 'system', content: 'You are a helpful assistant.' }],
|
||||
}),
|
||||
},
|
||||
async run({ auth, propsValue, store }) {
|
||||
const {
|
||||
model,
|
||||
temperature,
|
||||
maxTokens,
|
||||
topP,
|
||||
frequencyPenalty,
|
||||
presencePenalty,
|
||||
prompt,
|
||||
memoryKey,
|
||||
} = propsValue;
|
||||
|
||||
let messageHistory: any[] | null = [];
|
||||
// If memory key is set, retrieve messages stored in history
|
||||
if (memoryKey) {
|
||||
messageHistory = (await store.get(memoryKey, StoreScope.PROJECT)) ?? [];
|
||||
}
|
||||
|
||||
// Add user prompt to message history
|
||||
messageHistory.push({
|
||||
role: 'user',
|
||||
content: prompt,
|
||||
});
|
||||
|
||||
// Add system instructions if set by user
|
||||
const rolesArray = propsValue.roles ? (propsValue.roles as any) : [];
|
||||
const roles = rolesArray.map((item: any) => {
|
||||
const rolesEnum = ['system', 'user', 'assistant'];
|
||||
if (!rolesEnum.includes(item.role)) {
|
||||
throw new Error('The only available roles are: [system, user, assistant]');
|
||||
}
|
||||
|
||||
return {
|
||||
role: item.role,
|
||||
content: item.content,
|
||||
};
|
||||
});
|
||||
|
||||
// Send prompt
|
||||
const completion = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://api.groq.com/openai/v1/chat/completions',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.secret_text,
|
||||
},
|
||||
body: {
|
||||
model: model,
|
||||
messages: [...roles, ...messageHistory],
|
||||
temperature: temperature,
|
||||
top_p: topP,
|
||||
frequency_penalty: frequencyPenalty,
|
||||
presence_penalty: presencePenalty,
|
||||
max_completion_tokens: maxTokens,
|
||||
},
|
||||
});
|
||||
|
||||
// Add response to message history
|
||||
messageHistory = [...messageHistory, completion.body.choices[0].message];
|
||||
|
||||
// Store history if memory key is set
|
||||
if (memoryKey) {
|
||||
await store.put(memoryKey, messageHistory, StoreScope.PROJECT);
|
||||
}
|
||||
|
||||
// Get the raw content from the response
|
||||
const rawContent = completion.body.choices[0].message.content;
|
||||
|
||||
// Check if the response contains thinking (content inside <think> tags)
|
||||
const thinkRegex = /<think>([\s\S]*?)<\/think>/;
|
||||
const thinkMatch = rawContent.match(thinkRegex);
|
||||
|
||||
// Create the response structure
|
||||
const responseStructure = [];
|
||||
|
||||
if (thinkMatch) {
|
||||
// Extract the thinking content
|
||||
const thinkContent = thinkMatch[1].trim();
|
||||
|
||||
// Extract the final answer (content after the last </think> tag)
|
||||
const finalContent = rawContent.split('</think>').pop()?.trim() || '';
|
||||
|
||||
// Add to response structure
|
||||
responseStructure.push({
|
||||
Think: thinkContent,
|
||||
Content: finalContent
|
||||
});
|
||||
} else {
|
||||
// If no thinking tags, just return the content as is
|
||||
responseStructure.push({
|
||||
Think: null,
|
||||
Content: rawContent
|
||||
});
|
||||
}
|
||||
|
||||
return responseStructure;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,126 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { groqAuth } from '../..';
|
||||
import { httpClient, HttpMethod, AuthenticationType } from '@activepieces/pieces-common';
|
||||
|
||||
export const transcribeAudio = createAction({
|
||||
auth: groqAuth,
|
||||
name: 'transcribe-audio',
|
||||
displayName: 'Transcribe Audio',
|
||||
description: 'Transcribes audio into text in the input language.',
|
||||
props: {
|
||||
file: Property.File({
|
||||
displayName: 'Audio File',
|
||||
required: true,
|
||||
description:
|
||||
'The audio file to transcribe. Supported formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.',
|
||||
}),
|
||||
model: Property.Dropdown({
|
||||
displayName: 'Model',
|
||||
auth: groqAuth,
|
||||
required: true,
|
||||
description: 'The model to use for transcription.',
|
||||
refreshers: [],
|
||||
defaultValue: 'whisper-large-v3',
|
||||
options: async ({ auth }) => {
|
||||
if (!auth) {
|
||||
return {
|
||||
disabled: true,
|
||||
placeholder: 'Please connect your Groq account first.',
|
||||
options: [],
|
||||
};
|
||||
}
|
||||
try {
|
||||
const response = await httpClient.sendRequest({
|
||||
url: 'https://api.groq.com/openai/v1/models',
|
||||
method: HttpMethod.GET,
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.secret_text,
|
||||
},
|
||||
});
|
||||
// Filter for whisper models only
|
||||
const models = (response.body.data as Array<{ id: string }>).filter((model) =>
|
||||
model.id.toLowerCase().includes('whisper'),
|
||||
);
|
||||
return {
|
||||
disabled: false,
|
||||
options: models.map((model) => {
|
||||
return {
|
||||
label: model.id,
|
||||
value: model.id,
|
||||
};
|
||||
}),
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
disabled: true,
|
||||
options: [],
|
||||
placeholder: "Couldn't load models, API key is invalid",
|
||||
};
|
||||
}
|
||||
},
|
||||
}),
|
||||
language: Property.ShortText({
|
||||
displayName: 'Language',
|
||||
required: false,
|
||||
description:
|
||||
'The language of the input audio in ISO-639-1 format (e.g., "en" for English). This will improve accuracy and latency.',
|
||||
}),
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Prompt',
|
||||
required: false,
|
||||
description:
|
||||
"An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.",
|
||||
}),
|
||||
temperature: Property.Number({
|
||||
displayName: 'Temperature',
|
||||
required: false,
|
||||
description:
|
||||
'The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.',
|
||||
defaultValue: 0,
|
||||
}),
|
||||
responseFormat: Property.StaticDropdown({
|
||||
displayName: 'Response Format',
|
||||
required: false,
|
||||
description: 'The format of the transcript output.',
|
||||
defaultValue: 'json',
|
||||
options: {
|
||||
disabled: false,
|
||||
options: [
|
||||
{ label: 'JSON', value: 'json' },
|
||||
{ label: 'Text', value: 'text' },
|
||||
{ label: 'Verbose JSON', value: 'verbose_json' },
|
||||
],
|
||||
},
|
||||
}),
|
||||
},
|
||||
async run({ auth, propsValue }) {
|
||||
const { file, model, language, prompt, temperature, responseFormat } = propsValue;
|
||||
|
||||
// Create form data
|
||||
const formData = new FormData();
|
||||
formData.append('file', new Blob([file.data] as unknown as BlobPart[]), file.filename);
|
||||
formData.append('model', model);
|
||||
|
||||
if (language) formData.append('language', language);
|
||||
if (prompt) formData.append('prompt', prompt);
|
||||
if (temperature !== undefined) formData.append('temperature', temperature.toString());
|
||||
if (responseFormat) formData.append('response_format', responseFormat);
|
||||
|
||||
// Send request
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://api.groq.com/openai/v1/audio/transcriptions',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.secret_text,
|
||||
},
|
||||
headers: {
|
||||
'Content-Type': 'multipart/form-data',
|
||||
},
|
||||
body: formData,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
},
|
||||
});
|
||||
@@ -0,0 +1,119 @@
|
||||
import { createAction, Property } from '@activepieces/pieces-framework';
|
||||
import { groqAuth } from '../..';
|
||||
import { httpClient, HttpMethod, AuthenticationType } from '@activepieces/pieces-common';
|
||||
|
||||
export const translateAudio = createAction({
|
||||
auth: groqAuth,
|
||||
name: 'translate-audio',
|
||||
displayName: 'Translate Audio',
|
||||
description: 'Translates audio into English text.',
|
||||
props: {
|
||||
file: Property.File({
|
||||
displayName: 'Audio File',
|
||||
required: true,
|
||||
description:
|
||||
'The audio file to translate. Supported formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.',
|
||||
}),
|
||||
model: Property.Dropdown({
|
||||
displayName: 'Model',
|
||||
required: true,
|
||||
description: 'The model to use for translation.',
|
||||
refreshers: [],
|
||||
defaultValue: 'whisper-large-v3',
|
||||
auth: groqAuth,
|
||||
options: async ({ auth }) => {
|
||||
if (!auth) {
|
||||
return {
|
||||
disabled: true,
|
||||
placeholder: 'Please connect your Groq account first.',
|
||||
options: [],
|
||||
};
|
||||
}
|
||||
try {
|
||||
const response = await httpClient.sendRequest({
|
||||
url: 'https://api.groq.com/openai/v1/models',
|
||||
method: HttpMethod.GET,
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.secret_text
|
||||
},
|
||||
});
|
||||
// Filter for whisper models only
|
||||
const models = (response.body.data as Array<{ id: string }>).filter((model) =>
|
||||
model.id.toLowerCase().includes('whisper'),
|
||||
);
|
||||
return {
|
||||
disabled: false,
|
||||
options: models.map((model) => {
|
||||
return {
|
||||
label: model.id,
|
||||
value: model.id,
|
||||
};
|
||||
}),
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
disabled: true,
|
||||
options: [],
|
||||
placeholder: "Couldn't load models, API key is invalid",
|
||||
};
|
||||
}
|
||||
},
|
||||
}),
|
||||
prompt: Property.LongText({
|
||||
displayName: 'Prompt',
|
||||
required: false,
|
||||
description:
|
||||
"An optional text in English to guide the model's style or continue a previous audio segment.",
|
||||
}),
|
||||
temperature: Property.Number({
|
||||
displayName: 'Temperature',
|
||||
required: false,
|
||||
description:
|
||||
'The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.',
|
||||
defaultValue: 0,
|
||||
}),
|
||||
responseFormat: Property.StaticDropdown({
|
||||
displayName: 'Response Format',
|
||||
required: false,
|
||||
description: 'The format of the translation output.',
|
||||
defaultValue: 'json',
|
||||
options: {
|
||||
disabled: false,
|
||||
options: [
|
||||
{ label: 'JSON', value: 'json' },
|
||||
{ label: 'Text', value: 'text' },
|
||||
{ label: 'Verbose JSON', value: 'verbose_json' },
|
||||
],
|
||||
},
|
||||
}),
|
||||
},
|
||||
async run({ auth, propsValue }) {
|
||||
const { file, model, prompt, temperature, responseFormat } = propsValue;
|
||||
|
||||
// Create form data
|
||||
const formData = new FormData();
|
||||
formData.append('file', new Blob([file.data] as unknown as BlobPart[]), file.filename);
|
||||
formData.append('model', model);
|
||||
|
||||
if (prompt) formData.append('prompt', prompt);
|
||||
if (temperature !== undefined) formData.append('temperature', temperature.toString());
|
||||
if (responseFormat) formData.append('response_format', responseFormat);
|
||||
|
||||
// Send request
|
||||
const response = await httpClient.sendRequest({
|
||||
method: HttpMethod.POST,
|
||||
url: 'https://api.groq.com/openai/v1/audio/translations',
|
||||
authentication: {
|
||||
type: AuthenticationType.BEARER_TOKEN,
|
||||
token: auth.secret_text
|
||||
},
|
||||
headers: {
|
||||
'Content-Type': 'multipart/form-data',
|
||||
},
|
||||
body: formData,
|
||||
});
|
||||
|
||||
return response.body;
|
||||
},
|
||||
});
|
||||
Reference in New Issue
Block a user