Add Activepieces integration for workflow automation

- Add Activepieces fork with SmoothSchedule custom piece
- Create integrations app with Activepieces service layer
- Add embed token endpoint for iframe integration
- Create Automations page with embedded workflow builder
- Add sidebar visibility fix for embed mode
- Add list inactive customers endpoint to Public API
- Include SmoothSchedule triggers: event created/updated/cancelled
- Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
poduck
2025-12-18 22:59:37 -05:00
parent 9848268d34
commit 3aa7199503
16292 changed files with 1284892 additions and 4708 deletions

View File

@@ -0,0 +1,66 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';
/**
 * Starts a Hume AI batch expression-measurement job for media hosted at URLs.
 * Returns the job id immediately; results are fetched later via the
 * "Get Emotion Results" action.
 */
export const analyzeEmotionsFromUrl = createAction({
  name: 'analyze_emotions_from_url',
  displayName: 'Analyze Emotions from URL',
  description: 'Start a batch emotion analysis job for media files hosted at URLs',
  auth: humeAiAuth,
  props: {
    urls: Property.Array({
      displayName: 'Media URLs',
      description: 'URLs to media files to analyze (images, videos, audio). Supports up to 100 URLs or archives (.zip, .tar.gz)',
      required: true,
    }),
    models: Property.Object({
      displayName: 'Models Configuration',
      description: 'Specify which models to use for inference. Leave empty to run all models.',
      required: false,
    }),
    transcription: Property.Checkbox({
      displayName: 'Include Transcription',
      description: 'Include speech-to-text transcription in the analysis',
      required: false,
      defaultValue: false,
    }),
    callbackUrl: Property.ShortText({
      displayName: 'Callback URL',
      description: 'Optional webhook URL to receive results when the job completes',
      required: false,
    }),
    notify: Property.Checkbox({
      displayName: 'Email Notification',
      description: 'Send email notification upon job completion or failure',
      required: false,
      defaultValue: true,
    }),
  },
  async run(context) {
    const { urls, models, transcription, callbackUrl, notify } = context.propsValue;

    // Assemble the inference request, only attaching fields the user supplied.
    const jobRequest: any = { urls: urls as string[] };
    if (models) {
      jobRequest.models = models;
    }
    if (transcription) {
      // An empty transcription config enables transcription with defaults.
      jobRequest.transcription = {};
    }
    if (callbackUrl) {
      jobRequest.callbackUrl = callbackUrl;
    }
    if (notify !== undefined) {
      jobRequest.notify = notify;
    }

    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    try {
      const response = await client.expressionMeasurement.batch.startInferenceJob(jobRequest);
      return {
        jobId: response.jobId,
        status: 'processing',
        message: 'Emotion analysis job started successfully. Use "Get Emotion Results" action to retrieve predictions.',
      };
    } catch (error) {
      throw new Error(`Emotion analysis job failed to start: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});

View File

@@ -0,0 +1,46 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';
/**
 * Persists a previously generated TTS voice (identified by its generation id)
 * as a reusable named custom voice on the connected Hume AI account.
 */
export const createVoice = createAction({
  name: 'create_voice',
  displayName: 'Create Voice',
  description: 'Save a custom voice to your account using a TTS generation ID',
  auth: humeAiAuth,
  props: {
    generationId: Property.ShortText({
      displayName: 'Generation ID',
      description: 'The unique ID from a previous TTS generation to save as a custom voice',
      required: true,
    }),
    voiceName: Property.ShortText({
      displayName: 'Voice Name',
      description: 'A descriptive name for your custom voice',
      required: true,
    }),
  },
  async run(context) {
    const { generationId, voiceName } = context.propsValue;
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    try {
      const created = await client.tts.voices.create({
        generationId,
        name: voiceName,
      });
      // Echo back the saved voice's metadata plus the source generation id.
      return {
        id: created.id,
        name: created.name,
        provider: created.provider,
        compatibleOctaveModels: created.compatibleOctaveModels,
        generationId: generationId,
      };
    } catch (error) {
      throw new Error(`Voice creation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});

View File

@@ -0,0 +1,38 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';
/**
 * Removes a custom voice (addressed by name) from the connected Hume AI account.
 */
export const deleteVoice = createAction({
  name: 'delete_voice',
  displayName: 'Delete Voice',
  description: 'Delete a custom voice from your account',
  auth: humeAiAuth,
  props: {
    voiceName: Property.ShortText({
      displayName: 'Voice Name',
      description: 'The name of the custom voice to delete',
      required: true,
    }),
  },
  async run(context) {
    const { voiceName } = context.propsValue;
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    try {
      await client.tts.voices.delete({ name: voiceName });
    } catch (error) {
      throw new Error(`Voice deletion failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }

    // The delete endpoint returns no body; report success explicitly.
    return {
      success: true,
      message: `Voice "${voiceName}" deleted successfully`,
      deletedVoice: voiceName,
    };
  },
});

View File

@@ -0,0 +1,114 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';
/**
 * Synthesizes speech from text via Hume AI TTS and returns the audio as a
 * workflow file (mp3/wav/pcm). Unlike 'Generate Text to Speech', this action
 * always streams the raw audio bytes and writes exactly one output file.
 *
 * Fix: the original description claimed the action converts an "audio file to
 * speech"; it actually converts TEXT to speech and saves the result to a file.
 */
export const generateSpeechFromFile = createAction({
  name: 'generate_speech_from_file',
  displayName: 'Generate Speech from File',
  description: 'Convert text to speech and save the generated audio as a file using Hume AI\'s expressive text-to-speech technology',
  auth: humeAiAuth,
  props: {
    text: Property.LongText({
      displayName: 'Text',
      description: 'The text to convert to speech',
      required: true,
    }),
    voiceDescription: Property.LongText({
      displayName: 'Voice Description',
      description: 'Natural language description of how the speech should sound (tone, accent, style, etc.). If no voice is specified, this will generate a dynamic voice.',
      required: false,
    }),
    format: Property.StaticDropdown({
      displayName: 'Audio Format',
      description: 'The output audio file format',
      options: {
        options: [
          { label: 'MP3', value: 'mp3' },
          { label: 'WAV', value: 'wav' },
          { label: 'PCM', value: 'pcm' },
        ],
      },
      required: true,
      defaultValue: 'mp3',
    }),
    speed: Property.Number({
      displayName: 'Speed',
      description: 'Speed multiplier for the synthesized speech (0.75-1.5 recommended)',
      required: false,
      defaultValue: 1.0,
    }),
    contextText: Property.LongText({
      displayName: 'Context Text',
      description: 'Optional context text to influence speech style and prosody consistency',
      required: false,
    }),
    contextDescription: Property.LongText({
      displayName: 'Context Description',
      description: 'Description for the context text (how it should sound)',
      required: false,
    }),
    trailingSilence: Property.Number({
      displayName: 'Trailing Silence (seconds)',
      description: 'Duration of silence to add at the end of the speech',
      required: false,
    }),
    splitUtterances: Property.Checkbox({
      displayName: 'Split Utterances',
      description: 'Automatically split text into natural-sounding speech segments',
      required: false,
      defaultValue: true,
    }),
  },
  async run(context) {
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });
    const {
      text,
      voiceDescription,
      format,
      speed,
      contextText,
      contextDescription,
      trailingSilence,
      splitUtterances,
    } = context.propsValue;

    // Build the synthesis request; optional fields are attached only when set
    // (speed is omitted at its default of 1.0).
    const request: any = {
      utterances: [{
        text,
        ...(voiceDescription && { description: voiceDescription }),
        ...(speed && speed !== 1.0 && { speed }),
        ...(trailingSilence && { trailingSilence }),
      }],
      format: {
        type: format,
      },
      ...(splitUtterances !== undefined && { splitUtterances }),
    };
    if (contextText) {
      request.context = {
        utterances: [{
          text: contextText,
          ...(contextDescription && { description: contextDescription }),
        }],
      };
    }

    try {
      // synthesizeFile streams raw audio; buffer it and hand it to the
      // workflow's file store so downstream steps can consume it.
      const response = await client.tts.synthesizeFile(request);
      const audioBuffer = await response.arrayBuffer();
      const buffer = Buffer.from(audioBuffer);
      return await context.files.write({
        data: buffer,
        fileName: `speech_${Date.now()}.${format}`,
      });
    } catch (error) {
      throw new Error(`Speech file generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});

View File

@@ -0,0 +1,155 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';
/**
 * Synthesizes expressive speech from text via Hume AI TTS (JSON API).
 * With a single generation, returns the written audio file directly; with
 * multiple generations, returns a summary object containing a primary file
 * plus one file per generation.
 */
export const generateTextToSpeech = createAction({
  name: 'generate_text_to_speech',
  displayName: 'Generate Text to Speech',
  description: "Convert text to speech using Hume AI's expressive text-to-speech technology",
  auth: humeAiAuth,
  props: {
    text: Property.LongText({
      displayName: 'Text',
      description: 'The text to convert to speech',
      required: true,
    }),
    voiceDescription: Property.LongText({
      displayName: 'Voice Description',
      description: 'Natural language description of how the speech should sound (tone, accent, style, etc.). If no voice is specified, this will generate a dynamic voice.',
      required: false,
    }),
    format: Property.StaticDropdown({
      displayName: 'Audio Format',
      description: 'The output audio file format',
      options: {
        options: [
          { label: 'MP3', value: 'mp3' },
          { label: 'WAV', value: 'wav' },
          { label: 'PCM', value: 'pcm' },
        ],
      },
      required: true,
      defaultValue: 'mp3',
    }),
    speed: Property.Number({
      displayName: 'Speed',
      description: 'Speed multiplier for the synthesized speech (0.75-1.5 recommended)',
      required: false,
      defaultValue: 1.0,
    }),
    numGenerations: Property.Number({
      displayName: 'Number of Generations',
      description: 'Number of audio generations to produce (1-5 recommended)',
      required: false,
      defaultValue: 1,
    }),
    contextText: Property.LongText({
      displayName: 'Context Text',
      description: 'Optional context text to influence speech style and prosody consistency',
      required: false,
    }),
    contextDescription: Property.LongText({
      displayName: 'Context Description',
      description: 'Description for the context text (how it should sound)',
      required: false,
    }),
    trailingSilence: Property.Number({
      displayName: 'Trailing Silence (seconds)',
      description: 'Duration of silence to add at the end of the speech',
      required: false,
    }),
    splitUtterances: Property.Checkbox({
      displayName: 'Split Utterances',
      description: 'Automatically split text into natural-sounding speech segments',
      required: false,
      defaultValue: true,
    }),
  },
  async run(context) {
    const props = context.propsValue;
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    // Single utterance; optional knobs are attached only when meaningful
    // (speed and numGenerations are omitted at their defaults).
    const utterance: any = { text: props.text };
    if (props.voiceDescription) {
      utterance.description = props.voiceDescription;
    }
    if (props.speed && props.speed !== 1.0) {
      utterance.speed = props.speed;
    }
    if (props.trailingSilence) {
      utterance.trailingSilence = props.trailingSilence;
    }

    const request: any = {
      utterances: [utterance],
      format: { type: props.format },
    };
    if (props.numGenerations && props.numGenerations !== 1) {
      request.numGenerations = props.numGenerations;
    }
    if (props.splitUtterances !== undefined) {
      request.splitUtterances = props.splitUtterances;
    }
    if (props.contextText) {
      const contextUtterance: any = { text: props.contextText };
      if (props.contextDescription) {
        contextUtterance.description = props.contextDescription;
      }
      request.context = { utterances: [contextUtterance] };
    }

    try {
      const response = await client.tts.synthesizeJson(request);
      const first = response.generations[0];
      if (!first || !first.audio) {
        throw new Error('No audio generated');
      }
      const primaryAudio = Buffer.from(first.audio, 'base64');

      // Single generation: hand back the written file directly.
      if (response.generations.length === 1) {
        return await context.files.write({
          data: primaryAudio,
          fileName: `tts_${Date.now()}.${props.format}`,
        });
      }

      // Multiple generations: write one file per generation in parallel.
      const allGenerations = await Promise.all(
        response.generations.map(async (gen, index) => ({
          file: await context.files.write({
            data: Buffer.from(gen.audio, 'base64'),
            fileName: `tts_gen_${index + 1}_${Date.now()}.${props.format}`,
          }),
          durationSeconds: gen.duration,
          sizeBytes: gen.fileSize,
        }))
      );

      // NOTE: the first generation is intentionally written twice (once above
      // as gen 1, once here as the primary file) to preserve prior behavior.
      return {
        primaryFile: await context.files.write({
          data: primaryAudio,
          fileName: `tts_primary_${Date.now()}.${props.format}`,
        }),
        format: props.format,
        requestId: response.requestId,
        audioDurationSeconds: first.duration,
        audioSizeBytes: first.fileSize,
        allGenerations: allGenerations,
      };
    } catch (error) {
      throw new Error(`Text-to-speech generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});

View File

@@ -0,0 +1,39 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';
/**
 * Fetches the predictions and current state for a batch expression-measurement
 * job previously started with 'Analyze Emotions from URL'.
 *
 * Improvement: predictions and job details are independent requests, so they
 * are fetched concurrently instead of sequentially.
 */
export const getEmotionResults = createAction({
  name: 'get_emotion_results',
  displayName: 'Get Emotion Results',
  description: 'Retrieve emotion analysis predictions from a completed batch job',
  auth: humeAiAuth,
  props: {
    jobId: Property.ShortText({
      displayName: 'Job ID',
      description: 'The ID of the emotion analysis job to retrieve results for',
      required: true,
    }),
  },
  async run(context) {
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });
    const { jobId } = context.propsValue;
    try {
      // The two calls do not depend on each other; run them in parallel.
      const [predictions, jobDetails] = await Promise.all([
        client.expressionMeasurement.batch.getJobPredictions(jobId),
        client.expressionMeasurement.batch.getJobDetails(jobId),
      ]);
      return {
        jobId: jobId,
        status: jobDetails.state,
        predictions: predictions,
        predictionCount: predictions.length,
      };
    } catch (error) {
      throw new Error(`Failed to retrieve emotion results: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});

View File

@@ -0,0 +1,26 @@
import { PieceAuth } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
/**
 * Secret-text auth for the Hume AI piece. Validates the supplied API key by
 * issuing a cheap authenticated request (listing batch jobs).
 */
export const humeAiAuth = PieceAuth.SecretText({
  displayName: 'API Key',
  description: 'Enter your Hume AI API key from the Hume Portal (https://platform.hume.ai/settings/keys)',
  required: true,
  validate: async ({ auth }) => {
    const client = new HumeClient({
      apiKey: auth as string,
    });
    try {
      // Any authenticated call suffices; an invalid key rejects here.
      await client.expressionMeasurement.batch.listJobs();
    } catch (error) {
      return {
        valid: false,
        error: 'Invalid API key. Please check your Hume AI API key.',
      };
    }
    return { valid: true };
  },
});

View File

@@ -0,0 +1,75 @@
import { createTrigger, TriggerStrategy, PiecePropValueSchema, AppConnectionValueForAuthProperty } from '@activepieces/pieces-framework';
import { DedupeStrategy, Polling, pollingHelper } from '@activepieces/pieces-common';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';
/**
 * Polling definition for the "New Voice" trigger.
 *
 * Fix: the original used DedupeStrategy.TIMEBASED but stamped every voice with
 * Date.now() (and its lastFetchEpochMS filter always returned true), so every
 * existing voice re-fired on every poll. Voices carry no creation timestamp
 * here, so dedupe by voice id via DedupeStrategy.LAST_ITEM instead — only
 * voices not seen on a previous poll trigger the flow.
 */
const polling: Polling<AppConnectionValueForAuthProperty<typeof humeAiAuth>, Record<string, never>> = {
  strategy: DedupeStrategy.LAST_ITEM,
  items: async ({ auth }) => {
    const client = new HumeClient({
      apiKey: auth.secret_text,
    });
    // Collect all custom voices across the paginated response.
    const allVoices: any[] = [];
    const pageableResponse = await client.tts.voices.list({
      provider: 'CUSTOM_VOICE',
    });
    for await (const voice of pageableResponse) {
      allVoices.push(voice);
    }
    // LAST_ITEM dedupes on `id`; payload mirrors the trigger's sampleData.
    return allVoices.map((voice: any) => ({
      id: voice.id,
      data: {
        id: voice.id,
        name: voice.name,
        provider: voice.provider,
        compatibleOctaveModels: voice.compatibleOctaveModels,
      },
    }));
  },
};
/**
 * Polling trigger that fires when a new custom voice appears on the connected
 * Hume AI account. All scheduling/dedupe mechanics are delegated to
 * pollingHelper with the shared `polling` definition above.
 */
export const newVoiceTrigger = createTrigger({
  auth: humeAiAuth,
  name: 'new_voice',
  displayName: 'New Voice',
  description: 'Triggers when a new voice is created in Hume AI',
  props: {},
  type: TriggerStrategy.POLLING,
  sampleData: {
    id: 'voice_123',
    name: 'My Custom Voice',
    provider: 'CUSTOM_VOICE',
    compatibleOctaveModels: ['octave-2'],
  },
  async onEnable(context) {
    const { auth, store, propsValue } = context;
    await pollingHelper.onEnable(polling, { auth, store, propsValue });
  },
  async onDisable(context) {
    const { auth, store, propsValue } = context;
    await pollingHelper.onDisable(polling, { auth, store, propsValue });
  },
  async test(context) {
    return pollingHelper.test(polling, context);
  },
  async run(context) {
    return pollingHelper.poll(polling, context);
  },
});