Add Activepieces integration for workflow automation
- Add Activepieces fork with SmoothSchedule custom piece
- Create integrations app with Activepieces service layer
- Add embed token endpoint for iframe integration
- Create Automations page with embedded workflow builder
- Add sidebar visibility fix for embed mode
- Add list inactive customers endpoint to Public API
- Include SmoothSchedule triggers: event created/updated/cancelled
- Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
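The embed flow described above can be pictured with a short sketch. This is illustrative only: the endpoint path, response shape, and `jwt` query parameter are hypothetical stand-ins, not the committed SmoothSchedule or Activepieces implementation.

```ts
// Hypothetical sketch of the iframe embed flow; endpoint path, response
// fields, and URL scheme are illustrative stand-ins, not the actual API.
async function mountAutomationsBuilder(container: HTMLElement): Promise<void> {
  // Ask the backend's embed-token endpoint to sign a short-lived token
  // for the current tenant.
  const res = await fetch('/api/integrations/embed-token/', { method: 'POST' });
  const { token, instanceUrl } = (await res.json()) as {
    token: string;
    instanceUrl: string;
  };

  // Load the workflow builder pre-authenticated; embed mode is what the
  // sidebar visibility fix above targets.
  const iframe = document.createElement('iframe');
  iframe.src = `${instanceUrl}/embed?jwt=${encodeURIComponent(token)}`;
  iframe.style.width = '100%';
  iframe.style.height = '100%';
  container.appendChild(iframe);
}
```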
activepieces-fork/packages/pieces/community/hume-ai/.eslintrc.json
@@ -0,0 +1,33 @@
{
  "extends": [
    "../../../../.eslintrc.base.json"
  ],
  "ignorePatterns": [
    "!**/*"
  ],
  "overrides": [
    {
      "files": [
        "*.ts",
        "*.tsx",
        "*.js",
        "*.jsx"
      ],
      "rules": {}
    },
    {
      "files": [
        "*.ts",
        "*.tsx"
      ],
      "rules": {}
    },
    {
      "files": [
        "*.js",
        "*.jsx"
      ],
      "rules": {}
    }
  ]
}
activepieces-fork/packages/pieces/community/hume-ai/README.md
@@ -0,0 +1,7 @@
# pieces-hume-ai

This library was generated with [Nx](https://nx.dev).

## Building

Run `nx build pieces-hume-ai` to build the library.
activepieces-fork/packages/pieces/community/hume-ai/bun.lock
@@ -0,0 +1,23 @@
{
  "lockfileVersion": 1,
  "workspaces": {
    "": {
      "name": "@activepieces/piece-hume-ai",
      "dependencies": {
        "hume": "^0.15.6",
        "tslib": "^2.3.0",
      },
    },
  },
  "packages": {
    "hume": ["hume@0.15.6", "", { "dependencies": { "uuid": "9.0.1", "ws": "^8.16.0", "zod": "^3.23.8" } }, "sha512-dO2F+z/6OzW8MyHJlAySuCKsqTJk1krViSEcdlGTd5M/TwFvvoN8QdvTv90HFL2GYUXgUXCquKgRpO0GjVrHFA=="],

    "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],

    "uuid": ["uuid@9.0.1", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA=="],

    "ws": ["ws@8.18.3", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg=="],

    "zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="],
  }
}
activepieces-fork/packages/pieces/community/hume-ai/package.json
@@ -0,0 +1,11 @@
{
  "name": "@activepieces/piece-hume-ai",
  "version": "0.0.2",
  "type": "commonjs",
  "main": "./src/index.js",
  "types": "./src/index.d.ts",
  "dependencies": {
    "hume": "^0.15.6",
    "tslib": "^2.3.0"
  }
}
activepieces-fork/packages/pieces/community/hume-ai/project.json
@@ -0,0 +1,65 @@
{
  "name": "pieces-hume-ai",
  "$schema": "../../../../node_modules/nx/schemas/project-schema.json",
  "sourceRoot": "packages/pieces/community/hume-ai/src",
  "projectType": "library",
  "release": {
    "version": {
      "manifestRootsToUpdate": [
        "dist/{projectRoot}"
      ],
      "currentVersionResolver": "git-tag",
      "fallbackCurrentVersionResolver": "disk"
    }
  },
  "tags": [],
  "targets": {
    "build": {
      "executor": "@nx/js:tsc",
      "outputs": [
        "{options.outputPath}"
      ],
      "options": {
        "outputPath": "dist/packages/pieces/community/hume-ai",
        "tsConfig": "packages/pieces/community/hume-ai/tsconfig.lib.json",
        "packageJson": "packages/pieces/community/hume-ai/package.json",
        "main": "packages/pieces/community/hume-ai/src/index.ts",
        "assets": [
          "packages/pieces/community/hume-ai/*.md",
          {
            "input": "packages/pieces/community/hume-ai/src/i18n",
            "output": "./src/i18n",
            "glob": "**/!(i18n.json)"
          }
        ],
        "buildableProjectDepsInPackageJsonType": "dependencies",
        "updateBuildableProjectDepsInPackageJson": true
      },
      "dependsOn": [
        "prebuild",
        "^build"
      ]
    },
    "nx-release-publish": {
      "options": {
        "packageRoot": "dist/{projectRoot}"
      }
    },
    "prebuild": {
      "dependsOn": [
        "^build"
      ],
      "executor": "nx:run-commands",
      "options": {
        "cwd": "packages/pieces/community/hume-ai",
        "command": "bun install --no-save --silent"
      }
    },
    "lint": {
      "executor": "@nx/eslint:lint",
      "outputs": [
        "{options.outputFile}"
      ]
    }
  }
}
activepieces-fork/packages/pieces/community/hume-ai/src/i18n/i18n.json
@@ -0,0 +1,55 @@
{
  "Enter your Hume AI API key from the Hume Portal (https://platform.hume.ai/settings/keys)": "Enter your Hume AI API key from the Hume Portal (https://platform.hume.ai/settings/keys)",
  "Generate Text to Speech": "Generate Text to Speech",
  "Generate Speech from File": "Generate Speech from File",
  "Create Voice": "Create Voice",
  "Delete Voice": "Delete Voice",
  "Analyze Emotions from URL": "Analyze Emotions from URL",
  "Get Emotion Results": "Get Emotion Results",
  "Convert text to speech using Hume AI's expressive text-to-speech technology": "Convert text to speech using Hume AI's expressive text-to-speech technology",
  "Convert audio file to speech using Hume AI's expressive text-to-speech technology": "Convert audio file to speech using Hume AI's expressive text-to-speech technology",
  "Save a custom voice to your account using a TTS generation ID": "Save a custom voice to your account using a TTS generation ID",
  "Delete a custom voice from your account": "Delete a custom voice from your account",
  "Start a batch emotion analysis job for media files hosted at URLs": "Start a batch emotion analysis job for media files hosted at URLs",
  "Retrieve emotion analysis predictions from a completed batch job": "Retrieve emotion analysis predictions from a completed batch job",
  "Text": "Text",
  "Voice Description": "Voice Description",
  "Audio Format": "Audio Format",
  "Speed": "Speed",
  "Number of Generations": "Number of Generations",
  "Context Text": "Context Text",
  "Context Description": "Context Description",
  "Trailing Silence (seconds)": "Trailing Silence (seconds)",
  "Split Utterances": "Split Utterances",
  "Generation ID": "Generation ID",
  "Voice Name": "Voice Name",
  "Media URLs": "Media URLs",
  "Models Configuration": "Models Configuration",
  "Include Transcription": "Include Transcription",
  "Callback URL": "Callback URL",
  "Email Notification": "Email Notification",
  "Job ID": "Job ID",
  "The text to convert to speech": "The text to convert to speech",
  "Natural language description of how the speech should sound (tone, accent, style, etc.). If no voice is specified, this will generate a dynamic voice.": "Natural language description of how the speech should sound (tone, accent, style, etc.). If no voice is specified, this will generate a dynamic voice.",
  "The output audio file format": "The output audio file format",
  "Speed multiplier for the synthesized speech (0.75-1.5 recommended)": "Speed multiplier for the synthesized speech (0.75-1.5 recommended)",
  "Number of audio generations to produce (1-5 recommended)": "Number of audio generations to produce (1-5 recommended)",
  "Optional context text to influence speech style and prosody consistency": "Optional context text to influence speech style and prosody consistency",
  "Description for the context text (how it should sound)": "Description for the context text (how it should sound)",
  "Duration of silence to add at the end of the speech": "Duration of silence to add at the end of the speech",
  "Automatically split text into natural-sounding speech segments": "Automatically split text into natural-sounding speech segments",
  "The unique ID from a previous TTS generation to save as a custom voice": "The unique ID from a previous TTS generation to save as a custom voice",
  "A descriptive name for your custom voice": "A descriptive name for your custom voice",
  "The name of the custom voice to delete": "The name of the custom voice to delete",
  "URLs to media files to analyze (images, videos, audio). Supports up to 100 URLs or archives (.zip, .tar.gz)": "URLs to media files to analyze (images, videos, audio). Supports up to 100 URLs or archives (.zip, .tar.gz)",
  "Specify which models to use for inference. Leave empty to run all models.": "Specify which models to use for inference. Leave empty to run all models.",
  "Include speech-to-text transcription in the analysis": "Include speech-to-text transcription in the analysis",
  "Optional webhook URL to receive results when the job completes": "Optional webhook URL to receive results when the job completes",
  "Send email notification upon job completion or failure": "Send email notification upon job completion or failure",
  "The ID of the emotion analysis job to retrieve results for": "The ID of the emotion analysis job to retrieve results for",
  "MP3": "MP3",
  "WAV": "WAV",
  "PCM": "PCM",
  "New Voice": "New Voice",
  "Triggers when a new voice is created in Hume AI": "Triggers when a new voice is created in Hume AI"
}
activepieces-fork/packages/pieces/community/hume-ai/src/index.ts
@@ -0,0 +1,28 @@
import { createPiece } from '@activepieces/pieces-framework';
import { humeAiAuth } from './lib/common/auth';
import { generateTextToSpeech } from './lib/actions/generate-text-to-speech';
import { generateSpeechFromFile } from './lib/actions/generate-speech-from-file';
import { createVoice } from './lib/actions/create-voice';
import { deleteVoice } from './lib/actions/delete-voice';
import { analyzeEmotionsFromUrl } from './lib/actions/analyze-emotions-from-url';
import { getEmotionResults } from './lib/actions/get-emotion-results';
import { newVoiceTrigger } from './lib/triggers/new-voice';
import { PieceCategory } from '@activepieces/shared';

export const humeAi = createPiece({
  displayName: 'Hume AI',
  auth: humeAiAuth,
  minimumSupportedRelease: '0.36.1',
  logoUrl: 'https://cdn.activepieces.com/pieces/hume-ai.png',
  authors: ['onyedikachi-david'],
  categories: [PieceCategory.ARTIFICIAL_INTELLIGENCE],
  actions: [
    generateTextToSpeech,
    generateSpeechFromFile,
    createVoice,
    deleteVoice,
    analyzeEmotionsFromUrl,
    getEmotionResults,
  ],
  triggers: [newVoiceTrigger],
});
activepieces-fork/packages/pieces/community/hume-ai/src/lib/actions/analyze-emotions-from-url.ts
@@ -0,0 +1,66 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';

export const analyzeEmotionsFromUrl = createAction({
  name: 'analyze_emotions_from_url',
  displayName: 'Analyze Emotions from URL',
  description: 'Start a batch emotion analysis job for media files hosted at URLs',
  auth: humeAiAuth,
  props: {
    urls: Property.Array({
      displayName: 'Media URLs',
      description: 'URLs to media files to analyze (images, videos, audio). Supports up to 100 URLs or archives (.zip, .tar.gz)',
      required: true,
    }),
    models: Property.Object({
      displayName: 'Models Configuration',
      description: 'Specify which models to use for inference. Leave empty to run all models.',
      required: false,
    }),
    transcription: Property.Checkbox({
      displayName: 'Include Transcription',
      description: 'Include speech-to-text transcription in the analysis',
      required: false,
      defaultValue: false,
    }),
    callbackUrl: Property.ShortText({
      displayName: 'Callback URL',
      description: 'Optional webhook URL to receive results when the job completes',
      required: false,
    }),
    notify: Property.Checkbox({
      displayName: 'Email Notification',
      description: 'Send email notification upon job completion or failure',
      required: false,
      defaultValue: true,
    }),
  },
  async run(context) {
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    const { urls, models, transcription, callbackUrl, notify } = context.propsValue;

    const request: any = {
      urls: urls as string[],
      ...(models && { models }),
      ...(transcription && { transcription: {} }),
      ...(callbackUrl && { callbackUrl }),
      ...(notify !== undefined && { notify }),
    };

    try {
      const response = await client.expressionMeasurement.batch.startInferenceJob(request);

      return {
        jobId: response.jobId,
        status: 'processing',
        message: 'Emotion analysis job started successfully. Use "Get Emotion Results" action to retrieve predictions.',
      };
    } catch (error) {
      throw new Error(`Emotion analysis job failed to start: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});
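Since `startInferenceJob` returns only a job ID, a caller working outside a flow would typically poll for completion before fetching predictions. Below is a minimal sketch using the same SDK methods this commit's actions use; the `state.status` values are an assumption about the Hume batch API and may need adjusting to the SDK version in use.

```ts
import { HumeClient } from 'hume';

// Sketch: poll a batch emotion job until it finishes, then fetch predictions.
// Assumes jobDetails.state.status reports QUEUED / IN_PROGRESS / COMPLETED / FAILED.
async function waitForEmotionResults(client: HumeClient, jobId: string) {
  for (let attempt = 0; attempt < 60; attempt++) {
    const jobDetails = await client.expressionMeasurement.batch.getJobDetails(jobId);
    const status = (jobDetails as any).state?.status;
    if (status === 'COMPLETED') {
      return client.expressionMeasurement.batch.getJobPredictions(jobId);
    }
    if (status === 'FAILED') {
      throw new Error(`Emotion analysis job ${jobId} failed`);
    }
    await new Promise((resolve) => setTimeout(resolve, 5_000)); // wait 5s between polls
  }
  throw new Error(`Timed out waiting for job ${jobId}`);
}
```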
activepieces-fork/packages/pieces/community/hume-ai/src/lib/actions/create-voice.ts
@@ -0,0 +1,46 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';

export const createVoice = createAction({
  name: 'create_voice',
  displayName: 'Create Voice',
  description: 'Save a custom voice to your account using a TTS generation ID',
  auth: humeAiAuth,
  props: {
    generationId: Property.ShortText({
      displayName: 'Generation ID',
      description: 'The unique ID from a previous TTS generation to save as a custom voice',
      required: true,
    }),
    voiceName: Property.ShortText({
      displayName: 'Voice Name',
      description: 'A descriptive name for your custom voice',
      required: true,
    }),
  },
  async run(context) {
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    const { generationId, voiceName } = context.propsValue;

    try {
      const response = await client.tts.voices.create({
        generationId,
        name: voiceName,
      });

      return {
        id: response.id,
        name: response.name,
        provider: response.provider,
        compatibleOctaveModels: response.compatibleOctaveModels,
        generationId: generationId,
      };
    } catch (error) {
      throw new Error(`Voice creation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});
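The `generationId` here ties Create Voice to a prior TTS call. A sketch of the round trip with the same SDK, assuming each entry in `generations` carries a `generationId` (which this action's input description implies):

```ts
import { HumeClient } from 'hume';

// Sketch: synthesize speech once, then persist the voice it used by
// generation ID. Assumes each generation exposes a generationId field.
async function synthesizeAndSaveVoice(client: HumeClient, name: string) {
  const tts = await client.tts.synthesizeJson({
    utterances: [{ text: 'Hello!', description: 'warm, unhurried narrator' }],
  });

  const generationId = tts.generations[0]?.generationId;
  if (!generationId) {
    throw new Error('No generation produced');
  }

  // The same call the Create Voice action wraps.
  return client.tts.voices.create({ generationId, name });
}
```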
activepieces-fork/packages/pieces/community/hume-ai/src/lib/actions/delete-voice.ts
@@ -0,0 +1,38 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';

export const deleteVoice = createAction({
  name: 'delete_voice',
  displayName: 'Delete Voice',
  description: 'Delete a custom voice from your account',
  auth: humeAiAuth,
  props: {
    voiceName: Property.ShortText({
      displayName: 'Voice Name',
      description: 'The name of the custom voice to delete',
      required: true,
    }),
  },
  async run(context) {
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    const { voiceName } = context.propsValue;

    try {
      await client.tts.voices.delete({
        name: voiceName,
      });

      return {
        success: true,
        message: `Voice "${voiceName}" deleted successfully`,
        deletedVoice: voiceName,
      };
    } catch (error) {
      throw new Error(`Voice deletion failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});
activepieces-fork/packages/pieces/community/hume-ai/src/lib/actions/generate-speech-from-file.ts
@@ -0,0 +1,114 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';

export const generateSpeechFromFile = createAction({
  name: 'generate_speech_from_file',
  displayName: 'Generate Speech from File',
  description: 'Convert audio file to speech using Hume AI\'s expressive text-to-speech technology',
  auth: humeAiAuth,
  props: {
    text: Property.LongText({
      displayName: 'Text',
      description: 'The text to convert to speech',
      required: true,
    }),
    voiceDescription: Property.LongText({
      displayName: 'Voice Description',
      description: 'Natural language description of how the speech should sound (tone, accent, style, etc.). If no voice is specified, this will generate a dynamic voice.',
      required: false,
    }),
    format: Property.StaticDropdown({
      displayName: 'Audio Format',
      description: 'The output audio file format',
      options: {
        options: [
          { label: 'MP3', value: 'mp3' },
          { label: 'WAV', value: 'wav' },
          { label: 'PCM', value: 'pcm' },
        ],
      },
      required: true,
      defaultValue: 'mp3',
    }),
    speed: Property.Number({
      displayName: 'Speed',
      description: 'Speed multiplier for the synthesized speech (0.75-1.5 recommended)',
      required: false,
      defaultValue: 1.0,
    }),
    contextText: Property.LongText({
      displayName: 'Context Text',
      description: 'Optional context text to influence speech style and prosody consistency',
      required: false,
    }),
    contextDescription: Property.LongText({
      displayName: 'Context Description',
      description: 'Description for the context text (how it should sound)',
      required: false,
    }),
    trailingSilence: Property.Number({
      displayName: 'Trailing Silence (seconds)',
      description: 'Duration of silence to add at the end of the speech',
      required: false,
    }),
    splitUtterances: Property.Checkbox({
      displayName: 'Split Utterances',
      description: 'Automatically split text into natural-sounding speech segments',
      required: false,
      defaultValue: true,
    }),
  },
  async run(context) {
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    const {
      text,
      voiceDescription,
      format,
      speed,
      contextText,
      contextDescription,
      trailingSilence,
      splitUtterances,
    } = context.propsValue;

    const request: any = {
      utterances: [{
        text,
        ...(voiceDescription && { description: voiceDescription }),
        ...(speed && speed !== 1.0 && { speed }),
        ...(trailingSilence && { trailingSilence }),
      }],
      format: {
        type: format,
      },
      ...(splitUtterances !== undefined && { splitUtterances }),
    };

    if (contextText) {
      request.context = {
        utterances: [{
          text: contextText,
          ...(contextDescription && { description: contextDescription }),
        }],
      };
    }

    try {
      const response = await client.tts.synthesizeFile(request);

      const audioBuffer = await response.arrayBuffer();
      const buffer = Buffer.from(audioBuffer);

      return await context.files.write({
        data: buffer,
        fileName: `speech_${Date.now()}.${format}`,
      });
    } catch (error) {
      throw new Error(`Speech file generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});
activepieces-fork/packages/pieces/community/hume-ai/src/lib/actions/generate-text-to-speech.ts
@@ -0,0 +1,155 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';

export const generateTextToSpeech = createAction({
  name: 'generate_text_to_speech',
  displayName: 'Generate Text to Speech',
  description: "Convert text to speech using Hume AI's expressive text-to-speech technology",
  auth: humeAiAuth,
  props: {
    text: Property.LongText({
      displayName: 'Text',
      description: 'The text to convert to speech',
      required: true,
    }),
    voiceDescription: Property.LongText({
      displayName: 'Voice Description',
      description: 'Natural language description of how the speech should sound (tone, accent, style, etc.). If no voice is specified, this will generate a dynamic voice.',
      required: false,
    }),
    format: Property.StaticDropdown({
      displayName: 'Audio Format',
      description: 'The output audio file format',
      options: {
        options: [
          { label: 'MP3', value: 'mp3' },
          { label: 'WAV', value: 'wav' },
          { label: 'PCM', value: 'pcm' },
        ],
      },
      required: true,
      defaultValue: 'mp3',
    }),
    speed: Property.Number({
      displayName: 'Speed',
      description: 'Speed multiplier for the synthesized speech (0.75-1.5 recommended)',
      required: false,
      defaultValue: 1.0,
    }),
    numGenerations: Property.Number({
      displayName: 'Number of Generations',
      description: 'Number of audio generations to produce (1-5 recommended)',
      required: false,
      defaultValue: 1,
    }),
    contextText: Property.LongText({
      displayName: 'Context Text',
      description: 'Optional context text to influence speech style and prosody consistency',
      required: false,
    }),
    contextDescription: Property.LongText({
      displayName: 'Context Description',
      description: 'Description for the context text (how it should sound)',
      required: false,
    }),
    trailingSilence: Property.Number({
      displayName: 'Trailing Silence (seconds)',
      description: 'Duration of silence to add at the end of the speech',
      required: false,
    }),
    splitUtterances: Property.Checkbox({
      displayName: 'Split Utterances',
      description: 'Automatically split text into natural-sounding speech segments',
      required: false,
      defaultValue: true,
    }),
  },
  async run(context) {
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    const {
      text,
      voiceDescription,
      format,
      speed,
      numGenerations,
      contextText,
      contextDescription,
      trailingSilence,
      splitUtterances,
    } = context.propsValue;

    const request: any = {
      utterances: [{
        text,
        ...(voiceDescription && { description: voiceDescription }),
        ...(speed && speed !== 1.0 && { speed }),
        ...(trailingSilence && { trailingSilence }),
      }],
      format: {
        type: format,
      },
      ...(numGenerations && numGenerations !== 1 && { numGenerations }),
      ...(splitUtterances !== undefined && { splitUtterances }),
    };

    if (contextText) {
      request.context = {
        utterances: [{
          text: contextText,
          ...(contextDescription && { description: contextDescription }),
        }],
      };
    }

    try {
      const response = await client.tts.synthesizeJson(request);

      const firstGeneration = response.generations[0];
      if (!firstGeneration || !firstGeneration.audio) {
        throw new Error('No audio generated');
      }

      const audioBuffer = Buffer.from(firstGeneration.audio, 'base64');

      if (response.generations.length === 1) {
        return await context.files.write({
          data: audioBuffer,
          fileName: `tts_${Date.now()}.${format}`,
        });
      }

      const filePromises = response.generations.map(async (gen, index) => {
        const genBuffer = Buffer.from(gen.audio, 'base64');
        const file = await context.files.write({
          data: genBuffer,
          fileName: `tts_gen_${index + 1}_${Date.now()}.${format}`,
        });
        return {
          file: file,
          durationSeconds: gen.duration,
          sizeBytes: gen.fileSize,
        };
      });

      const allGenerations = await Promise.all(filePromises);

      return {
        primaryFile: await context.files.write({
          data: audioBuffer,
          fileName: `tts_primary_${Date.now()}.${format}`,
        }),
        format: format,
        requestId: response.requestId,
        audioDurationSeconds: firstGeneration.duration,
        audioSizeBytes: firstGeneration.fileSize,
        allGenerations: allGenerations,
      };
    } catch (error) {
      throw new Error(`Text-to-speech generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});
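Note the two different result shapes this action can return: a bare file reference when a single generation is produced, and a structured object when several are. A sketch of both, with field names taken from the code above and one assumption: that `context.files.write` resolves to a string file reference.

```ts
// Result shapes of the Generate Text to Speech action above.
// Assumption: context.files.write() resolves to a string file reference.
type SingleGenerationResult = string;

type MultiGenerationResult = {
  primaryFile: string;
  format: string;
  requestId?: string;
  audioDurationSeconds: number;
  audioSizeBytes: number;
  allGenerations: Array<{
    file: string;
    durationSeconds: number;
    sizeBytes: number;
  }>;
};
```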
activepieces-fork/packages/pieces/community/hume-ai/src/lib/actions/get-emotion-results.ts
@@ -0,0 +1,39 @@
import { Property, createAction } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';

export const getEmotionResults = createAction({
  name: 'get_emotion_results',
  displayName: 'Get Emotion Results',
  description: 'Retrieve emotion analysis predictions from a completed batch job',
  auth: humeAiAuth,
  props: {
    jobId: Property.ShortText({
      displayName: 'Job ID',
      description: 'The ID of the emotion analysis job to retrieve results for',
      required: true,
    }),
  },
  async run(context) {
    const client = new HumeClient({
      apiKey: context.auth.secret_text,
    });

    const { jobId } = context.propsValue;

    try {
      const predictions = await client.expressionMeasurement.batch.getJobPredictions(jobId);

      const jobDetails = await client.expressionMeasurement.batch.getJobDetails(jobId);

      return {
        jobId: jobId,
        status: jobDetails.state,
        predictions: predictions,
        predictionCount: predictions.length,
      };
    } catch (error) {
      throw new Error(`Failed to retrieve emotion results: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  },
});
activepieces-fork/packages/pieces/community/hume-ai/src/lib/common/auth.ts
@@ -0,0 +1,26 @@
import { PieceAuth } from '@activepieces/pieces-framework';
import { HumeClient } from 'hume';

export const humeAiAuth = PieceAuth.SecretText({
  displayName: 'API Key',
  description: 'Enter your Hume AI API key from the Hume Portal (https://platform.hume.ai/settings/keys)',
  required: true,
  validate: async ({ auth }) => {
    try {
      const client = new HumeClient({
        apiKey: auth as string,
      });

      await client.expressionMeasurement.batch.listJobs();

      return {
        valid: true,
      };
    } catch (error) {
      return {
        valid: false,
        error: 'Invalid API key. Please check your Hume AI API key.',
      };
    }
  },
});
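The same credential check the `validate` hook performs can be run standalone when debugging a connection. A minimal sketch, with the API key read from an environment variable for illustration:

```ts
import { HumeClient } from 'hume';

// Standalone version of the validate() smoke test above: any cheap
// authenticated call works, and listing batch jobs has no side effects.
const client = new HumeClient({ apiKey: process.env['HUME_API_KEY'] ?? '' });

client.expressionMeasurement.batch
  .listJobs()
  .then(() => console.log('API key is valid'))
  .catch(() => console.error('Invalid API key. Please check your Hume AI API key.'));
```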
activepieces-fork/packages/pieces/community/hume-ai/src/lib/triggers/new-voice.ts
@@ -0,0 +1,75 @@
import { createTrigger, TriggerStrategy, AppConnectionValueForAuthProperty } from '@activepieces/pieces-framework';
import { DedupeStrategy, Polling, pollingHelper } from '@activepieces/pieces-common';
import { HumeClient } from 'hume';
import { humeAiAuth } from '../common/auth';

const polling: Polling<AppConnectionValueForAuthProperty<typeof humeAiAuth>, Record<string, never>> = {
  // The voices list API exposes no creation timestamp to compare against a
  // poll window, so time-based dedupe has nothing to key on; dedupe on the
  // voice id instead (assumes the API lists newest voices first).
  strategy: DedupeStrategy.LAST_ITEM,
  items: async ({ auth }) => {
    const client = new HumeClient({
      apiKey: auth.secret_text,
    });

    // Collect every custom voice on the account across all pages.
    const allVoices: any[] = [];
    const pageableResponse = await client.tts.voices.list({
      provider: 'CUSTOM_VOICE',
    });

    for await (const voice of pageableResponse) {
      allVoices.push(voice);
    }

    return allVoices.map((voice: any) => ({
      id: voice.id,
      data: {
        id: voice.id,
        name: voice.name,
        provider: voice.provider,
        compatibleOctaveModels: voice.compatibleOctaveModels,
      },
    }));
  },
};

export const newVoiceTrigger = createTrigger({
  auth: humeAiAuth,
  name: 'new_voice',
  displayName: 'New Voice',
  description: 'Triggers when a new voice is created in Hume AI',
  props: {},
  type: TriggerStrategy.POLLING,
  sampleData: {
    id: 'voice_123',
    name: 'My Custom Voice',
    provider: 'CUSTOM_VOICE',
    compatibleOctaveModels: ['octave-2'],
  },
  async onEnable(context) {
    await pollingHelper.onEnable(polling, {
      auth: context.auth,
      store: context.store,
      propsValue: context.propsValue,
    });
  },
  async onDisable(context) {
    await pollingHelper.onDisable(polling, {
      auth: context.auth,
      store: context.store,
      propsValue: context.propsValue,
    });
  },
  async test(context) {
    return await pollingHelper.test(polling, context);
  },
  async run(context) {
    return await pollingHelper.poll(polling, context);
  },
});
activepieces-fork/packages/pieces/community/hume-ai/tsconfig.json
@@ -0,0 +1,20 @@
{
  "extends": "../../../../tsconfig.base.json",
  "compilerOptions": {
    "module": "commonjs",
    "forceConsistentCasingInFileNames": true,
    "strict": true,
    "importHelpers": true,
    "noImplicitOverride": true,
    "noImplicitReturns": true,
    "noFallthroughCasesInSwitch": true,
    "noPropertyAccessFromIndexSignature": true
  },
  "files": [],
  "include": [],
  "references": [
    {
      "path": "./tsconfig.lib.json"
    }
  ]
}
activepieces-fork/packages/pieces/community/hume-ai/tsconfig.lib.json
@@ -0,0 +1,9 @@
{
  "extends": "./tsconfig.json",
  "compilerOptions": {
    "outDir": "../../../../dist/out-tsc",
    "declaration": true,
    "types": ["node"]
  },
  "include": ["src/**/*.ts"]
}