Add Activepieces integration for workflow automation

- Add Activepieces fork with SmoothSchedule custom piece
- Create integrations app with Activepieces service layer
- Add embed token endpoint for iframe integration
- Create Automations page with embedded workflow builder
- Add sidebar visibility fix for embed mode
- Add list inactive customers endpoint to Public API
- Include SmoothSchedule triggers: event created/updated/cancelled
- Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers (see the registration sketch below)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
poduck
2025-12-18 22:59:37 -05:00
parent 9848268d34
commit 3aa7199503
16292 changed files with 1284892 additions and 4708 deletions
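
For context, the SmoothSchedule piece itself is not among the files excerpted below (the excerpt covers only the DataFuel piece from the vendored Activepieces tree), but a piece registration wiring up the triggers and actions listed above would look roughly like the following sketch. Every module path and symbol name in it is an assumption for illustration, not code from this commit:

import { createPiece, PieceAuth } from '@activepieces/pieces-framework';
// Hypothetical trigger/action modules, named after the commit message bullets.
import { eventCreated } from './lib/triggers/event-created';
import { eventUpdated } from './lib/triggers/event-updated';
import { eventCancelled } from './lib/triggers/event-cancelled';
import { createEvent } from './lib/actions/create-event';
import { updateEvent } from './lib/actions/update-event';
import { cancelEvent } from './lib/actions/cancel-event';
import { listResources } from './lib/actions/list-resources';
import { listServices } from './lib/actions/list-services';
import { listCustomers } from './lib/actions/list-customers';

// Assumed: the SmoothSchedule Public API authenticates with a bearer API key,
// mirroring the DataFuel piece shown below.
export const smoothScheduleAuth = PieceAuth.SecretText({
  displayName: 'API Key',
  required: true,
});

export const smoothSchedule = createPiece({
  displayName: 'SmoothSchedule',
  auth: smoothScheduleAuth,
  minimumSupportedRelease: '0.36.1',
  logoUrl: 'https://cdn.activepieces.com/pieces/smoothschedule.png', // placeholder
  authors: [],
  triggers: [eventCreated, eventUpdated, eventCancelled],
  actions: [
    createEvent,
    updateEvent,
    cancelEvent,
    listResources,
    listServices,
    listCustomers,
  ],
});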


@@ -0,0 +1,36 @@
{
  "You can obtain an API key from [Settings](https://app.datafuel.dev/account/api_key).": "You can obtain an API key from [Settings](https://app.datafuel.dev/account/api_key).",
  "Crawl Website": "Crawl Website",
  "Scrape Website": "Scrape Website",
  "Get Scrape Result": "Get Scrape Result",
  "Custom API Call": "Custom API Call",
  "Crawl a website into a markdown format.": "Crawl a website into a markdown format.",
  "Scrape a website into a markdown format.": "Scrape a website into a markdown format.",
  "Retrieves the details about a scrape.": "Retrieves the details about a scrape.",
  "Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
  "URL": "URL",
  "AI Prompt": "AI Prompt",
  "Depth": "Depth",
  "Limit": "Limit",
  "JSON Schema": "JSON Schema",
  "Job ID": "Job ID",
  "AI JSON Result": "AI JSON Result",
  "Markdown Result": "Markdown Result",
  "Method": "Method",
  "Headers": "Headers",
  "Query Parameters": "Query Parameters",
  "Body": "Body",
  "Response is Binary ?": "Response is Binary?",
  "No Error on Failure": "No Error on Failure",
  "Timeout (in seconds)": "Timeout (in seconds)",
  "Prompt to crawl data": "Prompt to crawl data",
  "The depth of the crawl. A depth of 1 means only the first level of links will be scraped.": "The depth of the crawl. A depth of 1 means only the first level of links will be scraped.",
  "The maximum number of pages to scrape": "The maximum number of pages to scrape",
  "JSON schema definition for structured data extraction. Format should follow OpenAI's function calling schema format (https://platform.openai.com/docs/guides/structured-outputs)": "JSON schema definition for structured data extraction. Format should follow OpenAI's function calling schema format (https://platform.openai.com/docs/guides/structured-outputs)",
  "Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
  "Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc.",
  "GET": "GET",
  "POST": "POST",
  "PATCH": "PATCH",
  "PUT": "PUT",
  "DELETE": "DELETE",
  "HEAD": "HEAD"
}


@@ -0,0 +1,30 @@
import { createPiece } from '@activepieces/pieces-framework';
import { dataFuelAuth } from './lib/common/auth';
import { crawlWebsiteAction } from './lib/actions/crawl-website';
import { scrapeWebsiteAction } from './lib/actions/scrape-website';
import { getScrapeAction } from './lib/actions/get-scrape-result';
import { createCustomApiCallAction } from '@activepieces/pieces-common';
import { BASE_URL } from './lib/common/constants';

export const datafuel = createPiece({
  displayName: 'DataFuel',
  auth: dataFuelAuth,
  minimumSupportedRelease: '0.36.1',
  logoUrl: 'https://cdn.activepieces.com/pieces/datafuel.png',
  authors: ['kishanprmr'],
  actions: [
    crawlWebsiteAction,
    scrapeWebsiteAction,
    getScrapeAction,
    createCustomApiCallAction({
      auth: dataFuelAuth,
      baseUrl: () => BASE_URL,
      authMapping: async (auth) => {
        return {
          Authorization: `Bearer ${auth.secret_text}`,
        };
      },
    }),
  ],
  triggers: [],
});


@@ -0,0 +1,84 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { dataFuelAuth } from '../common/auth';
import { AuthenticationType, httpClient, HttpMethod } from '@activepieces/pieces-common';
import { BASE_URL } from '../common/constants';
import { CrawlWebsiteResponse, ListScrapesResponse } from '../common/types';

export const crawlWebsiteAction = createAction({
  name: 'crawl-website',
  auth: dataFuelAuth,
  displayName: 'Crawl Website',
  description: 'Crawl a website into a markdown format.',
  props: {
    url: Property.ShortText({
      displayName: 'URL',
      required: true,
    }),
    prompt: Property.LongText({
      displayName: 'AI Prompt',
      description: 'Prompt to crawl data',
      required: false,
    }),
    depth: Property.Number({
      displayName: 'Depth',
      description:
        'The depth of the crawl. A depth of 1 means only the first level of links will be scraped.',
      required: true,
    }),
    limit: Property.Number({
      displayName: 'Limit',
      description: 'The maximum number of pages to scrape',
      required: true,
    }),
    jsonSchema: Property.Json({
      displayName: 'JSON Schema',
      required: false,
      description: `JSON schema definition for structured data extraction. Format should follow OpenAI's function calling schema format (https://platform.openai.com/docs/guides/structured-outputs)`,
    }),
  },
  async run(context) {
    const { url, prompt, depth, limit, jsonSchema } = context.propsValue;

    // Kick off the crawl job.
    const response = await httpClient.sendRequest<CrawlWebsiteResponse>({
      method: HttpMethod.POST,
      url: BASE_URL + '/crawl',
      authentication: {
        type: AuthenticationType.BEARER_TOKEN,
        token: context.auth.secret_text,
      },
      body: {
        url,
        ai_prompt: prompt,
        json_schema: jsonSchema,
        depth,
        limit,
      },
    });

    // Poll every 5 seconds until the job finishes, for at most 5 minutes.
    const jobId = response.body.job_id;
    const timeoutAt = Date.now() + 5 * 60 * 1000;
    while (Date.now() < timeoutAt) {
      await new Promise((resolve) => setTimeout(resolve, 5000));
      const pollResponse = await httpClient.sendRequest<Array<ListScrapesResponse>>({
        method: HttpMethod.GET,
        url: BASE_URL + '/list_scrapes',
        authentication: {
          type: AuthenticationType.BEARER_TOKEN,
          token: context.auth.secret_text,
        },
        queryParams: {
          job_id: jobId,
          markdown: 'true',
        },
      });
      // Guard against an empty list while the job is still being registered.
      if (pollResponse.body[0]?.job_status === 'finished') return pollResponse.body;
    }
    throw new Error('Crawl job timed out or failed.');
  },
});
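
The five-second poll loop in run() above reappears in scrape-website.ts further down; the two actions could share a helper along these lines. This is a sketch only: the request shape, the 'finished' status, and the response type come from the diff, while the helper's name and module are assumptions:

// Hypothetical shared module, e.g. lib/common/poll.ts.
import { AuthenticationType, httpClient, HttpMethod } from '@activepieces/pieces-common';
import { BASE_URL } from './constants';
import { ListScrapesResponse } from './types';

export async function pollScrapeJob(
  token: string,
  queryParams: Record<string, string>,
  timeoutMs = 5 * 60 * 1000,
  intervalMs = 5000
): Promise<ListScrapesResponse[]> {
  const timeoutAt = Date.now() + timeoutMs;
  while (Date.now() < timeoutAt) {
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
    const pollResponse = await httpClient.sendRequest<Array<ListScrapesResponse>>({
      method: HttpMethod.GET,
      url: BASE_URL + '/list_scrapes',
      authentication: { type: AuthenticationType.BEARER_TOKEN, token },
      queryParams,
    });
    if (pollResponse.body[0]?.job_status === 'finished') return pollResponse.body;
  }
  throw new Error('Job timed out or failed.');
}

Each action's run() would then reduce to the kickoff request plus, for example, return pollScrapeJob(context.auth.secret_text, { job_id: jobId, markdown: 'true' });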


@@ -0,0 +1,46 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { dataFuelAuth } from '../common/auth';
import { AuthenticationType, httpClient, HttpMethod } from '@activepieces/pieces-common';
import { BASE_URL } from '../common/constants';
import { ListScrapesResponse } from '../common/types';

export const getScrapeAction = createAction({
  name: 'get-scrape',
  auth: dataFuelAuth,
  displayName: 'Get Scrape Result',
  description: 'Retrieves the details about a scrape.',
  props: {
    jobId: Property.ShortText({
      displayName: 'Job ID',
      required: true,
    }),
    aiResponse: Property.Checkbox({
      displayName: 'AI JSON Result',
      required: true,
    }),
    markdownResponse: Property.Checkbox({
      displayName: 'Markdown Result',
      required: true,
    }),
  },
  async run(context) {
    const { jobId, aiResponse, markdownResponse } = context.propsValue;
    const response = await httpClient.sendRequest<Array<ListScrapesResponse>>({
      method: HttpMethod.GET,
      url: BASE_URL + '/list_scrapes',
      authentication: {
        type: AuthenticationType.BEARER_TOKEN,
        token: context.auth.secret_text,
      },
      queryParams: {
        job_id: jobId,
        markdown: markdownResponse ? 'true' : 'false',
        ai_response: aiResponse ? 'true' : 'false',
      },
    });
    return response.body;
  },
});


@@ -0,0 +1,70 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { dataFuelAuth } from '../common/auth';
import { AuthenticationType, httpClient, HttpMethod } from '@activepieces/pieces-common';
import { BASE_URL } from '../common/constants';
import { CrawlWebsiteResponse, ListScrapesResponse } from '../common/types';

export const scrapeWebsiteAction = createAction({
  name: 'scrape-website',
  auth: dataFuelAuth,
  displayName: 'Scrape Website',
  description: 'Scrape a website into a markdown format.',
  props: {
    url: Property.ShortText({
      displayName: 'URL',
      required: true,
    }),
    prompt: Property.LongText({
      displayName: 'AI Prompt',
      description: 'Prompt to crawl data',
      required: false,
    }),
    jsonSchema: Property.Json({
      displayName: 'JSON Schema',
      required: false,
      description: `JSON schema definition for structured data extraction. Format should follow OpenAI's function calling schema format (https://platform.openai.com/docs/guides/structured-outputs)`,
    }),
  },
  async run(context) {
    const { url, prompt, jsonSchema } = context.propsValue;

    // Kick off the scrape job.
    const response = await httpClient.sendRequest<CrawlWebsiteResponse>({
      method: HttpMethod.POST,
      url: BASE_URL + '/scrape',
      authentication: {
        type: AuthenticationType.BEARER_TOKEN,
        token: context.auth.secret_text,
      },
      body: {
        url,
        ai_prompt: prompt,
        json_schema: jsonSchema,
      },
    });

    // Poll every 5 seconds until the job finishes, for at most 5 minutes.
    const jobId = response.body.job_id;
    const timeoutAt = Date.now() + 5 * 60 * 1000;
    while (Date.now() < timeoutAt) {
      await new Promise((resolve) => setTimeout(resolve, 5000));
      const pollResponse = await httpClient.sendRequest<Array<ListScrapesResponse>>({
        method: HttpMethod.GET,
        url: BASE_URL + '/list_scrapes',
        authentication: {
          type: AuthenticationType.BEARER_TOKEN,
          token: context.auth.secret_text,
        },
        queryParams: {
          job_id: jobId,
        },
      });
      // Guard against an empty list while the job is still being registered.
      if (pollResponse.body[0]?.job_status === 'finished') return pollResponse.body;
    }
    throw new Error('Scrape job timed out or failed.');
  },
});


@@ -0,0 +1,7 @@
import { PieceAuth } from '@activepieces/pieces-framework';

export const dataFuelAuth = PieceAuth.SecretText({
  displayName: 'API Key',
  description: `You can obtain an API key from [Settings](https://app.datafuel.dev/account/api_key).`,
  required: true,
});
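
PieceAuth.SecretText also accepts a validate callback, so bad keys could be rejected when the connection is created rather than when the first flow runs. A sketch under two assumptions: that GET /list_scrapes tolerates a call without a job_id, and that an invalid key yields a non-2xx response (httpClient throws in that case):

import { PieceAuth } from '@activepieces/pieces-framework';
import { AuthenticationType, httpClient, HttpMethod } from '@activepieces/pieces-common';
import { BASE_URL } from './constants';

export const dataFuelAuthValidated = PieceAuth.SecretText({
  displayName: 'API Key',
  description: `You can obtain an API key from [Settings](https://app.datafuel.dev/account/api_key).`,
  required: true,
  validate: async ({ auth }) => {
    try {
      // Probe an authenticated endpoint; the endpoint choice is an assumption.
      await httpClient.sendRequest({
        method: HttpMethod.GET,
        url: BASE_URL + '/list_scrapes',
        authentication: {
          type: AuthenticationType.BEARER_TOKEN,
          token: auth.secret_text, // same auth shape the actions above use
        },
      });
      return { valid: true };
    } catch {
      return { valid: false, error: 'Invalid API key.' };
    }
  },
});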


@@ -0,0 +1 @@
export const BASE_URL = 'https://api.datafuel.dev'; // no trailing slash: request URLs are built as BASE_URL + '/crawl', '/scrape', '/list_scrapes'


@@ -0,0 +1,8 @@
export type CrawlWebsiteResponse = {
  job_id: string;
};

export type ListScrapesResponse = {
  job_id: string;
  job_status: string;
};
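
The polling loops above only ever check for job_status === 'finished', so a narrower union would make that code self-documenting. A sketch: 'pending' and 'finished' appear in the actions, while 'failed' is an assumed example, and the full status vocabulary of the DataFuel API is not shown in this diff:

// (string & {}) keeps editor autocompletion for the known literals while
// still admitting statuses this diff does not reveal.
export type JobStatus = 'pending' | 'finished' | 'failed' | (string & {});

export type ListScrapesResult = {
  job_id: string;
  job_status: JobStatus;
};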