Add Activepieces integration for workflow automation

- Add Activepieces fork with SmoothSchedule custom piece
- Create integrations app with Activepieces service layer
- Add embed token endpoint for iframe integration
- Create Automations page with embedded workflow builder
- Add sidebar visibility fix for embed mode
- Add list inactive customers endpoint to Public API
- Include SmoothSchedule triggers: event created/updated/cancelled
- Include SmoothSchedule actions: create/update/cancel events, list resources/services/customers

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
poduck
2025-12-18 22:59:37 -05:00
parent 9848268d34
commit 3aa7199503
16292 changed files with 1284892 additions and 4708 deletions

View File

@@ -0,0 +1,18 @@
{
"extends": ["../../../../.eslintrc.base.json"],
"ignorePatterns": ["!**/*"],
"overrides": [
{
"files": ["*.ts", "*.tsx", "*.js", "*.jsx"],
"rules": {}
},
{
"files": ["*.ts", "*.tsx"],
"rules": {}
},
{
"files": ["*.js", "*.jsx"],
"rules": {}
}
]
}

View File

@@ -0,0 +1 @@
assemblyai.env

View File

@@ -0,0 +1,23 @@
# AssemblyAI piece for Activepieces
Use the AssemblyAI piece for Activepieces to use AssemblyAI's models to
[transcribe audio with Speech-to-Text models](https://www.assemblyai.com/products/speech-to-text?utm_source=activepieces), analyze audio with [audio intelligence models](https://www.assemblyai.com/products/speech-understanding?utm_source=activepieces), and build generative AI features on top of audio with LLMs using [LeMUR](https://www.assemblyai.com/blog/lemur/?utm_source=activepieces).
Learn more about this piece:
- [Activepieces Integrations](https://www.activepieces.com/pieces/assemblyai)
- [AssemblyAI Documentation](https://www.assemblyai.com/docs/integrations/activepieces)
This library was built upon the [AssemblyAI JavaScript SDK](https://github.com/AssemblyAI/assemblyai-node-sdk).
## Building
Run `nx build pieces-assemblyai` to build the library.
## Generating props
- Copy `assemblyai.env.sample` to `assemblyai.env`
- In `assemblyai.env`, update the `OPENAPI_SPEC_LOCATION` variable to the path or URL of AssemblyAI's OpenAPI spec
- Run `nx generate-params pieces-assemblyai`
You can find [AssemblyAI's OpenAPI spec on GitHub](https://github.com/AssemblyAI/assemblyai-api-spec/blob/main/openapi.yml).

View File

@@ -0,0 +1 @@
OPENAPI_SPEC_LOCATION=/path/to/spec/openapi.yml

View File

@@ -0,0 +1,19 @@
{
"name": "@activepieces/piece-assemblyai",
"version": "1.0.8",
"scripts": {
"generate": "tsx ./scripts/generateFromSpec.ts"
},
"devDependencies": {
"@readme/openapi-parser": "^2.6.0",
"@types/node": "^20",
"dotenv": "^16.4.5",
"mergician": "^2.0.2",
"title-case": "^4.3.1",
"tsx": "^4.19.0",
"typescript": "^5.6.2"
},
"dependencies": {
"assemblyai": "4.7.0"
}
}

View File

@@ -0,0 +1,87 @@
{
"name": "pieces-assemblyai",
"$schema": "../../../../node_modules/nx/schemas/project-schema.json",
"sourceRoot": "packages/pieces/community/assemblyai/src",
"projectType": "library",
"release": {
"version": {
"currentVersionResolver": "git-tag",
"preserveLocalDependencyProtocols": false,
"manifestRootsToUpdate": [
"dist/{projectRoot}"
]
}
},
"tags": [],
"targets": {
"build": {
"executor": "@nx/js:tsc",
"outputs": [
"{options.outputPath}"
],
"options": {
"outputPath": "dist/packages/pieces/community/assemblyai",
"tsConfig": "packages/pieces/community/assemblyai/tsconfig.lib.json",
"packageJson": "packages/pieces/community/assemblyai/package.json",
"main": "packages/pieces/community/assemblyai/src/index.ts",
"assets": [
"packages/pieces/community/assemblyai/*.md",
{
"input": "packages/pieces/community/assemblyai/src/i18n",
"output": "./src/i18n",
"glob": "**/!(i18n.json)"
}
],
"buildableProjectDepsInPackageJsonType": "dependencies",
"updateBuildableProjectDepsInPackageJson": true
},
"dependsOn": [
"^build",
"prebuild"
]
},
"nx-release-publish": {
"options": {
"packageRoot": "dist/{projectRoot}"
}
},
"lint": {
"executor": "@nx/eslint:lint",
"outputs": [
"{options.outputFile}"
]
},
"_generate-params": {
"executor": "nx:run-commands",
"options": {
"command": "tsx ./scripts/generateFromSpec.ts",
"cwd": "packages/pieces/community/assemblyai"
}
},
"format": {
"executor": "nx:run-commands",
"options": {
"command": "nx format:write --files packages/pieces/community/assemblyai/**/*"
}
},
"generate-params": {
"executor": "nx:run-commands",
"options": {
"commands": [
"nx _generate-params pieces-assemblyai",
"nx format pieces-assemblyai"
]
}
},
"prebuild": {
"executor": "nx:run-commands",
"options": {
"cwd": "packages/pieces/community/assemblyai",
"command": "bun install --no-save --silent"
},
"dependsOn": [
"^build"
]
}
}
}

View File

@@ -0,0 +1,278 @@
/* eslint-disable prefer-const */
/* eslint-disable @typescript-eslint/no-explicit-any */
// Load OPENAPI_SPEC_LOCATION (and any other settings) from the local env file
// before the remaining module-level code runs.
import dotenv from 'dotenv';
dotenv.config({
  path: './assemblyai.env',
});
import { existsSync, mkdirSync, unlinkSync, writeFileSync } from 'fs';
import { dirname, join } from 'path';
import OpenAPIParser from '@readme/openapi-parser';
import { mergician } from 'mergician';
import { titleCase } from 'title-case';

// Output root for the generated `props.ts` files, one folder per action.
const generatedPath = './src/lib/generated/';

// Per-action generator hooks. `props` selects which schema from the spec's
// `components.schemas` section drives that action's generated props.
type Generators = {
  props?: (schemas: any) => any;
};

// Map of action folder name -> generators. Adding an entry creates
// `src/lib/generated/<name>/props.ts` on the next run; an entry without a
// `props` hook causes a previously generated file to be deleted.
const generateMap: Record<string, Generators> = {
  transcribe: {
    props: (schemas) => schemas.TranscriptParams,
  },
  'list-transcript': {
    props: (schemas) => schemas.ListTranscriptParams,
  },
  'lemur-task': {
    props: (schemas) => schemas.LemurTaskParams,
  },
};

// Deep-merge configured to append + dedupe arrays, so the overrides file can
// add to (rather than replace) array-valued schema fields.
const merge = mergician({ appendArrays: true, dedupArrays: true });

// Entry point: parse the upstream spec, overlay the local overrides file,
// dereference all $refs, then (re)generate props for every action in
// `generateMap`.
// NOTE(review): floating async IIFE — a rejection surfaces as an unhandled
// promise rejection rather than a caught error.
(async function () {
  const specLocation = process.env.OPENAPI_SPEC_LOCATION;
  if (!specLocation)
    throw new Error('OPENAPI_SPEC_LOCATION env variable is required');
  // Overlay local overrides on top of the upstream spec; validation is
  // disabled for the overrides file because it is only a partial document.
  let spec: any = merge(
    await OpenAPIParser.parse(specLocation),
    await OpenAPIParser.parse('./scripts/openapi.overrides.yml', {
      validate: {
        schema: false,
        spec: false,
      },
    })
  );
  // Resolve every $ref so downstream code can walk plain nested objects.
  spec = await OpenAPIParser.dereference(spec);
  Object.entries(generateMap).forEach(([paramsName, { props }]) => {
    const parametersPath = join(generatedPath, paramsName, 'props.ts');
    if (props) {
      let propsJson = createPropsFromSchema(props(spec.components.schemas));
      let propsTs = createTs(propsJson);
      const dir = dirname(parametersPath);
      mkdirSync(dir, { recursive: true });
      writeFileSync(parametersPath, propsTs, 'utf-8');
    } else if (existsSync(parametersPath)) {
      // Generator removed: clean up the stale generated file.
      unlinkSync(parametersPath);
    }
  });
})();
/**
 * Recursively translate a dereferenced OpenAPI object schema into a JSON
 * description of Activepieces props, keyed by property name. The result is
 * later rendered to TypeScript source by `createTs`/`createTsProps`.
 *
 * Handles allOf merging, nullable oneOf/anyOf collapsing, and the
 * array-of-objects and array-of-enum special cases; everything else becomes
 * a plain field via `createField`.
 */
function createPropsFromSchema(schema: any): Record<string, any> {
  if (!schema) return {};
  // Deep-clone first: the oneOf/anyOf/type rewrites below mutate the schema.
  schema = structuredClone(schema);
  if (schema.allOf) {
    // allOf: union of props from each sub-schema (later schemas win on
    // duplicate keys via Object.assign).
    const obj = {};
    (schema.allOf as any[]).forEach((schema) =>
      Object.assign(obj, createPropsFromSchema(schema))
    );
    return obj;
  }
  if (!schema.properties) return {};
  const properties: Record<string, unknown> = {};
  const requiredProperties = schema.required ?? [];
  for (let [key, value] of Object.entries(schema.properties) as [
    key: string,
    value: any
  ]) {
    // Vendor extension to skip a property entirely.
    if (value['x-ap-ignore']) {
      continue;
    }
    let nullable = false;
    let label: string;
    if (value['x-label']) {
      label = titleCase(value['x-label']);
    } else {
      // Fall back to title-casing the key, but flag it so the spec can be
      // annotated with a proper x-label.
      label = titleCase(key);
      console.warn(`No x-label found for property ${key}`);
    }
    // grab the value of oneOf with null
    if (value.oneOf) {
      // A `null` branch marks the property nullable (-> never required).
      if (value.oneOf.findIndex((item: any) => item.type === 'null') > -1) {
        nullable = true;
      }
      const options = value.oneOf.filter((item: any) => item.type !== 'null');
      // take first one and hope for the best
      value.oneOf = undefined;
      const option = {
        type: options[0].type,
        enum: options[0].enum,
        format: options[0].format,
        items: options[0].items,
        'x-aai-enum': options[0]['x-aai-enum'],
        anyOf: options[0].anyOf,
      };
      // Flatten the chosen branch into the property itself.
      value = { ...value, ...option };
      if (options[0].properties) {
        value.properties = { ...value.properties, ...options[0].properties };
      }
      if (options[0].required) {
        value.required = (value.required || []).concat(options[0].required);
      }
    }
    if (value.anyOf) {
      const enumAnyOfIndex = value.anyOf.findIndex((item: any) => item.enum);
      // if any string or an enum, use the enum
      if (
        value.anyOf.findIndex((item: any) => item.type === 'string') > -1 &&
        enumAnyOfIndex > -1
      ) {
        value.type = value.anyOf[enumAnyOfIndex].type;
        value.enum = value.anyOf[enumAnyOfIndex].enum;
        if ('x-aai-enum' in value.anyOf[enumAnyOfIndex]) {
          value['x-aai-enum'] = value.anyOf[enumAnyOfIndex]['x-aai-enum'];
        }
        value.anyOf = undefined;
      } else {
        // Any other anyOf combination has no prop equivalent; fail loudly.
        throw new Error(`Unsupported AnyOf found for ${key}`);
      }
    }
    // JSON-Schema style `type: [T, 'null']` — collapse to the single
    // non-null type and record nullability.
    if (Array.isArray(value.type)) {
      if (value.type.indexOf('null') > -1) {
        nullable = true;
      }
      const types = value.type.filter((type: string) => type !== 'null');
      if (types.length === 1) {
        value.type = types[0];
      } else {
        throw new Error(`Multiple types found for ${key}`);
      }
    }
    // Nullable properties are never marked required, even when listed in the
    // schema's `required` array.
    const required = requiredProperties.indexOf(key) > -1 && !nullable;
    // handleArray
    if (value.type === 'array' && value.items.type === 'object') {
      // Array of objects -> Array prop with nested per-item properties.
      properties[key] = {
        displayName: label,
        description: value.description,
        type: 'Array',
        required,
        properties: { ...createPropsFromSchema(value.items) },
      };
    } else if (
      value.items &&
      value.items.type === 'string' &&
      value.items.enum
    ) {
      // Array of enum strings -> multi-select dropdown. The options come
      // from createField, which reads the enum off `value.items`.
      properties[key] = {
        ...createField(key, label, value.items, required),
        displayName: label,
        description: value.description,
        type: 'StaticMultiSelectDropdown',
        required,
      };
    } else {
      // default field
      properties[key] = createField(key, label, value, required);
    }
  }
  return properties;
}
/**
 * Build the JSON description of a single Activepieces prop from an OpenAPI
 * property schema. Enum-valued schemas become StaticDropdown fields whose
 * option labels prefer the `x-aai-enum` override, falling back to a
 * title-cased enum value (with a warning so the spec can be annotated).
 */
function createField(
  key: string,
  label: string,
  value: any,
  required: boolean
) {
  const field: any = {
    displayName: label,
    type: mapType(value),
    required,
    description: value.description,
  };
  // Only booleans carry their schema default through to the prop.
  if (value.type === 'boolean' && 'default' in value) {
    field.defaultValue = value.default;
  }
  if (value.enum) {
    field.type = 'StaticDropdown';
    const enumMeta = value['x-aai-enum'];
    field.options = {
      options: (value.enum as string[]).map((item) => {
        const overrideLabel =
          enumMeta && enumMeta[item] ? enumMeta[item]['label'] : undefined;
        if (overrideLabel) {
          return { label: titleCase(overrideLabel), value: item };
        }
        console.warn(`No x-aai-enum value found for property ${key} ${item}`);
        return { label: titleCase(item), value: item };
      }),
    };
  }
  return field;
}
// Lookup table translating OpenAPI `format`/`type` names into Activepieces
// Property constructor names.
const typeMap: Record<string, string> = {
  date: 'DateTime',
  'date-time': 'DateTime',
  url: 'ShortText',
  string: 'ShortText',
  uuid: 'ShortText',
  object: 'Object',
  number: 'Number',
  integer: 'Number',
  float: 'Number',
  double: 'Number',
  boolean: 'Checkbox',
  array: 'Array',
  json: 'Json',
};

/**
 * Resolve the Activepieces Property name for an OpenAPI property schema.
 * Precedence: explicit `x-ap-type` override, then `format`, then `type`.
 * Throws for schemas with no mapping so unsupported specs fail fast.
 */
function mapType(schema: any) {
  if (schema['x-ap-type']) {
    return schema['x-ap-type'];
  }
  // `format` is more specific than `type`, so it is consulted first.
  for (const candidate of [schema.format, schema.type]) {
    if (candidate != null && candidate in typeMap) {
      return typeMap[candidate];
    }
  }
  throw new Error(`Unsupported type found ${schema.type}`);
}
/**
 * Render the full contents of a generated `props.ts` file: the framework
 * import header plus an exported `props` object built by `createTsProps`.
 */
function createTs(
  propsJson: Record<string, { type: string } & object>
): string {
  const header = `import { Property } from "@activepieces/pieces-framework";
export const props = `;
  return header + createTsProps(propsJson) + ';\n';
}
/**
 * Serialize a props JSON tree to the body of a TypeScript object literal
 * whose values are `Property.<Type>({ ... })` calls. Nested `properties`
 * are rendered recursively and spliced in place of a placeholder marker
 * (the placeholder is written into a shallow copy, so the input tree is
 * never mutated).
 */
function createTsProps(
  propsJson: Record<string, { type: string } & object>
): string {
  const PLACEHOLDER = '[REPLACE_WITH_PROPS]';
  let out = '{\n';
  for (const key of Object.keys(propsJson)) {
    // Pull `type` out — it names the Property constructor, not an option.
    const { type, ...rest } = propsJson[key];
    let nested: string | null = null;
    if ('properties' in rest) {
      nested = createTsProps(
        rest['properties'] as Record<string, { type: string } & object>
      );
      rest.properties = PLACEHOLDER;
    }
    out += ` ${key}: Property.${type}(\n${JSON.stringify(rest, null, ' ')}),\n`;
    if (nested !== null) {
      // Replace the quoted placeholder just appended with the nested source.
      out = out.replace(`"${PLACEHOLDER}"`, nested);
    }
  }
  return out + '\n}';
}

View File

@@ -0,0 +1,31 @@
openapi: 3.1.0
info:
version: 0.0.0
servers: []
security: []
paths: {}
components:
schemas:
TranscriptOptionalParams:
properties:
# use JSON as array with a nested array isn't supported for input in Active Pieces
custom_spelling:
description: |
Customize how words are spelled and formatted using to and from values.
Use a JSON array of objects of the following format:
```
[
{
"from": ["original", "spelling"],
"to": "corrected"
}
]
```
type: json
LemurBaseParams:
properties:
context:
x-ap-type: LongText

View File

@@ -0,0 +1,344 @@
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Transcribe und extrahiere Daten aus Audio mit AssemblyAI's Speech AI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "Sie können Ihren AssemblyAI API-Schlüssel in Ihren AssemblyAI [Kontoeinstellungen](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Datei hochladen",
"Transcribe": "Transcribe",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Transkripte Sätze abrufen",
"Get Transcript Paragraphs": "Transkript-Absätze erhalten",
"Get Transcript Subtitles": "Transkript-Untertitel erhalten",
"Get Transcript Redacted Audio": "Transkript-Redacted Audio erhalten",
"Search words in transcript": "Suchbegriffe im Protokoll",
"List transcripts": "Protokolle auflisten",
"Delete transcript": "Transkript löschen",
"Run a Task using LeMUR": "Eine Aufgabe mit LeMUR ausführen",
"Retrieve LeMUR response": "LeMUR Antwort abrufen",
"Purge LeMUR request data": "LeMUR Daten löschen",
"Custom API Call": "Eigener API-Aufruf",
"Upload a media file to AssemblyAI's servers.": "Eine Mediendatei auf AssemblyAIs Server hochladen.",
"Transcribe an audio or video file using AssemblyAI.": "Eine Audio- oder Videodatei mit AssemblyAI übertragen.",
"Retrieves a transcript by its ID.": "Ruft ein Transkript durch seine ID ab.",
"Retrieve the sentences of the transcript by its ID.": "Abrufen der Sätze des Transkripts durch seine ID.",
"Retrieve the paragraphs of the transcript by its ID.": "Die Absätze des Transkripts durch seine ID abrufen.",
"Export the transcript as SRT or VTT subtitles.": "Exportieren Sie das Transkript als SRT oder VTT Untertitel.",
"Get the result of the redacted audio model.": "Holen Sie sich das Ergebnis des geretteten Audiomodells.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Durchsuchen Sie das Transkript nach Schlüsselwörtern. Sie können nach einzelnen Wörtern, Zahlen oder Phrasen suchen, die bis zu fünf Wörter oder Zahlen enthalten.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Eine Liste der von Ihnen erstellten Transkripte abrufen.\nTranskripte werden von neusten zu ältesten. Die vorherige URL verweist immer auf eine Seite mit älteren Abschriften.",
"Remove the data from the transcript and mark it as deleted.": "Die Daten aus dem Transkript entfernen und als gelöscht markieren.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Benutzen Sie den Task-Endpunkt von LeMUR, um Ihre eigene LLM-Eingabeaufforderung einzugeben.",
"Retrieve a LeMUR response that was previously generated.": "Rufen Sie eine zuvor generierte LeMUR-Antwort ab.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Löscht die Daten für eine zuvor eingereichte LeMUR-Anfrage.\nDie LLM-Antwortdaten sowie alle Kontexte, die in der ursprünglichen Anfrage enthalten sind, werden entfernt.",
"Make a custom API call to a specific endpoint": "Einen benutzerdefinierten API-Aufruf an einen bestimmten Endpunkt machen",
"Audio File": "Audiodatei",
"Audio URL": "Audio-URL",
"Language Code": "Sprachcode",
"Language Detection": "Spracherkennung",
"Language Confidence Threshold": "Grenzwert für Sprachvertrauen",
"Speech Model": "Sprachmodell",
"Punctuate": "Punctuate",
"Format Text": "Text formatieren",
"Disfluencies": "Disfluenzen",
"Dual Channel": "Doppelkanal",
"Webhook URL": "Webhook-URL",
"Webhook Auth Header Name": "Webhook Auth Headername",
"Webhook Auth Header Value": "Webhook Auth Header Wert",
"Key Phrases": "Schlüsselwörter",
"Audio Start From": "Audio Start von",
"Audio End At": "Audio Ende um",
"Word Boost": "Wort-Boost",
"Word Boost Level": "Wort-Boost Level",
"Filter Profanity": "Profanität filtern",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "PII Audio Qualität Redact",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "PII-Substitution Redact",
"Speaker Labels": "Lautsprecher-Labels",
"Speakers Expected": "Lautsprecher erwartet",
"Content Moderation": "Moderation der Inhalte",
"Content Moderation Confidence": "Moderation des Inhalts",
"Topic Detection": "Themenerkennung",
"Custom Spellings": "Eigene Rechtschreibung",
"Sentiment Analysis": "Stimmungsanalyse",
"Auto Chapters": "Auto-Kapitel",
"Entity Detection": "Entitäts-Erkennung",
"Speech Threshold": "Sprach-Grenzwert",
"Enable Summarization": "Zusammenfassung aktivieren",
"Summary Model": "Zusammenfassungsmodell",
"Summary Type": "Übersichts-Typ",
"Enable Custom Topics": "Eigene Themen aktivieren",
"Custom Topics": "Eigene Themen",
"Wait until transcript is ready": "Warten, bis das Transkript fertig ist",
"Throw if transcript status is error": "Werfen, wenn Transkript-Status ein Fehler ist",
"Transcript ID": "Transkript-ID",
"Subtitles Format": "Untertitelformat",
"Number of Characters per Caption": "Anzahl der Zeichen pro Untertitel",
"Download file?": "Datei herunterladen?",
"Download File Name": "Dateiname herunterladen",
"Words": "Wörter",
"Limit": "Limit",
"Status": "Status",
"Created On": "Erstellt am",
"Before ID": "Vor ID",
"After ID": "Nach ID",
"Throttled Only": "Nur gedrosselt",
"Prompt": "Prompt",
"Transcript IDs": "Transkript-ID",
"Input Text": "Input Text",
"Context": "Kontext",
"Final Model": "Letztes Modell",
"Maximum Output Size": "Maximale Ausgabegröße",
"Temperature": "Temperatur",
"LeMUR request ID": "LeMUR Anfrage-ID",
"Method": "Methode",
"Headers": "Kopfzeilen",
"Query Parameters": "Abfrageparameter",
"Body": "Körper",
"Response is Binary ?": "Antwort ist binär?",
"No Error on Failure": "Kein Fehler bei Fehler",
"Timeout (in seconds)": "Timeout (in Sekunden)",
"The File or URL of the audio or video file.": "Die Datei oder URL der Audio- oder Videodatei.",
"The URL of the audio or video file to transcribe.": "Die URL der zu übertragenden Audio- oder Videodatei.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "Das Sprachmodell, das für die Transkription verwendet wird. Wenn null`, wird das \"beste\" Modell verwendet.",
"Enable Automatic Punctuation, can be true or false": "Automatische Satzzeichen, kann wahr oder falsch sein",
"Enable Text Formatting, can be true or false": "Aktiviere Textformatierung, kann wahr oder falsch sein",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Füller-Wörter wie \"umm\" in Ihrer Mediendatei umwandeln; kann wahr oder falsch sein",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "Der Header-Name, der mit dem Transkript versendet werden soll oder fehlgeschlagene Webhook-Anfragen",
"The header value to send back with the transcript completed or failed webhook requests for added security": "Der Header-Wert, der mit dem Transkript vervollständigt oder fehlgeschlagene Webhook-Anfragen für zusätzliche Sicherheit zurückgesendet werden soll",
"Enable Key Phrases, either true or false": "Aktiviere Schlüsselwörter, ob wahr oder falsch",
"The point in time, in milliseconds, to begin transcribing in your media file": "Der Zeitpunkt in Millisekunden, um in Ihrer Mediendatei zu schreiben",
"The point in time, in milliseconds, to stop transcribing in your media file": "Der Zeitpunkt in Millisekunden, um die Umwandlung in Ihre Mediendatei zu beenden",
"The list of custom vocabulary to boost transcription probability for": "Die Liste des benutzerdefinierten Vokabulars zur Erhöhung der Transkriptionswahrscheinlichkeit für",
"How much to boost specified words": "Wie viel bestimmte Wörter erhöhen sollen",
"Filter profanity from the transcribed text, can be true or false": "Profanität aus dem überschriebenen Text filtern, kann wahr oder falsch sein",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "PII aus dem transkribierten Text mit dem Modell Redact PII Redact kann wahr oder falsch sein",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "Der Konfidenzschwellenwert für das Modell der Moderation. Werte müssen zwischen 25 und 100 liegen.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false",
"The model to summarize the transcript": "Das Modell, das das Transkript zusammenfasst",
"The type of summary": "Der Typ der Zusammenfassung",
"Enable custom topics, either true or false": "Eigene Themen aktivieren, ob wahr oder falsch",
"The list of custom topics": "Die Liste der benutzerdefinierten Themen",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Warten Sie, bis der Transkript-Status \"vollständig\" oder \"Fehler\" ist, bevor Sie zum nächsten Schritt weitergehen.",
"If the transcript status is \"error\", throw an error.": "Wenn der Transkript-Status \"Fehler\" ist, werfen Sie einen Fehler.",
"The maximum number of characters per caption": "Die maximale Anzahl an Zeichen pro Beschriftung",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "Der gewünschte Dateiname für die Speicherung in ActivePieces. Stellen Sie sicher, dass die Dateiendung korrekt ist.",
"Keywords to search for": "Suchbegriffe",
"Maximum amount of transcripts to retrieve": "Maximale Anzahl der abzurufenden Transkripte",
"Filter by transcript status": "Nach Transkript-Status filtern",
"Only get transcripts created on this date": "Nur Transkripte, die zu diesem Datum erstellt werden",
"Get transcripts that were created before this transcript ID": "Erhalte Transkripte, die vor dieser Transkript-ID erstellt wurden",
"Get transcripts that were created after this transcript ID": "Erhalte Transkripte, die nach dieser Transkript-ID erstellt wurden",
"Only get throttled transcripts, overrides the status filter": "Nur gedrosselte Transkripte erhalten, überschreibt den Statusfilter",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Ihr Text, der das Modell dazu auffordert, eine gewünschte Ausgabe zu erzeugen, einschließlich aller Kontexte, die Sie an das Modell übergeben möchten.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Kontext, um das Modell zur Verfügung zu stellen. Dies kann ein String oder ein frei formbarer JSON-Wert sein.",
"The model that is used for the final prompt after compression is performed.\n": "The model that is used for the final prompt after compression is performed.\n",
"Max output size in tokens, up to 4000": "Maximale Ausgabegröße in Token, bis 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "Die ID der LeMUR-Anfrage, deren Daten Sie löschen möchten, finden Sie in der Antwort der ursprünglichen Anfrage.",
"Authorization headers are injected automatically from your connection.": "Autorisierungs-Header werden automatisch von Ihrer Verbindung injiziert.",
"Enable for files like PDFs, images, etc..": "Aktivieren für Dateien wie PDFs, Bilder, etc..",
"English (Global)": "Englisch (Global)",
"English (Australian)": "Englisch (australisch)",
"English (British)": "Englisch (britisch)",
"English (US)": "Englisch (USA)",
"Spanish": "Spanisch",
"French": "Französisch",
"German": "Deutsch",
"Italian": "Italienisch",
"Portuguese": "Portugiesisch",
"Dutch": "Niederländisch",
"Afrikaans": "Afrikaner",
"Albanian": "Albanisch",
"Amharic": "Amharic",
"Arabic": "Arabisch",
"Armenian": "Armenisch",
"Assamese": "Assamisch",
"Azerbaijani": "Aserbaidschan",
"Bashkir": "Bashkir",
"Basque": "Baskisch",
"Belarusian": "Belarussisch",
"Bengali": "Bengalisch",
"Bosnian": "Bosnisch",
"Breton": "Breton",
"Bulgarian": "Bulgarisch",
"Burmese": "Burmese",
"Catalan": "Katalanisch",
"Chinese": "Chinesisch",
"Croatian": "Kroatisch",
"Czech": "Tschechisch",
"Danish": "Dänisch",
"Estonian": "Estnisch",
"Faroese": "Faroese",
"Finnish": "Finnisch",
"Galician": "Galizisch",
"Georgian": "Georgisch",
"Greek": "Griechisch",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Hawaiisch",
"Hebrew": "Hebräisch",
"Hindi": "Hannah",
"Hungarian": "Ungarisch",
"Icelandic": "Icelandic",
"Indonesian": "Indonesisch",
"Japanese": "Japanisch",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Koreanisch",
"Lao": "Lao",
"Latin": "Latein",
"Latvian": "Lettisch",
"Lingala": "Lingala",
"Lithuanian": "Litauisch",
"Luxembourgish": "Luxemburgisch",
"Macedonian": "Makedonisch",
"Malagasy": "Malagasy",
"Malay": "Malaiisch",
"Malayalam": "Malayalam",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Marathi",
"Mongolian": "Mongolisch",
"Nepali": "Nepali",
"Norwegian": "Norwegisch",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "Persisch",
"Polish": "Polnisch",
"Romanian": "Rumänisch",
"Russian": "Russisch",
"Sanskrit": "Sanskrit",
"Serbian": "Serbisch",
"Shona": "Senna",
"Sindhi": "Sindhi",
"Sinhala": "Sinhala",
"Slovak": "Slowakisch",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Swahili",
"Swedish": "Schwedisch",
"Tagalog": "Tagalog",
"Tajik": "Tadschikistan",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Thailändisch",
"Tibetan": "Tibetisch",
"Turkish": "Türkisch",
"Turkmen": "Turkmen",
"Ukrainian": "Ukrainische",
"Urdu": "Urdu",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Walisisch",
"Yiddish": "Jiddisch",
"Yoruba": "Yoruba",
"Best": "Beste",
"Nano": "Nano",
"Low": "Niedrig",
"Default": "Standard",
"High": "Hoch",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Kundennummer",
"Banking Information": "Bankinformationen",
"Blood Type": "Bluttyp",
"Credit Card CVV": "Kreditkarten CVV",
"Credit Card Expiration": "Kreditkartenablauf",
"Credit Card Number": "Kreditkartennummer",
"Date": "Datum",
"Date Interval": "Datum-Intervall",
"Date of Birth": "Geburtsdatum",
"Driver's License": "Führerschein",
"Drug": "Drogen",
"Duration": "Dauer",
"Email Address": "E-Mail-Adresse",
"Event": "Ereignis",
"Filename": "Dateiname",
"Gender Sexuality": "Geschlechtssexualität",
"Healthcare Number": "Nummer der Gesundheitsversorgung",
"Injury": "Verletzte",
"IP Address": "IP-Adresse",
"Language": "Sprache",
"Location": "Standort",
"Marital Status": "Ehe Status",
"Medical Condition": "Medizinische Bedingung",
"Medical Process": "Medizinischer Prozess",
"Money Amount": "Geldbetrag",
"Nationality": "Nationalität",
"Number Sequence": "Nummernfolge",
"Occupation": "Besetzung",
"Organization": "Organisation",
"Passport Number": "Reisepassnummer",
"Password": "Kennwort",
"Person Age": "Personenalter",
"Person Name": "Personenname",
"Phone Number": "Telefonnummer",
"Physical Attribute": "Physisches Attribut",
"Political Affiliation": "Politische Zugehörigkeit",
"Religion": "Religion",
"Statistics": "Statistiken",
"Time": "Zeit",
"URL": "URL",
"US Social Security Number": "US-Sozialversicherungsnummer",
"Username": "Benutzername",
"Vehicle ID": "Fahrzeug-ID",
"Zodiac Sign": "Sternzeichen",
"Entity Name": "Entitätsname",
"Hash": "Hash",
"Informative": "Informativ",
"Conversational": "Konversation",
"Catchy": "Einbrüchig",
"Bullets": "Kugeln",
"Bullets Verbose": "Geschlossen",
"Gist": "Gist",
"Headline": "Überschrift",
"Paragraph": "Absatz",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "Warteschlange",
"Processing": "Verarbeitung",
"Completed": "Abgeschlossen",
"Error": "Fehler",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (auf Anthropic)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (auf Anthropic)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (auf Anthropic)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (auf Anthropic)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (auf Anthropic)",
"Claude 2 (on Anthropic)": "Claude 2 (auf Anthropic)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (auf Anthropic)",
"Basic": "Einfache",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (gehostet von AssemblyAI)",
"GET": "ERHALTEN",
"POST": "POST",
"PATCH": "PATCH",
"PUT": "PUT",
"DELETE": "LÖSCHEN",
"HEAD": "HEAD"
}

View File

@@ -0,0 +1,344 @@
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Transcribe y extrae los datos del audio utilizando el Speech AI de AssemblyAI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "Puedes recuperar tu clave de API de AssemblyAI en tu AssemblyAI [Configuración de la cuenta](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Subir archivo",
"Transcribe": "Transcribir",
"Get Transcript": "Obtener transcripción",
"Get Transcript Sentences": "Obtener frases de transcripción",
"Get Transcript Paragraphs": "Obtener párrafos de transcripción",
"Get Transcript Subtitles": "Obtener Subtítulos Transcript",
"Get Transcript Redacted Audio": "Obtener Transcripción de Audio Redactado",
"Search words in transcript": "Buscar palabras en transcripción",
"List transcripts": "Lista de transcripciones",
"Delete transcript": "Eliminar transcripción",
"Run a Task using LeMUR": "Ejecutar una tarea usando LeMUR",
"Retrieve LeMUR response": "Recuperar respuesta de LeMUR",
"Purge LeMUR request data": "Purgar datos de solicitud de LeMUR",
"Custom API Call": "Llamada API personalizada",
"Upload a media file to AssemblyAI's servers.": "Subir un archivo multimedia a los servidores de AssemblyAI.",
"Transcribe an audio or video file using AssemblyAI.": "Transcribir un archivo de audio o vídeo usando AssemblyAI.",
"Retrieves a transcript by its ID.": "Recuperar una transcripción por su ID.",
"Retrieve the sentences of the transcript by its ID.": "Recuperar las frases de la transcripción por su ID.",
"Retrieve the paragraphs of the transcript by its ID.": "Recuperar los párrafos de la transcripción por su ID.",
"Export the transcript as SRT or VTT subtitles.": "Exportar la transcripción como subtítulos SRT o VTT.",
"Get the result of the redacted audio model.": "Obtener el resultado del modelo de audio redactado.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Busque palabras clave a través de la transcripción. Puede buscar palabras individuales, números o frases que contengan hasta cinco palabras o números.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Recupera una lista de transcripciones creadas.\nLas transcripciones se ordenan de más reciente a más antigua. La URL anterior siempre apunta a una página con transcripciones antiguas.",
"Remove the data from the transcript and mark it as deleted.": "Eliminar los datos de la transcripción y marcarlos como eliminados.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Utilice el punto final de la tarea LeMUR para introducir su propia petición LLM.",
"Retrieve a LeMUR response that was previously generated.": "Recuperar una respuesta de LeMUR que se generó previamente.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.",
"Make a custom API call to a specific endpoint": "Hacer una llamada API personalizada a un extremo específico",
"Audio File": "Archivo de audio",
"Audio URL": "URL de audio",
"Language Code": "Código de idioma",
"Language Detection": "Detección de idioma",
"Language Confidence Threshold": "Umbral de confianza del idioma",
"Speech Model": "Modelo de voz",
"Punctuate": "Punctuate",
"Format Text": "Formatear texto",
"Disfluencies": "Disfluencias",
"Dual Channel": "Doble canal",
"Webhook URL": "URL de Webhook",
"Webhook Auth Header Name": "Nombre Auth Header de Webhook",
"Webhook Auth Header Value": "Valor Auth Header de Webhook",
"Key Phrases": "Frases clave",
"Audio Start From": "Inicio de audio desde",
"Audio End At": "Fin de audio en",
"Word Boost": "Optimización de palabras",
"Word Boost Level": "Nivel de impulso de palabras",
"Filter Profanity": "Filtrar blasfemias",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Calidad de audio Redact PII",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Sustitución Redactar PII",
"Speaker Labels": "Etiquetas de altavoz",
"Speakers Expected": "Altavoces esperados",
"Content Moderation": "Moderación de contenido",
"Content Moderation Confidence": "Confianza de moderación de contenido",
"Topic Detection": "Detección del tema",
"Custom Spellings": "Ortografías personalizadas",
"Sentiment Analysis": "Análisis de sentimiento",
"Auto Chapters": "Capítulos automáticos",
"Entity Detection": "Detección de entidades",
"Speech Threshold": "Umbral de voz",
"Enable Summarization": "Activar resumen",
"Summary Model": "Modelo de resumen",
"Summary Type": "Tipo de resumen",
"Enable Custom Topics": "Habilitar temas personalizados",
"Custom Topics": "Temas personalizados",
"Wait until transcript is ready": "Espere hasta que la transcripción esté lista",
"Throw if transcript status is error": "Lanzar si el estado de la transcripción es un error",
"Transcript ID": "ID transcripción",
"Subtitles Format": "Formato de subtítulos",
"Number of Characters per Caption": "Número de caracteres por título",
"Download file?": "¿Descargar archivo?",
"Download File Name": "Descargar nombre de archivo",
"Words": "Palabras",
"Limit": "Límite",
"Status": "Estado",
"Created On": "Creado el",
"Before ID": "Antes de ID",
"After ID": "Después de ID",
"Throttled Only": "Solo lanzados",
"Prompt": "Petición",
"Transcript IDs": "IDs de transcripción",
"Input Text": "Input Text",
"Context": "Contexto",
"Final Model": "Modelo final",
"Maximum Output Size": "Tamaño máximo de salida",
"Temperature": "Temperatura",
"LeMUR request ID": "ID de solicitud de LeMUR",
"Method": "Método",
"Headers": "Encabezados",
"Query Parameters": "Parámetros de consulta",
"Body": "Cuerpo",
"Response is Binary ?": "¿Respuesta es binaria?",
"No Error on Failure": "No hay ningún error en fallo",
"Timeout (in seconds)": "Tiempo de espera (en segundos)",
"The File or URL of the audio or video file.": "El archivo o URL del archivo de audio o vídeo.",
"The URL of the audio or video file to transcribe.": "La URL del archivo de audio o vídeo a transcribir.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "El idioma de tu archivo de audio. Los valores posibles se encuentran en [Idiomas compatibles](https://www.assemblyai.com/docs/concepts/supported-languages).\nEl valor predeterminado es 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Habilitar [Detección automática de idioma](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), tanto verdadero como falso.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "El modelo de voz a usar para la transcripción. Cuando `null`, se utiliza el modelo \"mejor\".",
"Enable Automatic Punctuation, can be true or false": "Habilitar Puntuación Automática, puede ser verdadero o falso",
"Enable Text Formatting, can be true or false": "Habilitar formato de texto, puede ser verdadero o falso",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcribe palabras completas, como \"umm\", en tu archivo multimedia; puede ser verdadero o falso",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Habilitar transcripción [Doble Canal](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) puede ser verdadera o falsa.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "El nombre de la cabecera a ser enviado con la transcripción completada o fallida de solicitudes de webhook",
"The header value to send back with the transcript completed or failed webhook requests for added security": "El valor de la cabecera a enviar con la transcripción completada o fallida de peticiones de seguridad añadida",
"Enable Key Phrases, either true or false": "Habilitar frases de clave, sean verdaderas o falsas",
"The point in time, in milliseconds, to begin transcribing in your media file": "El punto en el tiempo, en milisegundos, para comenzar a transcribir en su archivo multimedia",
"The point in time, in milliseconds, to stop transcribing in your media file": "El punto en el tiempo, en milisegundos, para dejar de transcribir en tu archivo multimedia",
"The list of custom vocabulary to boost transcription probability for": "La lista de vocabulario personalizado para aumentar la probabilidad de transcripción de",
"How much to boost specified words": "Cuánto optimizar las palabras especificadas",
"Filter profanity from the transcribed text, can be true or false": "Filtrar la profanidad del texto transcrito, puede ser verdadero o falso",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redactar PII desde el texto transcrito usando el modelo Redact PII, puede ser verdadero o falso",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Generar una copia del archivo multimedia original con PII hablado \"pitido\", puede ser verdadero o falso. Ver [PII redacción](https://www.assemblyai.com/docs/models/pii-redaction) para más detalles.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Controla el tipo de archivo del audio creado por redact_pii_audio. Actualmente soporta mp3 (por defecto) y wav. Vea [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) para más detalles.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "La lista de políticas de Redacción PII a habilitar. Ver [PII redacción](https://www.assemblyai.com/docs/models/pii-redaction) para más detalles.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "La lógica de reemplazo para PII detectado, puede ser \"entity_type\" o \"hash\". Vea [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) para más detalles.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Habilitar [Diarización del altavoz](https://www.assemblyai.com/docs/models/speaker-diarization), puede ser verdadero o falso",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Indica al modelo de etiqueta del altavoz cuántos altavoces debe intentar identificar, hasta 10. Vea [Diarización del altavoz](https://www.assemblyai.com/docs/models/speaker-diarization) para más detalles.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Habilitar [Moderación de contenido](https://www.assemblyai.com/docs/models/content-moderation), puede ser verdadero o falso",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "El umbral de confianza para el modelo de Moderación de Contenidos. Los valores deben estar entre 25 y 100.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Habilitar [Detección de temas](https://www.assemblyai.com/docs/models/topic-detection), puede ser verdadero o falso",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Personalizar cómo se escriben las palabras y se formatean usando valores a y a partir de ellas.\nUsar un array JSON de objetos del siguiente formato:\n```\n[\n {\n \"from\": [\"original\", \"orto\"],\n \"a\": \"corrigido\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Habilitar [Análisis de Sentimentos](https://www.assemblyai.com/docs/models/sentiment-analysis), puede ser verdadero o falso",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Habilitar [Auto capítulos](https://www.assemblyai.com/docs/models/auto-chapters), puede ser verdadero o falso",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Habilitar [Detección de Entidad](https://www.assemblyai.com/docs/models/entity-detection), puede ser verdadero o falso",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Rechazar archivos de audio que contengan menos de esta fracción de voz.\nValores válidos están en el rango [0, 1] inclusive.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Habilitar [Summarization](https://www.assemblyai.com/docs/models/summarization), puede ser verdadero o falso",
"The model to summarize the transcript": "El modelo para resumir la transcripción",
"The type of summary": "El tipo de resumen",
"Enable custom topics, either true or false": "Habilitar temas personalizados, ya sean verdaderos o falsos",
"The list of custom topics": "La lista de temas personalizados",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Espere hasta que el estado de la transcripción sea \"completado\" o \"error\" antes de pasar al siguiente paso.",
"If the transcript status is \"error\", throw an error.": "Si el estado de la transcripción es \"error\", arroje un error.",
"The maximum number of characters per caption": "El número máximo de caracteres por título",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "El nombre de archivo deseado para almacenar en ActivePieces. Asegúrese de que la extensión del archivo es correcta.",
"Keywords to search for": "Palabras clave a buscar",
"Maximum amount of transcripts to retrieve": "Cantidad máxima de transcripciones a recuperar",
"Filter by transcript status": "Filtrar por estado de transcripción",
"Only get transcripts created on this date": "Sólo obtener transcripciones creadas en esta fecha",
"Get transcripts that were created before this transcript ID": "Obtener transcripciones que fueron creadas antes de este ID de transcripción",
"Get transcripts that were created after this transcript ID": "Obtener transcripciones que fueron creadas después de este ID de transcripción",
"Only get throttled transcripts, overrides the status filter": "Sólo obtener transcripciones de aceleración, anula el filtro de estado",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Su texto para pedir al modelo que produzca una salida deseada, incluyendo cualquier contexto que desee pasar al modelo.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Contexto para proporcionar el modelo. Esto puede ser una cadena o un valor JSON de forma libre.",
"The model that is used for the final prompt after compression is performed.\n": "El modelo que se utiliza para el indicador final después de que se realiza la compresión.\n",
"Max output size in tokens, up to 4000": "Tamaño máximo de salida en tokens, hasta 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "La temperatura a usar para el modelo.\nValores más altos dan como resultado respuestas más creativas, valores más bajos son más conservadores.\nPuede ser cualquier valor entre 0.0 y 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "El ID de la solicitud de LeMUR cuyos datos desea eliminar. Esto se encuentra en la respuesta de la solicitud original.",
"Authorization headers are injected automatically from your connection.": "Las cabeceras de autorización se inyectan automáticamente desde tu conexión.",
"Enable for files like PDFs, images, etc..": "Activar para archivos como PDFs, imágenes, etc.",
"English (Global)": "Inglés (Global)",
"English (Australian)": "Inglés (australiano)",
"English (British)": "Inglés (británico)",
"English (US)": "Inglés (US)",
"Spanish": "Español",
"French": "Francés",
"German": "Alemán",
"Italian": "Italiano",
"Portuguese": "Portugués",
"Dutch": "Holandés",
"Afrikaans": "Afrikaans",
"Albanian": "Albanés",
"Amharic": "Amharic",
"Arabic": "Árabe",
"Armenian": "Armenio",
"Assamese": "Asamés",
"Azerbaijani": "Azerbaiyano",
"Bashkir": "Bashkir",
"Basque": "Vasco",
"Belarusian": "Bielorruso",
"Bengali": "Bengalí",
"Bosnian": "Bosnio",
"Breton": "Breton",
"Bulgarian": "Búlgaro",
"Burmese": "Burmese",
"Catalan": "Catalán",
"Chinese": "Chino",
"Croatian": "Croata",
"Czech": "Checo",
"Danish": "Danés",
"Estonian": "Estonio",
"Faroese": "Faroese",
"Finnish": "Finlandés",
"Galician": "Galiciano",
"Georgian": "Georgiano",
"Greek": "Griego",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Hawaiano",
"Hebrew": "Hebreo",
"Hindi": "Hindú",
"Hungarian": "Húngaro",
"Icelandic": "Icelandic",
"Indonesian": "Indonesio/a",
"Japanese": "Japonés",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Coreano",
"Lao": "Lao",
"Latin": "Latín",
"Latvian": "Letón",
"Lingala": "Lingala",
"Lithuanian": "Lituano",
"Luxembourgish": "luxemburgués",
"Macedonian": "Macedonio",
"Malagasy": "Malagasy",
"Malay": "Malayo",
"Malayalam": "Malayalam",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Maratí",
"Mongolian": "Mongol",
"Nepali": "Nepali",
"Norwegian": "Noruego",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "Persa",
"Polish": "Polaco",
"Romanian": "Rumano",
"Russian": "Ruso",
"Sanskrit": "Sanskrit",
"Serbian": "Serbio",
"Shona": "Shona",
"Sindhi": "Sindhi",
"Sinhala": "Sinhala",
"Slovak": "Eslovaco",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Swahili",
"Swedish": "Sueco",
"Tagalog": "Tagalog",
"Tajik": "Tayiko",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Tailandés",
"Tibetan": "Tibetano",
"Turkish": "Turco",
"Turkmen": "Turkmen",
"Ukrainian": "Ucraniano",
"Urdu": "Urdu",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Galés",
"Yiddish": "Yídica",
"Yoruba": "Yoruba",
"Best": "Mejor",
"Nano": "Nano",
"Low": "Baja",
"Default": "Por defecto",
"High": "Alta",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Número de cuenta",
"Banking Information": "Información bancaria",
"Blood Type": "Tipo de sangre",
"Credit Card CVV": "Tarjeta de crédito CVV",
"Credit Card Expiration": "Caducidad de la tarjeta de crédito",
"Credit Card Number": "Número de tarjeta de crédito",
"Date": "Fecha",
"Date Interval": "Fecha intervalo",
"Date of Birth": "Fecha de nacimiento",
"Driver's License": "Licencia de conducir",
"Drug": "Droga",
"Duration": "Duración",
"Email Address": "Dirección de email",
"Event": "Evento",
"Filename": "Nombre de archivo",
"Gender Sexuality": "Sexualidad de género",
"Healthcare Number": "Número de atención médica",
"Injury": "Lesión",
"IP Address": "Dirección IP",
"Language": "Idioma",
"Location": "Ubicación",
"Marital Status": "Estado civil",
"Medical Condition": "Condición médica",
"Medical Process": "Proceso Médico",
"Money Amount": "Cantidad de dinero",
"Nationality": "Nacionalidad",
"Number Sequence": "Secuencia de número",
"Occupation": "Ocupación",
"Organization": "Organización",
"Passport Number": "Número de pasaporte",
"Password": "Contraseña",
"Person Age": "Edad de la persona",
"Person Name": "Nombre de la persona",
"Phone Number": "Número de teléfono",
"Physical Attribute": "Atributo físico",
"Political Affiliation": "Afiliación política",
"Religion": "Religión",
"Statistics": "Estadísticas",
"Time": "Hora",
"URL": "URL",
"US Social Security Number": "Número de Seguro Social de Estados Unidos",
"Username": "Usuario",
"Vehicle ID": "ID del vehículo",
"Zodiac Sign": "Señal de Zodíaco",
"Entity Name": "Nombre de la entidad",
"Hash": "Hash",
"Informative": "Informativo",
"Conversational": "Conversacional",
"Catchy": "Pegadizo",
"Bullets": "Viñetas",
"Bullets Verbose": "Viñetas detalladas",
"Gist": "Gist",
"Headline": "Título",
"Paragraph": "Párrafo",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "En cola",
"Processing": "Procesando",
"Completed": "Completado",
"Error": "Error",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (en Antrópico)",
"Claude 3 Opus (on Anthropic)": "Opus Claude 3 (en Antrópico)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (en Antrópico)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (en Antrópico)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (en Antrópico)",
"Claude 2 (on Anthropic)": "Claude 2 (en Antrópico)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (en Antrópico)",
"Basic": "Básico",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (Alojado por AssemblyAI)",
"GET": "GET",
"POST": "POST",
"PATCH": "PATCH",
"PUT": "PUT",
"DELETE": "DELETE",
"HEAD": "HEAD"
}

View File

@@ -0,0 +1,344 @@
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Transcrire et extraire des données audio à l'aide de l'IO vocal d'AssemblyAI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "Vous pouvez récupérer votre clé API AssemblyAI dans votre AssemblyAI [Paramètres du compte](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Charger un fichier",
"Transcribe": "Transcrire",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Récupérer les phrases de transcription",
"Get Transcript Paragraphs": "Récupérer les paragraphes de transcription",
"Get Transcript Subtitles": "Obtenir les sous-titres de transcription",
"Get Transcript Redacted Audio": "Récupérer l'audio de la transcription",
"Search words in transcript": "Rechercher des mots dans la transcription",
"List transcripts": "Liste des transcriptions",
"Delete transcript": "Supprimer la transcription",
"Run a Task using LeMUR": "Exécuter une tâche en utilisant LeMUR",
"Retrieve LeMUR response": "Récupérer la réponse LeMUR",
"Purge LeMUR request data": "Purger les données de requête LeMUR",
"Custom API Call": "Appel API personnalisé",
"Upload a media file to AssemblyAI's servers.": "Télécharger un fichier média sur les serveurs d'AssemblyAI.",
"Transcribe an audio or video file using AssemblyAI.": "Transcrivez un fichier audio ou vidéo en utilisant AssemblyAI.",
"Retrieves a transcript by its ID.": "Récupère une transcription par son ID.",
"Retrieve the sentences of the transcript by its ID.": "Récupère les phrases de la transcription par son ID.",
"Retrieve the paragraphs of the transcript by its ID.": "Récupère les paragraphes de la transcription par son ID.",
"Export the transcript as SRT or VTT subtitles.": "Exporter la transcription sous forme de sous-titres SRT ou VTT.",
"Get the result of the redacted audio model.": "Obtenir le résultat du modèle audio redistribué.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Recherchez dans la transcription des mots clés. Vous pouvez rechercher des mots, des chiffres ou des phrases contenant jusqu'à cinq mots ou chiffres.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Récupérer une liste de transcriptions que vous avez créées.\nLes transcriptions sont triées du plus récent au plus ancien. L'URL précédente pointe toujours vers une page avec des transcriptions plus anciennes.",
"Remove the data from the transcript and mark it as deleted.": "Supprimer les données de la transcription et marquer comme supprimées.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Utilisez le point de terminaison LeMUR pour entrer votre propre invite LLM.",
"Retrieve a LeMUR response that was previously generated.": "Récupère une réponse LeMUR qui a été générée précédemment.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.",
"Make a custom API call to a specific endpoint": "Passez un appel API personnalisé à un point de terminaison spécifique",
"Audio File": "Fichier audio",
"Audio URL": "URL audio",
"Language Code": "Code de la langue",
"Language Detection": "Détection de langue",
"Language Confidence Threshold": "Seuil de confiance dans la langue",
"Speech Model": "Modèle vocal",
"Punctuate": "Punctuate",
"Format Text": "Format du texte",
"Disfluencies": "Influences",
"Dual Channel": "Double canal",
"Webhook URL": "URL du Webhook",
"Webhook Auth Header Name": "Nom de l'en-tête d'authentification Webhook",
"Webhook Auth Header Value": "Valeur de l'en-tête d'authentification Webhook",
"Key Phrases": "Phrases clés",
"Audio Start From": "Démarrage audio à partir de",
"Audio End At": "Fin de l'audio à",
"Word Boost": "Boost de mot",
"Word Boost Level": "Niveau de Boost de mot",
"Filter Profanity": "Profanation du filtre",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Redéfinir la qualité audio PII",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Substitution PII Redact",
"Speaker Labels": "Étiquettes de haut-parleurs",
"Speakers Expected": "Haut-parleurs attendus",
"Content Moderation": "Modération du contenu",
"Content Moderation Confidence": "Confiance de la modération de contenu",
"Topic Detection": "Détection du sujet",
"Custom Spellings": "Orthographes personnalisés",
"Sentiment Analysis": "Analyse de la sensibilité",
"Auto Chapters": "Chapitres automatiques",
"Entity Detection": "Détection d'Entité",
"Speech Threshold": "Seuil de parole",
"Enable Summarization": "Activer le résumé",
"Summary Model": "Modèle de résumé",
"Summary Type": "Type de résumé",
"Enable Custom Topics": "Activer les sujets personnalisés",
"Custom Topics": "Sujets personnalisés",
"Wait until transcript is ready": "Attendre que la transcription soit prête",
"Throw if transcript status is error": "Lancer si l'état de la transcription est une erreur",
"Transcript ID": "ID de la transcription",
"Subtitles Format": "Format des sous-titres",
"Number of Characters per Caption": "Nombre de caractères par légende",
"Download file?": "Télécharger le fichier ?",
"Download File Name": "Télécharger le nom du fichier",
"Words": "Mots",
"Limit": "Limite",
"Status": "Statut",
"Created On": "Créé le",
"Before ID": "Avant l'ID",
"After ID": "Après l'ID",
"Throttled Only": "Combustible uniquement",
"Prompt": "Prompt",
"Transcript IDs": "ID de transcription",
"Input Text": "Input Text",
"Context": "Contexte",
"Final Model": "Modèle final",
"Maximum Output Size": "Taille de sortie maximale",
"Temperature": "Température",
"LeMUR request ID": "Identifiant de requête LeMUR",
"Method": "Méthode",
"Headers": "En-têtes",
"Query Parameters": "Paramètres de requête",
"Body": "Corps",
"Response is Binary ?": "La réponse est Binaire ?",
"No Error on Failure": "Aucune erreur en cas d'échec",
"Timeout (in seconds)": "Délai d'attente (en secondes)",
"The File or URL of the audio or video file.": "Le fichier ou l'URL du fichier audio ou vidéo.",
"The URL of the audio or video file to transcribe.": "L'URL du fichier audio ou vidéo à transcrire.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "La langue de votre fichier audio. Les valeurs possibles sont trouvées dans [Langues Supportées](https://www.assemblyai.com/docs/concepts/supported-languages).\nLa valeur par défaut est 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Activer [Détection automatique de la langue](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), vrai ou faux.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "Le seuil de confiance pour la langue automatiquement détectée.\nUne erreur sera retournée si la confiance du langage est inférieure à ce seuil.\nPar défaut à 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "Le modèle de parole à utiliser pour la transcription. Lorsque `null`, le modèle \"meilleur\" est utilisé.",
"Enable Automatic Punctuation, can be true or false": "Activer la ponctuation automatique, peut être vrai ou faux",
"Enable Text Formatting, can be true or false": "Activer le formatage du texte, peut être vrai ou faux",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcrire des mots de remplissage, comme « umm», dans votre fichier multimédia; peut être vrai ou faux",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Activer la transcription [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription), peut être vrai ou faux.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "L'URL vers laquelle nous envoyons des demandes de webhook.\nNous envoyons deux types différents de requêtes de webhook.\nUne requête lorsqu'une transcription est terminée ou échouée, et une requête lorsque l'audio reproduit est prête si redact_pii_audio est activé.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "Le nom de l'en-tête à envoyer avec la transcription complétée ou les requêtes échouées de webhook",
"The header value to send back with the transcript completed or failed webhook requests for added security": "La valeur de l'en-tête à renvoyer avec la transcription complétée ou les requêtes échouées de webhook pour plus de sécurité",
"Enable Key Phrases, either true or false": "Activer les phrases clés, vrai ou faux",
"The point in time, in milliseconds, to begin transcribing in your media file": "Le point dans le temps, en millisecondes, pour commencer à transcrire dans votre fichier média",
"The point in time, in milliseconds, to stop transcribing in your media file": "Le point dans le temps, en millisecondes, d'arrêter de transcrire dans votre fichier média",
"The list of custom vocabulary to boost transcription probability for": "La liste du vocabulaire personnalisé pour augmenter la probabilité de transcription pour",
"How much to boost specified words": "Combien pour booster les mots spécifiés",
"Filter profanity from the transcribed text, can be true or false": "Filtrer la profanité du texte transcrit, peut être vrai ou faux",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redact PII à partir du texte transcrit en utilisant le modèle PII Redact, peut être vrai ou faux",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Générer une copie du fichier multimédia original avec des IPI parlés, peut être vrai ou faux. Voir [Redaction PII](https://www.assemblyai.com/docs/models/pii-redaction) pour plus de détails.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Contrôle le type de fichier de l'audio créé par redact_pii_audio. Actuellement supporte les mp3 (par défaut) et les ondes. Voir [Redaction PII](https://www.assemblyai.com/docs/models/pii-redaction) pour plus de détails.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "La liste des politiques de Redaction PII à activer. Voir [Redaction PII](https://www.assemblyai.com/docs/models/pii-redaction) pour plus de détails.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "La logique de remplacement pour PII détecté peut être \"entity_type\" ou \"hash\". Voir [Redaction PII](https://www.assemblyai.com/docs/models/pii-redaction) pour plus de détails.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Activer la [diarification du haut-parleur] (https://www.assemblyai.com/docs/models/speaker-diarization), peut être vrai ou faux",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Indique au modèle d'étiquette du haut-parleur combien d'enceintes il doit essayer d'identifier, jusqu'à 10. Voir [Diarization du haut-parleur](https://www.assemblyai.com/docs/models/speaker-diarization) pour plus de détails.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Activer [Modération de contenu](https://www.assemblyai.com/docs/models/content-moderation), peut être vrai ou faux",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "Le seuil de confiance du modèle de modération de contenu doit être compris entre 25 et 100.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Activer la [Détection du sujet](https://www.assemblyai.com/docs/models/topic-detection), peut être vrai ou faux",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Personnaliser la façon dont les mots sont orthographiés et formatés en utilisant et à partir des valeurs.\nUtiliser une table JSON d'objets au format suivant :\n```\n[\n {\n \"from\": [\"original\", \"orthographe\"],\n \"à\": \"corrigé\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Activer [Analyse de sens] (https://www.assemblyai.com/docs/models/sentiment-analysis), peut être vrai ou faux",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Activer [Chapitres Automatiques](https://www.assemblyai.com/docs/models/auto-chapters), peut être vrai ou faux",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Activer [Détection d'Entité](https://www.assemblyai.com/docs/models/entity-detection), peut être vrai ou faux",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Rejeter les fichiers audio qui contiennent moins de cette fraction de discours.\nLes valeurs valides sont dans l'intervalle [0, 1] inclusivement.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Activer [Summarization](https://www.assemblyai.com/docs/models/summarization), peut être vrai ou faux",
"The model to summarize the transcript": "Le modèle pour résumer la transcription",
"The type of summary": "Le type de résumé",
"Enable custom topics, either true or false": "Activer les sujets personnalisés, que ce soit vrai ou faux",
"The list of custom topics": "La liste des sujets personnalisés",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Attendez que le statut de la transcription soit \"completed\" ou \"error\" avant de passer à l'étape suivante.",
"If the transcript status is \"error\", throw an error.": "Si l'état de la transcription est \"error\", lancer une erreur.",
"The maximum number of characters per caption": "Le nombre maximum de caractères par légende",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "Le nom de fichier souhaité pour le stockage dans ActivePieces. Assurez-vous que l'extension de fichier est correcte.",
"Keywords to search for": "Mots-clés à rechercher",
"Maximum amount of transcripts to retrieve": "Nombre maximum de transcriptions à récupérer",
"Filter by transcript status": "Filtrer par statut de transcription",
"Only get transcripts created on this date": "N'obtenir que les transcriptions créées à cette date",
"Get transcripts that were created before this transcript ID": "Récupérer les transcriptions qui ont été créées avant cet ID de transcription",
"Get transcripts that were created after this transcript ID": "Récupérer les transcriptions qui ont été créées après cet ID de transcription",
"Only get throttled transcripts, overrides the status filter": "N'obtenir que des transcriptions limitées, remplace le filtre de statut",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Votre texte pour inviter le modèle à produire une sortie souhaitée, y compris tout contexte que vous voulez passer dans le modèle.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Une liste de transcriptions remplies avec du texte. Jusqu'à un maximum de 100 fichiers ou 100 heures, selon la moindre des cas.\nUtilisez soit transcript_ids soit input_text comme entrée dans LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Données de transcription personnalisées formatées. La taille maximale est la limite de contexte du modèle sélectionné, qui est par défaut 100000.\nUtilisez soit transcript_ids soit input_text comme entrée dans LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Contexte pour fournir le modèle. Cela peut être une chaîne de caractères ou une valeur JSON libre.",
"The model that is used for the final prompt after compression is performed.\n": "Le modèle qui est utilisé pour l'invite finale après la compression est effectuée.\n",
"Max output size in tokens, up to 4000": "Taille maximale de sortie en jetons, jusqu'à 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "La température à utiliser pour le modèle.\nDes valeurs plus élevées donnent des réponses plus créatives, des valeurs plus faibles sont plus conservatrices.\nPeut être n'importe quelle valeur comprise entre 0.0 et 1.0 inclusivement.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "L'ID de la requête LeMUR dont vous voulez supprimer les données. Cela se trouve dans la réponse de la requête originale.",
"Authorization headers are injected automatically from your connection.": "Les en-têtes d'autorisation sont injectés automatiquement à partir de votre connexion.",
"Enable for files like PDFs, images, etc..": "Activer pour les fichiers comme les PDFs, les images, etc.",
"English (Global)": "Anglais (Global)",
"English (Australian)": "Anglais (Australien)",
"English (British)": "Anglais (britannique)",
"English (US)": "Anglais (US)",
"Spanish": "Espagnol",
"French": "Français",
"German": "Allemand",
"Italian": "Italien",
"Portuguese": "Portugais",
"Dutch": "Néerlandais",
"Afrikaans": "afrikaans",
"Albanian": "Albanais",
"Amharic": "Amharic",
"Arabic": "Arabe",
"Armenian": "Arménien",
"Assamese": "Assamais",
"Azerbaijani": "Azerbaïdjan",
"Bashkir": "Bashkir",
"Basque": "Basque",
"Belarusian": "Biélorusse",
"Bengali": "Bengalais",
"Bosnian": "Bosniaque",
"Breton": "Breton",
"Bulgarian": "Bulgare",
"Burmese": "Burmese",
"Catalan": "Catalan",
"Chinese": "Chinois",
"Croatian": "Croate",
"Czech": "Tchèque",
"Danish": "Danois",
"Estonian": "estonien",
"Faroese": "Faroese",
"Finnish": "Finlandais",
"Galician": "Galicien",
"Georgian": "Géorgien",
"Greek": "Grecque",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Hawaï",
"Hebrew": "Hébreu",
"Hindi": "Hindi",
"Hungarian": "Hongrois",
"Icelandic": "Icelandic",
"Indonesian": "Indonésien",
"Japanese": "Japonais",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Coréen",
"Lao": "Lao",
"Latin": "Latins",
"Latvian": "Lettonie",
"Lingala": "Lingala",
"Lithuanian": "lituanien",
"Luxembourgish": "luxembourgeois",
"Macedonian": "Macédonien",
"Malagasy": "Malagasy",
"Malay": "Malais",
"Malayalam": "Malaisien",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Marathi",
"Mongolian": "Mongol",
"Nepali": "Nepali",
"Norwegian": "Norvégien",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pachto",
"Persian": "Perse",
"Polish": "Polonais",
"Romanian": "Roumain",
"Russian": "Russe",
"Sanskrit": "Sanskrit",
"Serbian": "Serbe",
"Shona": "Shona",
"Sindhi": "Sindhi",
"Sinhala": "Cinghala",
"Slovak": "Slovaque",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Swahili",
"Swedish": "Suédois",
"Tagalog": "Tagalog",
"Tajik": "Tadjik",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Thaï",
"Tibetan": "Tibétain",
"Turkish": "Turc",
"Turkmen": "Turkmen",
"Ukrainian": "Ukrainien",
"Urdu": "Ourdou",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Gallois",
"Yiddish": "Yiddish",
"Yoruba": "Yoruba",
"Best": "Meilleure",
"Nano": "Nano",
"Low": "Bas",
"Default": "Par défaut",
"High": "Élevé",
"MP3": "Mp3",
"WAV": "WAV",
"Account Number": "Numéro de compte",
"Banking Information": "Informations bancaires",
"Blood Type": "Type de sang",
"Credit Card CVV": "CVV Carte de Crédit",
"Credit Card Expiration": "Expiration de la carte de crédit",
"Credit Card Number": "Numéro de carte de crédit",
"Date": "Date",
"Date Interval": "Intervalle de date",
"Date of Birth": "Date de naissance",
"Driver's License": "Permis de conduire",
"Drug": "Médicament",
"Duration": "Durée",
"Email Address": "Adresse e-mail",
"Event": "Evénement",
"Filename": "Nom du fichier",
"Gender Sexuality": "Sexualité sexuelle",
"Healthcare Number": "Nombre de soins de santé",
"Injury": "Blessure",
"IP Address": "Adresse IP",
"Language": "Langue",
"Location": "Localisation",
"Marital Status": "Statut matrimonial",
"Medical Condition": "Condition médicale",
"Medical Process": "Processus médical",
"Money Amount": "Montant de l'argent",
"Nationality": "Nationalité",
"Number Sequence": "Séquence de nombre",
"Occupation": "Occupation",
"Organization": "Organisation",
"Passport Number": "Numéro de passeport",
"Password": "Password",
"Person Age": "Âge de la personne",
"Person Name": "Nom de la personne",
"Phone Number": "Numéro de téléphone",
"Physical Attribute": "Attribut physique",
"Political Affiliation": "Affiliation politique",
"Religion": "Religion",
"Statistics": "Statistiques",
"Time": "Date et heure",
"URL": "URL",
"US Social Security Number": "Numéro de sécurité sociale des États-Unis",
"Username": "Nom d'utilisateur",
"Vehicle ID": "ID du véhicule",
"Zodiac Sign": "Panneau du zodiaque",
"Entity Name": "Nom de l'entité",
"Hash": "Hachage",
"Informative": "Informatif",
"Conversational": "Conversation",
"Catchy": "Attraper",
"Bullets": "Balles",
"Bullets Verbose": "Balles à balles",
"Gist": "Gist",
"Headline": "Titre",
"Paragraph": "Paragraphe",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "En file d'attente",
"Processing": "Traitement en cours",
"Completed": "Terminé",
"Error": "Error",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (sur Anthropique)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (sur Anthropique)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (sur Anthropique)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (sur Anthropique)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (sur Anthropique)",
"Claude 2 (on Anthropic)": "Claude 2 (sur Anthropique)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (sur Anthropique)",
"Basic": "Basique",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (hébergé par AssemblyAI)",
"GET": "OBTENIR",
"POST": "POSTER",
"PATCH": "PATCH",
"PUT": "EFFACER",
"DELETE": "SUPPRIMER",
"HEAD": "TÊTE"
}

View File

@@ -0,0 +1,344 @@
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "AssemblyAIのSpeech AIを使用して、オーディオを文字起こししてデータを抽出します。",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces)からAssemblyAI APIキーを取得できます。",
"Upload File": "ファイルをアップロード",
"Transcribe": "変換する",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "トランスクリプトの文章を取得",
"Get Transcript Paragraphs": "トランスクリプトの段落の取得",
"Get Transcript Subtitles": "トランスクリプトの字幕を取得",
"Get Transcript Redacted Audio": "トランスクリプトで編集されたオーディオを取得",
"Search words in transcript": "トランスクリプトで単語を検索",
"List transcripts": "トランスクリプトの一覧",
"Delete transcript": "トランスクリプトを削除",
"Run a Task using LeMUR": "LeMURを使用してタスクを実行",
"Retrieve LeMUR response": "LeMUR応答を取得",
"Purge LeMUR request data": "LeMURリクエストデータを削除",
"Custom API Call": "カスタムAPI呼び出し",
"Upload a media file to AssemblyAI's servers.": "AssemblyAIのサーバーにメディアファイルをアップロードします。",
"Transcribe an audio or video file using AssemblyAI.": "AssemblyAI を使用してオーディオファイルまたはビデオファイルを転記します。",
"Retrieves a transcript by its ID.": "IDからトランスクリプトを取得します。",
"Retrieve the sentences of the transcript by its ID.": "IDで記録の文章を取得します。",
"Retrieve the paragraphs of the transcript by its ID.": "記録の段落を ID で取得します。",
"Export the transcript as SRT or VTT subtitles.": "トランスクリプトを SRT または VTT 字幕としてエクスポートします。",
"Get the result of the redacted audio model.": "編集したオーディオモデルの結果を取得します。",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "キーワードを検索します。最大5つの単語または数字を含む個々の単語、数字、またはフレーズを検索できます。",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "作成したトランスクリプトのリストを取得します。\nトランスクリプトは最新から古いものにソートされます。 以前の URL は常に古いトランスクリプトのあるページを指しています。",
"Remove the data from the transcript and mark it as deleted.": "トランスクリプトからデータを削除し、削除としてマークします。",
"Use the LeMUR task endpoint to input your own LLM prompt.": "LeMUR タスク エンドポイントを使用して、独自の LLM プロンプトを入力します。",
"Retrieve a LeMUR response that was previously generated.": "以前に生成された LeMUR 応答を取得します。",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "以前に送信された LeMUR 要求のデータを削除します。\nLLMレスポンスデータと、元のリクエストで提供されたコンテキストは削除されます。",
"Make a custom API call to a specific endpoint": "特定のエンドポイントへのカスタム API コールを実行します。",
"Audio File": "オーディオ ファイル",
"Audio URL": "オーディオ URL",
"Language Code": "言語コード",
"Language Detection": "言語検出",
"Language Confidence Threshold": "言語信頼度のしきい値",
"Speech Model": "音声モデル",
"Punctuate": "Punctuate",
"Format Text": "テキストの書式設定",
"Disfluencies": "言い淀み",
"Dual Channel": "デュアルチャンネル",
"Webhook URL": "Webhook URL",
"Webhook Auth Header Name": "Webhook認証ヘッダー名",
"Webhook Auth Header Value": "Webhook認証ヘッダーの値",
"Key Phrases": "キーフレーズ",
"Audio Start From": "オーディオの開始元",
"Audio End At": "オーディオ終了時刻",
"Word Boost": "ワードブースト",
"Word Boost Level": "単語ブーストレベル",
"Filter Profanity": "不適切なフィルター",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Redact PII オーディオ品質",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Redact PII 置換",
"Speaker Labels": "スピーカーラベル",
"Speakers Expected": "予想されるスピーカー",
"Content Moderation": "コンテンツモデレーション",
"Content Moderation Confidence": "コンテンツモデレーションの信頼性",
"Topic Detection": "トピックの検出",
"Custom Spellings": "カスタムスペルチェック",
"Sentiment Analysis": "感情分析",
"Auto Chapters": "自動章",
"Entity Detection": "エンティティの検出",
"Speech Threshold": "音声のしきい値",
"Enable Summarization": "概要を有効にする",
"Summary Model": "概要モデル",
"Summary Type": "サマリータイプ",
"Enable Custom Topics": "カスタムトピックを有効にする",
"Custom Topics": "カスタムトピック",
"Wait until transcript is ready": "トランスクリプトの準備ができるまで待つ",
"Throw if transcript status is error": "トランスクリプトの状態がエラーの場合はスローする",
"Transcript ID": "トランスクリプトID",
"Subtitles Format": "字幕フォーマット",
"Number of Characters per Caption": "図表番号ごとの文字数",
"Download file?": "ファイルのダウンロード?",
"Download File Name": "ファイル名をダウンロード",
"Words": "単語",
"Limit": "制限",
"Status": "ステータス",
"Created On": "作成日時",
"Before ID": "IDの前",
"After ID": "IDの後",
"Throttled Only": "スロットルのみ",
"Prompt": "Prompt",
"Transcript IDs": "トランスクリプトID",
"Input Text": "Input Text",
"Context": "コンテキスト",
"Final Model": "最終モデル",
"Maximum Output Size": "最大出力サイズ",
"Temperature": "温度",
"LeMUR request ID": "LeMURリクエストID",
"Method": "方法",
"Headers": "ヘッダー",
"Query Parameters": "クエリパラメータ",
"Body": "本文",
"Response is Binary ?": "応答はバイナリですか?",
"No Error on Failure": "失敗時にエラーはありません",
"Timeout (in seconds)": "タイムアウト(秒)",
"The File or URL of the audio or video file.": "オーディオまたはビデオファイルのファイルまたはURL。",
"The URL of the audio or video file to transcribe.": "転記するオーディオファイルまたはビデオファイルのURL。",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "オーディオファイルの言語。使用可能な値は [サポート言語](https://www.assemblyai.com/docs/concepts/supported-languages) にあります。\nデフォルト値は 'en_us' です。\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "[自動言語検出](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection)をtrueまたはfalseにします。",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "自動検出された言語の自信閾値。\n言語の自信がしきい値を下回った場合、エラーが返されます。\nデフォルトは0です。\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "転写に使用する音声モデル。`null`の場合、\"ベスト\"モデルが使用されます。",
"Enable Automatic Punctuation, can be true or false": "自動句読点を有効にします。trueまたはfalseにすることができます",
"Enable Text Formatting, can be true or false": "テキスト書式を有効にします。trueまたはfalseにすることができます。",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "メディアファイルに「umm」のようなフィラーを転記します。trueまたはfalseにすることができます。",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "[Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) を有効にすると、true または false にすることができます。",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "Webhookリクエストが完了または失敗した場合に送信されるヘッダー名",
"The header value to send back with the transcript completed or failed webhook requests for added security": "セキュリティ追加のWebフックリクエストが完了または失敗した場合に送信するヘッダーの値",
"Enable Key Phrases, either true or false": "キーフレーズを有効にする",
"The point in time, in milliseconds, to begin transcribing in your media file": "メディアファイルの転記を開始する時間 (ミリ秒単位)",
"The point in time, in milliseconds, to stop transcribing in your media file": "メディアファイルの転記を停止する時間 (ミリ秒単位)",
"The list of custom vocabulary to boost transcription probability for": "転写確率を向上させるためのカスタム語彙リスト",
"How much to boost specified words": "指定した単語をブーストする金額",
"Filter profanity from the transcribed text, can be true or false": "書き換えられたテキストからの俗語をフィルタリングします。trueまたはfalseにすることができます。",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redact PIIモデルを使用して書き換えられたテキストからのRedact PIIはtrueまたはfalseにすることができます。",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "\"beeped\"と話されたPIIを持つ元のメディアファイルのコピーを生成し、trueまたはfalseにすることができます。 詳細は [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) を参照してください。",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "redact_pii_audioによって作成されたオーディオのファイルタイプを制御します。現在 mp3 (デフォルト) と wave をサポートしています。詳細は [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) を参照してください。",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "有効にする PII Redaction ポリシーの一覧です。詳細は [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) を参照してください。",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "検出された PII の置換ロジックは \"entity_type\" か \"hash\" になります。詳細は [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) を参照してください。",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "[Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization)を有効にします。trueまたはfalseにすることができます",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "[Content Moderation](https://www.assemblyai.com/docs/models/content-moderation)を有効にします。trueまたはfalseにすることができます",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "コンテンツモデレーションモデルの信頼度しきい値。値は25から100の間でなければなりません。",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "[トピック検出](https://www.assemblyai.com/docs/models/topic-detection)を有効にします。trueまたはfalseにできます",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "[感情分析](https://www.assemblyai.com/docs/models/sentiment-analysis)を有効にします。trueまたはfalseにすることができます",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "[Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) を有効にします。true または false にすることができます。",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "[Entity Detection](https://www.assemblyai.com/docs/models/entity-detection)を有効にします。trueまたはfalseにすることができます",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "スピーチの分数未満のオーディオファイルを拒否します。\n有効な値は範囲[0, 1]を含みます。\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "[Summarization]を有効にする (https://www.assemblyai.com/docs/models/summarization), true または false にすることができます",
"The model to summarize the transcript": "写本をまとめたモデル",
"The type of summary": "概要の種類",
"Enable custom topics, either true or false": "カスタムトピックを有効にする",
"The list of custom topics": "カスタムトピックのリスト",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "次のステップに進む前に、トランスクリプトの状態が「完了」または「エラー」になるまで待ちます。",
"If the transcript status is \"error\", throw an error.": "トランスクリプトの状態が \"error\" の場合は、エラーをスローします。",
"The maximum number of characters per caption": "キャプションあたりの最大文字数",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "ActivePiecesに保存するためのファイル名です。ファイル拡張子が正しいことを確認してください。",
"Keywords to search for": "検索するキーワード",
"Maximum amount of transcripts to retrieve": "取得するトランスクリプトの最大量",
"Filter by transcript status": "トランスクリプトの状態でフィルター",
"Only get transcripts created on this date": "この日付に作成されたトランスクリプトのみ取得",
"Get transcripts that were created before this transcript ID": "このトランスクリプトIDの前に作成されたトランスクリプトを取得",
"Get transcripts that were created after this transcript ID": "このトランスクリプトIDの後に作成されたトランスクリプトを取得",
"Only get throttled transcripts, overrides the status filter": "ステータスフィルタをオーバーライドし、スロットル化されたトランスクリプトのみ取得",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "モデルに渡したいコンテキストを含め、望ましい出力を生成するように求めるテキストを入力します。",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "テキスト付きの完了したトランスクリプトのリスト。最大100ファイルまたは100時間のいずれか低い方まで。\nLeMURの入力としてtranscript_idsまたはinput_textのいずれかを使用します。\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "カスタムフォーマットされたトランスクリプトデータ。最大サイズは選択したモデルのコンテキスト制限です。デフォルトは100000です。\nLeMURの入力としてtranscript_idsまたはinput_textのいずれかを使用します。\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "モデルを提供するコンテキスト。これは文字列または自由形式の JSON 値にすることができます。",
"The model that is used for the final prompt after compression is performed.\n": "圧縮後の最終プロンプトに使用されるモデル。\n",
"Max output size in tokens, up to 4000": "トークンの最大出力サイズ最大4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "データを削除したいLeMURリクエストのID。元のリクエストの応答で確認できます。",
"Authorization headers are injected automatically from your connection.": "認証ヘッダは接続から自動的に注入されます。",
"Enable for files like PDFs, images, etc..": "PDF、画像などのファイルを有効にします。",
"English (Global)": "英語(世界)",
"English (Australian)": "英語 (オーストラリア語)",
"English (British)": "英語 (イギリス)",
"English (US)": "英語 (米国)",
"Spanish": "スペイン語",
"French": "フランス語",
"German": "ドイツ語",
"Italian": "イタリア語",
"Portuguese": "ポルトガル語",
"Dutch": "オランダ語",
"Afrikaans": "アフリカーンス語",
"Albanian": "アルバニア語",
"Amharic": "Amharic",
"Arabic": "アラビア語",
"Armenian": "アルメニア語",
"Assamese": "アッサム語",
"Azerbaijani": "アゼルバイジャン語",
"Bashkir": "Bashkir",
"Basque": "バスク語",
"Belarusian": "ベラルーシ語",
"Bengali": "ベンガル語",
"Bosnian": "ボスニア語",
"Breton": "Breton",
"Bulgarian": "ブルガリア語",
"Burmese": "Burmese",
"Catalan": "カタロニア語",
"Chinese": "中国語",
"Croatian": "クロアチア語",
"Czech": "チェコ語",
"Danish": "デンマーク語",
"Estonian": "エストニア語",
"Faroese": "Faroese",
"Finnish": "フィンランド語",
"Galician": "ガリシア語",
"Georgian": "グルジア語",
"Greek": "ギリシア語",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "ハワイ語",
"Hebrew": "ヘブライ語",
"Hindi": "ヒンディー語",
"Hungarian": "ハンガリー語",
"Icelandic": "Icelandic",
"Indonesian": "インドネシア語",
"Japanese": "日本語",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Korean",
"Lao": "Lao",
"Latin": "Latin",
"Latvian": "ラトビア語",
"Lingala": "Lingala",
"Lithuanian": "リトアニア語",
"Luxembourgish": "ルクセンブルク語",
"Macedonian": "マケドニア語",
"Malagasy": "Malagasy",
"Malay": "マレー語",
"Malayalam": "マラヤラム語",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Marathi",
"Mongolian": "モンゴル語",
"Nepali": "Nepali",
"Norwegian": "ノルウェー語",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "ペルシャ語",
"Polish": "ポーランド語",
"Romanian": "ルーマニア語",
"Russian": "ロシア語",
"Sanskrit": "Sanskrit",
"Serbian": "セルビア語",
"Shona": "ショナ語",
"Sindhi": "Sindhi",
"Sinhala": "シンハラ語",
"Slovak": "スロバキア語",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "スワヒリ語",
"Swedish": "スウェーデン語",
"Tagalog": "Tagalog",
"Tajik": "タジク語",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "タイ語",
"Tibetan": "チベット語",
"Turkish": "トルコ語",
"Turkmen": "Turkmen",
"Ukrainian": "ウクライナ語",
"Urdu": "ウルドゥー語",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "ウェールズ語",
"Yiddish": "イディッシュ",
"Yoruba": "Yoruba",
"Best": "ベスト",
"Nano": "Nano",
"Low": "低い",
"Default": "デフォルト",
"High": "高い",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "口座番号",
"Banking Information": "銀行口座情報",
"Blood Type": "血液タイプ",
"Credit Card CVV": "クレジットカードCVV",
"Credit Card Expiration": "クレジットカードの有効期限",
"Credit Card Number": "クレジットカード番号",
"Date": "日付",
"Date Interval": "日付間隔",
"Date of Birth": "誕生日",
"Driver's License": "ドライバーライセンス",
"Drug": "薬物",
"Duration": "期間",
"Email Address": "メールアドレス",
"Event": "イベント",
"Filename": "ファイル名",
"Gender Sexuality": "性別性",
"Healthcare Number": "医療番号",
"Injury": "傷害者",
"IP Address": "IP アドレス",
"Language": "言語",
"Location": "場所",
"Marital Status": "婚姻状況",
"Medical Condition": "病状",
"Medical Process": "医療プロセス",
"Money Amount": "金額",
"Nationality": "国籍:",
"Number Sequence": "シーケンス番号",
"Occupation": "職業",
"Organization": "組織",
"Passport Number": "パスポート番号",
"Password": "Password",
"Person Age": "年齢:",
"Person Name": "担当者名",
"Phone Number": "電話番号",
"Physical Attribute": "物理属性",
"Political Affiliation": "政治的関連",
"Religion": "宗教",
"Statistics": "統計情報",
"Time": "時刻",
"URL": "URL",
"US Social Security Number": "米国の社会保障番号",
"Username": "ユーザー名",
"Vehicle ID": "車両ID",
"Zodiac Sign": "星座記号",
"Entity Name": "エンティティ名",
"Hash": "ハッシュ",
"Informative": "参考情報",
"Conversational": "会話",
"Catchy": "キャッチーな",
"Bullets": "箇条書き:",
"Bullets Verbose": "箇条書きの詳細設定",
"Gist": "Gist",
"Headline": "見出し",
"Paragraph": "段落",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "キューに入りました",
"Processing": "処理中",
"Completed": "完了",
"Error": "Error",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (Anthropic)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (Anthropic)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (Anthropic)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (Anthropic)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (Anthropic)",
"Claude 2 (on Anthropic)": "Claude 2 (Anthropic)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (Anthropic)",
"Basic": "基本",
"Mistral 7B (Hosted by AssemblyAI)": "ミストラル7BAssemblyAI主催",
"GET": "GET",
"POST": "POST",
"PATCH": "PATCH",
"PUT": "PUT",
"DELETE": "DELETE",
"HEAD": "HEAD"
}

View File

@@ -0,0 +1,344 @@
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Gegevens uit audio schrijven en extraheren met behulp van de AssemblyAI's spraak AI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "Je kan je AssemblyAI API-sleutel ophalen in je AssemblyAI [Accountinstellingen](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Bestand uploaden",
"Transcribe": "Transcribe",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Transcript strings ophalen",
"Get Transcript Paragraphs": "Transcriptonderdelen ophalen",
"Get Transcript Subtitles": "Transcriptondertitels ophalen",
"Get Transcript Redacted Audio": "Transcript audio Redacted krijgen",
"Search words in transcript": "Zoek woorden in transcript",
"List transcripts": "Lijst transcripten",
"Delete transcript": "Verwijder transcript",
"Run a Task using LeMUR": "Voer een taak uit met LeMUR",
"Retrieve LeMUR response": "Haal LeMUR reactie op",
"Purge LeMUR request data": "LeMUR aanvraaggegevens verwijderen",
"Custom API Call": "Custom API Call",
"Upload a media file to AssemblyAI's servers.": "Upload een mediabestand naar de servers van AssemblyAI.",
"Transcribe an audio or video file using AssemblyAI.": "Een audio- of videobestand vertalen via AssemblyAI.",
"Retrieves a transcript by its ID.": "Haalt een transcript op via zijn ID.",
"Retrieve the sentences of the transcript by its ID.": "Haal de zinnen van het transcript op met zijn ID.",
"Retrieve the paragraphs of the transcript by its ID.": "Haal de alinea's van het transcript op via zijn ID.",
"Export the transcript as SRT or VTT subtitles.": "Exporteer het afschrift als SRT of VTT ondertitels.",
"Get the result of the redacted audio model.": "Krijg het resultaat van het geredigeerde audiomodel.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Zoek in de transcript naar trefwoorden. Je kunt zoeken naar individuele woorden, cijfers of zinnen met maximaal vijf woorden of cijfers.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Haal een lijst op met transcripten die u hebt gemaakt.\nTransscripts worden gesorteerd van nieuwste naar oudste. De vorige URL verwijst altijd naar een pagina met oudere transcripts.",
"Remove the data from the transcript and mark it as deleted.": "Verwijder de gegevens uit de transcript en markeer als verwijderd.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Gebruik het LeMUR taakeindpunt om uw eigen LLM prompte in te voeren.",
"Retrieve a LeMUR response that was previously generated.": "Krijg een LeMUR reactie die eerder werd gegenereerd.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Verwijder de gegevens van een eerder ingediend LeMUR verzoek.\nDe LLM reactiegegevens en elke context die in het oorspronkelijke verzoek wordt gegeven, worden verwijderd.",
"Make a custom API call to a specific endpoint": "Maak een aangepaste API call naar een specifiek eindpunt",
"Audio File": "Audio bestand",
"Audio URL": "Audio URL",
"Language Code": "Taal Code",
"Language Detection": "Taal detectie",
"Language Confidence Threshold": "Taal vertrouwen drempel",
"Speech Model": "Spraak model",
"Punctuate": "Punctuate",
"Format Text": "Formateer tekst",
"Disfluencies": "Disfluencies",
"Dual Channel": "Dubbel Kanaal",
"Webhook URL": "Webhook URL",
"Webhook Auth Header Name": "Webhook Auth Header naam",
"Webhook Auth Header Value": "Webhook Auth Header waarde",
"Key Phrases": "Sleutelzinnen",
"Audio Start From": "Audio start vanaf",
"Audio End At": "Audio eindigt op",
"Word Boost": "Woord Boost",
"Word Boost Level": "Woord Boost Niveau",
"Filter Profanity": "Grof taalgebruik filteren",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Redacte PII audiokwaliteit",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Redact PII Plaatsvervanger",
"Speaker Labels": "Luidspreker labels",
"Speakers Expected": "Sprekers verwacht",
"Content Moderation": "Content Moderatie",
"Content Moderation Confidence": "Content Moderatie Vertrouwen",
"Topic Detection": "Onderwerp detectie",
"Custom Spellings": "Aangepaste spellingen",
"Sentiment Analysis": "Sentiment Analyse",
"Auto Chapters": "Auto Hoofdstukken",
"Entity Detection": "Entiteit Detectie",
"Speech Threshold": "Spraak drempel",
"Enable Summarization": "Samenvatting inschakelen",
"Summary Model": "Samenvatting Model",
"Summary Type": "Samenvatting Type",
"Enable Custom Topics": "Aangepaste onderwerpen inschakelen",
"Custom Topics": "Aangepaste onderwerpen",
"Wait until transcript is ready": "Wacht tot transcript klaar is",
"Throw if transcript status is error": "Gooi als transcript status is fout",
"Transcript ID": "Transcriptie ID",
"Subtitles Format": "Ondertiteling formaat",
"Number of Characters per Caption": "Aantal tekens per bijschrift",
"Download file?": "Bestand downloaden?",
"Download File Name": "Bestandsnaam voor downloaden",
"Words": "Woorden",
"Limit": "Limiet",
"Status": "status",
"Created On": "Aangemaakt op",
"Before ID": "Voor het ID",
"After ID": "Na het ID",
"Throttled Only": "Alleen vertraagde",
"Prompt": "Prompt",
"Transcript IDs": "Transcript-ID's",
"Input Text": "Input Text",
"Context": "Context",
"Final Model": "Laatste model",
"Maximum Output Size": "Maximale uitvoergrootte",
"Temperature": "Temperatuur",
"LeMUR request ID": "LeMUR verzoek ID",
"Method": "Methode",
"Headers": "Kopteksten",
"Query Parameters": "Query parameters",
"Body": "Lichaam",
"Response is Binary ?": "Antwoord is binair?",
"No Error on Failure": "Geen fout bij fout",
"Timeout (in seconds)": "Time-out (in seconden)",
"The File or URL of the audio or video file.": "Het bestand of URL van het audio of video bestand.",
"The URL of the audio or video file to transcribe.": "De URL van het audio of video bestand om te overschrijven.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "De taal van uw audiobestand. Mogelijke waarden vindt u in [ondersteunde talen](https://www.assemblyai.com/docs/concepts/supported-languages).\nDe standaardwaarde is 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Inschakelen van [Automatische taaldetectie](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), waar of onwaar.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "De vertrouwensdrempel voor de automatisch gedetecteerde taal.\nEr wordt een fout geretourneerd als de taalcontrole onder deze drempel is.\nStandaard ingesteld op 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "Het te gebruiken spraakmodel voor de transcriptie. Wanneer `null`, het \"beste\" model wordt gebruikt.",
"Enable Automatic Punctuation, can be true or false": "Automatische leestekens inschakelen, kan waar of onwaar zijn",
"Enable Text Formatting, can be true or false": "Tekstopmaak inschakelen, kan waar of onwaar zijn",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcribe Filler Words, zoals \"umm\", in je mediabestand; kan waar of onwaar zijn",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Inschakelen [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcriptie, kan waar of niet waar zijn.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "De URL waarnaar we webhook verzoeken sturen.\nWe sturen twee verschillende soorten webhook verzoeken.\nEen verzoek wanneer een transcript is voltooid of mislukt, en één verzoek wanneer redact_pii_audio klaar is als redact_audio is ingeschakeld.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "De naam van de header die wordt verzonden met de ingevulde of mislukte webhook verzoeken",
"The header value to send back with the transcript completed or failed webhook requests for added security": "De header waarde om terug te verzenden met de ingevulde of mislukte webhook verzoeken voor extra beveiliging",
"Enable Key Phrases, either true or false": "Inschakelen van sleutelzinnen, waar of niet waar",
"The point in time, in milliseconds, to begin transcribing in your media file": "Het tijdstip in milliseconden om te beginnen met omzetten in uw mediabestand",
"The point in time, in milliseconds, to stop transcribing in your media file": "Het tijdstip in milliseconden om te stoppen met het omzetten in uw mediabestand",
"The list of custom vocabulary to boost transcription probability for": "De lijst met aangepaste woordenschat om de kans op transcriptie te verhogen voor",
"How much to boost specified words": "Hoeveel je de opgegeven woorden wilt boosten",
"Filter profanity from the transcribed text, can be true or false": "Filter profaniteit op de getranscrimeerde tekst, kan waar of onwaar zijn",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redact PII uit de getranscrimeerde tekst met behulp van het Redact PII-model, kan waar of onwaar zijn",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Genereer een kopie van het originele mediabestand met gesproken PII \"beeped\" uit, kan waar of onwaar zijn. Zie [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) voor meer details.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Bepaalt het bestandstype van het audio gemaakt door redact_pii_audio. Momenteel ondersteunt mp3 (standaard) en wav. Zie [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) voor meer details.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "De lijst van PII Redactie beleid om in te schakelen. Zie [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) voor meer details.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "De vervanging van PII kan \"entity_type\" of \"hash\" zijn. Zie [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) voor meer informatie.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Inschakelen [Spreker diarizatie](https://www.assemblyai.com/docs/models/speaker-diarization), kan waar of onwaar zijn",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Vertelt het luidsprekerlabel hoeveel sprekers het moet proberen te identificeren, tot 10. Zie [Spreker diarizatie] (https://www.assemblyai.com/docs/models/speaker-diarization) voor meer informatie.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Inschakelen van [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), kan waar of onwaar zijn",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "De vertrouwensdrempel voor het Content Moderation model. Waarden moeten tussen 25 en 100 liggen.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Inschakelen van [Rubriek detectie](https://www.assemblyai.com/docs/models/topic-detection), kan waar of onwaar zijn",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Pas aan hoe woorden worden gespeld en opgemaakt met behulp van en van waarden.\nGebruik een JSON array van objecten van het volgende formaat:\n```\n[\n {\n \"van: [\"origineel\", \"spelling\"],\n \"to\": \"gecorrigeerd\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Inschakelen van [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), kan waar of onwaar zijn",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Inschakelen van [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), kan waar of onwaar zijn",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Inschakelen van [Entiteit Detection](https://www.assemblyai.com/docs/models/entity-detection), kan waar of onwaar zijn",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Audiobestanden afwijzen die minder dan deze fractie van spraak bevatten.\nGeldige waarden liggen binnen het bereik [0, 1] inclusief.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "[Summarization](https://www.assemblyai.com/docs/models/summarization) inschakelen, kan waar of onwaar zijn",
"The model to summarize the transcript": "Het model om de transcript samen te vatten",
"The type of summary": "Het type samenvatting",
"Enable custom topics, either true or false": "Aangepaste topics inschakelen waar of onwaar",
"The list of custom topics": "De lijst met aangepaste onderwerpen",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Wacht tot de transcript status \"voltooid\" of \"fout\" is voordat u verder gaat met de volgende stap.",
"If the transcript status is \"error\", throw an error.": "Als de transcript status \"fout\" is, gooi dan een fout.",
"The maximum number of characters per caption": "Het maximum aantal tekens per onderschrift",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "De gewenste bestandsnaam voor het opslaan van ActivePieces. Zorg ervoor dat de bestandsextensie juist is.",
"Keywords to search for": "Trefwoorden om naar te zoeken",
"Maximum amount of transcripts to retrieve": "Maximale hoeveelheid van op te halen transcripten",
"Filter by transcript status": "Filter op transcript status",
"Only get transcripts created on this date": "Alleen transcripten ophalen die op deze datum zijn gemaakt",
"Get transcripts that were created before this transcript ID": "Verkrijg transcripten die zijn aangemaakt voor deze transcript-ID",
"Get transcripts that were created after this transcript ID": "Verkrijg transcripten die zijn aangemaakt na deze transcript-ID",
"Only get throttled transcripts, overrides the status filter": "Alleen gebonden transcripten krijgen, overschrijft het status filter",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Jouw tekst om het model aan te sporen een gewenste uitvoer te produceren, inclusief de context die je wilt doorgeven aan het model.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Een lijst van voltooide transcripten met tekst. Maximaal 100 bestanden of 100 uur, wat ook lager is.\nGebruik transcript_ids of input_text als invoer in LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Aangepaste geformatteerde transcript gegevens. Maximale grootte is de context limiet van het geselecteerde model, standaard op 100000.\nGebruik transcript_ids of input_text als invoer in LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Context om het model op te geven. Dit kan een tekenreeks of een vrije JSON-waarde zijn.",
"The model that is used for the final prompt after compression is performed.\n": "Het model dat wordt gebruikt voor de laatste prompt na compressie is uitgevoerd.\n",
"Max output size in tokens, up to 4000": "Maximale uitvoergrootte in tokens, tot 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "De temperatuur die gebruikt moet worden voor het model.\nHogere waarden resulteren in antwoorden die creatiever en lagere waarden conservatiever zijn.\nKan elke waarde tussen 0,0 en 1.0 inclusief zijn.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "Het ID van het LeMUR verzoek wiens gegevens je wilt verwijderen. Dit wordt gevonden in het antwoord van het oorspronkelijke verzoek.",
"Authorization headers are injected automatically from your connection.": "Autorisatie headers worden automatisch geïnjecteerd vanuit uw verbinding.",
"Enable for files like PDFs, images, etc..": "Inschakelen voor bestanden zoals PDF's, afbeeldingen etc..",
"English (Global)": "Engels (globaal)",
"English (Australian)": "Engels (Australisch)",
"English (British)": "Engels (Brits)",
"English (US)": "Engels (VS)",
"Spanish": "Spaans",
"French": "Frans",
"German": "Duits",
"Italian": "Italiaans",
"Portuguese": "Portugees",
"Dutch": "Nederlands",
"Afrikaans": "Afrikaanse",
"Albanian": "Albanees",
"Amharic": "Amharic",
"Arabic": "Arabisch",
"Armenian": "Armeens",
"Assamese": "Assamees",
"Azerbaijani": "Azerbeidzjaans",
"Bashkir": "Bashkir",
"Basque": "Baskisch",
"Belarusian": "Wit-Russisch",
"Bengali": "Bengaalse",
"Bosnian": "Bosnisch",
"Breton": "Breton",
"Bulgarian": "Bulgaars",
"Burmese": "Burmese",
"Catalan": "Catalaans",
"Chinese": "Chinees",
"Croatian": "Kroatisch",
"Czech": "Tsjechisch",
"Danish": "Deens",
"Estonian": "Estlands",
"Faroese": "Faroese",
"Finnish": "Fins",
"Galician": "Galicisch",
"Georgian": "Georgisch",
"Greek": "Grieks",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Hawaïaans",
"Hebrew": "Hebreeuws",
"Hindi": "Hindoestani",
"Hungarian": "Hongaars",
"Icelandic": "Icelandic",
"Indonesian": "Indonesisch",
"Japanese": "Japans",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Koreaans",
"Lao": "Lao",
"Latin": "Latijns",
"Latvian": "Lets",
"Lingala": "Lingala",
"Lithuanian": "Litouws",
"Luxembourgish": "Luxemburgs",
"Macedonian": "Macedonisch",
"Malagasy": "Malagasy",
"Malay": "Maleisisch",
"Malayalam": "Malayalam",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Marathi",
"Mongolian": "Mongools",
"Nepali": "Nepali",
"Norwegian": "Noors",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "Perzisch",
"Polish": "Pools",
"Romanian": "Roemeens",
"Russian": "Russisch",
"Sanskrit": "Sanskrit",
"Serbian": "Servisch",
"Shona": "Shona",
"Sindhi": "Sindhi",
"Sinhala": "Singalees",
"Slovak": "Slowaaks",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Swahili",
"Swedish": "Zweeds",
"Tagalog": "Tagalog",
"Tajik": "Tadzjieks",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Thaise",
"Tibetan": "Tibetaans",
"Turkish": "Turks",
"Turkmen": "Turkmen",
"Ukrainian": "Oekraïens",
"Urdu": "Urdu",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Welsh",
"Yiddish": "Jiddisch",
"Yoruba": "Yoruba",
"Best": "Beste",
"Nano": "Nano",
"Low": "laag",
"Default": "Standaard",
"High": "hoog",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Rekening Nummer",
"Banking Information": "Bankinformatie",
"Blood Type": "Bloed type",
"Credit Card CVV": "Creditcard CVV",
"Credit Card Expiration": "Vervaldatum creditcard",
"Credit Card Number": "Creditcard nummer",
"Date": "Datum:",
"Date Interval": "Datum Interval",
"Date of Birth": "Geboortedatum",
"Driver's License": "Rijbewijs",
"Drug": "Medicijn",
"Duration": "Tijdsduur",
"Email Address": "E-mailadres",
"Event": "Gebeurtenis",
"Filename": "Bestandsnaam",
"Gender Sexuality": "Geslacht Seksualiteit",
"Healthcare Number": "Zorg nummer",
"Injury": "Verwondingen",
"IP Address": "IP adres",
"Language": "Taal",
"Location": "Locatie",
"Marital Status": "Burgerlijke staat",
"Medical Condition": "Medische Voorwaarde",
"Medical Process": "Medisch proces",
"Money Amount": "Geld Bedrag",
"Nationality": "Nationaliteit",
"Number Sequence": "Nummer reeks",
"Occupation": "Beroep",
"Organization": "Organisatie",
"Passport Number": "Paspoort Nummer",
"Password": "Wachtwoord",
"Person Age": "Persoon Leeftijd",
"Person Name": "Naam persoon",
"Phone Number": "Telefoon nummer",
"Physical Attribute": "Fysieke kenmerk",
"Political Affiliation": "Politieke Partner",
"Religion": "Religie",
"Statistics": "Statistieken",
"Time": "Tijd",
"URL": "URL",
"US Social Security Number": "US Social Security Number",
"Username": "Gebruikersnaam",
"Vehicle ID": "Voertuig ID",
"Zodiac Sign": "Sterrenbeeld",
"Entity Name": "Naam van entiteit",
"Hash": "Hash",
"Informative": "Informatief",
"Conversational": "Conversationeel",
"Catchy": "Pakkend",
"Bullets": "Opsommingstekens",
"Bullets Verbose": "Uitgebreide opsommingstekens",
"Gist": "Gist",
"Headline": "Kop",
"Paragraph": "Paragraaf",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "Wachtrij",
"Processing": "Verwerken",
"Completed": "Voltooid",
"Error": "Foutmelding",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (op Anthropic)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (over Anthropic)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (over Anthropic)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (over Anthropic)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (over Anthropic)",
"Claude 2 (on Anthropic)": "Claude 2 (over Anthropic)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (op Anthropic)",
"Basic": "Eenvoudig",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (Gehost door AssemblyAI)",
"GET": "GET",
"POST": "POST",
"PATCH": "PATCH",
"PUT": "PUT",
"DELETE": "DELETE",
"HEAD": "HEAD"
}

View File

@@ -0,0 +1,344 @@
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Transcreva e extraia dados do áudio usando a IA da fala da AssemblyAI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "Você pode recuperar sua chave de API do AssemblyAI dentro do seu AssemblyAI [Configurações da Conta](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Enviar Arquivo",
"Transcribe": "Transcrever",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Obter frases transcritas",
"Get Transcript Paragraphs": "Obter Parágrafos de Transcrição",
"Get Transcript Subtitles": "Obter legendas de transcrição",
"Get Transcript Redacted Audio": "Obter Transcrição de Áudio Redagido",
"Search words in transcript": "Pesquisar palavras em transcrição",
"List transcripts": "Listar transcrições",
"Delete transcript": "Excluir transcrição",
"Run a Task using LeMUR": "Executar uma tarefa usando o LeMUR",
"Retrieve LeMUR response": "Recuperar resposta ao LeMUR",
"Purge LeMUR request data": "Limpar dados do LeMUR request",
"Custom API Call": "Chamada de API personalizada",
"Upload a media file to AssemblyAI's servers.": "Enviar um arquivo de mídia para os servidores do AssemblyAI.",
"Transcribe an audio or video file using AssemblyAI.": "Transcreva um arquivo de áudio ou vídeo usando o AssemblyAI.",
"Retrieves a transcript by its ID.": "Recupera uma transcrição por seu ID.",
"Retrieve the sentences of the transcript by its ID.": "Recupere as frases da transcrição por seu documento de identidade.",
"Retrieve the paragraphs of the transcript by its ID.": "Recuperar os parágrafos da transcrição por seu ID.",
"Export the transcript as SRT or VTT subtitles.": "Exportar a transcrição como legendas SRT ou VTT.",
"Get the result of the redacted audio model.": "Obter o resultado do modelo de áudio aninhado.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Pesquisar através da transcrição por palavras-chave. Você pode procurar por palavras, números ou frases contendo até cinco palavras ou números.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.",
"Remove the data from the transcript and mark it as deleted.": "Remova os dados da transcrição e marque-os como excluídos.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Use o LeMUR task endpoint para inserir seu próprio prompt LLM.",
"Retrieve a LeMUR response that was previously generated.": "Recuperar uma resposta LeMUR que foi gerada anteriormente.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Excluir os dados para uma solicitação LeMUR enviada anteriormente.\nOs dados de resposta de LLM, bem como qualquer contexto fornecido na solicitação original serão removidos.",
"Make a custom API call to a specific endpoint": "Faça uma chamada de API personalizada para um ponto de extremidade específico",
"Audio File": "Arquivo de Áudio",
"Audio URL": "URL do Áudio",
"Language Code": "Código do Idioma",
"Language Detection": "Detecção de Idioma",
"Language Confidence Threshold": "Limite de confiança da língua",
"Speech Model": "Modelo de fala",
"Punctuate": "Punctuate",
"Format Text": "Formatar Texto",
"Disfluencies": "Disfluências",
"Dual Channel": "Canal duplo",
"Webhook URL": "URL do webhook",
"Webhook Auth Header Name": "Nome de Cabeçalho de Autenticação Webhook",
"Webhook Auth Header Value": "Valor do cabeçalho de autenticação Webhook",
"Key Phrases": "Frases principais",
"Audio Start From": "Início do Áudio De",
"Audio End At": "Fim do áudio em",
"Word Boost": "Boost de Palavra",
"Word Boost Level": "Nível de Boost de Palavra",
"Filter Profanity": "Filtrar Profanidade",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Redact qualidade de áudio PII",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Substituição PII de Redacto",
"Speaker Labels": "Rótulos dos alto-falantes",
"Speakers Expected": "Alto-falantes esperados",
"Content Moderation": "Moderação de conteúdo",
"Content Moderation Confidence": "Confiança de moderação de conteúdo",
"Topic Detection": "Detecção de tópico",
"Custom Spellings": "Ortografias personalizadas",
"Sentiment Analysis": "Análise do Sentimento",
"Auto Chapters": "Capítulos Automáticos",
"Entity Detection": "Detecção de Entidade",
"Speech Threshold": "Limite de Fala",
"Enable Summarization": "Habilitar resumo",
"Summary Model": "Modelo De Resumo",
"Summary Type": "Tipo de Resumo",
"Enable Custom Topics": "Ativar tópicos personalizados",
"Custom Topics": "Tópicos Personalizados",
"Wait until transcript is ready": "Aguarde até que a transcrição esteja pronta",
"Throw if transcript status is error": "Jogar se o status de transcrição estiver errado",
"Transcript ID": "ID de transcrição",
"Subtitles Format": "Formato das Legendas",
"Number of Characters per Caption": "Número de caracteres por legenda",
"Download file?": "Baixar arquivo?",
"Download File Name": "Baixar nome do arquivo",
"Words": "Palavras",
"Limit": "Limitar",
"Status": "Estado",
"Created On": "Criado Em",
"Before ID": "Antes do ID",
"After ID": "Depois da identificação",
"Throttled Only": "Limitado Apenas",
"Prompt": "Aviso",
"Transcript IDs": "IDs de transcrição",
"Input Text": "Input Text",
"Context": "Contexto",
"Final Model": "Modelo Final",
"Maximum Output Size": "Tamanho Máximo da Saída",
"Temperature": "Temperatura",
"LeMUR request ID": "ID de solicitação do LeMUR",
"Method": "Método",
"Headers": "Cabeçalhos",
"Query Parameters": "Parâmetros da consulta",
"Body": "Conteúdo",
"Response is Binary ?": "A resposta é binária ?",
"No Error on Failure": "Nenhum erro no Failure",
"Timeout (in seconds)": "Tempo limite (em segundos)",
"The File or URL of the audio or video file.": "O arquivo ou URL do arquivo de áudio ou vídeo.",
"The URL of the audio or video file to transcribe.": "URL do arquivo de áudio ou vídeo a ser transcrito.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "Idioma do seu arquivo de áudio. Possíveis valores são encontrados em [Idiomas Suportados](https://www.assemblyai.com/docs/concepts/supported-languages).\nO valor padrão é 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Habilitar [Detecção de idioma automático](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), verdadeiro ou falso.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "O limite de confiança para o idioma detectado automaticamente.\nUm erro será retornado se a confiança do idioma estiver abaixo desse limite.\nPadrão é 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "O modelo de fala a ser usado para a transcrição. Quando `null`, o modelo \"best\" é usado.",
"Enable Automatic Punctuation, can be true or false": "Habilitar Pontuação Automática, pode ser verdadeiro ou falso",
"Enable Text Formatting, can be true or false": "Ativar a formatação do texto, pode ser verdadeiro ou falso",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcreva Palavras de Filtro, como \"umm\", no seu arquivo de mídia; pode ser verdadeiro ou falso",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Habilitar a transcrição [Canal dual](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription), pode ser verdadeira ou falsa.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "O nome do cabeçalho a ser enviado com a transcrição concluída ou com solicitações de webhook que falharam",
"The header value to send back with the transcript completed or failed webhook requests for added security": "O valor do cabeçalho para enviar de volta com a transcrição concluída ou com solicitações falhadas de webhook para aumentar a segurança",
"Enable Key Phrases, either true or false": "Ativar frases chave, verdadeiro ou falso",
"The point in time, in milliseconds, to begin transcribing in your media file": "O ponto no tempo, em milissegundos, para começar a transcrever o seu arquivo de mídia",
"The point in time, in milliseconds, to stop transcribing in your media file": "O ponto no tempo, em milissegundos, para parar de transcrever o seu arquivo de mídia",
"The list of custom vocabulary to boost transcription probability for": "Lista de vocabulário personalizado para aumentar a probabilidade de transcrição de",
"How much to boost specified words": "Quanto para impulsionar as palavras especificadas",
"Filter profanity from the transcribed text, can be true or false": "Filtrar palavrões do texto transcrito, pode ser verdadeiro ou falso",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redact PII do texto transcrito usando o modelo de Redact PII pode ser verdadeiro ou falso",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Gerar uma cópia do arquivo de mídia original com PII \"beeed\" falado, pode ser verdadeiro ou falso. Veja [Redação PI](https://www.assemblyai.com/docs/models/pii-redaction) para mais detalhes.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Controla o tipo de arquivo do áudio criado pelo redact_pii_audio. Atualmente suporta mp3 (padrão) e onda. Veja [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) para mais detalhes.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Lista das políticas de Redação PII para habilitar. Veja [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) para mais detalhes.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "A lógica de substituição do PII detectado, pode ser \"entity_type\" ou \"hash\". Veja [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) para mais detalhes.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Habilitar [Dialização do alto-falante](https://www.assemblyai.com/docs/models/speaker-diarization), pode ser verdadeira ou falsa",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Diz ao pregador o modelo de rótulo de quantos pregadores ele deve tentar identificar, até 10. Veja [Dialização do alto-falante](https://www.assemblyai.com/docs/models/speaker-diarization) para mais detalhes.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Habilitar [Moderação de Conteúdo](https://www.assemblyai.com/docs/models/content-moderation), pode ser verdadeiro ou falso",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "O limite de confiança para o modelo de Moderação de Conteúdo. Os valores devem estar entre 25 e 100.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Habilitar [Detecção de Tópico](https://www.assemblyai.com/docs/models/topic-detection), pode ser verdadeiro ou falso",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Personalize como as palavras são ortografadas e formatadas usando os valores de e para cima.\nUse uma matriz JSON de objetos do seguinte formato:\n```\n[\n {\n \"from\": [\"original\", \"ortografia\"],\n \"para\": \"corrigida\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Habilitar [Análise de Sentimento](https://www.assemblyai.com/docs/models/sentiment-analysis), pode ser verdadeiro ou falso",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Ativar [Capítulos Automáticos](https://www.assemblyai.com/docs/models/auto-chapters), pode ser verdadeiro ou falso",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Ativar [Detecção da Entidade](https://www.assemblyai.com/docs/models/entity-detection), pode ser verdadeiro ou falso",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Habilitar [Summarization](https://www.assemblyai.com/docs/models/summarization), pode ser verdadeiro ou falso",
"The model to summarize the transcript": "O modelo para resumir a transcrição",
"The type of summary": "O tipo de resumo",
"Enable custom topics, either true or false": "Habilitar tópicos personalizados, verdadeiro ou falso",
"The list of custom topics": "A lista de tópicos personalizados",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Aguarde até que o status de transcrição seja \"completado\" ou \"erro\" antes de passar para o próximo passo.",
"If the transcript status is \"error\", throw an error.": "Se o status de transcrição for \"erro\", lança um erro.",
"The maximum number of characters per caption": "O número máximo de caracteres por legenda",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "O nome do arquivo desejado para armazenar no ActivePieces. Certifique-se de que a extensão do arquivo está correta.",
"Keywords to search for": "Palavras-chave para procurar",
"Maximum amount of transcripts to retrieve": "Quantidade máxima de transcrições a recuperar",
"Filter by transcript status": "Filtrar por status de transcrição",
"Only get transcripts created on this date": "Apenas obtenha transcrições criadas nesta data",
"Get transcripts that were created before this transcript ID": "Obter transcrições que foram criadas antes desta identificação de transcrição",
"Get transcripts that were created after this transcript ID": "Obter transcrições que foram criadas após esta identificação de transcrição",
"Only get throttled transcripts, overrides the status filter": "Só obter transcrições limitadas, substituir o filtro de status",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Seu texto para prompt o modelo para produzir uma saída desejada, incluindo qualquer contexto que você deseja passar para o modelo.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Contexto para fornecer o modelo. Isto pode ser uma string ou uma forma JSON de forma livre.",
"The model that is used for the final prompt after compression is performed.\n": "O modelo que é usado para a prompt final após a compressão é executada.\n",
"Max output size in tokens, up to 4000": "Tamanho máximo de saída em tokens, até 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "A temperatura para usar para o modelo.\nValores maiores resultam em respostas que são mais criativas, valores mais baixos são mais conservadores.\nPode ser qualquer valor entre 0.0 e 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "O ID da solicitação LeMUR cujos dados você deseja excluir. Isto pode ser encontrado na resposta da solicitação original.",
"Authorization headers are injected automatically from your connection.": "Os cabeçalhos de autorização são inseridos automaticamente a partir da sua conexão.",
"Enable for files like PDFs, images, etc..": "Habilitar para arquivos como PDFs, imagens, etc..",
"English (Global)": "Inglês (Global)",
"English (Australian)": "Inglês (australiano)",
"English (British)": "Português (Brasil)",
"English (US)": "Português Brasileiro (pt-BR)",
"Spanish": "espanhol",
"French": "francês",
"German": "alemão",
"Italian": "italiano",
"Portuguese": "Português",
"Dutch": "Neerlandês",
"Afrikaans": "africâner",
"Albanian": "albanês",
"Amharic": "Amharic",
"Arabic": "Arábico",
"Armenian": "Armênio",
"Assamese": "assamês",
"Azerbaijani": "azerbaijano",
"Bashkir": "Bashkir",
"Basque": "basco",
"Belarusian": "Bielorrusso",
"Bengali": "bengali",
"Bosnian": "bósnio",
"Breton": "Breton",
"Bulgarian": "búlgaro",
"Burmese": "Burmese",
"Catalan": "catalão",
"Chinese": "chinês",
"Croatian": "croata",
"Czech": "tcheco",
"Danish": "Dinamarquês",
"Estonian": "Estônio",
"Faroese": "Faroese",
"Finnish": "Finlandês",
"Galician": "galego",
"Georgian": "georgiano",
"Greek": "Grego",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Havaiano",
"Hebrew": "Hebraico",
"Hindi": "hindi",
"Hungarian": "Húngaro",
"Icelandic": "Icelandic",
"Indonesian": "indonésio",
"Japanese": "japonês",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "coreano",
"Lao": "Lao",
"Latin": "latim",
"Latvian": "Letã",
"Lingala": "Lingala",
"Lithuanian": "lituano",
"Luxembourgish": "luxemburguês",
"Macedonian": "macedônio",
"Malagasy": "Malagasy",
"Malay": "malaio",
"Malayalam": "Malaialam",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "marata",
"Mongolian": "Mongol",
"Nepali": "Nepali",
"Norwegian": "norueguês",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "persa",
"Polish": "Polonês",
"Romanian": "romeno",
"Russian": "Russo",
"Sanskrit": "Sanskrit",
"Serbian": "Sérvio",
"Shona": "Lona",
"Sindhi": "Sindhi",
"Sinhala": "cingalês",
"Slovak": "Eslovaco",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Suaíli",
"Swedish": "sueco",
"Tagalog": "Tagalog",
"Tajik": "Tadjique",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Tailandês",
"Tibetan": "tibetano",
"Turkish": "Turco",
"Turkmen": "Turkmen",
"Ukrainian": "ucraniano",
"Urdu": "urdu",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "galês",
"Yiddish": "iídiche",
"Yoruba": "Yoruba",
"Best": "Melhor",
"Nano": "nano",
"Low": "baixa",
"Default": "Padrão",
"High": "alta",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Numero da Conta",
"Banking Information": "Informações Bancárias",
"Blood Type": "Tipo de Sangue",
"Credit Card CVV": "Cartão de Crédito CVV",
"Credit Card Expiration": "Expiração do Cartão de Crédito",
"Credit Card Number": "Número do cartão de crédito",
"Date": "Encontro",
"Date Interval": "Intervalo de data",
"Date of Birth": "Data de nascimento",
"Driver's License": "Carteira de Motorista",
"Drug": "Drogas",
"Duration": "Duração",
"Email Address": "Endereço de e-mail",
"Event": "Evento",
"Filename": "Nome",
"Gender Sexuality": "Sexualidade Sexualidade",
"Healthcare Number": "Número de saúde",
"Injury": "Lesões",
"IP Address": "Endereço IP",
"Language": "IDIOMA",
"Location": "Local:",
"Marital Status": "Estado civil",
"Medical Condition": "Condição Médica",
"Medical Process": "Processo Médico",
"Money Amount": "Valor do Dinheiro",
"Nationality": "Nacionalidade",
"Number Sequence": "Sequência de números",
"Occupation": "Ocupação",
"Organization": "Cliente",
"Passport Number": "Número de Passaporte",
"Password": "Senha",
"Person Age": "Era das pessoas",
"Person Name": "Nome da pessoa",
"Phone Number": "Número de telefone",
"Physical Attribute": "Atributo físico",
"Political Affiliation": "Afiliação Política",
"Religion": "Religião",
"Statistics": "estatísticas",
"Time": "Horário",
"URL": "URL:",
"US Social Security Number": "Número de Segurança Social dos EUA",
"Username": "Usuário:",
"Vehicle ID": "ID do veículo",
"Zodiac Sign": "Signo do Zodíaco",
"Entity Name": "Nome da entidade",
"Hash": "Hash",
"Informative": "Informativo",
"Conversational": "Conversacional",
"Catchy": "Apanhado",
"Bullets": "Balas",
"Bullets Verbose": "Verbose de Balas",
"Gist": "Neblina",
"Headline": "Título",
"Paragraph": "Parágrafo",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "Enfileirado",
"Processing": "Processando",
"Completed": "Concluído",
"Error": "Erro",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (no Antrópico)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (no Antrópico)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (no Anthropic)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (no Antrópico)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (sobre Anthropic)",
"Claude 2 (on Anthropic)": "Claude 2 (sobre Anthropic)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (em Anthropic)",
"Basic": "Básico",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (Selvagem por AssemblyAI)",
"GET": "OBTER",
"POST": "POSTAR",
"PATCH": "COMPRAR",
"PUT": "COLOCAR",
"DELETE": "EXCLUIR",
"HEAD": "CABEÇA"
}

View File

@@ -0,0 +1,343 @@
{
"AssemblyAI": "AssemblyAI",
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Транскрибируйте и извлекайте данные из аудио с помощью Speech AI от AssemblyAI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "Вы можете получить свой API ключ AssemblyAI в разделе AssemblyAI [Настройки аккаунта](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Загрузить файл",
"Transcribe": "Транскрибировать",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Получать сообщения субтитров",
"Get Transcript Paragraphs": "Получить абзацы субтитров",
"Get Transcript Subtitles": "Получить субтитры транскрипта",
"Get Transcript Redacted Audio": "Получить отредактированное аудио транскрипта",
"Search words in transcript": "Поиск слов в субтитрах",
"List transcripts": "Список субтитров",
"Delete transcript": "Удалить субтитры",
"Run a Task using LeMUR": "Запуск задачи с помощью LeMUR",
"Retrieve LeMUR response": "Получить LeMUR ответ",
"Purge LeMUR request data": "Очистить LeMUR запрос данных",
"Custom API Call": "Пользовательский вызов API",
"Upload a media file to AssemblyAI's servers.": "Загрузите медиа файл на серверы AssemblyAI.",
"Transcribe an audio or video file using AssemblyAI.": "Преобразовать аудио или видео файл с помощью AssemblyAI.",
"Retrieves a transcript by its ID.": "Получает субтитры по идентификатору.",
"Retrieve the sentences of the transcript by its ID.": "Получить предложения субтитров по идентификатору.",
"Retrieve the paragraphs of the transcript by its ID.": "Получить абзацы субтитров по его ID.",
"Export the transcript as SRT or VTT subtitles.": "Экспортировать субтитры в SRT или VTT.",
"Get the result of the redacted audio model.": "Получить результат отредактированной аудио модели.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Поиск по субтитрам ключевых слов. Можно искать отдельные слова, цифры или фразы, содержащие до пяти слов или цифр.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.",
"Remove the data from the transcript and mark it as deleted.": "Удалите данные из субтитров и пометьте их как удаленные.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Используйте конечную точку задания LeMUR для ввода собственного запроса LLM.",
"Retrieve a LeMUR response that was previously generated.": "Получить LeMUR ответ, который был ранее создан.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Удалить данные для ранее представленного запроса LeMUR.\nДанные ответа LLM, а также любой контекст, указанный в первоначальном запросе, будут удалены.",
"Make a custom API call to a specific endpoint": "Сделать пользовательский API вызов к определенной конечной точке",
"Audio File": "Аудио файл",
"Audio URL": "URL аудио",
"Language Code": "Код языка",
"Language Detection": "Обнаружение языка",
"Language Confidence Threshold": "Порог доверия языка",
"Speech Model": "Модель речи",
"Punctuate": "Punctuate",
"Format Text": "Форматировать текст",
"Disfluencies": "Различия",
"Dual Channel": "Двойной канал",
"Webhook URL": "URL вебхука",
"Webhook Auth Header Name": "Имя заголовка аутентификации вебхука",
"Webhook Auth Header Value": "Значение заголовка авторизации Webhook",
"Key Phrases": "Ключевые фразы",
"Audio Start From": "Начало звука с",
"Audio End At": "Окончание аудио в",
"Word Boost": "Усиление слов",
"Word Boost Level": "Уровень усиления слов",
"Filter Profanity": "Фильтровать нецензуру",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Качество Redact PII аудио",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Исправить замену PII",
"Speaker Labels": "Метки динамиков",
"Speakers Expected": "Ожидались спикеры",
"Content Moderation": "Модерация контента",
"Content Moderation Confidence": "Уверенность в модерацию контента",
"Topic Detection": "Обнаружение темы",
"Custom Spellings": "Пользовательские правописания",
"Sentiment Analysis": "Анализ тональности",
"Auto Chapters": "Авто главы",
"Entity Detection": "Обнаружение сущностей",
"Speech Threshold": "Порог речи",
"Enable Summarization": "Включить сводку",
"Summary Model": "Обобщающая модель",
"Summary Type": "Тип сводки",
"Enable Custom Topics": "Включить пользовательские темы",
"Custom Topics": "Пользовательские темы",
"Wait until transcript is ready": "Подождите, пока субтитры будут готовы",
"Throw if transcript status is error": "Бросить при ошибке субтитров",
"Transcript ID": "ID субтитров",
"Subtitles Format": "Формат субтитров",
"Number of Characters per Caption": "Количество символов в одной подписи",
"Download file?": "Загрузить файл?",
"Download File Name": "Имя файла",
"Words": "Слова",
"Limit": "Лимит",
"Status": "Status",
"Created On": "Создано",
"Before ID": "Перед ID",
"After ID": "После ID",
"Throttled Only": "Только ограниченные (throttled)",
"Prompt": "Prompt",
"Transcript IDs": "ID субтитров",
"Input Text": "Input Text",
"Context": "Контекст",
"Final Model": "Окончательная модель",
"Maximum Output Size": "Максимальный размер вывода",
"Temperature": "Температура",
"LeMUR request ID": "ID запроса LeMUR",
"Method": "Метод",
"Headers": "Заголовки",
"Query Parameters": "Параметры запроса",
"Body": "Тело",
"No Error on Failure": "Нет ошибок при ошибке",
"Timeout (in seconds)": "Таймаут (в секундах)",
"The File or URL of the audio or video file.": "Файл или URL аудио или видео файла.",
"The URL of the audio or video file to transcribe.": "URL передаваемого аудио или видео файла.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "Язык аудио файла. Возможные значения находятся в [Поддерживаемые языки](https://www.assemblyai.com/docs/concepts/supported-languages).\nзначение по умолчанию 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Включите [Автоматическое определение языка](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), либо true или false.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "Порог доверия для автоматически обнаруженного языка.\nОшибка будет возвращена, если доверие языка ниже этого порога.\nПо умолчанию 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "Модель речи, используемая для транскрипции. Когда 'null' используется лучшая модель.",
"Enable Automatic Punctuation, can be true or false": "Включить автоматическую пунктуацию, может быть true или false",
"Enable Text Formatting, can be true or false": "Включить форматирование текста, может быть true или false",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcribe Filler Words, such as \"umm\", in your media file; can be true or false",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Включить транскрипцию [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription), может быть true или false.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "Имя заголовка для отправки с субтитрами завершено или не выполнено с помощью webhook запросов",
"The header value to send back with the transcript completed or failed webhook requests for added security": "Значение заголовка для отправки с субтитрами завершено или неудавшиеся запросы webhook для дополнительной безопасности",
"Enable Key Phrases, either true or false": "Включить ключевые фразы, true или false",
"The point in time, in milliseconds, to begin transcribing in your media file": "Точка времени, в миллисекундах, начать трансляцию в вашем медиа-файле",
"The point in time, in milliseconds, to stop transcribing in your media file": "Точка времени, в миллисекундах, остановить трансляцию в вашем медиа-файле",
"The list of custom vocabulary to boost transcription probability for": "Список пользовательского словаря для увеличения вероятности транскрипции для",
"How much to boost specified words": "Сколько увеличивать указанные слова",
"Filter profanity from the transcribed text, can be true or false": "Фильтровать ненормативную лексику из переписанного текста может быть истинной или ложной",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Редкий PII из переписанного текста с использованием модели Redact PII, может быть истинным или ложным",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Создать копию оригинального медиа-файла с говорящим PII \"обезглавленным\", может быть правдой или ложным. См. [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) для получения более подробной информации.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Контролирует тип файлов, созданный redact_pii_audio. В настоящее время поддерживает mp3 (по умолчанию) и wav. Смотрите [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) для получения более подробной информации.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Список политик исправления PII для включения. Смотрите [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) для получения более подробной информации.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Обнаружена логика замены PII, может быть \"entity_type\" или \"hash\". Подробнее смотрите в [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction).",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Включить [Диаризацию динамика](https://www.assemblyai.com/docs/models/speaker-diarization), может быть true или ложным",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Сообщает модель, в которой динамик должен быть определен до 10. Подробнее см. в разделе [Диаризация динамика](https://www.assemblyai.com/docs/models/speaker-diarization).",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Включить [Контент модера](https://www.assemblyai.com/docs/models/content-moderation), может быть true или false",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "Порог доверия модели моделирования контента. Значения должны быть от 25 до 100.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Включите [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), может быть true или false",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Настройте порядок написания слов и их форматирования с использованием значений и значений.\nИспользуйте JSON массив объектов следующего формата:\n```\n[\n {\n \"from\": [\"original\", \"spell\"],\n \"до\": \"corrected\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Включите [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), может быть true или false",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Включить [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), может быть true или false",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Включить [Обнаружение сущностей](https://www.assemblyai.com/docs/models/entity-detection), может быть true или false",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Отклонить аудио файлы, содержащие меньше этой доли речи.\nДопустимые значения находятся в диапазоне [0, 1] включительно.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Включить [Summarization](https://www.assemblyai.com/docs/models/summarization), может быть true или false",
"The model to summarize the transcript": "Модель суммирования субтитров",
"The type of summary": "Тип сводки",
"Enable custom topics, either true or false": "Включить пользовательские темы, true или false",
"The list of custom topics": "Список пользовательских тем",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Подождите, пока субтитры не будут \"завершены\" или \"ошибка\" перед переходом на следующий шаг.",
"If the transcript status is \"error\", throw an error.": "Если статус транскрипта — \"ошибка\", выбросить ошибку.",
"The maximum number of characters per caption": "Максимальное количество символов в подписи",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "Желаемое имя файла для хранения в ActivePieces. Убедитесь, что расширение файла правильное.",
"Keywords to search for": "Ключевые слова для поиска",
"Maximum amount of transcripts to retrieve": "Максимальное количество субтитров для получения",
"Filter by transcript status": "Фильтровать по статусу субтитров",
"Only get transcripts created on this date": "Получать только субтитры созданные на эту дату",
"Get transcripts that were created before this transcript ID": "Получить субтитры, которые были созданы до этого идентификатора субтитров",
"Get transcripts that were created after this transcript ID": "Получить субтитры, которые были созданы после этого идентификатора субтитров",
"Only get throttled transcripts, overrides the status filter": "Получать только отлаженные субтитры, перезаписывает фильтр состояния",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Ваш текст, чтобы побудить модель произвести желаемый выход, включая любой контекст, который вы хотите передать в модель.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Список завершенных субтитров с текстом. Максимальное количество файлов - 100 часов, в зависимости от того, что меньше.\nИспользуйте транскрипты или input_text в качестве ввода в LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Пользовательские форматированные данные субтитров. Максимальный размер - ограничение контекста выбранной модели, по умолчанию 100000.\nИспользуйте транскрипты или input_text в качестве ввода в LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Контекст для представления модели. Это может быть строка или произвольное значение JSON.",
"The model that is used for the final prompt after compression is performed.\n": "Модель, используемая для финального подсказки после сжатия.\n",
"Max output size in tokens, up to 4000": "Максимальный размер выхода в токенах, до 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "Идентификатор запроса LeMUR, данные которого вы хотите удалить. Он будет найден в ответ на первоначальный запрос.",
"Authorization headers are injected automatically from your connection.": "Заголовки авторизации включаются автоматически из вашего соединения.",
"English (Global)": "Английский (Мир)",
"English (Australian)": "Английский (Австралия)",
"English (British)": "Английский (британский)",
"English (US)": "Английский (США)",
"Spanish": "Испанский",
"French": "Французский",
"German": "Немецкий",
"Italian": "Итальянский",
"Portuguese": "Португальский",
"Dutch": "Голландский",
"Afrikaans": "Африкаанс",
"Albanian": "Албанский",
"Amharic": "Amharic",
"Arabic": "Арабский",
"Armenian": "Армянский",
"Assamese": "<unk> ский",
"Azerbaijani": "Азербайджанский",
"Bashkir": "Bashkir",
"Basque": "Баскский",
"Belarusian": "Белорусский",
"Bengali": "Бенгальский",
"Bosnian": "Боснийский",
"Breton": "Breton",
"Bulgarian": "Болгарский",
"Burmese": "Burmese",
"Catalan": "Каталанский",
"Chinese": "Китайский",
"Croatian": "Хорватский",
"Czech": "Чешский",
"Danish": "Датский",
"Estonian": "Эстонский",
"Faroese": "Faroese",
"Finnish": "Финский",
"Galician": "Галисийский",
"Georgian": "Грузинский",
"Greek": "Греческий",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Гавайский",
"Hebrew": "Иврит",
"Hindi": "Хинди",
"Hungarian": "Венгерский",
"Icelandic": "Icelandic",
"Indonesian": "Индонезийский",
"Japanese": "Японский",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Корейский",
"Lao": "Lao",
"Latin": "Латынь",
"Latvian": "Латышский",
"Lingala": "Lingala",
"Lithuanian": "Литовский",
"Luxembourgish": "Люксембургский",
"Macedonian": "Македонский",
"Malagasy": "Malagasy",
"Malay": "Малай",
"Malayalam": "Малаялам",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Маратти",
"Mongolian": "Монгольский",
"Nepali": "Nepali",
"Norwegian": "Норвежский",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Пашто",
"Persian": "Персидский",
"Polish": "Польский",
"Romanian": "Румынский",
"Russian": "Русский",
"Sanskrit": "Sanskrit",
"Serbian": "Сербский",
"Shona": "Шона",
"Sindhi": "Sindhi",
"Sinhala": "Сингала",
"Slovak": "Словацкий",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Суахили",
"Swedish": "Шведский",
"Tagalog": "Tagalog",
"Tajik": "Таджик",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Тайский",
"Tibetan": "Тибетский",
"Turkish": "Турецкий",
"Turkmen": "Turkmen",
"Ukrainian": "Украинский",
"Urdu": "Урду",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Уэльш",
"Yiddish": "Идиш",
"Yoruba": "Yoruba",
"Best": "Лучшее",
"Nano": "Нано",
"Low": "Низкий",
"Default": "По умолчанию",
"High": "Высокий",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Номер счета",
"Banking Information": "Банковская информация",
"Blood Type": "Тип крови",
"Credit Card CVV": "Кредитная карта CVV",
"Credit Card Expiration": "Срок действия кредитной карты",
"Credit Card Number": "Номер кредитной карты",
"Date": "Дата",
"Date Interval": "Дата интервала",
"Date of Birth": "Дата рождения",
"Driver's License": "Водительское удостоверение",
"Drug": "Наркотики",
"Duration": "Duration",
"Email Address": "Email Address",
"Event": "Событие",
"Filename": "Имя файла",
"Gender Sexuality": "Сексуальность по признаку пола",
"Healthcare Number": "Номер здоровья",
"Injury": "Травма",
"IP Address": "IP-адрес",
"Language": "Язык",
"Location": "Местоположение",
"Marital Status": "Семейное положение",
"Medical Condition": "Состояние здоровья",
"Medical Process": "Медицинский процесс",
"Money Amount": "Сумма денег",
"Nationality": "Гражданство",
"Number Sequence": "Числовая последовательность",
"Occupation": "Занятость",
"Organization": "Организация",
"Passport Number": "Номер паспорта",
"Password": "Пароль",
"Person Age": "Возраст человека",
"Person Name": "Имя человека",
"Phone Number": "Номер телефона",
"Physical Attribute": "Физический атрибут",
"Political Affiliation": "Политическое партнерство",
"Religion": "Религия",
"Statistics": "Статистика",
"Time": "Время",
"URL": "URL",
"US Social Security Number": "Номер социального страхования США",
"Username": "Имя пользователя",
"Vehicle ID": "ID транспортного средства",
"Zodiac Sign": "Знак зодиака",
"Entity Name": "Название сущности",
"Hash": "Хэш",
"Informative": "Информационный",
"Conversational": "Разговорный",
"Catchy": "Броский",
"Bullets": "Маркеры",
"Bullets Verbose": "Подробные маркеры",
"Gist": "Суть",
"Headline": "Заголовок",
"Paragraph": "Абзац",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "В очереди",
"Processing": "Обработка",
"Completed": "Выполнено",
"Error": "Ошибка",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (на антропке)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (на антропском)",
"Claude 3 Haiku (on Anthropic)": "Клод 3 Хайку (на антропии)",
"Claude 3 Sonnet (on Anthropic)": "Клод 3 Соннет (на антропии)",
"Claude 2.1 (on Anthropic)": "Клод 2.1 (по антропскому)",
"Claude 2 (on Anthropic)": "Клод 2 (по антропии)",
"Claude Instant 1.2 (on Anthropic)": "Клод Мгновенный 1.2 (на антропии)",
"Basic": "Базовый",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (Hosted by AssemblyAI)",
"GET": "ПОЛУЧИТЬ",
"POST": "ПОСТ",
"PATCH": "ПАТЧ",
"PUT": "ПОМЕСТИТЬ",
"DELETE": "УДАЛИТЬ",
"HEAD": "HEAD"
}

View File

@@ -0,0 +1,344 @@
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Transcribe and extract data from audio using AssemblyAI's Speech AI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Upload File",
"Transcribe": "Transcribe",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Get Transcript Sentences",
"Get Transcript Paragraphs": "Get Transcript Paragraphs",
"Get Transcript Subtitles": "Get Transcript Subtitles",
"Get Transcript Redacted Audio": "Get Transcript Redacted Audio",
"Search words in transcript": "Search words in transcript",
"List transcripts": "List transcripts",
"Delete transcript": "Delete transcript",
"Run a Task using LeMUR": "Run a Task using LeMUR",
"Retrieve LeMUR response": "Retrieve LeMUR response",
"Purge LeMUR request data": "Purge LeMUR request data",
"Custom API Call": "Custom API Call",
"Upload a media file to AssemblyAI's servers.": "Upload a media file to AssemblyAI's servers.",
"Transcribe an audio or video file using AssemblyAI.": "Transcribe an audio or video file using AssemblyAI.",
"Retrieves a transcript by its ID.": "Retrieves a transcript by its ID.",
"Retrieve the sentences of the transcript by its ID.": "Retrieve the sentences of the transcript by its ID.",
"Retrieve the paragraphs of the transcript by its ID.": "Retrieve the paragraphs of the transcript by its ID.",
"Export the transcript as SRT or VTT subtitles.": "Export the transcript as SRT or VTT subtitles.",
"Get the result of the redacted audio model.": "Get the result of the redacted audio model.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.",
"Remove the data from the transcript and mark it as deleted.": "Remove the data from the transcript and mark it as deleted.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Use the LeMUR task endpoint to input your own LLM prompt.",
"Retrieve a LeMUR response that was previously generated.": "Retrieve a LeMUR response that was previously generated.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.",
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
"Audio File": "Audio File",
"Audio URL": "Audio URL",
"Language Code": "Language Code",
"Language Detection": "Language Detection",
"Language Confidence Threshold": "Language Confidence Threshold",
"Speech Model": "Speech Model",
"Punctuate": "Punctuate",
"Format Text": "Format Text",
"Disfluencies": "Disfluencies",
"Dual Channel": "Dual Channel",
"Webhook URL": "Webhook URL",
"Webhook Auth Header Name": "Webhook Auth Header Name",
"Webhook Auth Header Value": "Webhook Auth Header Value",
"Key Phrases": "Key Phrases",
"Audio Start From": "Audio Start From",
"Audio End At": "Audio End At",
"Word Boost": "Word Boost",
"Word Boost Level": "Word Boost Level",
"Filter Profanity": "Filter Profanity",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Redact PII Audio Quality",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Redact PII Substitution",
"Speaker Labels": "Speaker Labels",
"Speakers Expected": "Speakers Expected",
"Content Moderation": "Content Moderation",
"Content Moderation Confidence": "Content Moderation Confidence",
"Topic Detection": "Topic Detection",
"Custom Spellings": "Custom Spellings",
"Sentiment Analysis": "Sentiment Analysis",
"Auto Chapters": "Auto Chapters",
"Entity Detection": "Entity Detection",
"Speech Threshold": "Speech Threshold",
"Enable Summarization": "Enable Summarization",
"Summary Model": "Summary Model",
"Summary Type": "Summary Type",
"Enable Custom Topics": "Enable Custom Topics",
"Custom Topics": "Custom Topics",
"Wait until transcript is ready": "Wait until transcript is ready",
"Throw if transcript status is error": "Throw if transcript status is error",
"Transcript ID": "Transcript ID",
"Subtitles Format": "Subtitles Format",
"Number of Characters per Caption": "Number of Characters per Caption",
"Download file?": "Download file?",
"Download File Name": "Download File Name",
"Words": "Words",
"Limit": "Limit",
"Status": "Status",
"Created On": "Created On",
"Before ID": "Before ID",
"After ID": "After ID",
"Throttled Only": "Throttled Only",
"Prompt": "Prompt",
"Transcript IDs": "Transcript IDs",
"Input Text": "Input Text",
"Context": "Context",
"Final Model": "Final Model",
"Maximum Output Size": "Maximum Output Size",
"Temperature": "Temperature",
"LeMUR request ID": "LeMUR request ID",
"Method": "Method",
"Headers": "Headers",
"Query Parameters": "Query Parameters",
"Body": "Body",
"Response is Binary ?": "Response is Binary ?",
"No Error on Failure": "No Error on Failure",
"Timeout (in seconds)": "Timeout (in seconds)",
"The File or URL of the audio or video file.": "The File or URL of the audio or video file.",
"The URL of the audio or video file to transcribe.": "The URL of the audio or video file to transcribe.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "The speech model to use for the transcription. When `null`, the \"best\" model is used.",
"Enable Automatic Punctuation, can be true or false": "Enable Automatic Punctuation, can be true or false",
"Enable Text Formatting, can be true or false": "Enable Text Formatting, can be true or false",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcribe Filler Words, like \"umm\", in your media file; can be true or false",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "The header name to be sent with the transcript completed or failed webhook requests",
"The header value to send back with the transcript completed or failed webhook requests for added security": "The header value to send back with the transcript completed or failed webhook requests for added security",
"Enable Key Phrases, either true or false": "Enable Key Phrases, either true or false",
"The point in time, in milliseconds, to begin transcribing in your media file": "The point in time, in milliseconds, to begin transcribing in your media file",
"The point in time, in milliseconds, to stop transcribing in your media file": "The point in time, in milliseconds, to stop transcribing in your media file",
"The list of custom vocabulary to boost transcription probability for": "The list of custom vocabulary to boost transcription probability for",
"How much to boost specified words": "How much to boost specified words",
"Filter profanity from the transcribed text, can be true or false": "Filter profanity from the transcribed text, can be true or false",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redact PII from the transcribed text using the Redact PII model, can be true or false",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "The confidence threshold for the Content Moderation model. Values must be between 25 and 100.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false",
"The model to summarize the transcript": "The model to summarize the transcript",
"The type of summary": "The type of summary",
"Enable custom topics, either true or false": "Enable custom topics, either true or false",
"The list of custom topics": "The list of custom topics",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.",
"If the transcript status is \"error\", throw an error.": "If the transcript status is \"error\", throw an error.",
"The maximum number of characters per caption": "The maximum number of characters per caption",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "The desired file name for storing in ActivePieces. Make sure the file extension is correct.",
"Keywords to search for": "Keywords to search for",
"Maximum amount of transcripts to retrieve": "Maximum amount of transcripts to retrieve",
"Filter by transcript status": "Filter by transcript status",
"Only get transcripts created on this date": "Only get transcripts created on this date",
"Get transcripts that were created before this transcript ID": "Get transcripts that were created before this transcript ID",
"Get transcripts that were created after this transcript ID": "Get transcripts that were created after this transcript ID",
"Only get throttled transcripts, overrides the status filter": "Only get throttled transcripts, overrides the status filter",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Your text to prompt the model to produce a desired output, including any context you want to pass into the model.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Context to provide the model. This can be a string or a free-form JSON value.",
"The model that is used for the final prompt after compression is performed.\n": "The model that is used for the final prompt after compression is performed.\n",
"Max output size in tokens, up to 4000": "Max output size in tokens, up to 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.",
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
"English (Global)": "English (Global)",
"English (Australian)": "English (Australian)",
"English (British)": "English (British)",
"English (US)": "English (US)",
"Spanish": "Spanish",
"French": "French",
"German": "German",
"Italian": "Italian",
"Portuguese": "Portuguese",
"Dutch": "Dutch",
"Afrikaans": "Afrikaans",
"Albanian": "Albanian",
"Amharic": "Amharic",
"Arabic": "Arabic",
"Armenian": "Armenian",
"Assamese": "Assamese",
"Azerbaijani": "Azerbaijani",
"Bashkir": "Bashkir",
"Basque": "Basque",
"Belarusian": "Belarusian",
"Bengali": "Bengali",
"Bosnian": "Bosnian",
"Breton": "Breton",
"Bulgarian": "Bulgarian",
"Burmese": "Burmese",
"Catalan": "Catalan",
"Chinese": "Chinese",
"Croatian": "Croatian",
"Czech": "Czech",
"Danish": "Danish",
"Estonian": "Estonian",
"Faroese": "Faroese",
"Finnish": "Finnish",
"Galician": "Galician",
"Georgian": "Georgian",
"Greek": "Greek",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Hawaiian",
"Hebrew": "Hebrew",
"Hindi": "Hindi",
"Hungarian": "Hungarian",
"Icelandic": "Icelandic",
"Indonesian": "Indonesian",
"Japanese": "Japanese",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Korean",
"Lao": "Lao",
"Latin": "Latin",
"Latvian": "Latvian",
"Lingala": "Lingala",
"Lithuanian": "Lithuanian",
"Luxembourgish": "Luxembourgish",
"Macedonian": "Macedonian",
"Malagasy": "Malagasy",
"Malay": "Malay",
"Malayalam": "Malayalam",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Marathi",
"Mongolian": "Mongolian",
"Nepali": "Nepali",
"Norwegian": "Norwegian",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "Persian",
"Polish": "Polish",
"Romanian": "Romanian",
"Russian": "Russian",
"Sanskrit": "Sanskrit",
"Serbian": "Serbian",
"Shona": "Shona",
"Sindhi": "Sindhi",
"Sinhala": "Sinhala",
"Slovak": "Slovak",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Swahili",
"Swedish": "Swedish",
"Tagalog": "Tagalog",
"Tajik": "Tajik",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Thai",
"Tibetan": "Tibetan",
"Turkish": "Turkish",
"Turkmen": "Turkmen",
"Ukrainian": "Ukrainian",
"Urdu": "Urdu",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Welsh",
"Yiddish": "Yiddish",
"Yoruba": "Yoruba",
"Best": "Best",
"Nano": "Nano",
"Low": "Low",
"Default": "Default",
"High": "High",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Account Number",
"Banking Information": "Banking Information",
"Blood Type": "Blood Type",
"Credit Card CVV": "Credit Card CVV",
"Credit Card Expiration": "Credit Card Expiration",
"Credit Card Number": "Credit Card Number",
"Date": "Date",
"Date Interval": "Date Interval",
"Date of Birth": "Date of Birth",
"Driver's License": "Driver's License",
"Drug": "Drug",
"Duration": "Duration",
"Email Address": "Email Address",
"Event": "Event",
"Filename": "Filename",
"Gender Sexuality": "Gender Sexuality",
"Healthcare Number": "Healthcare Number",
"Injury": "Injury",
"IP Address": "IP Address",
"Language": "Language",
"Location": "Location",
"Marital Status": "Marital Status",
"Medical Condition": "Medical Condition",
"Medical Process": "Medical Process",
"Money Amount": "Money Amount",
"Nationality": "Nationality",
"Number Sequence": "Number Sequence",
"Occupation": "Occupation",
"Organization": "Organization",
"Passport Number": "Passport Number",
"Password": "Password",
"Person Age": "Person Age",
"Person Name": "Person Name",
"Phone Number": "Phone Number",
"Physical Attribute": "Physical Attribute",
"Political Affiliation": "Political Affiliation",
"Religion": "Religion",
"Statistics": "Statistics",
"Time": "Time",
"URL": "URL",
"US Social Security Number": "US Social Security Number",
"Username": "Username",
"Vehicle ID": "Vehicle ID",
"Zodiac Sign": "Zodiac Sign",
"Entity Name": "Entity Name",
"Hash": "Hash",
"Informative": "Informative",
"Conversational": "Conversational",
"Catchy": "Catchy",
"Bullets": "Bullets",
"Bullets Verbose": "Bullets Verbose",
"Gist": "Gist",
"Headline": "Headline",
"Paragraph": "Paragraph",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "Queued",
"Processing": "Processing",
"Completed": "Completed",
"Error": "Error",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (on Anthropic)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (on Anthropic)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (on Anthropic)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (on Anthropic)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (on Anthropic)",
"Claude 2 (on Anthropic)": "Claude 2 (on Anthropic)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (on Anthropic)",
"Basic": "Basic",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (Hosted by AssemblyAI)",
"GET": "GET",
"POST": "POST",
"PATCH": "PATCH",
"PUT": "PUT",
"DELETE": "DELETE",
"HEAD": "HEAD"
}

View File

@@ -0,0 +1,343 @@
{
"AssemblyAI": "AssemblyAI",
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Transcribe and extract data from audio using AssemblyAI's Speech AI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Upload File",
"Transcribe": "Transcribe",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Get Transcript Sentences",
"Get Transcript Paragraphs": "Get Transcript Paragraphs",
"Get Transcript Subtitles": "Get Transcript Subtitles",
"Get Transcript Redacted Audio": "Get Transcript Redacted Audio",
"Search words in transcript": "Search words in transcript",
"List transcripts": "List transcripts",
"Delete transcript": "Delete transcript",
"Run a Task using LeMUR": "Run a Task using LeMUR",
"Retrieve LeMUR response": "Retrieve LeMUR response",
"Purge LeMUR request data": "Purge LeMUR request data",
"Custom API Call": "Custom API Call",
"Upload a media file to AssemblyAI's servers.": "Upload a media file to AssemblyAI's servers.",
"Transcribe an audio or video file using AssemblyAI.": "Transcribe an audio or video file using AssemblyAI.",
"Retrieves a transcript by its ID.": "Retrieves a transcript by its ID.",
"Retrieve the sentences of the transcript by its ID.": "Retrieve the sentences of the transcript by its ID.",
"Retrieve the paragraphs of the transcript by its ID.": "Retrieve the paragraphs of the transcript by its ID.",
"Export the transcript as SRT or VTT subtitles.": "Export the transcript as SRT or VTT subtitles.",
"Get the result of the redacted audio model.": "Get the result of the redacted audio model.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.",
"Remove the data from the transcript and mark it as deleted.": "Remove the data from the transcript and mark it as deleted.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Use the LeMUR task endpoint to input your own LLM prompt.",
"Retrieve a LeMUR response that was previously generated.": "Retrieve a LeMUR response that was previously generated.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.",
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
"Audio File": "Audio File",
"Audio URL": "Audio URL",
"Language Code": "Language Code",
"Language Detection": "Language Detection",
"Language Confidence Threshold": "Language Confidence Threshold",
"Speech Model": "Speech Model",
"Punctuate": "Punctuate",
"Format Text": "Format Text",
"Disfluencies": "Disfluencies",
"Dual Channel": "Dual Channel",
"Webhook URL": "Webhook URL",
"Webhook Auth Header Name": "Webhook Auth Header Name",
"Webhook Auth Header Value": "Webhook Auth Header Value",
"Key Phrases": "Key Phrases",
"Audio Start From": "Audio Start From",
"Audio End At": "Audio End At",
"Word Boost": "Word Boost",
"Word Boost Level": "Word Boost Level",
"Filter Profanity": "Filter Profanity",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Redact PII Audio Quality",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Redact PII Substitution",
"Speaker Labels": "Speaker Labels",
"Speakers Expected": "Speakers Expected",
"Content Moderation": "Content Moderation",
"Content Moderation Confidence": "Content Moderation Confidence",
"Topic Detection": "Topic Detection",
"Custom Spellings": "Custom Spellings",
"Sentiment Analysis": "Sentiment Analysis",
"Auto Chapters": "Auto Chapters",
"Entity Detection": "Entity Detection",
"Speech Threshold": "Speech Threshold",
"Enable Summarization": "Enable Summarization",
"Summary Model": "Summary Model",
"Summary Type": "Summary Type",
"Enable Custom Topics": "Enable Custom Topics",
"Custom Topics": "Custom Topics",
"Wait until transcript is ready": "Wait until transcript is ready",
"Throw if transcript status is error": "Throw if transcript status is error",
"Transcript ID": "Transcript ID",
"Subtitles Format": "Subtitles Format",
"Number of Characters per Caption": "Number of Characters per Caption",
"Download file?": "Download file?",
"Download File Name": "Download File Name",
"Words": "Words",
"Limit": "Limit",
"Status": "Status",
"Created On": "Created On",
"Before ID": "Before ID",
"After ID": "After ID",
"Throttled Only": "Throttled Only",
"Prompt": "Prompt",
"Transcript IDs": "Transcript IDs",
"Input Text": "Input Text",
"Context": "Context",
"Final Model": "Final Model",
"Maximum Output Size": "Maximum Output Size",
"Temperature": "Temperature",
"LeMUR request ID": "LeMUR request ID",
"Method": "Method",
"Headers": "Headers",
"Query Parameters": "Query Parameters",
"Body": "Body",
"No Error on Failure": "No Error on Failure",
"Timeout (in seconds)": "Timeout (in seconds)",
"The File or URL of the audio or video file.": "The File or URL of the audio or video file.",
"The URL of the audio or video file to transcribe.": "The URL of the audio or video file to transcribe.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "The speech model to use for the transcription. When `null`, the \"best\" model is used.",
"Enable Automatic Punctuation, can be true or false": "Enable Automatic Punctuation, can be true or false",
"Enable Text Formatting, can be true or false": "Enable Text Formatting, can be true or false",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcribe Filler Words, like \"umm\", in your media file; can be true or false",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe send two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "The header name to be sent with the transcript completed or failed webhook requests",
"The header value to send back with the transcript completed or failed webhook requests for added security": "The header value to send back with the transcript completed or failed webhook requests for added security",
"Enable Key Phrases, either true or false": "Enable Key Phrases, either true or false",
"The point in time, in milliseconds, to begin transcribing in your media file": "The point in time, in milliseconds, to begin transcribing in your media file",
"The point in time, in milliseconds, to stop transcribing in your media file": "The point in time, in milliseconds, to stop transcribing in your media file",
"The list of custom vocabulary to boost transcription probability for": "The list of custom vocabulary to boost transcription probability for",
"How much to boost specified words": "How much to boost specified words",
"Filter profanity from the transcribed text, can be true or false": "Filter profanity from the transcribed text, can be true or false",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redact PII from the transcribed text using the Redact PII model, can be true or false",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "The confidence threshold for the Content Moderation model. Values must be between 25 and 100.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false",
"The model to summarize the transcript": "The model to summarize the transcript",
"The type of summary": "The type of summary",
"Enable custom topics, either true or false": "Enable custom topics, either true or false",
"The list of custom topics": "The list of custom topics",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.",
"If the transcript status is \"error\", throw an error.": "If the transcript status is \"error\", throw an error.",
"The maximum number of characters per caption": "The maximum number of characters per caption",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "The desired file name for storing in ActivePieces. Make sure the file extension is correct.",
"Keywords to search for": "Keywords to search for",
"Maximum amount of transcripts to retrieve": "Maximum amount of transcripts to retrieve",
"Filter by transcript status": "Filter by transcript status",
"Only get transcripts created on this date": "Only get transcripts created on this date",
"Get transcripts that were created before this transcript ID": "Get transcripts that were created before this transcript ID",
"Get transcripts that were created after this transcript ID": "Get transcripts that were created after this transcript ID",
"Only get throttled transcripts, overrides the status filter": "Only get throttled transcripts, overrides the status filter",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Your text to prompt the model to produce a desired output, including any context you want to pass into the model.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Context to provide the model. This can be a string or a free-form JSON value.",
"The model that is used for the final prompt after compression is performed.\n": "The model that is used for the final prompt after compression is performed.\n",
"Max output size in tokens, up to 4000": "Max output size in tokens, up to 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.",
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
"English (Global)": "English (Global)",
"English (Australian)": "English (Australian)",
"English (British)": "English (British)",
"English (US)": "English (US)",
"Spanish": "Spanish",
"French": "French",
"German": "German",
"Italian": "Italian",
"Portuguese": "Portuguese",
"Dutch": "Dutch",
"Afrikaans": "Afrikaans",
"Albanian": "Albanian",
"Amharic": "Amharic",
"Arabic": "Arabic",
"Armenian": "Armenian",
"Assamese": "Assamese",
"Azerbaijani": "Azerbaijani",
"Bashkir": "Bashkir",
"Basque": "Basque",
"Belarusian": "Belarusian",
"Bengali": "Bengali",
"Bosnian": "Bosnian",
"Breton": "Breton",
"Bulgarian": "Bulgarian",
"Burmese": "Burmese",
"Catalan": "Catalan",
"Chinese": "Chinese",
"Croatian": "Croatian",
"Czech": "Czech",
"Danish": "Danish",
"Estonian": "Estonian",
"Faroese": "Faroese",
"Finnish": "Finnish",
"Galician": "Galician",
"Georgian": "Georgian",
"Greek": "Greek",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Hawaiian",
"Hebrew": "Hebrew",
"Hindi": "Hindi",
"Hungarian": "Hungarian",
"Icelandic": "Icelandic",
"Indonesian": "Indonesian",
"Japanese": "Japanese",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Korean",
"Lao": "Lao",
"Latin": "Latin",
"Latvian": "Latvian",
"Lingala": "Lingala",
"Lithuanian": "Lithuanian",
"Luxembourgish": "Luxembourgish",
"Macedonian": "Macedonian",
"Malagasy": "Malagasy",
"Malay": "Malay",
"Malayalam": "Malayalam",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Marathi",
"Mongolian": "Mongolian",
"Nepali": "Nepali",
"Norwegian": "Norwegian",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "Persian",
"Polish": "Polish",
"Romanian": "Romanian",
"Russian": "Russian",
"Sanskrit": "Sanskrit",
"Serbian": "Serbian",
"Shona": "Shona",
"Sindhi": "Sindhi",
"Sinhala": "Sinhala",
"Slovak": "Slovak",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Swahili",
"Swedish": "Swedish",
"Tagalog": "Tagalog",
"Tajik": "Tajik",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Thai",
"Tibetan": "Tibetan",
"Turkish": "Turkish",
"Turkmen": "Turkmen",
"Ukrainian": "Ukrainian",
"Urdu": "Urdu",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Welsh",
"Yiddish": "Yiddish",
"Yoruba": "Yoruba",
"Best": "Best",
"Nano": "Nano",
"Low": "Low",
"Default": "Default",
"High": "High",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Account Number",
"Banking Information": "Banking Information",
"Blood Type": "Blood Type",
"Credit Card CVV": "Credit Card CVV",
"Credit Card Expiration": "Credit Card Expiration",
"Credit Card Number": "Credit Card Number",
"Date": "Date",
"Date Interval": "Date Interval",
"Date of Birth": "Date of Birth",
"Driver's License": "Driver's License",
"Drug": "Drug",
"Duration": "Duration",
"Email Address": "Email Address",
"Event": "Event",
"Filename": "Filename",
"Gender Sexuality": "Gender Sexuality",
"Healthcare Number": "Healthcare Number",
"Injury": "Injury",
"IP Address": "IP Address",
"Language": "Language",
"Location": "Location",
"Marital Status": "Marital Status",
"Medical Condition": "Medical Condition",
"Medical Process": "Medical Process",
"Money Amount": "Money Amount",
"Nationality": "Nationality",
"Number Sequence": "Number Sequence",
"Occupation": "Occupation",
"Organization": "Organization",
"Passport Number": "Passport Number",
"Password": "Password",
"Person Age": "Person Age",
"Person Name": "Person Name",
"Phone Number": "Phone Number",
"Physical Attribute": "Physical Attribute",
"Political Affiliation": "Political Affiliation",
"Religion": "Religion",
"Statistics": "Statistics",
"Time": "Time",
"URL": "URL",
"US Social Security Number": "US Social Security Number",
"Username": "Username",
"Vehicle ID": "Vehicle ID",
"Zodiac Sign": "Zodiac Sign",
"Entity Name": "Entity Name",
"Hash": "Hash",
"Informative": "Informative",
"Conversational": "Conversational",
"Catchy": "Catchy",
"Bullets": "Bullets",
"Bullets Verbose": "Bullets Verbose",
"Gist": "Gist",
"Headline": "Headline",
"Paragraph": "Paragraph",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "Queued",
"Processing": "Processing",
"Completed": "Completed",
"Error": "Error",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (on Anthropic)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (on Anthropic)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (on Anthropic)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (on Anthropic)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (on Anthropic)",
"Claude 2 (on Anthropic)": "Claude 2 (on Anthropic)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (on Anthropic)",
"Basic": "Basic",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (Hosted by AssemblyAI)",
"GET": "GET",
"POST": "POST",
"PATCH": "PATCH",
"PUT": "PUT",
"DELETE": "DELETE",
"HEAD": "HEAD"
}

View File

@@ -0,0 +1,344 @@
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Transcribe and extract data from audio using AssemblyAI's Speech AI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Upload File",
"Transcribe": "Transcribe",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Get Transcript Sentences",
"Get Transcript Paragraphs": "Get Transcript Paragraphs",
"Get Transcript Subtitles": "Get Transcript Subtitles",
"Get Transcript Redacted Audio": "Get Transcript Redacted Audio",
"Search words in transcript": "Search words in transcript",
"List transcripts": "List transcripts",
"Delete transcript": "Delete transcript",
"Run a Task using LeMUR": "Run a Task using LeMUR",
"Retrieve LeMUR response": "Retrieve LeMUR response",
"Purge LeMUR request data": "Purge LeMUR request data",
"Custom API Call": "自定义 API 呼叫",
"Upload a media file to AssemblyAI's servers.": "Upload a media file to AssemblyAI's servers.",
"Transcribe an audio or video file using AssemblyAI.": "Transcribe an audio or video file using AssemblyAI.",
"Retrieves a transcript by its ID.": "Retrieves a transcript by its ID.",
"Retrieve the sentences of the transcript by its ID.": "Retrieve the sentences of the transcript by its ID.",
"Retrieve the paragraphs of the transcript by its ID.": "Retrieve the paragraphs of the transcript by its ID.",
"Export the transcript as SRT or VTT subtitles.": "Export the transcript as SRT or VTT subtitles.",
"Get the result of the redacted audio model.": "Get the result of the redacted audio model.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.",
"Remove the data from the transcript and mark it as deleted.": "Remove the data from the transcript and mark it as deleted.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Use the LeMUR task endpoint to input your own LLM prompt.",
"Retrieve a LeMUR response that was previously generated.": "Retrieve a LeMUR response that was previously generated.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.",
"Make a custom API call to a specific endpoint": "将一个自定义 API 调用到一个特定的终点",
"Audio File": "Audio File",
"Audio URL": "Audio URL",
"Language Code": "Language Code",
"Language Detection": "Language Detection",
"Language Confidence Threshold": "Language Confidence Threshold",
"Speech Model": "Speech Model",
"Punctuate": "Punctuate",
"Format Text": "Format Text",
"Disfluencies": "Disfluencies",
"Dual Channel": "Dual Channel",
"Webhook URL": "Webhook URL",
"Webhook Auth Header Name": "Webhook Auth Header Name",
"Webhook Auth Header Value": "Webhook Auth Header Value",
"Key Phrases": "Key Phrases",
"Audio Start From": "Audio Start From",
"Audio End At": "Audio End At",
"Word Boost": "Word Boost",
"Word Boost Level": "Word Boost Level",
"Filter Profanity": "Filter Profanity",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Redact PII Audio Quality",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Redact PII Substitution",
"Speaker Labels": "Speaker Labels",
"Speakers Expected": "Speakers Expected",
"Content Moderation": "Content Moderation",
"Content Moderation Confidence": "Content Moderation Confidence",
"Topic Detection": "Topic Detection",
"Custom Spellings": "Custom Spellings",
"Sentiment Analysis": "Sentiment Analysis",
"Auto Chapters": "Auto Chapters",
"Entity Detection": "Entity Detection",
"Speech Threshold": "Speech Threshold",
"Enable Summarization": "Enable Summarization",
"Summary Model": "Summary Model",
"Summary Type": "Summary Type",
"Enable Custom Topics": "Enable Custom Topics",
"Custom Topics": "Custom Topics",
"Wait until transcript is ready": "Wait until transcript is ready",
"Throw if transcript status is error": "Throw if transcript status is error",
"Transcript ID": "Transcript ID",
"Subtitles Format": "Subtitles Format",
"Number of Characters per Caption": "Number of Characters per Caption",
"Download file?": "Download file?",
"Download File Name": "Download File Name",
"Words": "Words",
"Limit": "Limit",
"Status": "状态",
"Created On": "Created On",
"Before ID": "Before ID",
"After ID": "After ID",
"Throttled Only": "Throttled Only",
"Prompt": "Prompt",
"Transcript IDs": "Transcript IDs",
"Input Text": "Input Text",
"Context": "Context",
"Final Model": "Final Model",
"Maximum Output Size": "Maximum Output Size",
"Temperature": "Temperature",
"LeMUR request ID": "LeMUR request ID",
"Method": "方法",
"Headers": "信头",
"Query Parameters": "查询参数",
"Body": "正文内容",
"Response is Binary ?": "Response is Binary ?",
"No Error on Failure": "失败时没有错误",
"Timeout (in seconds)": "超时(秒)",
"The File or URL of the audio or video file.": "The File or URL of the audio or video file.",
"The URL of the audio or video file to transcribe.": "The URL of the audio or video file to transcribe.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "The speech model to use for the transcription. When `null`, the \"best\" model is used.",
"Enable Automatic Punctuation, can be true or false": "Enable Automatic Punctuation, can be true or false",
"Enable Text Formatting, can be true or false": "Enable Text Formatting, can be true or false",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcribe Filler Words, like \"umm\", in your media file; can be true or false",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "The header name to be sent with the transcript completed or failed webhook requests",
"The header value to send back with the transcript completed or failed webhook requests for added security": "The header value to send back with the transcript completed or failed webhook requests for added security",
"Enable Key Phrases, either true or false": "Enable Key Phrases, either true or false",
"The point in time, in milliseconds, to begin transcribing in your media file": "The point in time, in milliseconds, to begin transcribing in your media file",
"The point in time, in milliseconds, to stop transcribing in your media file": "The point in time, in milliseconds, to stop transcribing in your media file",
"The list of custom vocabulary to boost transcription probability for": "The list of custom vocabulary to boost transcription probability for",
"How much to boost specified words": "How much to boost specified words",
"Filter profanity from the transcribed text, can be true or false": "Filter profanity from the transcribed text, can be true or false",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redact PII from the transcribed text using the Redact PII model, can be true or false",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "The confidence threshold for the Content Moderation model. Values must be between 25 and 100.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false",
"The model to summarize the transcript": "The model to summarize the transcript",
"The type of summary": "The type of summary",
"Enable custom topics, either true or false": "Enable custom topics, either true or false",
"The list of custom topics": "The list of custom topics",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.",
"If the transcript status is \"error\", throw an error.": "If the transcript status is \"error\", throw an error.",
"The maximum number of characters per caption": "The maximum number of characters per caption",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "The desired file name for storing in ActivePieces. Make sure the file extension is correct.",
"Keywords to search for": "Keywords to search for",
"Maximum amount of transcripts to retrieve": "Maximum amount of transcripts to retrieve",
"Filter by transcript status": "Filter by transcript status",
"Only get transcripts created on this date": "Only get transcripts created on this date",
"Get transcripts that were created before this transcript ID": "Get transcripts that were created before this transcript ID",
"Get transcripts that were created after this transcript ID": "Get transcripts that were created after this transcript ID",
"Only get throttled transcripts, overrides the status filter": "Only get throttled transcripts, overrides the status filter",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Your text to prompt the model to produce a desired output, including any context you want to pass into the model.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Context to provide the model. This can be a string or a free-form JSON value.",
"The model that is used for the final prompt after compression is performed.\n": "The model that is used for the final prompt after compression is performed.\n",
"Max output size in tokens, up to 4000": "Max output size in tokens, up to 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.",
"Authorization headers are injected automatically from your connection.": "授权头自动从您的连接中注入。",
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
"English (Global)": "English (Global)",
"English (Australian)": "English (Australian)",
"English (British)": "English (British)",
"English (US)": "English (US)",
"Spanish": "Spanish",
"French": "French",
"German": "German",
"Italian": "Italian",
"Portuguese": "Portuguese",
"Dutch": "Dutch",
"Afrikaans": "Afrikaans",
"Albanian": "Albanian",
"Amharic": "Amharic",
"Arabic": "Arabic",
"Armenian": "Armenian",
"Assamese": "Assamese",
"Azerbaijani": "Azerbaijani",
"Bashkir": "Bashkir",
"Basque": "Basque",
"Belarusian": "Belarusian",
"Bengali": "Bengali",
"Bosnian": "Bosnian",
"Breton": "Breton",
"Bulgarian": "Bulgarian",
"Burmese": "Burmese",
"Catalan": "Catalan",
"Chinese": "Chinese",
"Croatian": "Croatian",
"Czech": "Czech",
"Danish": "Danish",
"Estonian": "Estonian",
"Faroese": "Faroese",
"Finnish": "Finnish",
"Galician": "Galician",
"Georgian": "Georgian",
"Greek": "Greek",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Hawaiian",
"Hebrew": "Hebrew",
"Hindi": "Hindi",
"Hungarian": "Hungarian",
"Icelandic": "Icelandic",
"Indonesian": "Indonesian",
"Japanese": "Japanese",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Korean",
"Lao": "Lao",
"Latin": "Latin",
"Latvian": "Latvian",
"Lingala": "Lingala",
"Lithuanian": "Lithuanian",
"Luxembourgish": "Luxembourgish",
"Macedonian": "Macedonian",
"Malagasy": "Malagasy",
"Malay": "Malay",
"Malayalam": "Malayalam",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Marathi",
"Mongolian": "Mongolian",
"Nepali": "Nepali",
"Norwegian": "Norwegian",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "Persian",
"Polish": "Polish",
"Romanian": "Romanian",
"Russian": "Russian",
"Sanskrit": "Sanskrit",
"Serbian": "Serbian",
"Shona": "Shona",
"Sindhi": "Sindhi",
"Sinhala": "Sinhala",
"Slovak": "Slovak",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Swahili",
"Swedish": "Swedish",
"Tagalog": "Tagalog",
"Tajik": "Tajik",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Thai",
"Tibetan": "Tibetan",
"Turkish": "Turkish",
"Turkmen": "Turkmen",
"Ukrainian": "Ukrainian",
"Urdu": "Urdu",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Welsh",
"Yiddish": "Yiddish",
"Yoruba": "Yoruba",
"Best": "Best",
"Nano": "Nano",
"Low": "Low",
"Default": "Default",
"High": "High",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Account Number",
"Banking Information": "Banking Information",
"Blood Type": "Blood Type",
"Credit Card CVV": "Credit Card CVV",
"Credit Card Expiration": "Credit Card Expiration",
"Credit Card Number": "Credit Card Number",
"Date": "Date",
"Date Interval": "Date Interval",
"Date of Birth": "Date of Birth",
"Driver's License": "Driver's License",
"Drug": "Drug",
"Duration": "期限",
"Email Address": "Email Address",
"Event": "Event",
"Filename": "Filename",
"Gender Sexuality": "Gender Sexuality",
"Healthcare Number": "Healthcare Number",
"Injury": "Injury",
"IP Address": "IP 地址",
"Language": "Language",
"Location": "Location",
"Marital Status": "Marital Status",
"Medical Condition": "Medical Condition",
"Medical Process": "Medical Process",
"Money Amount": "Money Amount",
"Nationality": "Nationality",
"Number Sequence": "Number Sequence",
"Occupation": "Occupation",
"Organization": "Organization",
"Passport Number": "Passport Number",
"Password": "Password",
"Person Age": "Person Age",
"Person Name": "Person Name",
"Phone Number": "Phone Number",
"Physical Attribute": "Physical Attribute",
"Political Affiliation": "Political Affiliation",
"Religion": "Religion",
"Statistics": "Statistics",
"Time": "Time",
"URL": "URL",
"US Social Security Number": "US Social Security Number",
"Username": "用户名",
"Vehicle ID": "Vehicle ID",
"Zodiac Sign": "Zodiac Sign",
"Entity Name": "Entity Name",
"Hash": "Hash",
"Informative": "Informative",
"Conversational": "Conversational",
"Catchy": "Catchy",
"Bullets": "Bullets",
"Bullets Verbose": "Bullets Verbose",
"Gist": "Gist",
"Headline": "Headline",
"Paragraph": "Paragraph",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "Queued",
"Processing": "Processing",
"Completed": "Completed",
"Error": "Error",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (on Anthropic)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (on Anthropic)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (on Anthropic)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (on Anthropic)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (on Anthropic)",
"Claude 2 (on Anthropic)": "Claude 2 (on Anthropic)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (on Anthropic)",
"Basic": "Basic",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (Hosted by AssemblyAI)",
"GET": "获取",
"POST": "帖子",
"PATCH": "PATCH",
"PUT": "弹出",
"DELETE": "删除",
"HEAD": "黑色"
}

View File

@@ -0,0 +1,32 @@
import { createPiece } from '@activepieces/pieces-framework';
import * as actions from './lib/actions';
import { assemblyaiAuth } from './lib/auth';
import { PieceCategory } from '@activepieces/shared';
export const assemblyai = createPiece({
displayName: 'AssemblyAI',
auth: assemblyaiAuth,
categories: [PieceCategory.ARTIFICIAL_INTELLIGENCE],
description:
"Transcribe and extract data from audio using AssemblyAI's Speech AI.",
minimumSupportedRelease: '0.30.0',
logoUrl: 'https://cdn.activepieces.com/pieces/assemblyai.png',
authors: ['AssemblyAI'],
actions: [
actions.uploadFile,
actions.transcribe,
actions.getTranscript,
actions.getSentences,
actions.getParagraphs,
actions.getSubtitles,
actions.getRedactedAudio,
actions.wordSearch,
actions.listTranscripts,
actions.deleteTranscript,
actions.lemurTask,
actions.getLemurResponse,
actions.purgeLemurRequestData,
actions.customApiCall,
],
triggers: [],
});

View File

@@ -0,0 +1,12 @@
import { createCustomApiCallAction } from '@activepieces/pieces-common';
import { assemblyaiAuth } from '../auth';
// Generic HTTP action against the AssemblyAI REST API. The base URL is
// fixed; the connection's API key is injected as the Authorization header
// of every request.
export const customApiCall = createCustomApiCallAction({
  auth: assemblyaiAuth,
  baseUrl: () => 'https://api.assemblyai.com',
  authMapping: async (auth) => ({
    Authorization: `${auth.secret_text}`,
  }),
});

View File

@@ -0,0 +1 @@
// Barrel module: re-exports the file-related actions of the AssemblyAI piece.
export { uploadFile } from './upload';

View File

@@ -0,0 +1,25 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
// Uploads a local media file to AssemblyAI's upload endpoint and returns
// the SDK's upload result unchanged.
export const uploadFile = createAction({
  name: 'uploadFile',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Upload File',
  description: "Upload a media file to AssemblyAI's servers.",
  props: {
    file: Property.File({
      displayName: 'Audio File',
      description: 'The File or URL of the audio or video file.',
      required: true,
    }),
  },
  run: async (context) => {
    const client = getAssemblyAIClient(context);
    return await client.files.upload(context.propsValue.file.data);
  },
});

View File

@@ -0,0 +1,14 @@
// Barrel module aggregating every action exposed by the AssemblyAI piece:
// file upload, transcript creation/lookup helpers, LeMUR endpoints, and the
// generic custom API call.
export { uploadFile } from './files';
export {
  transcribe,
  getTranscript,
  getSentences,
  getParagraphs,
  getSubtitles,
  getRedactedAudio,
  wordSearch,
  listTranscripts,
  deleteTranscript,
} from './transcripts';
export { lemurTask, getLemurResponse, purgeLemurRequestData } from './lemur';
export { customApiCall } from './custom-api-call';

View File

@@ -0,0 +1,22 @@
import { createAction } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { lemurRequestIdProp } from './shared-props';
// Fetches a previously generated LeMUR result by its request ID.
export const getLemurResponse = createAction({
  name: 'getLemurResponse',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Retrieve LeMUR response',
  description: 'Retrieve a LeMUR response that was previously generated.',
  props: {
    request_id: lemurRequestIdProp,
  },
  run: async (context) => {
    const client = getAssemblyAIClient(context);
    return await client.lemur.getResponse(context.propsValue.request_id);
  },
});

View File

@@ -0,0 +1,3 @@
// Barrel module for the LeMUR (LLM-on-audio) actions.
export { lemurTask } from './lemur-task';
export { getLemurResponse } from './get-lemur-response';
export { purgeLemurRequestData } from './purge-lemur-request-data';

View File

@@ -0,0 +1,21 @@
import { createAction } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { props } from '../../generated/lemur-task/props';
// Runs a LeMUR task: sends the user's prompt (plus transcript or text input
// and model tuning options) to AssemblyAI's LeMUR endpoint and returns the
// task response.
export const lemurTask = createAction({
  name: 'lemurTask',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Run a Task using LeMUR',
  description: 'Use the LeMUR task endpoint to input your own LLM prompt.',
  props,
  async run(context) {
    const client = getAssemblyAIClient(context);
    // Forward every LeMUR option the user configured, not only the prompt
    // and transcript IDs — previously input_text, context, final_model,
    // max_output_size and temperature were collected by the generated props
    // but silently dropped from the API request.
    // NOTE(review): assumes the generated props module defines these field
    // names — confirm against generated/lemur-task/props.
    const input = context.propsValue as Record<string, unknown>;
    const taskParams = {
      prompt: input['prompt'],
      transcript_ids: input['transcript_ids'],
      input_text: input['input_text'],
      context: input['context'],
      final_model: input['final_model'],
      max_output_size: input['max_output_size'],
      temperature: input['temperature'],
    };
    // Undefined fields are dropped during JSON serialization by the SDK, so
    // omitted optional props behave exactly as before.
    const taskResponse = await client.lemur.task(
      taskParams as Parameters<typeof client.lemur.task>[0]
    );
    return taskResponse;
  },
});

View File

@@ -0,0 +1,23 @@
import { createAction } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { lemurRequestIdProp } from './shared-props';
// Permanently deletes the stored data of a previously submitted LeMUR
// request (the LLM response and any context supplied with it).
export const purgeLemurRequestData = createAction({
  name: 'purgeLemurRequestData',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Purge LeMUR request data',
  description: `Delete the data for a previously submitted LeMUR request.
The LLM response data, as well as any context provided in the original request will be removed.`,
  props: {
    request_id: lemurRequestIdProp,
  },
  run: async (context) => {
    const client = getAssemblyAIClient(context);
    return await client.lemur.purgeRequestData(context.propsValue.request_id);
  },
});

View File

@@ -0,0 +1,8 @@
import { Property } from '@activepieces/pieces-framework';
// Shared input prop: the ID of an existing LeMUR request. Reused by both the
// "Retrieve LeMUR response" and "Purge LeMUR request data" actions, so the
// description's mention of deletion only applies to the latter.
export const lemurRequestIdProp = Property.ShortText({
  displayName: 'LeMUR request ID',
  description:
    'The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.',
  required: true,
});

View File

@@ -0,0 +1,22 @@
import { createAction } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { transcriptIdProp } from './shared-props';
// Removes a transcript's data on AssemblyAI's side and marks it deleted.
export const deleteTranscript = createAction({
  name: 'deleteTranscript',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Delete transcript',
  description: 'Remove the data from the transcript and mark it as deleted.',
  props: {
    id: transcriptIdProp,
  },
  run: async (context) => {
    const client = getAssemblyAIClient(context);
    return await client.transcripts.delete(context.propsValue.id);
  },
});

View File

@@ -0,0 +1,22 @@
import { createAction } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { transcriptIdProp } from './shared-props';
// Retrieves the paragraph breakdown of a completed transcript.
export const getParagraphs = createAction({
  name: 'getTranscriptParagraphs',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Get Transcript Paragraphs',
  description: 'Retrieve the paragraphs of the transcript by its ID.',
  props: {
    id: transcriptIdProp,
  },
  run: async (context) => {
    const client = getAssemblyAIClient(context);
    return await client.transcripts.paragraphs(context.propsValue.id);
  },
});

View File

@@ -0,0 +1,50 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { transcriptIdProp } from './shared-props';
// Polls the PII-redacted audio status for a transcript. When the redacted
// audio is ready and the user opted in, the audio file is stored in
// Activepieces and attached to the returned payload.
export const getRedactedAudio = createAction({
  name: 'getRedactedAudio',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Get Transcript Redacted Audio',
  description: 'Get the result of the redacted audio model.',
  props: {
    id: transcriptIdProp,
    download_file: Property.Checkbox({
      displayName: 'Download file?',
      required: true,
      defaultValue: false,
    }),
    download_file_name: Property.ShortText({
      displayName: 'Download File Name',
      description:
        'The desired file name for storing in ActivePieces. Make sure the file extension is correct.',
      required: true,
      defaultValue: 'redacted-audio.mp3',
    }),
  },
  run: async (context) => {
    const { id, download_file, download_file_name } = context.propsValue;
    const client = getAssemblyAIClient(context);
    const statusResponse = await client.transcripts.redactedAudio(id);

    // Only fetch and persist the audio when the model has finished AND the
    // user asked for a download; otherwise return the status payload as-is.
    if (statusResponse.status !== 'redacted_audio_ready' || !download_file) {
      return statusResponse;
    }

    const audio = await client.transcripts.redactedAudioFile(id);
    const fileReference = await context.files.write({
      fileName: download_file_name,
      data: Buffer.from(await audio.arrayBuffer()),
    });
    return {
      ...statusResponse,
      file: fileReference,
    };
  },
});

View File

@@ -0,0 +1,22 @@
import { createAction } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { transcriptIdProp } from './shared-props';
// Retrieves the sentence breakdown of a completed transcript.
export const getSentences = createAction({
  name: 'getTranscriptSentences',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Get Transcript Sentences',
  description: 'Retrieve the sentences of the transcript by its ID.',
  props: {
    id: transcriptIdProp,
  },
  run: async (context) => {
    const client = getAssemblyAIClient(context);
    return await client.transcripts.sentences(context.propsValue.id);
  },
});

View File

@@ -0,0 +1,49 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { SubtitleFormat } from 'assemblyai';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
// Exports a transcript as subtitles in SRT or VTT format, optionally
// constraining the caption length.
export const getSubtitles = createAction({
  name: 'getSubtitles',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Get Transcript Subtitles',
  description: 'Export the transcript as SRT or VTT subtitles.',
  props: {
    id: Property.ShortText({
      displayName: 'Transcript ID',
      required: true,
    }),
    format: Property.StaticDropdown({
      displayName: 'Subtitles Format',
      required: true,
      defaultValue: 'srt',
      options: {
        options: [
          { label: 'SRT', value: 'srt' },
          { label: 'VTT', value: 'vtt' },
        ],
      },
    }),
    chars_per_caption: Property.Number({
      displayName: 'Number of Characters per Caption',
      description: 'The maximum number of characters per caption',
      required: false,
    }),
  },
  run: async (context) => {
    const { id, format, chars_per_caption } = context.propsValue;
    const client = getAssemblyAIClient(context);
    return await client.transcripts.subtitles(
      id,
      format as SubtitleFormat,
      chars_per_caption
    );
  },
});

View File

@@ -0,0 +1,20 @@
import { createAction } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { transcriptIdProp } from './shared-props';
/**
 * Action: retrieve a single transcript object by its ID.
 */
export const getTranscript = createAction({
  name: 'getTranscript',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Get Transcript',
  description: 'Retrieves a transcript by its ID.',
  props: {
    id: transcriptIdProp,
  },
  async run(context) {
    // One round-trip: look the transcript up via the SDK and return it.
    const client = getAssemblyAIClient(context);
    return await client.transcripts.get(context.propsValue.id);
  },
});

View File

@@ -0,0 +1,9 @@
// Barrel file: re-exports every transcript action so the piece
// definition can import them all from a single module.
export { transcribe } from './transcribe';
export { getTranscript } from './get-transcript';
export { getSentences } from './get-sentences';
export { getParagraphs } from './get-paragraphs';
export { getSubtitles } from './get-subtitles';
export { getRedactedAudio } from './get-redacted-audio';
export { wordSearch } from './word-search';
export { listTranscripts } from './list-transcripts';
export { deleteTranscript } from './delete-transcript';

View File

@@ -0,0 +1,22 @@
import { createAction } from '@activepieces/pieces-framework';
import { ListTranscriptParams } from 'assemblyai';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { props } from '../../generated/list-transcript/props';
/**
 * Action: page through the transcripts previously created on this
 * AssemblyAI account (newest first).
 */
export const listTranscripts = createAction({
  name: 'listTranscripts',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'List transcripts',
  description: `Retrieve a list of transcripts you created.
Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.`,
  props,
  async run(context) {
    // The generated props map 1:1 onto the SDK's list parameters.
    const client = getAssemblyAIClient(context);
    return await client.transcripts.list(
      context.propsValue as ListTranscriptParams
    );
  },
});

View File

@@ -0,0 +1,6 @@
import { Property } from '@activepieces/pieces-framework';
// Reusable "Transcript ID" text input shared by the transcript-centric
// actions (get/sentences/paragraphs/word-search/etc.).
export const transcriptIdProp = Property.ShortText({
  displayName: 'Transcript ID',
  required: true,
});

View File

@@ -0,0 +1,117 @@
import {
ActionContext,
createAction,
Property,
} from '@activepieces/pieces-framework';
import { ExecutionType, PauseType } from '@activepieces/shared';
import { TranscriptParams } from 'assemblyai';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { props } from '../../generated/transcribe/props';
// The generated transcript props plus two piece-only flags that control
// the pause/resume behavior of this action (they are stripped before the
// request is sent — see createTranscriptParams).
const transcribeProps = {
  ...props,
  wait_until_ready: Property.Checkbox({
    displayName: 'Wait until transcript is ready',
    description: `Wait until the transcript status is "completed" or "error" before moving on to the next step.`,
    required: true,
    defaultValue: true,
  }),
  throw_on_error: Property.Checkbox({
    displayName: 'Throw if transcript status is error',
    description: `If the transcript status is "error", throw an error.`,
    required: true,
    defaultValue: true,
  }),
} as const;
// Context type shared by the run() implementation and its helpers below.
type TranscribeContext = ActionContext<
  typeof assemblyaiAuth,
  typeof transcribeProps
>;
/**
 * Action: submit an audio/video file for transcription.
 *
 * Runs in two phases:
 *  - BEGIN: submit the transcript request. If "wait until ready" is on,
 *    register this flow's resume URL as the AssemblyAI webhook and pause
 *    the run until AssemblyAI calls back.
 *  - RESUME: triggered by that webhook; fetch the finished transcript
 *    and optionally throw if its status is "error".
 */
export const transcribe = createAction({
  name: 'transcribe',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Transcribe',
  description: 'Transcribe an audio or video file using AssemblyAI.',
  props: transcribeProps,
  async run(context: TranscribeContext) {
    const client = getAssemblyAIClient(context);
    if (context.executionType === ExecutionType.BEGIN) {
      // Build the API params and validate mutually-exclusive options
      // before submitting.
      const transcriptParams = createTranscriptParams(context);
      handleWebhookUrl(context, transcriptParams);
      handlePiiAudio(context);
      handleEmptyArrays(transcriptParams);
      const transcript = await client.transcripts.submit(transcriptParams) as any;
      if (context.propsValue.wait_until_ready) {
        // Pause the flow; AssemblyAI will hit the resume webhook that
        // handleWebhookUrl registered when the transcript finishes.
        context.run.pause({
          pauseMetadata: {
            type: PauseType.WEBHOOK,
            response: transcript,
          },
        });
      }
      return transcript;
    } else if (context.executionType === ExecutionType.RESUME) {
      // Resumed by AssemblyAI's webhook: the body carries the transcript ID.
      const webhookBody = context.resumePayload.body as {
        transcript_id: string;
      };
      const transcript = await client.transcripts.get(
        webhookBody.transcript_id
      );
      if (context.propsValue.throw_on_error && transcript.status === 'error') {
        throw new Error(transcript.error);
      }
      return transcript;
    } else {
      throw new Error('Invalid Execution Type');
    }
  },
});
function createTranscriptParams(context: TranscribeContext): TranscriptParams {
  // Copy the configured prop values and strip the piece-only fields
  // that the AssemblyAI transcript API does not understand.
  const params: Record<string, unknown> = { ...context.propsValue };
  for (const pieceOnlyKey of ['wait_until_ready', 'throw_on_error', 'auth']) {
    if (pieceOnlyKey in params) {
      delete params[pieceOnlyKey];
    }
  }
  return params as TranscriptParams;
}
// When the action should wait for completion, point AssemblyAI's webhook
// at this flow's resume URL — and reject a user-supplied webhook URL,
// since both cannot be honored at once.
function handleWebhookUrl(
  context: TranscribeContext,
  transcriptParams: TranscriptParams
) {
  if (!context.propsValue.wait_until_ready) {
    return;
  }
  if (transcriptParams.webhook_url?.trim()) {
    throw new Error(
      `The "Wait until transcript is ready" and "Webhook URL" fields are mutually exclusive. Please remove the "Webhook URL" field to use the "Wait until transcript is ready" field.`
    );
  }
  transcriptParams.webhook_url = context.generateResumeUrl({
    queryParams: {},
  });
}
// Waiting for completion and PII-audio redaction cannot be combined:
// the redacted-audio webhook would collide with the resume webhook.
function handlePiiAudio(context: TranscribeContext) {
  const { wait_until_ready, redact_pii_audio } = context.propsValue;
  if (wait_until_ready === true && redact_pii_audio === true) {
    throw new Error(
      `The "Wait until transcript is ready" and "Redact PII audio" fields are mutually exclusive. Set the "Wait until transcript is ready" or "Redact PII audio" to false.`
    );
  }
}
// Drop optional array parameters the user left empty so they are not
// serialized into the API request (mutates the params in place).
function handleEmptyArrays(transcriptParams: TranscriptParams) {
  const record = transcriptParams as Record<string, unknown>;
  for (const [key, value] of Object.entries(record)) {
    if (Array.isArray(value) && value.length === 0) {
      delete record[key];
    }
  }
}

View File

@@ -0,0 +1,30 @@
import { createAction, Property } from '@activepieces/pieces-framework';
import { assemblyaiAuth } from '../../auth';
import { getAssemblyAIClient } from '../../client';
import { transcriptIdProp } from './shared-props';
/**
 * Action: search a transcript for the given keywords, numbers, or
 * short phrases (up to five words/numbers each).
 */
export const wordSearch = createAction({
  name: 'wordSearch',
  auth: assemblyaiAuth,
  requireAuth: true,
  displayName: 'Search words in transcript',
  description:
    'Search through the transcript for keywords. ' +
    'You can search for individual words, numbers, or phrases containing up to five words or numbers.',
  props: {
    id: transcriptIdProp,
    words: Property.Array({
      displayName: 'Words',
      required: true,
      description: 'Keywords to search for',
    }),
  },
  async run(context) {
    // Array props are loosely typed by the framework, so cast to string[].
    const { id, words } = context.propsValue;
    const client = getAssemblyAIClient(context);
    return await client.transcripts.wordSearch(id, words as string[]);
  },
});

View File

@@ -0,0 +1,39 @@
import { httpClient, HttpMethod } from '@activepieces/pieces-common';
import { PieceAuth } from '@activepieces/pieces-framework';
import { baseUrl } from './client';
/**
 * AssemblyAI authentication: a single secret API key, validated by
 * calling the account endpoint with the key as the Authorization header.
 */
export const assemblyaiAuth = PieceAuth.SecretText({
  displayName: 'API Key',
  required: true,
  description:
    'You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).',
  validate: async ({ auth }) => {
    // An empty key can be rejected without a network round-trip.
    if (!auth) {
      return {
        valid: false,
        error: 'The AssemblyAI API key is required.',
      };
    }
    try {
      const response = await httpClient.sendRequest<string[]>({
        method: HttpMethod.GET,
        url: `${baseUrl}/v2/account`,
        headers: {
          Authorization: auth,
        },
      });
      if (response.status === 200) {
        return {
          valid: true,
        };
      }
      return {
        valid: false,
        error: 'The AssemblyAI API key is invalid.',
      };
    } catch (e) {
      // Network failures and 4xx responses thrown by the client are both
      // reported as an invalid key.
      return {
        valid: false,
        error: 'The AssemblyAI API key is invalid.',
      };
    }
  },
});

View File

@@ -0,0 +1,26 @@
import {
ActionContext,
SecretTextProperty,
} from '@activepieces/pieces-framework';
import { AssemblyAI } from 'assemblyai';
import packageJson from '../../package.json';
// Base endpoint for all AssemblyAI API calls (also used by auth validation).
export const baseUrl = 'https://api.assemblyai.com';
// Proxyman proxy
// export const baseUrl = 'http://localhost:10000';

/**
 * Build an AssemblyAI SDK client from the action context, tagging every
 * request with an Activepieces integration user agent.
 */
export const getAssemblyAIClient = (
  context: ActionContext<SecretTextProperty<true>>
): AssemblyAI => {
  const auth = context.auth;
  if (!auth) throw new Error('The AssemblyAI API key is required.');
  return new AssemblyAI({
    // NOTE(review): reads `secret_text` off the auth value — assumes the
    // framework passes the secret as an object here; confirm against the
    // pieces-framework auth typing.
    apiKey: auth.secret_text,
    userAgent: {
      integration: {
        name: 'Activepieces',
        version: packageJson.version,
      },
    },
    baseUrl,
  });
};

View File

@@ -0,0 +1,88 @@
import { Property } from '@activepieces/pieces-framework';
// Input properties for a LeMUR request: the prompt, the transcript
// sources (transcript IDs or raw input text), and model/output tuning.
// Lives under src/generated/ — presumably produced from the AssemblyAI
// API spec; edit the generator rather than this file where possible.
export const props = {
  prompt: Property.ShortText({
    displayName: 'Prompt',
    required: true,
    description:
      'Your text to prompt the model to produce a desired output, including any context you want to pass into the model.',
  }),
  transcript_ids: Property.Array({
    displayName: 'Transcript IDs',
    required: false,
    description:
      'A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n',
  }),
  input_text: Property.ShortText({
    displayName: 'Input Text',
    required: false,
    description:
      'Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n',
  }),
  context: Property.LongText({
    displayName: 'Context',
    required: false,
    description:
      'Context to provide the model. This can be a string or a free-form JSON value.',
  }),
  final_model: Property.StaticDropdown({
    displayName: 'Final Model',
    required: false,
    description:
      'The model that is used for the final prompt after compression is performed.\n',
    options: {
      options: [
        {
          label: 'Claude 3.5 Sonnet (on Anthropic)',
          value: 'anthropic/claude-3-5-sonnet',
        },
        {
          label: 'Claude 3 Opus (on Anthropic)',
          value: 'anthropic/claude-3-opus',
        },
        {
          label: 'Claude 3 Haiku (on Anthropic)',
          value: 'anthropic/claude-3-haiku',
        },
        {
          label: 'Claude 3 Sonnet (on Anthropic)',
          value: 'anthropic/claude-3-sonnet',
        },
        {
          label: 'Claude 2.1 (on Anthropic)',
          value: 'anthropic/claude-2-1',
        },
        {
          label: 'Claude 2 (on Anthropic)',
          value: 'anthropic/claude-2',
        },
        {
          label: 'Default',
          value: 'default',
        },
        {
          label: 'Claude Instant 1.2 (on Anthropic)',
          value: 'anthropic/claude-instant-1-2',
        },
        {
          label: 'Basic',
          value: 'basic',
        },
        {
          label: 'Mistral 7B (Hosted by AssemblyAI)',
          value: 'assemblyai/mistral-7b',
        },
      ],
    },
  }),
  max_output_size: Property.Number({
    displayName: 'Maximum Output Size',
    required: false,
    description: 'Max output size in tokens, up to 4000',
  }),
  temperature: Property.Number({
    displayName: 'Temperature',
    required: false,
    description:
      'The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n',
  }),
};

View File

@@ -0,0 +1,54 @@
import { Property } from '@activepieces/pieces-framework';
// Input properties for the "List transcripts" action: pagination and
// status filters that map 1:1 onto the AssemblyAI list-transcripts API.
// Lives under src/generated/ — presumably produced from the AssemblyAI
// API spec; edit the generator rather than this file where possible.
export const props = {
  limit: Property.Number({
    displayName: 'Limit',
    required: false,
    description: 'Maximum amount of transcripts to retrieve',
  }),
  status: Property.StaticDropdown({
    displayName: 'Status',
    required: false,
    description: 'Filter by transcript status',
    options: {
      options: [
        {
          label: 'Queued',
          value: 'queued',
        },
        {
          label: 'Processing',
          value: 'processing',
        },
        {
          label: 'Completed',
          value: 'completed',
        },
        {
          label: 'Error',
          value: 'error',
        },
      ],
    },
  }),
  created_on: Property.DateTime({
    displayName: 'Created On',
    required: false,
    description: 'Only get transcripts created on this date',
  }),
  before_id: Property.ShortText({
    displayName: 'Before ID',
    required: false,
    description: 'Get transcripts that were created before this transcript ID',
  }),
  after_id: Property.ShortText({
    displayName: 'After ID',
    required: false,
    description: 'Get transcripts that were created after this transcript ID',
  }),
  throttled_only: Property.Checkbox({
    displayName: 'Throttled Only',
    required: false,
    description: 'Only get throttled transcripts, overrides the status filter',
    defaultValue: false,
  }),
};

View File

@@ -0,0 +1,908 @@
import { Property } from '@activepieces/pieces-framework';
// Input properties for the Transcribe action, mirroring the AssemblyAI
// transcript request parameters (imported as src/generated/transcribe/props,
// so presumably produced from the API spec — prefer regenerating over
// hand-editing). Only the "Webhook URL" description was corrected here
// ("We sends" -> "We send").
export const props = {
  audio_url: Property.ShortText({
    displayName: 'Audio URL',
    required: true,
    description: 'The URL of the audio or video file to transcribe.',
  }),
  language_code: Property.StaticDropdown({
    displayName: 'Language Code',
    required: false,
    description: `The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n`,
    options: {
      options: [
        {
          label: 'English (Global)',
          value: 'en',
        },
        {
          label: 'English (Australian)',
          value: 'en_au',
        },
        {
          label: 'English (British)',
          value: 'en_uk',
        },
        {
          label: 'English (US)',
          value: 'en_us',
        },
        {
          label: 'Spanish',
          value: 'es',
        },
        {
          label: 'French',
          value: 'fr',
        },
        {
          label: 'German',
          value: 'de',
        },
        {
          label: 'Italian',
          value: 'it',
        },
        {
          label: 'Portuguese',
          value: 'pt',
        },
        {
          label: 'Dutch',
          value: 'nl',
        },
        {
          label: 'Afrikaans',
          value: 'af',
        },
        {
          label: 'Albanian',
          value: 'sq',
        },
        {
          label: 'Amharic',
          value: 'am',
        },
        {
          label: 'Arabic',
          value: 'ar',
        },
        {
          label: 'Armenian',
          value: 'hy',
        },
        {
          label: 'Assamese',
          value: 'as',
        },
        {
          label: 'Azerbaijani',
          value: 'az',
        },
        {
          label: 'Bashkir',
          value: 'ba',
        },
        {
          label: 'Basque',
          value: 'eu',
        },
        {
          label: 'Belarusian',
          value: 'be',
        },
        {
          label: 'Bengali',
          value: 'bn',
        },
        {
          label: 'Bosnian',
          value: 'bs',
        },
        {
          label: 'Breton',
          value: 'br',
        },
        {
          label: 'Bulgarian',
          value: 'bg',
        },
        {
          label: 'Burmese',
          value: 'my',
        },
        {
          label: 'Catalan',
          value: 'ca',
        },
        {
          label: 'Chinese',
          value: 'zh',
        },
        {
          label: 'Croatian',
          value: 'hr',
        },
        {
          label: 'Czech',
          value: 'cs',
        },
        {
          label: 'Danish',
          value: 'da',
        },
        {
          label: 'Estonian',
          value: 'et',
        },
        {
          label: 'Faroese',
          value: 'fo',
        },
        {
          label: 'Finnish',
          value: 'fi',
        },
        {
          label: 'Galician',
          value: 'gl',
        },
        {
          label: 'Georgian',
          value: 'ka',
        },
        {
          label: 'Greek',
          value: 'el',
        },
        {
          label: 'Gujarati',
          value: 'gu',
        },
        {
          label: 'Haitian',
          value: 'ht',
        },
        {
          label: 'Hausa',
          value: 'ha',
        },
        {
          label: 'Hawaiian',
          value: 'haw',
        },
        {
          label: 'Hebrew',
          value: 'he',
        },
        {
          label: 'Hindi',
          value: 'hi',
        },
        {
          label: 'Hungarian',
          value: 'hu',
        },
        {
          label: 'Icelandic',
          value: 'is',
        },
        {
          label: 'Indonesian',
          value: 'id',
        },
        {
          label: 'Japanese',
          value: 'ja',
        },
        {
          label: 'Javanese',
          value: 'jw',
        },
        {
          label: 'Kannada',
          value: 'kn',
        },
        {
          label: 'Kazakh',
          value: 'kk',
        },
        {
          label: 'Khmer',
          value: 'km',
        },
        {
          label: 'Korean',
          value: 'ko',
        },
        {
          label: 'Lao',
          value: 'lo',
        },
        {
          label: 'Latin',
          value: 'la',
        },
        {
          label: 'Latvian',
          value: 'lv',
        },
        {
          label: 'Lingala',
          value: 'ln',
        },
        {
          label: 'Lithuanian',
          value: 'lt',
        },
        {
          label: 'Luxembourgish',
          value: 'lb',
        },
        {
          label: 'Macedonian',
          value: 'mk',
        },
        {
          label: 'Malagasy',
          value: 'mg',
        },
        {
          label: 'Malay',
          value: 'ms',
        },
        {
          label: 'Malayalam',
          value: 'ml',
        },
        {
          label: 'Maltese',
          value: 'mt',
        },
        {
          label: 'Maori',
          value: 'mi',
        },
        {
          label: 'Marathi',
          value: 'mr',
        },
        {
          label: 'Mongolian',
          value: 'mn',
        },
        {
          label: 'Nepali',
          value: 'ne',
        },
        {
          label: 'Norwegian',
          value: 'no',
        },
        {
          label: 'Norwegian Nynorsk',
          value: 'nn',
        },
        {
          label: 'Occitan',
          value: 'oc',
        },
        {
          label: 'Panjabi',
          value: 'pa',
        },
        {
          label: 'Pashto',
          value: 'ps',
        },
        {
          label: 'Persian',
          value: 'fa',
        },
        {
          label: 'Polish',
          value: 'pl',
        },
        {
          label: 'Romanian',
          value: 'ro',
        },
        {
          label: 'Russian',
          value: 'ru',
        },
        {
          label: 'Sanskrit',
          value: 'sa',
        },
        {
          label: 'Serbian',
          value: 'sr',
        },
        {
          label: 'Shona',
          value: 'sn',
        },
        {
          label: 'Sindhi',
          value: 'sd',
        },
        {
          label: 'Sinhala',
          value: 'si',
        },
        {
          label: 'Slovak',
          value: 'sk',
        },
        {
          label: 'Slovenian',
          value: 'sl',
        },
        {
          label: 'Somali',
          value: 'so',
        },
        {
          label: 'Sundanese',
          value: 'su',
        },
        {
          label: 'Swahili',
          value: 'sw',
        },
        {
          label: 'Swedish',
          value: 'sv',
        },
        {
          label: 'Tagalog',
          value: 'tl',
        },
        {
          label: 'Tajik',
          value: 'tg',
        },
        {
          label: 'Tamil',
          value: 'ta',
        },
        {
          label: 'Tatar',
          value: 'tt',
        },
        {
          label: 'Telugu',
          value: 'te',
        },
        {
          label: 'Thai',
          value: 'th',
        },
        {
          label: 'Tibetan',
          value: 'bo',
        },
        {
          label: 'Turkish',
          value: 'tr',
        },
        {
          label: 'Turkmen',
          value: 'tk',
        },
        {
          label: 'Ukrainian',
          value: 'uk',
        },
        {
          label: 'Urdu',
          value: 'ur',
        },
        {
          label: 'Uzbek',
          value: 'uz',
        },
        {
          label: 'Vietnamese',
          value: 'vi',
        },
        {
          label: 'Welsh',
          value: 'cy',
        },
        {
          label: 'Yiddish',
          value: 'yi',
        },
        {
          label: 'Yoruba',
          value: 'yo',
        },
      ],
    },
  }),
  language_detection: Property.Checkbox({
    displayName: 'Language Detection',
    required: false,
    description: `Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.`,
    defaultValue: false,
  }),
  language_confidence_threshold: Property.Number({
    displayName: 'Language Confidence Threshold',
    required: false,
    description:
      'The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n',
  }),
  speech_model: Property.StaticDropdown({
    displayName: 'Speech Model',
    required: false,
    description:
      'The speech model to use for the transcription. When `null`, the "best" model is used.',
    options: {
      options: [
        {
          label: 'Best',
          value: 'best',
        },
        {
          label: 'Nano',
          value: 'nano',
        },
      ],
    },
  }),
  punctuate: Property.Checkbox({
    displayName: 'Punctuate',
    required: false,
    description: 'Enable Automatic Punctuation, can be true or false',
    defaultValue: true,
  }),
  format_text: Property.Checkbox({
    displayName: 'Format Text',
    required: false,
    description: 'Enable Text Formatting, can be true or false',
    defaultValue: true,
  }),
  disfluencies: Property.Checkbox({
    displayName: 'Disfluencies',
    required: false,
    description:
      'Transcribe Filler Words, like "umm", in your media file; can be true or false',
    defaultValue: false,
  }),
  dual_channel: Property.Checkbox({
    displayName: 'Dual Channel',
    required: false,
    description: `Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.`,
    defaultValue: false,
  }),
  webhook_url: Property.ShortText({
    displayName: 'Webhook URL',
    required: false,
    description:
      'The URL to which we send webhook requests.\nWe send two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n',
  }),
  webhook_auth_header_name: Property.ShortText({
    displayName: 'Webhook Auth Header Name',
    required: false,
    description:
      'The header name to be sent with the transcript completed or failed webhook requests',
  }),
  webhook_auth_header_value: Property.ShortText({
    displayName: 'Webhook Auth Header Value',
    required: false,
    description:
      'The header value to send back with the transcript completed or failed webhook requests for added security',
  }),
  auto_highlights: Property.Checkbox({
    displayName: 'Key Phrases',
    required: false,
    description: 'Enable Key Phrases, either true or false',
    defaultValue: false,
  }),
  audio_start_from: Property.Number({
    displayName: 'Audio Start From',
    required: false,
    description:
      'The point in time, in milliseconds, to begin transcribing in your media file',
  }),
  audio_end_at: Property.Number({
    displayName: 'Audio End At',
    required: false,
    description:
      'The point in time, in milliseconds, to stop transcribing in your media file',
  }),
  word_boost: Property.Array({
    displayName: 'Word Boost',
    required: false,
    description:
      'The list of custom vocabulary to boost transcription probability for',
  }),
  boost_param: Property.StaticDropdown({
    displayName: 'Word Boost Level',
    required: false,
    description: 'How much to boost specified words',
    options: {
      options: [
        {
          label: 'Low',
          value: 'low',
        },
        {
          label: 'Default',
          value: 'default',
        },
        {
          label: 'High',
          value: 'high',
        },
      ],
    },
  }),
  filter_profanity: Property.Checkbox({
    displayName: 'Filter Profanity',
    required: false,
    description:
      'Filter profanity from the transcribed text, can be true or false',
    defaultValue: false,
  }),
  redact_pii: Property.Checkbox({
    displayName: 'Redact PII',
    required: false,
    description:
      'Redact PII from the transcribed text using the Redact PII model, can be true or false',
    defaultValue: false,
  }),
  redact_pii_audio: Property.Checkbox({
    displayName: 'Redact PII Audio',
    required: false,
    description: `Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.`,
    defaultValue: false,
  }),
  redact_pii_audio_quality: Property.StaticDropdown({
    displayName: 'Redact PII Audio Quality',
    required: false,
    description: `Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.`,
    options: {
      options: [
        {
          label: 'MP3',
          value: 'mp3',
        },
        {
          label: 'WAV',
          value: 'wav',
        },
      ],
    },
  }),
  redact_pii_policies: Property.StaticMultiSelectDropdown({
    displayName: 'Redact PII Policies',
    required: false,
    description: `The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.`,
    options: {
      options: [
        {
          label: 'Account Number',
          value: 'account_number',
        },
        {
          label: 'Banking Information',
          value: 'banking_information',
        },
        {
          label: 'Blood Type',
          value: 'blood_type',
        },
        {
          label: 'Credit Card CVV',
          value: 'credit_card_cvv',
        },
        {
          label: 'Credit Card Expiration',
          value: 'credit_card_expiration',
        },
        {
          label: 'Credit Card Number',
          value: 'credit_card_number',
        },
        {
          label: 'Date',
          value: 'date',
        },
        {
          label: 'Date Interval',
          value: 'date_interval',
        },
        {
          label: 'Date of Birth',
          value: 'date_of_birth',
        },
        {
          label: "Driver's License",
          value: 'drivers_license',
        },
        {
          label: 'Drug',
          value: 'drug',
        },
        {
          label: 'Duration',
          value: 'duration',
        },
        {
          label: 'Email Address',
          value: 'email_address',
        },
        {
          label: 'Event',
          value: 'event',
        },
        {
          label: 'Filename',
          value: 'filename',
        },
        {
          label: 'Gender Sexuality',
          value: 'gender_sexuality',
        },
        {
          label: 'Healthcare Number',
          value: 'healthcare_number',
        },
        {
          label: 'Injury',
          value: 'injury',
        },
        {
          label: 'IP Address',
          value: 'ip_address',
        },
        {
          label: 'Language',
          value: 'language',
        },
        {
          label: 'Location',
          value: 'location',
        },
        {
          label: 'Marital Status',
          value: 'marital_status',
        },
        {
          label: 'Medical Condition',
          value: 'medical_condition',
        },
        {
          label: 'Medical Process',
          value: 'medical_process',
        },
        {
          label: 'Money Amount',
          value: 'money_amount',
        },
        {
          label: 'Nationality',
          value: 'nationality',
        },
        {
          label: 'Number Sequence',
          value: 'number_sequence',
        },
        {
          label: 'Occupation',
          value: 'occupation',
        },
        {
          label: 'Organization',
          value: 'organization',
        },
        {
          label: 'Passport Number',
          value: 'passport_number',
        },
        {
          label: 'Password',
          value: 'password',
        },
        {
          label: 'Person Age',
          value: 'person_age',
        },
        {
          label: 'Person Name',
          value: 'person_name',
        },
        {
          label: 'Phone Number',
          value: 'phone_number',
        },
        {
          label: 'Physical Attribute',
          value: 'physical_attribute',
        },
        {
          label: 'Political Affiliation',
          value: 'political_affiliation',
        },
        {
          label: 'Religion',
          value: 'religion',
        },
        {
          label: 'Statistics',
          value: 'statistics',
        },
        {
          label: 'Time',
          value: 'time',
        },
        {
          label: 'URL',
          value: 'url',
        },
        {
          label: 'US Social Security Number',
          value: 'us_social_security_number',
        },
        {
          label: 'Username',
          value: 'username',
        },
        {
          label: 'Vehicle ID',
          value: 'vehicle_id',
        },
        {
          label: 'Zodiac Sign',
          value: 'zodiac_sign',
        },
      ],
    },
  }),
  redact_pii_sub: Property.StaticDropdown({
    displayName: 'Redact PII Substitution',
    required: false,
    description: `The replacement logic for detected PII, can be "entity_type" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.`,
    options: {
      options: [
        {
          label: 'Entity Name',
          value: 'entity_name',
        },
        {
          label: 'Hash',
          value: 'hash',
        },
      ],
    },
  }),
  speaker_labels: Property.Checkbox({
    displayName: 'Speaker Labels',
    required: false,
    description: `Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false`,
    defaultValue: false,
  }),
  speakers_expected: Property.Number({
    displayName: 'Speakers Expected',
    required: false,
    description: `Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.`,
  }),
  content_safety: Property.Checkbox({
    displayName: 'Content Moderation',
    required: false,
    description: `Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false`,
    defaultValue: false,
  }),
  content_safety_confidence: Property.Number({
    displayName: 'Content Moderation Confidence',
    required: false,
    description:
      'The confidence threshold for the Content Moderation model. Values must be between 25 and 100.',
  }),
  iab_categories: Property.Checkbox({
    displayName: 'Topic Detection',
    required: false,
    description: `Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false`,
    defaultValue: false,
  }),
  custom_spelling: Property.Json({
    displayName: 'Custom Spellings',
    required: false,
    description:
      'Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n  {\n    "from": ["original", "spelling"],\n    "to": "corrected"\n  }\n]\n```\n',
  }),
  sentiment_analysis: Property.Checkbox({
    displayName: 'Sentiment Analysis',
    required: false,
    description: `Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false`,
    defaultValue: false,
  }),
  auto_chapters: Property.Checkbox({
    displayName: 'Auto Chapters',
    required: false,
    description: `Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false`,
    defaultValue: false,
  }),
  entity_detection: Property.Checkbox({
    displayName: 'Entity Detection',
    required: false,
    description: `Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false`,
    defaultValue: false,
  }),
  speech_threshold: Property.Number({
    displayName: 'Speech Threshold',
    required: false,
    description:
      'Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n',
  }),
  summarization: Property.Checkbox({
    displayName: 'Enable Summarization',
    required: false,
    description: `Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false`,
    defaultValue: false,
  }),
  summary_model: Property.StaticDropdown({
    displayName: 'Summary Model',
    required: false,
    description: 'The model to summarize the transcript',
    options: {
      options: [
        {
          label: 'Informative',
          value: 'informative',
        },
        {
          label: 'Conversational',
          value: 'conversational',
        },
        {
          label: 'Catchy',
          value: 'catchy',
        },
      ],
    },
  }),
  summary_type: Property.StaticDropdown({
    displayName: 'Summary Type',
    required: false,
    description: 'The type of summary',
    options: {
      options: [
        {
          label: 'Bullets',
          value: 'bullets',
        },
        {
          label: 'Bullets Verbose',
          value: 'bullets_verbose',
        },
        {
          label: 'Gist',
          value: 'gist',
        },
        {
          label: 'Headline',
          value: 'headline',
        },
        {
          label: 'Paragraph',
          value: 'paragraph',
        },
      ],
    },
  }),
  custom_topics: Property.Checkbox({
    displayName: 'Enable Custom Topics',
    required: false,
    description: 'Enable custom topics, either true or false',
    defaultValue: false,
  }),
  topics: Property.Array({
    displayName: 'Custom Topics',
    required: false,
    description: 'The list of custom topics',
  }),
};

View File

@@ -0,0 +1,19 @@
{
"extends": "../../../../tsconfig.base.json",
"compilerOptions": {
"module": "commonjs",
"forceConsistentCasingInFileNames": true,
"strict": true,
"noImplicitOverride": true,
"noPropertyAccessFromIndexSignature": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true
},
"files": [],
"include": [],
"references": [
{
"path": "./tsconfig.lib.json"
}
]
}

View File

@@ -0,0 +1,11 @@
{
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"outDir": "../../../../dist/out-tsc",
"declaration": true,
"types": ["node"]
},
"exclude": ["jest.config.ts", "src/**/*.spec.ts", "src/**/*.test.ts"],
"include": ["src/**/*.ts"]
}