doc: generate puterai docs and fix a docgen bug

KernelDeimos 2024-12-04 16:58:45 -05:00
parent 8848055f1b
commit 169c1aa3a7
17 changed files with 1021 additions and 1 deletion

View File

@@ -1,3 +1,4 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const APIError = require("../../api/APIError");
const { PermissionUtil } = require("../../services/auth/PermissionService");
const BaseService = require("../../services/BaseService");
@@ -6,14 +7,31 @@ const { TypeSpec } = require("../../services/drivers/meta/Construct");
const { TypedValue } = require("../../services/drivers/meta/Runtime");
const { Context } = require("../../util/context");
// Maximum number of fallback attempts when a model fails, including the first attempt
const MAX_FALLBACKS = 3 + 1; // includes first attempt
/**
* AIChatService class extends BaseService to provide AI chat completion functionality.
* Manages multiple AI providers, models, and fallback mechanisms for chat interactions.
* Handles model registration, usage tracking, cost calculation, content moderation,
* and implements the puter-chat-completion driver interface. Supports streaming responses
* and maintains detailed model information including pricing and capabilities.
*/
class AIChatService extends BaseService {
static MODULES = {
kv: globalThis.kv,
uuidv4: require('uuid').v4,
}
/**
* Initializes the service by setting up core properties.
* Creates empty arrays for providers and model lists,
* and initializes an empty object for the model map.
* Called during service instantiation.
* @private
*/
_construct () {
this.providers = [];
@@ -21,6 +39,13 @@ class AIChatService extends BaseService {
this.detail_model_list = [];
this.detail_model_map = {};
}
/**
* Initializes the service at boot. Generates a unique key for this
* service's KV-store entries and completes the remaining runtime setup.
* @private
*/
_init () {
this.kvkey = this.modules.uuidv4();
@@ -67,6 +92,19 @@
});
}
/**
* Handles consolidation during service boot by registering service aliases
* and populating model lists/maps from providers.
*
* Registers each provider as an 'ai-chat' service alias and fetches their
* available models and pricing information. Populates:
* - simple_model_list: Basic list of supported models
* - detail_model_list: Detailed model info including costs
* - detail_model_map: Maps model IDs/aliases to their details
*
* @returns {Promise<void>}
*/
async ['__on_boot.consolidation'] () {
{
const svc_driver = this.services.get('driver')
@@ -83,6 +121,15 @@
// Populate simple model list
{
/**
* Populates the simple model list by fetching available models from the delegate service.
* Wraps the delegate.list() call in a try-catch block to handle potential errors gracefully.
* If the call fails, logs the error and returns an empty array to avoid breaking the service.
* The fetched models are added to this.simple_model_list.
*
* @returns {Promise<Array>} The fetched models, or an empty array on failure
*/
const models = await (async () => {
try {
return await delegate.list() ?? [];
@@ -96,6 +143,14 @@
// Populate detail model list and map
{
/**
* Populates the detail model list and map with model information from the provider.
* Fetches detailed model data including pricing and capabilities.
* Handles model aliases and potential conflicts by storing multiple models in arrays.
* Annotates models with their provider service name.
* Catches and logs any errors during model fetching.
* @private
*/
const models = await (async () => {
try {
return await delegate.models() ?? [];
@@ -112,6 +167,13 @@
});
}
this.detail_model_list.push(...annotated_models);
/**
* Helper function to set or push a model into the detail_model_map.
* If there's no existing entry for the key, sets it directly.
* If there's a conflict, converts the entry to an array and pushes the new model.
* @param {string} key - The model ID or alias
* @param {Object} model - The model details to add
*/
const set_or_push = (key, model) => {
// Typical case: no conflict
if ( ! this.detail_model_map[key] ) {
@@ -153,16 +215,46 @@
}
},
['puter-chat-completion']: {
/**
* Returns detailed information about available models, including
* pricing and capabilities.
*
* Delegates to the intended service's models() method if a delegate
* exists, otherwise returns the internal detail_model_list covering
* all registered providers.
*
* @returns {Promise<Array>} Array of model objects with details like id, provider, cost, etc.
*/
async models () {
const delegate = this.get_delegate();
if ( ! delegate ) return await this.models_();
return await delegate.models();
},
/**
* Lists available AI models in a simplified format
*
* Delegates to the intended service's list() method if a delegate exists,
* otherwise returns the internal simple_model_list aggregated from all
* registered providers.
*
* @returns {Promise<Array>} Array of basic model entries
*/
async list () {
const delegate = this.get_delegate();
if ( ! delegate ) return await this.list_();
return await delegate.list();
},
/**
* Completes a chat prompt using the requested provider and model
*
* Resolves the intended service from the client driver call, applies
* content moderation and usage checks, and falls back to alternative
* models (up to MAX_FALLBACKS attempts) when a provider fails.
*
* @param {Object} parameters - Chat completion parameters including model and messages
* @returns {Promise<Object>} Chat completion response with usage stats
* @throws {Error} If service is called directly or no fallback models available
*/
async complete (parameters) {
const client_driver_call = Context.get('client_driver_call');
let { test_mode, intended_service, response_metadata } = client_driver_call;
@@ -330,6 +422,17 @@
}
}
/**
* Checks if the user has permission to use AI services and verifies usage limits
*
* @param {Object} params - The check parameters
* @param {Object} params.actor - The user/actor making the request
* @param {string} params.service - The AI service being used
* @param {string} params.model - The model being accessed
* @throws {APIError} If usage is not allowed or limits are exceeded
* @private
*/
async check_usage_ ({ actor, service, model }) {
const svc_permission = this.services.get('permission');
const svc_event = this.services.get('event');
@@ -359,6 +462,20 @@
}
}
/**
* Moderates chat messages for inappropriate content using OpenAI's moderation service
*
* @param {Object} params - The parameters object
* @param {Array} params.messages - Array of chat messages to moderate
* @returns {Promise<boolean>} Returns true if content is appropriate, false if flagged
*
* @description
* Extracts text content from messages and checks each against OpenAI's moderation.
* Handles both string content and structured message objects.
* Returns false immediately if any message is flagged as inappropriate.
* Returns true if OpenAI service is unavailable or all messages pass moderation.
*/
async moderate ({ messages }) {
const svc_openai = this.services.get('openai-completion');
@@ -385,14 +502,37 @@
return true;
}
/**
* Returns the detailed list of models aggregated from all registered providers
* @returns {Promise<Array>} Array of detailed model objects including pricing and capabilities
*/
async models_ () {
return this.detail_model_list;
}
/**
* Returns a list of available AI models with basic details
* @returns {Promise<Array>} Array of simple model objects containing basic model information
*/
async list_ () {
return this.simple_model_list;
}
/**
* Gets the appropriate delegate service for handling chat completion requests.
* If the intended service is this service (ai-chat), returns undefined.
* Otherwise returns the intended service wrapped as a puter-chat-completion interface.
*
* @returns {Object|undefined} The delegate service or undefined if intended service is ai-chat
*/
get_delegate () {
const client_driver_call = Context.get('client_driver_call');
if ( client_driver_call.intended_service === this.service_name ) {

View File

@@ -1,6 +1,20 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const BaseService = require("../../services/BaseService");
/**
* Service class that manages AI interface registrations and configurations.
* Handles registration of various AI services including OCR, chat completion,
* image generation, and text-to-speech interfaces. Each interface defines
* its available methods, parameters, and expected results.
* @extends BaseService
*/
class AIInterfaceService extends BaseService {
/**
* Registers the AI driver interfaces when the driver service collects
* interface definitions. Defines the OCR, chat completion, image
* generation, and text-to-speech interfaces along with their methods,
* parameters, and expected results.
*/
async ['__on_driver.register.interfaces'] () {
const svc_registry = this.services.get('registry');
const col_interfaces = svc_registry.get('interfaces');

View File

@@ -1,6 +1,18 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const BaseService = require("../../services/BaseService");
/**
* Service class that handles AI test mode functionality.
* Extends BaseService to register test services for AI chat completions.
* Used for testing and development of AI-related features by providing
* a mock implementation of the chat completion service.
*/
class AITestModeService extends BaseService {
/**
* Registers the 'ai-chat' service as the test-mode implementation
* of the 'puter-chat-completion' driver interface.
* @returns {Promise<void>}
*/
async _init () {
const svc_driver = this.services.get('driver');
svc_driver.register_test_service('puter-chat-completion', 'ai-chat');

View File

@@ -1,12 +1,28 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { PollyClient, SynthesizeSpeechCommand, DescribeVoicesCommand } = require("@aws-sdk/client-polly");
const BaseService = require("../../services/BaseService");
const { TypedValue } = require("../../services/drivers/meta/Runtime");
/**
* AWSPollyService class provides text-to-speech functionality using Amazon Polly.
* Extends BaseService to integrate with AWS Polly for voice synthesis operations.
* Implements voice listing, speech synthesis, and voice selection based on language.
* Includes caching for voice descriptions and supports both text and SSML inputs.
* @extends BaseService
*/
class AWSPollyService extends BaseService {
static MODULES = {
kv: globalThis.kv,
}
/**
* Initializes the service by creating an empty clients object.
* This method is called during service construction to set up
* the internal state needed for AWS Polly client management.
* @returns {Promise<void>}
*/
async _construct () {
this.clients_ = {};
}
@@ -18,6 +34,14 @@ class AWSPollyService extends BaseService {
}
},
['puter-tts']: {
/**
* Implements the driver interface methods for text-to-speech functionality
* Contains methods for listing available voices and synthesizing speech
* @interface
* @property {Object} list_voices - Lists available Polly voices with language info
* @property {Object} synthesize - Converts text to speech using specified voice/language
* @property {Function} supports_test_mode - Indicates test mode support for methods
*/
async list_voices () {
const polly_voices = await this.describe_voices();
@@ -64,6 +88,12 @@
}
}
/**
* Creates AWS credentials object for authentication
* @private
* @returns {Object} Object containing AWS access key ID and secret access key
*/
_create_aws_credentials () {
return {
accessKeyId: this.config.aws.access_key,
@@ -86,6 +116,13 @@
return this.clients_[region];
}
/**
* Describes available AWS Polly voices and caches the results
* @returns {Promise<Object>} Response containing array of voice details in Voices property
* @description Fetches voice information from AWS Polly API and caches it for 10 minutes
* Uses KV store for caching to avoid repeated API calls
*/
async describe_voices () {
let voices = this.modules.kv.get('svc:polly:voices');
if ( voices ) {
@@ -109,6 +146,17 @@
return response;
}
/**
* Synthesizes speech from text using AWS Polly
* @param {string} text - The text to synthesize
* @param {Object} options - Synthesis options
* @param {string} options.format - Output audio format (e.g. 'mp3')
* @param {string} [options.voice_id] - AWS Polly voice ID to use
* @param {string} [options.language] - Language code (e.g. 'en-US')
* @param {string} [options.text_type] - Type of input text ('text' or 'ssml')
* @returns {Promise<AWS.Polly.SynthesizeSpeechOutput>} The synthesized speech response
*/
async synthesize_speech (text, { format, voice_id, language, text_type }) {
const client = this._get_client(this.config.aws.region);
@@ -140,6 +188,13 @@
return response;
}
/**
* Attempts to find an appropriate voice for the given language code
* @param {string} language - The language code to find a voice for (e.g. 'en-US')
* @returns {Promise<?string>} The voice ID if found, null if no matching voice exists
* @private
*/
async maybe_get_language_appropriate_voice_ (language) {
const voices = await this.describe_voices();

View File

@@ -1,9 +1,23 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { TextractClient, AnalyzeDocumentCommand, InvalidS3ObjectException } = require("@aws-sdk/client-textract");
const BaseService = require("../../services/BaseService");
const APIError = require("../../api/APIError");
/**
* AWSTextractService class - Provides OCR (Optical Character Recognition) functionality using AWS Textract
* Extends BaseService to integrate with AWS Textract for document analysis and text extraction.
* Implements driver capabilities and puter-ocr interface for document recognition.
* Handles both S3-stored and buffer-based document processing with automatic region management.
*/
class AWSTextractService extends BaseService {
/**
* Initializes the map of region-specific Textract clients.
* Called during service construction.
* @private
*/
_construct () {
this.clients_ = {};
}
@@ -15,6 +29,13 @@ class AWSTextractService extends BaseService {
}
},
['puter-ocr']: {
/**
* Performs OCR recognition on a document using AWS Textract
* @param {Object} params - Recognition parameters
* @param {Object} params.source - The document source to analyze
* @param {boolean} params.test_mode - If true, returns sample test output instead of processing
* @returns {Promise<Object>} Recognition results containing blocks of text with confidence scores
*/
async recognize ({ source, test_mode }) {
if ( test_mode ) {
return {
@@ -61,6 +82,12 @@
},
};
/**
* Creates AWS credentials object for authentication
* @private
* @returns {Object} Object containing AWS access key ID and secret access key
*/
_create_aws_credentials () {
return {
accessKeyId: this.config.aws.access_key,
@@ -83,6 +110,15 @@
return this.clients_[region];
}
/**
* Analyzes a document using AWS Textract to extract text and layout information
* @param {FileFacade} file_facade - Interface to access the document file
* @returns {Promise<Object>} The raw Textract API response containing extracted text blocks
* @throws {Error} If document analysis fails or no suitable input format is available
* @description Processes document through Textract's AnalyzeDocument API with LAYOUT feature.
* Will attempt to use S3 direct access first, falling back to buffer upload if needed.
*/
async analyze_document (file_facade) {
const {
client, document, using_s3
@@ -119,6 +155,18 @@
throw new Error('expected to be unreachable');
}
/**
* Gets AWS client and document configuration for Textract processing
* @param {Object} file_facade - File facade object containing document source info
* @param {boolean} [force_buffer] - If true, forces using buffer instead of S3
* @returns {Promise<Object>} Object containing:
* - client: Configured AWS Textract client
* - document: Document configuration for Textract
* - using_s3: Boolean indicating if using S3 source
* @throws {APIError} If file does not exist
* @throws {Error} If no suitable input format is available
*/
async _get_client_and_document (file_facade, force_buffer) {
const try_s3info = await file_facade.get('s3-info');
if ( try_s3info && ! force_buffer ) {

View File

@@ -1,3 +1,4 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { XAIService } = require("./XAIService");
const CLAUDE_ENOUGH_PROMPT = `
@@ -19,7 +20,20 @@ const CLAUDE_ENOUGH_PROMPT = `
user of the driver interface (typically an app on Puter):
`.replace('\n', ' ').trim();
/**
* ClaudeEnoughService - A service class that implements a Claude-like AI interface
* Extends XAIService to provide Claude-compatible responses while using alternative AI models.
* Includes custom system prompts and model adaptation to simulate Claude's behavior
* in the Puter platform's chat completion interface.
*/
class ClaudeEnoughService extends XAIService {
/**
* Returns the Claude-style system prompt used by this service
* @returns {string} The CLAUDE_ENOUGH_PROMPT system prompt
*/
get_system_prompt () {
return CLAUDE_ENOUGH_PROMPT;
}

View File

@@ -1,3 +1,4 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { default: Anthropic } = require("@anthropic-ai/sdk");
const BaseService = require("../../services/BaseService");
const { whatis } = require("../../util/langutil");
@@ -15,13 +16,29 @@ const PUTER_PROMPT = `
user of the driver interface (typically an app on Puter):
`.replace('\n', ' ').trim();
// Maximum number of input tokens allowed for Claude API requests
const MAX_CLAUDE_INPUT_TOKENS = 10000;
/**
* ClaudeService class extends BaseService to provide integration with Anthropic's Claude AI models.
* Implements the puter-chat-completion interface for handling AI chat interactions.
* Manages message streaming, token limits, model selection, and API communication with Claude.
* Supports system prompts, message adaptation, and usage tracking.
* @extends BaseService
*/
class ClaudeService extends BaseService {
static MODULES = {
Anthropic: require('@anthropic-ai/sdk'),
}
/**
* Initializes the Claude service by creating an Anthropic client instance
* and registering this service as a provider with the AI chat service.
* @private
* @returns {Promise<void>}
*/
async _init () {
this.anthropic = new Anthropic({
apiKey: this.config.apiKey
@@ -34,15 +51,34 @@ class ClaudeService extends BaseService {
});
}
/**
* Returns the default model identifier for Claude API interactions
* @returns {string} The default model ID 'claude-3-5-sonnet-latest'
*/
get_default_model () {
return 'claude-3-5-sonnet-latest';
}
static IMPLEMENTS = {
['puter-chat-completion']: {
/**
* Returns detailed information about the available Claude models
* @returns {Promise<Array>} Array of model objects including pricing, context window, and aliases
*/
async models () {
return await this.models_();
},
/**
* Returns a list of available model names including their aliases
* @returns {Promise<string[]>} Array of model identifiers and their aliases
* @description Retrieves all available Claude model IDs and their aliases,
* flattening them into a single array of strings that can be used for model selection
*/
async list () {
const models = await this.models_();
const model_names = [];
@@ -54,6 +90,15 @@ class ClaudeService extends BaseService {
}
return model_names;
},
/**
* Completes a chat interaction with the Claude AI model
* @param {Object} options - The completion options
* @param {Array} options.messages - Array of chat messages to process
* @param {boolean} options.stream - Whether to stream the response
* @param {string} [options.model] - The Claude model to use, defaults to service default
* @returns {TypedValue|Object} Returns either a TypedValue with streaming response or a completion object
* @throws {APIError} If input token count exceeds maximum allowed
*/
async complete ({ messages, stream, model }) {
const adapted_messages = [];
@@ -87,6 +132,15 @@
}
}
/**
* Calculates the approximate token count for the input messages
* @private
* @returns {number} Estimated token count based on character length divided by 4
* @description Uses a simple character length based heuristic to estimate tokens.
* While not perfectly accurate, this provides a reasonable approximation for
* checking against max token limits before sending to Claude API.
*/
const token_count = (() => {
const text = JSON.stringify(adapted_messages) +
JSON.stringify(system_prompts);
@@ -165,6 +219,19 @@
}
}
/**
* Retrieves available Claude AI models and their specifications
* @returns {Promise<Array>} Array of model objects containing:
* - id: Model identifier
* - name: Display name
* - aliases: Alternative names for the model
* - context: Maximum context window size
* - cost: Pricing details (currency, token counts, input/output costs)
* - qualitative_speed: Relative speed rating
* - max_output: Maximum output tokens
* - training_cutoff: Training data cutoff date
*/
async models_ () {
return [
{

View File

@@ -1,11 +1,33 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const BaseService = require("../../services/BaseService");
/**
* FakeChatService - A mock implementation of a chat service that extends BaseService.
* Provides fake chat completion responses using Lorem Ipsum text generation.
* Used for testing and development purposes when a real chat service is not needed.
* Implements the 'puter-chat-completion' interface with list() and complete() methods.
*/
class FakeChatService extends BaseService {
static IMPLEMENTS = {
['puter-chat-completion']: {
/**
* Implementation interface for the puter-chat-completion service.
* Provides fake chat completion functionality for testing purposes.
* Contains methods for listing available models and generating mock responses.
* @interface
*/
async list () {
return ['fake'];
},
/**
* Simulates a chat completion request by generating random Lorem Ipsum text
* @param {Object} params - The completion parameters
* @param {Array} params.messages - Array of chat messages (unused in fake implementation)
* @param {boolean} params.stream - Whether to stream the response (unused in fake implementation)
* @param {string} params.model - The model to use (unused in fake implementation)
* @returns {Object} A simulated chat completion response with Lorem Ipsum content
*/
async complete ({ messages, stream, model }) {
const { LoremIpsum } = require('lorem-ipsum');
const li = new LoremIpsum({

View File

@@ -1,14 +1,31 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { PassThrough } = require("stream");
const BaseService = require("../../services/BaseService");
const { TypedValue } = require("../../services/drivers/meta/Runtime");
const { nou } = require("../../util/langutil");
const { TeePromise } = require("../../util/promise");
/**
* Service class for integrating with Groq AI's language models.
* Extends BaseService to provide chat completion capabilities through the Groq API.
* Implements the puter-chat-completion interface for model management and text generation.
* Supports both streaming and non-streaming responses, handles multiple models including
* various versions of Llama, Mixtral, and Gemma, and manages usage tracking.
* @class GroqAIService
* @extends BaseService
*/
class GroqAIService extends BaseService {
static MODULES = {
Groq: require('groq-sdk'),
}
/**
* Initializes the GroqAI service by setting up the Groq client and registering with the AI chat provider
* @returns {Promise<void>}
* @private
*/
async _init () {
const Groq = require('groq-sdk');
this.client = new Groq({
@@ -22,20 +39,47 @@ class GroqAIService extends BaseService {
});
}
/**
* Returns the default model ID for the Groq AI service
* @returns {string} The default model ID 'llama-3.1-8b-instant'
*/
get_default_model () {
return 'llama-3.1-8b-instant';
}
static IMPLEMENTS = {
'puter-chat-completion': {
/**
* Defines the interface implementations for the puter-chat-completion service
* Contains methods for listing models and handling chat completions
* @property {Object} models - Returns available AI models with pricing details
* @property {Object} list - Lists raw model data from the Groq API
* @property {Object} complete - Handles chat completion requests with optional streaming
*/
async models () {
return await this.models_();
},
/**
* Lists available AI models from the Groq API
* @returns {Promise<Array>} Array of model objects from the API's data field
* @description Unwraps and returns the model list from the Groq API response,
* which comes wrapped in an object with {object: "list", data: [...]}
*/
async list () {
// They send: { "object": "list", data }
const funny_wrapper = await this.client.models.list();
return funny_wrapper.data;
},
/**
* Completes a chat interaction using the Groq API
* @param {Object} options - The completion options
* @param {Array<Object>} options.messages - Array of message objects containing the conversation history
* @param {string} [options.model] - The model ID to use for completion. Defaults to service's default model
* @param {boolean} [options.stream] - Whether to stream the response
* @returns {TypedValue|Object} Returns either a TypedValue with streaming response or completion object with usage stats
*/
async complete ({ messages, model, stream }) {
for ( let i = 0; i < messages.length; i++ ) {
const message = messages[i];
@@ -101,6 +145,18 @@
}
};
/**
* Returns an array of available AI models with their specifications
*
* Each model object contains:
* - id: Unique identifier for the model
* - name: Human-readable name
* - context: Maximum context window size in tokens
* - cost: Pricing details including currency and token rates
*
* @returns {Array<Object>} Array of model specification objects
*/
models_ () {
return [
{

View File

@@ -1,3 +1,4 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { PassThrough } = require("stream");
const BaseService = require("../../services/BaseService");
const { TypedValue } = require("../../services/drivers/meta/Runtime");
@@ -6,10 +7,24 @@ const { nou } = require("../../util/langutil");
const axios = require('axios');
const { TeePromise } = require("../../util/promise");
/**
* MistralAIService class extends BaseService to provide integration with the Mistral AI API.
* Implements chat completion functionality with support for various Mistral models including
* mistral-large, pixtral, codestral, and ministral variants. Handles both streaming and
* non-streaming responses, token usage tracking, and model management. Provides cost information
* for different models and implements the puter-chat-completion interface.
*/
class MistralAIService extends BaseService {
static MODULES = {
'@mistralai/mistralai': require('@mistralai/mistralai'),
}
/**
* Initializes the service's cost structure for different Mistral AI models.
* Sets up pricing information for various models including token costs for input/output.
* Each model entry specifies currency (usd-cents) and costs per million tokens.
* @private
*/
_construct () {
this.costs_ = {
'mistral-large-latest': {
@@ -80,6 +95,12 @@
},
};
}
/**
* Initializes the Mistral client with the configured API key,
* registers this service as a chat provider, and populates the
* available model list from the Mistral API.
* @private
*/
async _init () {
const require = this.require;
const { Mistral } = require('@mistralai/mistralai');
@@ -97,6 +118,13 @@
// TODO: make this event-driven so it doesn't hold up boot
await this.populate_models_();
}
/**
* Populates the internal models array with available Mistral AI models and their configurations.
* Makes an API call to fetch model data, then processes and filters models based on cost information.
* Each model entry includes id, name, aliases, context window size, capabilities, and pricing.
* @private
* @returns {Promise<void>}
*/
async populate_models_ () {
const resp = await axios({
method: 'get',
@@ -131,17 +159,41 @@
}
// return resp.data;
}
/**
* Returns the default model ID for the Mistral AI service
* @returns {string} The default model ID 'mistral-large-latest'
*/
get_default_model () {
return 'mistral-large-latest';
}
static IMPLEMENTS = {
'puter-chat-completion': {
/**
* Implements the puter-chat-completion interface for MistralAI service
* Provides methods for listing models and generating chat completions
* @interface
* @property {Function} models - Returns array of available model details
* @property {Function} list - Returns array of model IDs
* @property {Function} complete - Generates chat completion with optional streaming
*/
async models () {
return this.models_array_;
},
/**
* Returns an array of model IDs supported by the MistralAI service
* @returns {Promise<string[]>} Array of model identifier strings
*/
async list () {
return this.models_array_.map(m => m.id);
},
/**
* Completes a chat interaction using the Mistral API
* @param {Object} options - The completion options
* @param {Array} options.messages - Array of chat messages in the conversation
* @param {boolean} options.stream - Whether to stream the response
* @param {string} [options.model] - Model ID, defaults to the service default
*/
async complete ({ messages, stream, model }) {
for ( let i = 0; i < messages.length; i++ ) {

View File

@@ -1,3 +1,4 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { PassThrough } = require('stream');
const APIError = require('../../api/APIError');
const BaseService = require('../../services/BaseService');
@@ -7,11 +8,26 @@ const SmolUtil = require('../../util/smolutil');
const { nou } = require('../../util/langutil');
const { TeePromise } = require('../../util/promise');
/**
* OpenAICompletionService class provides an interface to OpenAI's chat completion API.
* Extends BaseService to handle chat completions, message moderation, token counting,
* and streaming responses. Implements the puter-chat-completion interface and manages
* OpenAI API interactions with support for multiple models including GPT-4 variants.
* Handles usage tracking, spending records, and content moderation.
*/
class OpenAICompletionService extends BaseService {
static MODULES = {
openai: require('openai'),
tiktoken: require('tiktoken'),
}
/**
* Initializes the OpenAI service by setting up the API client with credentials
* and registering this service as a chat provider.
*
* @returns {Promise<void>} Resolves when initialization is complete
* @private
*/
async _init () {
const sk_key =
this.config?.openai?.secret_key ??
@@ -28,10 +44,21 @@ class OpenAICompletionService extends BaseService {
});
}
/**
* Gets the default model identifier for OpenAI completions
* @returns {string} The default model ID 'gpt-4o-mini'
*/
get_default_model () {
return 'gpt-4o-mini';
}
/**
* Returns an array of available AI models with their pricing information.
* Each model object includes an ID and cost details (currency, tokens, input/output rates).
* @returns {Promise<Array<{id: string, cost: {currency: string, tokens: number, input: number, output: number}}>>}
*/
async models_ () {
return [
{
@@ -75,9 +102,25 @@
static IMPLEMENTS = {
['puter-chat-completion']: {
/**
* Returns an array of available AI models with their cost information
* @returns {Promise<Array>} Array of model objects containing id and cost details
*/
async models () {
return await this.models_();
},
/**
* Lists all available model names including aliases
* @returns {Promise<string[]>} Array of model IDs and their aliases
*/
async list () {
const models = await this.models_();
const model_names = [];
@@ -89,6 +132,10 @@
}
return model_names;
},
/**
* Handles chat completion requests with optional test mode and streaming
* @param {Object} params - Completion parameters
* @param {Array} params.messages - Array of chat messages
* @param {boolean} params.test_mode - Whether to use test mode
* @param {boolean} params.stream - Whether to stream responses
* @param {string} params.model - Model ID to use
*/
async complete ({ messages, test_mode, stream, model }) {
// for now this code (also in AIChatService.js) needs to be
@@ -139,6 +186,14 @@
}
};
/**
* Checks text content against OpenAI's moderation API for inappropriate content
* @param {string} text - The text content to check for moderation
* @returns {Promise<Object>} Object containing flagged status and detailed results
* @property {boolean} flagged - Whether the content was flagged as inappropriate
* @property {Object} results - Raw moderation results from OpenAI API
*/
async check_moderation (text) {
// create moderation
const results = await this.openai.moderations.create({
@@ -160,6 +215,17 @@
};
}
/**
* Completes a chat conversation using OpenAI's API
* @param {Array} messages - Array of message objects or strings representing the conversation
* @param {Object} options - Configuration options
* @param {boolean} options.stream - Whether to stream the response
* @param {boolean} options.moderation - Whether to perform content moderation
* @param {string} options.model - The model to use for completion
* @returns {Promise<Object>} The completion response containing message and usage info
* @throws {Error} If messages are invalid or content is flagged by moderation
*/
async complete (messages, { stream, moderation, model }) {
// Validate messages
if ( ! Array.isArray(messages) ) {
@@ -338,6 +404,13 @@
const spending_meta = {};
spending_meta.timestamp = Date.now();
spending_meta.count_tokens_input = token_count;
/**
* Counts output tokens for the completion response using tiktoken
* (an overestimate), recording the result in the spending metadata
* alongside the input token count and timestamp set above.
* @private
*/
spending_meta.count_tokens_output = (() => {
// count output tokens (overestimate)
const enc = this.modules.tiktoken.encoding_for_model(model);

View File

@@ -1,11 +1,26 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const BaseService = require("../../services/BaseService");
const { TypedValue } = require("../../services/drivers/meta/Runtime");
const { Context } = require("../../util/context");
/**
* Service class for generating images using OpenAI's DALL-E API.
* Extends BaseService to provide image generation capabilities through
* the puter-image-generation interface. Supports different aspect ratios
* (square, portrait, landscape) and handles API authentication, request
* validation, and spending tracking.
*/
class OpenAIImageGenerationService extends BaseService {
static MODULES = {
openai: require('openai'),
}
/**
* Initializes the OpenAI client with API credentials from config
* @private
* @async
* @returns {Promise<void>}
*/
async _init () {
const sk_key =
this.config?.openai?.secret_key ??
@@ -24,6 +39,15 @@
}
},
['puter-image-generation']: {
/**
* Generates an image using OpenAI's DALL-E API
* @param {Object} params - Generation parameters
* @param {string} params.prompt - The text description of the image to generate
* @param {boolean} params.test_mode - If true, returns a placeholder image instead of calling the API
* @returns {Promise<TypedValue|string>} The generated image (a URL in the non-test path)
* @throws {Error} If the prompt is not a string or the requested ratio is invalid
*/
async generate ({ prompt, test_mode }) {
if ( test_mode ) {
return new TypedValue({

View File

@@ -1,7 +1,23 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { AdvancedBase } = require("@heyputer/putility");
const config = require("../../config");
/**
* PuterAIModule class extends AdvancedBase to manage and register various AI services.
* This module handles the initialization and registration of multiple AI-related services
* including text processing, speech synthesis, chat completion, and image generation.
* Services are conditionally registered based on configuration settings, allowing for
* flexible deployment with different AI providers like AWS, OpenAI, Claude, Together AI,
* Mistral, Groq, and XAI.
* @extends AdvancedBase
*/
class PuterAIModule extends AdvancedBase {
/**
* Installs the AI services into the service container. Each service
* (AWS Polly, AWS Textract, OpenAI, Claude, Together AI, Mistral,
* Groq, XAI) is registered only when its configuration is present.
* @param {Object} context - Installation context providing access to the service container
*/
async install (context) {
const services = context.get('services');

View File

@@ -0,0 +1,333 @@
# PuterAIModule
PuterAIModule class extends AdvancedBase to manage and register various AI services.
This module handles the initialization and registration of multiple AI-related services
including text processing, speech synthesis, chat completion, and image generation.
Services are conditionally registered based on configuration settings, allowing for
flexible deployment with different AI providers like AWS, OpenAI, Claude, Together AI,
Mistral, Groq, and XAI.
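The conditional registration pattern looks roughly like this (a sketch; the config keys and the `registerService` call are assumptions based on the class description, not verified against the source):

```javascript
async install (context) {
    const services = context.get('services');
    // Hypothetical guard — actual config key names may differ
    if ( config?.services?.openai ) {
        const { OpenAICompletionService } = require('./OpenAICompletionService');
        services.registerService('openai-completion', OpenAICompletionService);
    }
    // ...similar guarded blocks for Claude, Groq, Mistral, Together AI, etc.
}
```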
## Services
### AIChatService
AIChatService class extends BaseService to provide AI chat completion functionality.
Manages multiple AI providers, models, and fallback mechanisms for chat interactions.
Handles model registration, usage tracking, cost calculation, content moderation,
and implements the puter-chat-completion driver interface. Supports streaming responses
and maintains detailed model information including pricing and capabilities.
#### Listeners
##### `boot.consolidation`
Handles consolidation during service boot by registering service aliases
and populating model lists/maps from providers.
Registers each provider as an 'ai-chat' service alias and fetches their
available models and pricing information. Populates:
- simple_model_list: Basic list of supported models
- detail_model_list: Detailed model info including costs
- detail_model_map: Maps model IDs/aliases to their details
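The detail map is built with a small helper that tolerates alias conflicts; a sketch based on the in-source comments:

```javascript
const detail_model_map = {};
const set_or_push = (key, model) => {
    // Typical case: no conflict
    if ( ! detail_model_map[key] ) {
        detail_model_map[key] = model;
        return;
    }
    // Conflict: convert the entry to an array and push the new model
    if ( ! Array.isArray(detail_model_map[key]) ) {
        detail_model_map[key] = [detail_model_map[key]];
    }
    detail_model_map[key].push(model);
};
```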
#### Methods
##### `register_provider`
##### `moderate`
Moderates chat messages for inappropriate content using OpenAI's moderation service
###### Parameters
- **params:** The parameters object
- **params.messages:** Array of chat messages to moderate
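A sketch of the moderation flow described in the source comments (the structured-content shape is an assumption):

```javascript
async function moderate ({ messages }, services) {
    const svc_openai = services.get('openai-completion');
    if ( ! svc_openai ) return true; // service unavailable: allow through
    for ( const msg of messages ) {
        // Content may be a plain string or a structured message object
        const text = typeof msg.content === 'string'
            ? msg.content
            : (msg.content ?? []).map(part => part.text ?? '').join(' ');
        const { flagged } = await svc_openai.check_moderation(text);
        if ( flagged ) return false; // reject immediately on any flag
    }
    return true;
}
```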
##### `get_delegate`
Gets the appropriate delegate service for handling chat completion requests.
If the intended service is this service (ai-chat), returns undefined.
Otherwise returns the intended service wrapped as a puter-chat-completion interface.
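A sketch of the delegation check (the first lines mirror the visible source; the wrapping call at the end is an assumption):

```javascript
get_delegate () {
    const client_driver_call = Context.get('client_driver_call');
    if ( client_driver_call.intended_service === this.service_name ) {
        return undefined; // ai-chat itself: no delegate
    }
    // Assumed: wrap the intended service in its driver interface
    const service = this.services.get(client_driver_call.intended_service);
    return service.as('puter-chat-completion');
}
```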
##### `get_fallback_model`
Find an appropriate fallback model by sorting the list of models
by the Euclidean distance of the input/output prices and selecting
the first one that is not in the tried list.
###### Parameters
- **param0:** destructured options object (see the sketch below)
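A minimal sketch of that selection strategy, assuming each model entry carries `cost.input` and `cost.output` fields as described elsewhere in these docs (the names here are illustrative):

```javascript
// Hypothetical: pick the untried model whose pricing is closest
// (Euclidean distance over input/output cost) to the failed model's.
const get_fallback_model = (failed, models, tried) => {
    const dist = (a, b) => Math.sqrt(
        (a.cost.input - b.cost.input) ** 2 +
        (a.cost.output - b.cost.output) ** 2
    );
    return models
        .filter(m => ! tried.includes(m.id))
        .sort((a, b) => dist(a, failed) - dist(b, failed))[0];
};
```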
##### `get_model_from_request`
### AIInterfaceService
Service class that manages AI interface registrations and configurations.
Handles registration of various AI services including OCR, chat completion,
image generation, and text-to-speech interfaces. Each interface defines
its available methods, parameters, and expected results.
#### Listeners
##### `driver.register.interfaces`
Registers OCR, chat completion, image generation, and text-to-speech
interfaces with the driver registry. Each interface definition lists
its available methods, parameters, and expected results.
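A registration entry might look like the following (the first two lines mirror the source; the interface shape and the `set` call are illustrative assumptions, not the actual definitions):

```javascript
const svc_registry = this.services.get('registry');
const col_interfaces = svc_registry.get('interfaces');
// Hypothetical entry — method and parameter shapes are assumptions
col_interfaces.set('puter-tts', {
    description: 'Text-to-speech synthesis',
    methods: {
        synthesize: {
            parameters: { text: { type: 'string' } },
            result: { type: 'audio' },
        },
    },
});
```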
### AITestModeService
Service class that handles AI test mode functionality.
Extends BaseService to register test services for AI chat completions.
Used for testing and development of AI-related features by providing
a mock implementation of the chat completion service.
### AWSPollyService
AWSPollyService class provides text-to-speech functionality using Amazon Polly.
Extends BaseService to integrate with AWS Polly for voice synthesis operations.
Implements voice listing, speech synthesis, and voice selection based on language.
Includes caching for voice descriptions and supports both text and SSML inputs.
#### Methods
##### `describe_voices`
Describes available AWS Polly voices and caches the results
##### `synthesize_speech`
Synthesizes speech from text using AWS Polly
###### Parameters
- **text:** The text to synthesize
- **options:** Synthesis options
- **options.format:** Output audio format (e.g. 'mp3')
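Hypothetical direct usage of the two methods above (the service lookup name is an assumption):

```javascript
const polly = services.get('aws-polly'); // assumed registration name
const voices = await polly.describe_voices(); // cached for 10 minutes
const audio = await polly.synthesize_speech('Hello from Puter!', {
    format: 'mp3',
    language: 'en-US', // a matching voice is chosen when voice_id is omitted
});
```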
### AWSTextractService
AWSTextractService class - Provides OCR (Optical Character Recognition) functionality using AWS Textract
Extends BaseService to integrate with AWS Textract for document analysis and text extraction.
Implements driver capabilities and puter-ocr interface for document recognition.
Handles both S3-stored and buffer-based document processing with automatic region management.
#### Methods
##### `analyze_document`
Analyzes a document using AWS Textract to extract text and layout information
###### Parameters
- **file_facade:** Interface to access the document file
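The S3-first, buffer-fallback behavior works roughly like this (a sketch using the command and exception types imported in the source file):

```javascript
async function analyze_document (service, file_facade) {
    const { client, document, using_s3 } =
        await service._get_client_and_document(file_facade);
    try {
        return await client.send(new AnalyzeDocumentCommand({
            Document: document,
            FeatureTypes: ['LAYOUT'],
        }));
    } catch (e) {
        // If Textract can't read the S3 object, retry with a buffer upload
        if ( using_s3 && e instanceof InvalidS3ObjectException ) {
            const retry = await service._get_client_and_document(file_facade, true);
            return await retry.client.send(new AnalyzeDocumentCommand({
                Document: retry.document,
                FeatureTypes: ['LAYOUT'],
            }));
        }
        throw e;
    }
}
```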
### ClaudeEnoughService
ClaudeEnoughService - A service class that implements a Claude-like AI interface
Extends XAIService to provide Claude-compatible responses while using alternative AI models.
Includes custom system prompts and model adaptation to simulate Claude's behavior
in the Puter platform's chat completion interface.
#### Methods
##### `get_system_prompt`
Returns the Claude-style system prompt used for this service's chat completions
##### `adapt_model`
### ClaudeService
ClaudeService class extends BaseService to provide integration with Anthropic's Claude AI models.
Implements the puter-chat-completion interface for handling AI chat interactions.
Manages message streaming, token limits, model selection, and API communication with Claude.
Supports system prompts, message adaptation, and usage tracking.
#### Methods
##### `get_default_model`
Returns the default model identifier for Claude API interactions
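Input size is guarded by a simple heuristic before the API is called; a sketch of the check described in the source comments (`adapted_messages` and `system_prompts` are names from the source):

```javascript
const MAX_CLAUDE_INPUT_TOKENS = 10000; // constant from the source
// Character length divided by 4 approximates the token count
const token_count = (JSON.stringify(adapted_messages) +
    JSON.stringify(system_prompts)).length / 4;
if ( token_count > MAX_CLAUDE_INPUT_TOKENS ) {
    // rejected with an APIError before the request is sent
}
```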
### FakeChatService
FakeChatService - A mock implementation of a chat service that extends BaseService.
Provides fake chat completion responses using Lorem Ipsum text generation.
Used for testing and development purposes when a real chat service is not needed.
Implements the 'puter-chat-completion' interface with list() and complete() methods.
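A sketch of the mock completion, using the same `lorem-ipsum` package the service requires (the response shape is an assumption):

```javascript
const { LoremIpsum } = require('lorem-ipsum');
const li = new LoremIpsum({
    sentencesPerParagraph: { max: 8, min: 4 },
    wordsPerSentence: { max: 16, min: 4 },
});
// Assumed shape of a chat completion response
const response = {
    message: {
        role: 'assistant',
        content: li.generateParagraphs(1),
    },
};
```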
### GroqAIService
Service class for integrating with Groq AI's language models.
Extends BaseService to provide chat completion capabilities through the Groq API.
Implements the puter-chat-completion interface for model management and text generation.
Supports both streaming and non-streaming responses, handles multiple models including
various versions of Llama, Mixtral, and Gemma, and manages usage tracking.
#### Methods
##### `get_default_model`
Returns the default model ID for the Groq AI service
### MistralAIService
MistralAIService class extends BaseService to provide integration with the Mistral AI API.
Implements chat completion functionality with support for various Mistral models including
mistral-large, pixtral, codestral, and ministral variants. Handles both streaming and
non-streaming responses, token usage tracking, and model management. Provides cost information
for different models and implements the puter-chat-completion interface.
#### Methods
##### `get_default_model`
Returns the default model ID for the Mistral AI service ('mistral-large-latest').
### OpenAICompletionService
OpenAICompletionService class provides an interface to OpenAI's chat completion API.
Extends BaseService to handle chat completions, message moderation, token counting,
and streaming responses. Implements the puter-chat-completion interface and manages
OpenAI API interactions with support for multiple models including GPT-4 variants.
Handles usage tracking, spending records, and content moderation.
#### Methods
##### `get_default_model`
Gets the default model identifier for OpenAI completions
##### `check_moderation`
Checks text content against OpenAI's moderation API for inappropriate content
###### Parameters
- **text:** The text content to check for moderation
##### `complete`
Completes a chat conversation using OpenAI's API
###### Parameters
- **messages:** Array of message objects or strings representing the conversation
- **options:** Configuration options
- **options.stream:** Whether to stream the response
- **options.moderation:** Whether to perform content moderation
- **options.model:** The model to use for completion
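Hypothetical direct call, using the `openai-completion` name this service is looked up by elsewhere in the module:

```javascript
const svc = services.get('openai-completion');
const result = await svc.complete([
    { role: 'user', content: 'Say hello.' },
], {
    stream: false,
    moderation: true,
    model: 'gpt-4o-mini', // the default per get_default_model()
});
// result contains the completion message and usage info
```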
### OpenAIImageGenerationService
Service class for generating images using OpenAI's DALL-E API.
Extends BaseService to provide image generation capabilities through
the puter-image-generation interface. Supports different aspect ratios
(square, portrait, landscape) and handles API authentication, request
validation, and spending tracking.
#### Methods
##### `generate`
### TogetherAIService
TogetherAIService class provides integration with Together AI's language models.
Extends BaseService to implement chat completion functionality through the
puter-chat-completion interface. Manages model listings, chat completions,
and streaming responses while handling usage tracking and model fallback testing.
#### Methods
##### `get_default_model`
Returns the default model ID for the Together AI service
### XAIService
XAIService class - Provides integration with X.AI's API for chat completions
Extends BaseService to implement the puter-chat-completion interface.
Handles model management, message adaptation, streaming responses,
and usage tracking for X.AI's language models like Grok.
#### Methods
##### `get_system_prompt`
Gets the system prompt used for AI interactions
##### `adapt_model`
##### `get_default_model`
Returns the default model identifier for the XAI service
## Notes
### Outside Imports
This module has external relative imports. When these are
removed it may become possible to move this module to an
extension.
**Imports:**
- `../../api/APIError`
- `../../services/auth/PermissionService`
- `../../services/BaseService` (use.BaseService)
- `../../services/database/consts`
- `../../services/drivers/meta/Construct`
- `../../services/drivers/meta/Runtime`
- `../../util/context`
- `../../util/langutil`
- `../../util/promise`
- `../../util/smolutil`
- `../../config`

View File

@@ -1,9 +1,18 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { PassThrough } = require("stream");
const BaseService = require("../../services/BaseService");
const { TypedValue } = require("../../services/drivers/meta/Runtime");
const { nou } = require("../../util/langutil");
const { TeePromise } = require("../../util/promise");
/**
* TogetherAIService class provides integration with Together AI's language models.
* Extends BaseService to implement chat completion functionality through the
* puter-chat-completion interface. Manages model listings, chat completions,
* and streaming responses while handling usage tracking and model fallback testing.
* @extends BaseService
*/
class TogetherAIService extends BaseService {
static MODULES = {
['together-ai']: require('together-ai'),
@@ -11,6 +20,13 @@ class TogetherAIService extends BaseService {
uuidv4: require('uuid').v4,
}
/**
* Initializes the TogetherAI service by setting up the API client and registering as a chat provider
* @async
* @returns {Promise<void>}
* @private
*/
async _init () {
const require = this.require;
const Together = require('together-ai');
@@ -27,20 +43,41 @@ class TogetherAIService extends BaseService {
});
}
/**
* Returns the default model ID for the Together AI service
* @returns {string} The ID of the default model (meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo)
*/
get_default_model () {
return 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo';
}
static IMPLEMENTS = {
['puter-chat-completion']: {
/**
* Implements the puter-chat-completion interface for TogetherAI service
* Contains methods for listing models and generating chat completions
* @property {Object} models - Method to get available models
* @property {Object} list - Method to get list of model IDs
* @property {Object} complete - Method to generate chat completions
*/
async models () {
return await this.models_();
},
/**
* Lists available AI model IDs from the KV cache, fetching them from
* the Together API if not cached
* @returns {Promise<string[]>} Array of model ID strings
*/
async list () {
let models = this.modules.kv.get(`${this.kvkey}:models`);
if ( ! models ) models = await this.models_();
return models.map(model => model.id);
},
/**
* Completes a chat interaction using the Together AI API
* @param {Object} options - The completion options
* @param {Array} options.messages - Array of chat messages
* @param {boolean} options.stream - Whether to stream the response
* @param {string} [options.model] - Model ID, defaults to the service default; the special
*   'model-fallback-test-1' ID throws to exercise fallback handling
*/
async complete ({ messages, stream, model }) {
if ( model === 'model-fallback-test-1' ) {
throw new Error('Model Fallback Test 1');
@@ -103,6 +140,14 @@
}
}
/**
* Fetches and caches available AI models from Together API
* @private
* @returns {Promise<Array>} Array of model objects containing id, name, context length,
* description and pricing information
* @remarks Models are cached for 5 minutes in KV store
*/
async models_ () {
let models = this.modules.kv.get(`${this.kvkey}:models`);
if ( models ) return models;

View File

@@ -1,3 +1,4 @@
// METADATA // {"ai-commented":{"service":"claude"}}
const { default: Anthropic } = require("@anthropic-ai/sdk");
const BaseService = require("../../services/BaseService");
const { whatis, nou } = require("../../util/langutil");
@@ -14,11 +15,24 @@ const PUTER_PROMPT = `
user of the driver interface (typically an app on Puter):
`.replace('\n', ' ').trim();
/**
* XAIService class - Provides integration with X.AI's API for chat completions
* Extends BaseService to implement the puter-chat-completion interface.
* Handles model management, message adaptation, streaming responses,
* and usage tracking for X.AI's language models like Grok.
* @extends BaseService
*/
class XAIService extends BaseService {
static MODULES = {
openai: require('openai'),
}
/**
* Gets the system prompt used for AI interactions
* @returns {string} The base system prompt that identifies the AI as running on Puter
*/
get_system_prompt () {
return PUTER_PROMPT;
}
@@ -27,6 +41,12 @@ class XAIService extends BaseService {
return model;
}
/**
* Initializes the XAI service by setting up the OpenAI client and registering with the AI chat provider
* @private
* @returns {Promise<void>} Resolves when initialization is complete
*/
async _init () {
this.openai = new this.modules.openai.OpenAI({
apiKey: this.global_config.services.xai.apiKey,
@@ -40,15 +60,30 @@ class XAIService extends BaseService {
});
}
/**
* Returns the default model identifier for the XAI service
* @returns {string} The default model ID 'grok-beta'
*/
get_default_model () {
return 'grok-beta';
}
static IMPLEMENTS = {
['puter-chat-completion']: {
/**
* Implements the interface for the puter-chat-completion driver
* Contains methods for listing models, getting model details,
* and handling chat completions with streaming support
* @type {Object}
*/
async models () {
return await this.models_();
},
/**
* Returns a list of all available model names including their aliases
* @returns {Promise<string[]>} Array of model names and their aliases
*/
async list () {
const models = await this.models_();
const model_names = [];
@@ -60,6 +95,10 @@ class XAIService extends BaseService {
}
return model_names;
},
/**
* Completes a chat interaction using X.AI's API
* @param {Object} options - The completion options
* @param {Array} options.messages - Array of chat messages in the conversation
* @param {boolean} options.stream - Whether to stream the response
* @param {string} [options.model] - Model ID, adapted via adapt_model() and defaulting to 'grok-beta'
*/
async complete ({ messages, stream, model }) {
model = this.adapt_model(model);
const adapted_messages = [];
@@ -162,6 +201,16 @@
}
}
/**
* Retrieves available AI models and their specifications
* @returns {Promise<Array>} Array of model objects containing:
* - id: Model identifier string
* - name: Human readable model name
* - context: Maximum context window size
* - cost: Pricing information object with currency and rates
* @private
*/
async models_ () {
return [
{

View File

@@ -76,7 +76,7 @@ const handle_file = (code, context) => {
}
// Library files
{
if ( fs.existsSync(path_.join(rootdir, 'lib')) ) {
const files = fs.readdirSync(path_.join(rootdir, 'lib'));
for ( const file of files ) {
if ( file.startsWith('_') ) continue;