
doc: fix comments for list(), models(), complete()

KernelDeimos 4 months ago
parent
commit
57befd310d

+ 58 - 22
src/backend/src/modules/puterai/AIChatService.js

@@ -227,46 +227,82 @@ class AIChatService extends BaseService {
                     method_name === 'complete';
             }
         },
+        /**
+        * Implements the 'puter-chat-completion' interface methods for AI chat functionality.
+        * Handles model selection, fallbacks, usage tracking, and moderation.
+        * Contains methods for listing available models, completing chat prompts,
+        * and managing provider interactions.
+        * 
+        * @property {Object} models - Available AI models with details like costs
+        * @property {Object} list - Simplified list of available models
+        * @property {Object} complete - Main method for chat completion requests
+        * @param {Object} parameters - Chat completion parameters including model and messages
+        * @returns {Promise<Object>} Chat completion response with usage stats
+        * @throws {Error} If service is called directly or no fallback models available
+        */
         ['puter-chat-completion']: {
             /**
-            * Implements the 'puter-chat-completion' interface methods for AI chat functionality.
-            * Handles model selection, fallbacks, usage tracking, and moderation.
-            * Contains methods for listing available models, completing chat prompts,
-            * and managing provider interactions.
+            * Returns list of available AI models with detailed information
+            * 
+            * Delegates to the intended service's models() method if a delegate exists,
+            * otherwise returns the internal detail_model_list containing all available models
+            * across providers with their capabilities and pricing information.
             * 
-            * @property {Object} models - Available AI models with details like costs
-            * @property {Object} list - Simplified list of available models
-            * @property {Object} complete - Main method for chat completion requests
-            * @param {Object} parameters - Chat completion parameters including model and messages
-            * @returns {Promise<Object>} Chat completion response with usage stats
-            * @throws {Error} If service is called directly or no fallback models available
+            * For an example of the expected model object structure, see the `async models_`
+            * private method at the bottom of any service with hard-coded model details such
+            * as ClaudeService or GroqAIService.
+            * 
+            * @returns {Promise<Array<Object>>} Array of model objects with details like id, provider, cost, etc.
             */
             async models () {
                 const delegate = this.get_delegate();
                 if ( ! delegate ) return await this.models_();
                 return await delegate.models();
             },
+
             /**
-            * Returns list of available AI models with detailed information
-            * 
-            * Delegates to the intended service's models() method if a delegate exists,
-            * otherwise returns the internal detail_model_list containing all available models
-            * across providers with their capabilities and pricing information.
-            * 
-            * @returns {Promise<Array>} Array of model objects with details like id, provider, cost, etc.
-            */
+             * Returns model names (including aliases) only, with no additional
+             * detail.
+             * @returns {Promise<Array<string>>} Array of model name strings
+             */
             async list () {
                 const delegate = this.get_delegate();
                 if ( ! delegate ) return await this.list_();
                 return await delegate.list();
             },
+
             /**
-            * Lists available AI models in a simplified format
+            * Completes a chat interaction using one of the available AI models
+            * 
+            * This service registers itself under an alias for each other AI
+            * chat service, which results in DriverService always calling this
+            * `complete` implementation first, which delegates to the intended
+            * service.
+            * 
+            * The return value may be anything that DriverService knows how to
+            * coerce to the intended result. When `options.stream` is FALSE,
+            * this is typically a raw object for the JSON response. When
+            * `options.stream` is TRUE, the result is a TypedValue with this
+            * structure:
             * 
-            * Returns a list of basic model information from all registered providers.
-            * This is a simpler version compared to models() that returns less detailed info.
+            *   ai-chat-intermediate {
+            *     usage_promise: Promise,
+            *     stream: true,
+            *     response: stream {
+            *       content_type: 'application/x-ndjson',
+            *     }
+            *   }
             * 
-            * @returns {Promise<Array>} Array of simplified model objects
+            * The `usage_promise` is a promise that resolves to the usage
+            * information for the completion. This is used to report usage
+            * as soon as possible regardless of when it is reported in the
+            * stream. 
+            *
+            * @param {Object} options - The completion options
+            * @param {Array} options.messages - Array of chat messages to process
+            * @param {boolean} options.stream - Whether to stream the response
+            * @param {string} options.model   - The name of a model to use
+            * @returns {TypedValue|Object} Returns either a TypedValue with streaming response or a completion object
             */
             async complete (parameters) {
                 const client_driver_call = Context.get('client_driver_call');

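Note: for reference, here is a minimal sketch of a single entry in the array that `models()` is documented to return. The field names are illustrative assumptions modeled on the hard-coded model lists mentioned above (e.g. the private `models_()` method in ClaudeService or GroqAIService); the authoritative shape is whatever each provider service actually returns.

    // Hypothetical model detail object; field names are assumptions for illustration.
    const exampleModelDetail = {
        id: 'claude-3-5-sonnet-latest',     // canonical model identifier
        name: 'Claude 3.5 Sonnet',          // assumed human-readable label
        aliases: ['claude-3-5-sonnet'],     // alternative names accepted by complete()
        context: 200000,                    // assumed context window size, in tokens
        cost: {
            currency: 'usd-cents',          // assumed unit for the cost figures
            tokens: 1_000_000,              // cost quoted per this many tokens
            input: 300,                     // input-token cost
            output: 1500,                   // output-token cost
        },
    };
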
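Note: the streaming shape described for `complete()` can be consumed roughly as follows. This is a hedged sketch: the `.value` accessor on the TypedValue and the exact interface of the response stream are assumptions for illustration, not the real DriverService API.

    // Hypothetical consumer of the 'ai-chat-intermediate' TypedValue described above.
    // Assumes the wrapped value is { usage_promise, stream: true, response } and that
    // `response` is an async-iterable (Node.js Readable) of newline-delimited JSON.
    async function consumeChatStream (typedValue) {
        const { usage_promise, response } = typedValue.value; // assumed accessor

        // Usage is reported as soon as it resolves, independently of stream progress.
        usage_promise.then(usage => console.log('usage:', usage));

        let buffer = '';
        for await ( const chunk of response ) {
            buffer += chunk.toString('utf8');
            let idx;
            while ( (idx = buffer.indexOf('\n')) !== -1 ) {
                const line = buffer.slice(0, idx).trim();
                buffer = buffer.slice(idx + 1);
                if ( line ) console.log(JSON.parse(line)); // one ndjson event
            }
        }
    }
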
+ 8 - 8
src/backend/src/modules/puterai/ClaudeService.js

@@ -60,20 +60,19 @@ class ClaudeService extends BaseService {
     static IMPLEMENTS = {
         ['puter-chat-completion']: {
             /**
-            * Implements the puter-chat-completion interface for Claude AI models
-            * @param {Object} options - Configuration options for the chat completion
-            * @param {Array} options.messages - Array of message objects containing the conversation history
-            * @param {boolean} options.stream - Whether to stream the response
-            * @param {string} [options.model] - The Claude model to use, defaults to claude-3-5-sonnet-latest
-            * @returns {TypedValue|Object} Returns either a TypedValue with streaming response or a completion object
-            */
+             * Returns a list of available models and their details.
+             * See AIChatService for more information.
+             * 
+             * @returns {Promise<Array<Object>>} Array of model details
+             */
             async models () {
                 return await this.models_();
             },
+
             /**
             * Returns a list of available model names including their aliases
             * @returns {Promise<string[]>} Array of model identifiers and their aliases
-            * @description Retrieves all available Claude model IDs and their aliases,
+            * @description Retrieves all available model IDs and their aliases,
             * flattening them into a single array of strings that can be used for model selection
             */
             async list () {
@@ -87,6 +86,7 @@ class ClaudeService extends BaseService {
                 }
                 return model_names;
             },
+
             /**
             * Completes a chat interaction with the Claude AI model
             * @param {Object} options - The completion options

+ 5 - 4
src/backend/src/modules/puterai/FakeChatService.js

@@ -16,14 +16,15 @@ class FakeChatService extends BaseService {
     static IMPLEMENTS = {
         ['puter-chat-completion']: {
             /**
-            * Implementation interface for the puter-chat-completion service.
-            * Provides fake chat completion functionality for testing purposes.
-            * Contains methods for listing available models and generating mock responses.
-            * @interface
+            * Returns the list of model names offered by this fake service
+            * @returns {Promise<string[]>} Array containing the single 'fake' model identifier
+            * @description Used in place of a real provider for testing; the only
+            * model name reported is 'fake'
             */
             async list () {
                 return ['fake'];
             },
+
             /**
             * Simulates a chat completion request by generating random Lorem Ipsum text
             * @param {Object} params - The completion parameters

+ 9 - 11
src/backend/src/modules/puterai/GroqAIService.js

@@ -51,21 +51,19 @@ class GroqAIService extends BaseService {
     static IMPLEMENTS = {
         'puter-chat-completion': {
             /**
-            * Defines the interface implementations for the puter-chat-completion service
-            * Contains methods for listing models and handling chat completions
-            * @property {Object} models - Returns available AI models
-            * @property {Object} list - Lists raw model data from the Groq API
-            * @property {Object} complete - Handles chat completion requests with optional streaming
-            * @returns {Object} Interface implementation object
-            */
+             * Returns a list of available models and their details.
+             * See AIChatService for more information.
+             * 
+             * @returns {Promise<Array<Object>>} Array of model details
+             */
             async models () {
                 return await this.models_();
             },
             /**
-            * Lists available AI models from the Groq API
-            * @returns {Promise<Array>} Array of model objects from the API's data field
-            * @description Unwraps and returns the model list from the Groq API response,
-            * which comes wrapped in an object with {object: "list", data: [...]}
+            * Returns a list of available model names including their aliases
+            * @returns {Promise<string[]>} Array of model identifiers and their aliases
+            * @description Retrieves all available model IDs and their aliases,
+            * flattening them into a single array of strings that can be used for model selection
             */
             async list () {
                 // They send: { "object": "list", data }

+ 14 - 12
src/backend/src/modules/puterai/MistralAIService.js

@@ -173,27 +173,29 @@ class MistralAIService extends BaseService {
     static IMPLEMENTS = {
         'puter-chat-completion': {
             /**
-            * Implements the puter-chat-completion interface for MistralAI service
-            * Provides methods for listing models and generating chat completions
-            * @interface
-            * @property {Function} models - Returns array of available model details
-            * @property {Function} list - Returns array of model IDs
-            * @property {Function} complete - Generates chat completion with optional streaming
-            */
+             * Returns a list of available models and their details.
+             * See AIChatService for more information.
+             * 
+             * @returns {Promise<Array<Object>>} Array of model details
+             */
             async models () {
                 return this.models_array_;
             },
+
             /**
-            * Returns an array of available AI models with their details
-            * @returns {Promise<Array>} Array of model objects containing id, name, aliases, context window size, capabilities, and cost information
+            * Returns a list of available model names
+            * @returns {Promise<string[]>} Array of model identifiers
+            * @description Retrieves all available model IDs as a single array of
+            * strings that can be used for model selection
             */
             async list () {
                 return this.models_array_.map(m => m.id);
             },
+
             /**
-            * Returns an array of model IDs supported by the MistralAI service
-            * @returns {Promise<string[]>} Array of model identifier strings
-            */
+             * AI Chat completion method.
+             * See AIChatService for more details.
+             */
             async complete ({ messages, stream, model }) {
 
                 for ( let i = 0; i < messages.length; i++ ) {

+ 14 - 16
src/backend/src/modules/puterai/OpenAICompletionService.js

@@ -102,23 +102,20 @@ class OpenAICompletionService extends BaseService {
     static IMPLEMENTS = {
         ['puter-chat-completion']: {
             /**
-            * Implements the puter-chat-completion interface methods for model listing and chat completion
-            * @property {Object} models - Returns available AI models and their pricing
-            * @property {Function} list - Returns list of available model names/aliases
-            * @property {Function} complete - Handles chat completion requests with optional streaming
-            * @param {Object} params - Parameters for completion
-            * @param {Array} params.messages - Array of chat messages
-            * @param {boolean} params.test_mode - Whether to use test mode
-            * @param {boolean} params.stream - Whether to stream responses
-            * @param {string} params.model - Model ID to use
-            */
+             * Returns a list of available models and their details.
+             * See AIChatService for more information.
+             * 
+             * @returns {Promise<Array<Object>>} Array of model details
+             */
             async models () {
                 return await this.models_();
             },
+
             /**
-            * Retrieves a list of available AI models with their cost information
-            * @returns {Promise<Array>} Array of model objects containing id and cost details
-            * @private
+            * Returns a list of available model names including their aliases
+            * @returns {Promise<string[]>} Array of model identifiers and their aliases
+            * @description Retrieves all available model IDs and their aliases,
+            * flattening them into a single array of strings that can be used for model selection
             */
             async list () {
                 const models = await this.models_();
@@ -131,10 +128,11 @@ class OpenAICompletionService extends BaseService {
                 }
                 return model_names;
             },
+
             /**
-            * Lists all available model names including aliases
-            * @returns {Promise<string[]>} Array of model IDs and their aliases
-            */
+             * AI Chat completion method.
+             * See AIChatService for more details.
+             */
             async complete ({ messages, test_mode, stream, model }) {
 
                 // for now this code (also in AIChatService.js) needs to be

+ 13 - 12
src/backend/src/modules/puterai/TogetherAIService.js

@@ -55,19 +55,20 @@ class TogetherAIService extends BaseService {
     static IMPLEMENTS = {
         ['puter-chat-completion']: {
             /**
-            * Implements the puter-chat-completion interface for TogetherAI service
-            * Contains methods for listing models and generating chat completions
-            * @property {Object} models - Method to get available models
-            * @property {Object} list - Method to get list of model IDs
-            * @property {Object} complete - Method to generate chat completions
-            */
+             * Returns a list of available models and their details.
+             * See AIChatService for more information.
+             * 
+             * @returns {Promise<Array<Object>>} Array of model details
+             */
             async models () {
                 return await this.models_();
             },
+
             /**
-            * Retrieves available AI models from the Together API
-            * @returns {Promise<Array>} Array of model objects with their properties
-            * @implements {puter-chat-completion.models}
+            * Returns a list of available model names
+            * @returns {Promise<string[]>} Array of model identifiers
+            * @description Retrieves all available model IDs as a single array of
+            * strings that can be used for model selection
             */
             async list () {
                 let models = this.modules.kv.get(`${this.kvkey}:models`);
@@ -75,9 +76,9 @@ class TogetherAIService extends BaseService {
                 return models.map(model => model.id);
             },
             /**
-            * Lists available AI model IDs from the cache or fetches them if not cached
-            * @returns {Promise<string[]>} Array of model ID strings
-            */
+             * AI Chat completion method.
+             * See AIChatService for more details.
+             */
             async complete ({ messages, stream, model }) {
                 if ( model === 'model-fallback-test-1' ) {
                     throw new Error('Model Fallback Test 1');

+ 13 - 10
src/backend/src/modules/puterai/XAIService.js

@@ -72,17 +72,19 @@ class XAIService extends BaseService {
     static IMPLEMENTS = {
         ['puter-chat-completion']: {
             /**
-            * Implements the interface for the puter-chat-completion driver
-            * Contains methods for listing models, getting model details,
-            * and handling chat completions with streaming support
-            * @type {Object}
-            */
+             * Returns a list of available models and their details.
+             * See AIChatService for more information.
+             * 
+             * @returns {Promise<Array<Object>>} Array of model details
+             */
             async models () {
                 return await this.models_();
             },
             /**
-            * Returns a list of available AI models with their capabilities and pricing details
-            * @returns {Promise<Array>} Array of model objects containing id, name, context window size, and cost information
+            * Returns a list of available model names including their aliases
+            * @returns {Promise<string[]>} Array of model identifiers and their aliases
+            * @description Retrieves all available model IDs and their aliases,
+            * flattening them into a single array of strings that can be used for model selection
             */
             async list () {
                 const models = await this.models_();
@@ -95,10 +97,11 @@ class XAIService extends BaseService {
                 }
                 return model_names;
             },
+
             /**
-            * Returns a list of all available model names including their aliases
-            * @returns {Promise<string[]>} Array of model names and their aliases
-            */
+             * AI Chat completion method.
+             * See AIChatService for more details.
+             */
             async complete ({ messages, stream, model }) {
                 model = this.adapt_model(model);
                 const adapted_messages = [];