add swaggerUI docstrings for LLM/AI API routes

Author: perf3ct
Date: 2025-03-26 19:19:19 +00:00
parent 7c519df9b5
commit 15630fb432
5 changed files with 1136 additions and 29 deletions

View File

@@ -20,7 +20,58 @@ interface AnthropicModel {
}
/**
* List available models from Anthropic
* @swagger
* /api/anthropic/models:
* post:
* summary: List available models from Anthropic
* operationId: anthropic-list-models
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* baseUrl:
* type: string
* description: Optional custom Anthropic API base URL
* responses:
* '200':
* description: List of available Anthropic models
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* chatModels:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* type:
* type: string
* embeddingModels:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* type:
* type: string
* '500':
* description: Error listing models
* security:
* - session: []
* tags: ["llm"]
*/
async function listModels(req: Request, res: Response) {
try {
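A minimal client sketch of the endpoint documented above, assuming a server at http://localhost:8080 and an authenticated session cookie (the `session` security scheme); helper and type names are illustrative:

interface ModelInfo { id: string; name: string; type: string; }
interface ModelListResponse {
  success: boolean;
  chatModels: ModelInfo[];
  embeddingModels: ModelInfo[];
}

// POST with an empty body uses the default Anthropic API base URL;
// pass { baseUrl } to point at a custom endpoint instead.
async function listAnthropicModels(baseUrl?: string): Promise<ModelListResponse> {
  const res = await fetch("http://localhost:8080/api/anthropic/models", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(baseUrl ? { baseUrl } : {}),
  });
  if (!res.ok) throw new Error(`listing models failed: ${res.status}`);
  return (await res.json()) as ModelListResponse;
}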

View File

@@ -8,7 +8,75 @@ import log from "../../services/log.js";
import sql from "../../services/sql.js";
/**
* Get similar notes based on note ID
* @swagger
* /api/embeddings/similar/{noteId}:
* get:
* summary: Find similar notes based on a given note ID
* operationId: embeddings-similar-by-note
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* - name: providerId
* in: query
* required: false
* schema:
* type: string
* default: openai
* description: Embedding provider ID
* - name: modelId
* in: query
* required: false
* schema:
* type: string
* default: text-embedding-3-small
* description: Embedding model ID
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 10
* description: Maximum number of similar notes to return
* - name: threshold
* in: query
* required: false
* schema:
* type: number
* format: float
* default: 0.7
* description: Similarity threshold (0.0-1.0)
* responses:
* '200':
* description: List of similar notes
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* similarNotes:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* format: float
* '400':
* description: Invalid request parameters
* '404':
* description: Note not found
* security:
* - session: []
* tags: ["llm"]
*/
async function findSimilarNotes(req: Request, res: Response) {
const noteId = req.params.noteId;
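The GET variant puts every tuning knob in the query string. A sketch under the same base-URL and session assumptions; the defaults mirror the docstring (limit 10, threshold 0.7):

interface SimilarNote { noteId: string; title: string; similarity: number; }

async function findSimilarByNote(noteId: string, limit = 10, threshold = 0.7): Promise<SimilarNote[]> {
  const params = new URLSearchParams({ limit: String(limit), threshold: String(threshold) });
  const res = await fetch(
    `http://localhost:8080/api/embeddings/similar/${encodeURIComponent(noteId)}?${params}`
  );
  if (res.status === 404) throw new Error("note not found");
  const body = (await res.json()) as { success: boolean; similarNotes: SimilarNote[] };
  return body.similarNotes;
}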
@@ -70,7 +138,78 @@ async function findSimilarNotes(req: Request, res: Response) {
}
/**
* Search notes by text
* @swagger
* /api/embeddings/search:
* post:
* summary: Search for notes similar to provided text
* operationId: embeddings-search-by-text
* parameters:
* - name: providerId
* in: query
* required: false
* schema:
* type: string
* default: openai
* description: Embedding provider ID
* - name: modelId
* in: query
* required: false
* schema:
* type: string
* default: text-embedding-3-small
* description: Embedding model ID
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 10
* description: Maximum number of similar notes to return
* - name: threshold
* in: query
* required: false
* schema:
* type: number
* format: float
* default: 0.7
* description: Similarity threshold (0.0-1.0)
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* text:
* type: string
* description: Text to search with
* responses:
* '200':
* description: List of similar notes
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* similarNotes:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* format: float
* '400':
* description: Invalid request parameters
* security:
* - session: []
* tags: ["llm"]
*/
async function searchByText(req: Request, res: Response) {
const { text } = req.body;
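This route mixes styles: providerId, modelId, limit, and threshold travel as query parameters while the search text goes in the JSON body. A hedged sketch:

interface SimilarNote { noteId: string; title: string; similarity: number; }

async function searchNotesByText(text: string): Promise<SimilarNote[]> {
  // providerId / modelId mirror the documented defaults and could be omitted.
  const params = new URLSearchParams({ providerId: "openai", modelId: "text-embedding-3-small" });
  const res = await fetch(`http://localhost:8080/api/embeddings/search?${params}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ text }),
  });
  if (!res.ok) throw new Error(`search failed: ${res.status}`);
  const body = (await res.json()) as { success: boolean; similarNotes: SimilarNote[] };
  return body.similarNotes;
}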
@@ -110,7 +249,39 @@ async function searchByText(req: Request, res: Response) {
}
/**
* Get embedding providers
* @swagger
* /api/embeddings/providers:
* get:
* summary: Get available embedding providers
* operationId: embeddings-get-providers
* responses:
* '200':
* description: List of available embedding providers
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* providers:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* isEnabled:
* type: boolean
* priority:
* type: integer
* config:
* type: object
* security:
* - session: []
* tags: ["llm"]
*/
async function getProviders(req: Request, res: Response) {
const providerConfigs = await providerManager.getEmbeddingProviderConfigs();
@@ -122,7 +293,49 @@ async function getProviders(req: Request, res: Response) {
}
/**
* Update provider configuration
* @swagger
* /api/embeddings/providers/{providerId}:
* patch:
* summary: Update embedding provider configuration
* operationId: embeddings-update-provider
* parameters:
* - name: providerId
* in: path
* required: true
* schema:
* type: string
* description: ID of the embedding provider to update
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* isEnabled:
* type: boolean
* description: Whether the provider is enabled
* priority:
* type: integer
* description: Priority level for the provider
* config:
* type: object
* description: Provider-specific configuration
* responses:
* '200':
* description: Provider successfully updated
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* '404':
* description: Provider not found
* security:
* - session: []
* tags: ["llm"]
*/
async function updateProvider(req: Request, res: Response) {
const { providerId } = req.params;
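Updates are partial; any subset of isEnabled, priority, and config may be sent. A sketch that enables a provider (the providerId value is illustrative):

async function enableProvider(providerId: string): Promise<boolean> {
  const res = await fetch(
    `http://localhost:8080/api/embeddings/providers/${encodeURIComponent(providerId)}`,
    {
      method: "PATCH",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ isEnabled: true, priority: 1 }),
    }
  );
  if (res.status === 404) throw new Error(`unknown provider: ${providerId}`);
  const body = (await res.json()) as { success: boolean };
  return body.success;
}

// e.g. await enableProvider("openai");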
@@ -145,7 +358,26 @@ async function updateProvider(req: Request, res: Response) {
}
/**
* Manually trigger a reprocessing of all notes
* @swagger
* /api/embeddings/reprocess:
* post:
* summary: Reprocess all notes for embedding generation
* operationId: embeddings-reprocess-all
* responses:
* '200':
* description: Reprocessing started successfully
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* security:
* - session: []
* tags: ["llm"]
*/
async function reprocessAllNotes(req: Request, res: Response) {
// Import cls
@@ -172,7 +404,36 @@ async function reprocessAllNotes(req: Request, res: Response) {
}
/**
* Get embedding queue status
* @swagger
* /api/embeddings/queue-status:
* get:
* summary: Get status of the embedding generation queue
* operationId: embeddings-queue-status
* responses:
* '200':
* description: Queue status information
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* status:
* type: object
* properties:
* queueCount:
* type: integer
* description: Number of items in the queue
* failedCount:
* type: integer
* description: Number of failed embedding attempts
* totalEmbeddingsCount:
* type: integer
* description: Total number of generated embeddings
* security:
* - session: []
* tags: ["llm"]
*/
async function getQueueStatus(req: Request, res: Response) {
// Use the imported sql instead of requiring it
@@ -199,7 +460,36 @@ async function getQueueStatus(req: Request, res: Response) {
}
/**
* Get embedding statistics
* @swagger
* /api/embeddings/stats:
* get:
* summary: Get embedding statistics
* operationId: embeddings-stats
* responses:
* '200':
* description: Embedding statistics
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* stats:
* type: object
* properties:
* totalEmbeddings:
* type: integer
* providers:
* type: object
* modelCounts:
* type: object
* lastUpdated:
* type: string
* format: date-time
* security:
* - session: []
* tags: ["llm"]
*/
async function getEmbeddingStats(req: Request, res: Response) {
const stats = await vectorStore.getEmbeddingStats();
@@ -211,7 +501,40 @@ async function getEmbeddingStats(req: Request, res: Response) {
}
/**
* Get list of failed embedding notes
* @swagger
* /api/embeddings/failed:
* get:
* summary: Get list of notes that failed embedding generation
* operationId: embeddings-failed-notes
* parameters:
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 100
* description: Maximum number of failed notes to return
* responses:
* '200':
* description: List of failed embedding notes
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* failedNotes:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* error:
* type: string
* attempts:
* type: integer
* lastAttempt:
* type: string
* format: date-time
* security:
* - session: []
* tags: ["llm"]
*/
async function getFailedNotes(req: Request, res: Response) {
const limit = parseInt(req.query.limit as string || '100', 10);
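A sketch that fetches the most recent failures, using the limit query parameter the handler reads (default 100):

interface FailedNote {
  noteId: string;
  title: string;
  error: string;
  attempts: number;
  lastAttempt: string; // ISO date-time
}

async function getFailedEmbeddingNotes(limit = 100): Promise<FailedNote[]> {
  const res = await fetch(`http://localhost:8080/api/embeddings/failed?limit=${limit}`);
  const body = (await res.json()) as { success: boolean; failedNotes: FailedNote[] };
  return body.failedNotes;
}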
@@ -225,7 +548,35 @@ async function getFailedNotes(req: Request, res: Response) {
}
/**
* Retry a specific failed note embedding
* @swagger
* /api/embeddings/retry/{noteId}:
* post:
* summary: Retry embedding generation for a failed note
* operationId: embeddings-retry-note
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* description: ID of the note to retry embedding generation for
* responses:
* '200':
* description: Retry operation result
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* '404':
* description: Note not found or not in failed state
* security:
* - session: []
* tags: ["llm"]
*/
async function retryFailedNote(req: Request, res: Response) {
const { noteId } = req.params;
@@ -253,7 +604,29 @@ async function retryFailedNote(req: Request, res: Response) {
}
/**
* Retry all failed note embeddings
* @swagger
* /api/embeddings/retry-all-failed:
* post:
* summary: Retry embedding generation for all failed notes
* operationId: embeddings-retry-all-failed
* responses:
* '200':
* description: Retry operation started
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* count:
* type: integer
* description: Number of notes queued for retry
* security:
* - session: []
* tags: ["llm"]
*/
async function retryAllFailedNotes(req: Request, res: Response) {
const count = await vectorStore.retryAllFailedEmbeddings();
@@ -265,7 +638,42 @@ async function retryAllFailedNotes(req: Request, res: Response) {
}
/**
* Manually trigger a rebuild of the search index
* @swagger
* /api/embeddings/rebuild-index:
* post:
* summary: Rebuild the embedding vector index
* operationId: embeddings-rebuild-index
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* provider:
* type: string
* description: Specific provider to rebuild index for
* force:
* type: boolean
* description: Force rebuild even if not necessary
* responses:
* '200':
* description: Index rebuild operation started
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* jobId:
* type: string
* description: ID of the rebuild job for status tracking
* security:
* - session: []
* tags: ["llm"]
*/
async function rebuildIndex(req: Request, res: Response) {
// Start the index rebuilding operation in the background
@@ -286,7 +694,50 @@ async function rebuildIndex(req: Request, res: Response) {
}
/**
* Get the current index rebuild status
* @swagger
* /api/embeddings/index-rebuild-status:
* get:
* summary: Get status of the vector index rebuild operation
* operationId: embeddings-rebuild-status
* parameters:
* - name: jobId
* in: query
* required: false
* schema:
* type: string
* description: Optional job ID to get status for a specific rebuild job
* responses:
* '200':
* description: Rebuild status information
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* status:
* type: string
* enum: [idle, in_progress, completed, failed]
* progress:
* type: number
* format: float
* description: Progress percentage (0-100)
* message:
* type: string
* details:
* type: object
* properties:
* startTime:
* type: string
* format: date-time
* processed:
* type: integer
* total:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function getIndexRebuildStatus(req: Request, res: Response) {
const status = indexService.getIndexRebuildStatus();
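Since the rebuild runs in the background and hands back a jobId, a client pairs the two routes above: trigger the rebuild, then poll this status endpoint until it leaves in_progress. A sketch with an arbitrary two-second poll interval:

async function rebuildIndexAndWait(): Promise<void> {
  const kick = await fetch("http://localhost:8080/api/embeddings/rebuild-index", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ force: false }),
  });
  const { jobId } = (await kick.json()) as { success: boolean; message: string; jobId: string };

  for (;;) {
    const res = await fetch(
      `http://localhost:8080/api/embeddings/index-rebuild-status?jobId=${encodeURIComponent(jobId)}`
    );
    const body = (await res.json()) as {
      status: "idle" | "in_progress" | "completed" | "failed";
      progress: number;
    };
    if (body.status !== "in_progress") {
      if (body.status === "failed") throw new Error("index rebuild failed");
      return;
    }
    await new Promise((resolve) => setTimeout(resolve, 2000)); // poll every 2s
  }
}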

View File

@@ -203,7 +203,57 @@ function safelyUseAIManager(): boolean {
}
/**
* Create a new LLM chat session
* @swagger
* /api/llm/sessions:
* post:
* summary: Create a new LLM chat session
* operationId: llm-create-session
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* title:
* type: string
* description: Title for the chat session
* systemPrompt:
* type: string
* description: System message to set the behavior of the assistant
* temperature:
* type: number
* description: Temperature parameter for the LLM (0.0-1.0)
* maxTokens:
* type: integer
* description: Maximum tokens to generate in responses
* model:
* type: string
* description: Specific model to use (depends on provider)
* provider:
* type: string
* description: LLM provider to use (e.g., 'openai', 'anthropic', 'ollama')
* contextNoteId:
* type: string
* description: Note ID to use as context for the session
* responses:
* '200':
* description: Successfully created session
* content:
* application/json:
* schema:
* type: object
* properties:
* sessionId:
* type: string
* title:
* type: string
* createdAt:
* type: string
* format: date-time
* security:
* - session: []
* tags: ["llm"]
*/
async function createSession(req: Request, res: Response) {
try {
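Every request-body field is an optional knob for the new session. A sketch that pins provider, model, and temperature (all values illustrative; the model id is whatever the chosen provider accepts):

interface SessionInfo { sessionId: string; title: string; createdAt: string; }

async function createChatSession(): Promise<SessionInfo> {
  const res = await fetch("http://localhost:8080/api/llm/sessions", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      title: "Research helper",
      provider: "anthropic",
      model: "claude-3-5-sonnet", // illustrative model id
      temperature: 0.4,
      systemPrompt: "Answer using my notes where possible.",
    }),
  });
  return (await res.json()) as SessionInfo;
}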
@@ -254,7 +304,53 @@ async function createSession(req: Request, res: Response) {
}
/**
* Get session details
* @swagger
* /api/llm/sessions/{sessionId}:
* get:
* summary: Retrieve a specific chat session by ID
* operationId: llm-get-session
* parameters:
* - name: sessionId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Chat session details
* content:
* application/json:
* schema:
* type: object
* properties:
* id:
* type: string
* title:
* type: string
* messages:
* type: array
* items:
* type: object
* properties:
* role:
* type: string
* enum: [user, assistant, system]
* content:
* type: string
* timestamp:
* type: string
* format: date-time
* createdAt:
* type: string
* format: date-time
* lastActive:
* type: string
* format: date-time
* '404':
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function getSession(req: Request, res: Response) {
try {
@@ -282,7 +378,65 @@ async function getSession(req: Request, res: Response) {
}
/**
* Update session properties
* @swagger
* /api/llm/sessions/{sessionId}:
* put:
* summary: Update a chat session's settings
* operationId: llm-update-session
* parameters:
* - name: sessionId
* in: path
* required: true
* schema:
* type: string
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* title:
* type: string
* description: Updated title for the session
* systemPrompt:
* type: string
* description: Updated system prompt
* temperature:
* type: number
* description: Updated temperature setting
* maxTokens:
* type: integer
* description: Updated maximum tokens setting
* model:
* type: string
* description: Updated model selection
* provider:
* type: string
* description: Updated provider selection
* contextNoteId:
* type: string
* description: Updated note ID for context
* responses:
* '200':
* description: Session successfully updated
* content:
* application/json:
* schema:
* type: object
* properties:
* id:
* type: string
* title:
* type: string
* updatedAt:
* type: string
* format: date-time
* '404':
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function updateSession(req: Request, res: Response) {
try {
@@ -336,7 +490,36 @@ async function updateSession(req: Request, res: Response) {
}
/**
* List active sessions
* @swagger
* /api/llm/sessions:
* get:
* summary: List all chat sessions
* operationId: llm-list-sessions
* responses:
* '200':
* description: List of chat sessions
* content:
* application/json:
* schema:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* title:
* type: string
* createdAt:
* type: string
* format: date-time
* lastActive:
* type: string
* format: date-time
* messageCount:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function listSessions(req: Request, res: Response) {
try {
@@ -361,7 +544,25 @@ async function listSessions(req: Request, res: Response) {
}
/**
* Delete a session
* @swagger
* /api/llm/sessions/{sessionId}:
* delete:
* summary: Delete a chat session
* operationId: llm-delete-session
* parameters:
* - name: sessionId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Session successfully deleted
* '404':
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function deleteSession(req: Request, res: Response) {
try {
@@ -537,7 +738,75 @@ function buildContextFromNotes(sources: NoteSource[], query: string): string {
}
/**
* Send a message to the AI
* @swagger
* /api/llm/sessions/{sessionId}/messages:
* post:
* summary: Send a message to an LLM and get a response
* operationId: llm-send-message
* parameters:
* - name: sessionId
* in: path
* required: true
* schema:
* type: string
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* message:
* type: string
* description: The user message to send to the LLM
* options:
* type: object
* description: Optional parameters for this specific message
* properties:
* temperature:
* type: number
* maxTokens:
* type: integer
* model:
* type: string
* provider:
* type: string
* includeContext:
* type: boolean
* description: Whether to include relevant notes as context
* useNoteContext:
* type: boolean
* description: Whether to use the session's context note
* responses:
* '200':
* description: LLM response
* content:
* application/json:
* schema:
* type: object
* properties:
* response:
* type: string
* sources:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* sessionId:
* type: string
* '404':
* description: Session not found
* '500':
* description: Error processing request
* security:
* - session: []
* tags: ["llm"]
*/
async function sendMessage(req: Request, res: Response) {
try {
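A chat turn is one POST per message, and the reply carries the assistant text plus any note sources used as context. A sketch, assuming a session created as above:

interface ChatReply {
  response: string;
  sources?: { noteId: string; title: string; similarity: number }[];
  sessionId: string;
}

async function ask(sessionId: string, message: string): Promise<ChatReply> {
  const res = await fetch(
    `http://localhost:8080/api/llm/sessions/${encodeURIComponent(sessionId)}/messages`,
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        message,
        options: { includeContext: true, temperature: 0.4 },
      }),
    }
  );
  if (res.status === 404) throw new Error("session not found");
  if (!res.ok) throw new Error(`chat request failed: ${res.status}`);
  return (await res.json()) as ChatReply;
}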
@@ -949,7 +1218,31 @@ async function sendMessage(req: Request, res: Response) {
}
/**
* Get statistics about the knowledge base indexing
* @swagger
* /api/llm/index/stats:
* get:
* summary: Get statistics about the vector index
* operationId: llm-index-stats
* responses:
* '200':
* description: Vector index statistics
* content:
* application/json:
* schema:
* type: object
* properties:
* totalEmbeddings:
* type: integer
* totalIndexedNotes:
* type: integer
* lastIndexed:
* type: string
* format: date-time
* embeddingProvider:
* type: string
* security:
* - session: []
* tags: ["llm"]
*/
async function getIndexStats(req: Request, res: Response) {
try {
@@ -966,7 +1259,39 @@ async function getIndexStats(req: Request, res: Response) {
}
/**
* Start or update knowledge base indexing
* @swagger
* /api/llm/index/start:
* post:
* summary: Start or restart the indexing process
* operationId: llm-start-indexing
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* forceReindex:
* type: boolean
* description: Whether to force reindexing of all notes
* branchId:
* type: string
* description: Optional branch ID to limit indexing scope
* responses:
* '200':
* description: Indexing process started
* content:
* application/json:
* schema:
* type: object
* properties:
* message:
* type: string
* notesToIndex:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function startIndexing(req: Request, res: Response) {
try {
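A sketch that forces a full reindex and reports how many notes were queued:

async function startFullReindex(): Promise<number> {
  const res = await fetch("http://localhost:8080/api/llm/index/start", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ forceReindex: true }),
  });
  const body = (await res.json()) as { message: string; notesToIndex: number };
  return body.notesToIndex;
}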
@@ -999,7 +1324,33 @@ async function startIndexing(req: Request, res: Response) {
}
/**
* Get failed indexing attempts
* @swagger
* /api/llm/index/failed:
* get:
* summary: Get list of notes that failed to be indexed
* operationId: llm-failed-indexes
* responses:
* '200':
* description: List of failed note indexes
* content:
* application/json:
* schema:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* error:
* type: string
* timestamp:
* type: string
* format: date-time
* security:
* - session: []
* tags: ["llm"]
*/
async function getFailedIndexes(req: Request, res: Response) {
try {
@@ -1021,7 +1372,34 @@ async function getFailedIndexes(req: Request, res: Response) {
}
/**
* Retry failed indexing operation
* @swagger
* /api/llm/index/failed/{noteId}/retry:
* post:
* summary: Retry indexing a specific failed note
* operationId: llm-retry-failed-index
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Retry process started
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* '404':
* description: Failed note not found
* security:
* - session: []
* tags: ["llm"]
*/
async function retryFailedIndex(req: Request, res: Response) {
try {
@@ -1047,7 +1425,28 @@ async function retryFailedIndex(req: Request, res: Response) {
}
/**
* Retry all failed indexing operations
* @swagger
* /api/llm/index/failed/retry-all:
* post:
* summary: Retry indexing all failed notes
* operationId: llm-retry-all-failed
* responses:
* '200':
* description: Retry process started for all failed notes
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* count:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function retryAllFailedIndexes(req: Request, res: Response) {
try {
@@ -1069,7 +1468,48 @@ async function retryAllFailedIndexes(req: Request, res: Response) {
}
/**
* Find similar notes based on query
* @swagger
* /api/llm/similar:
* post:
* summary: Find notes similar to the provided content
* operationId: llm-find-similar
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* content:
* type: string
* description: Content to find similar notes for
* limit:
* type: integer
* description: Maximum number of results to return
* threshold:
* type: number
* description: Similarity threshold (0.0-1.0)
* responses:
* '200':
* description: List of similar notes
* content:
* application/json:
* schema:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* branchId:
* type: string
* security:
* - session: []
* tags: ["llm"]
*/
async function findSimilarNotes(req: Request, res: Response) {
try {
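Note that this route returns a bare array rather than the { success, similarNotes } envelope used by the embeddings routes. A sketch:

interface SimilarResult { noteId: string; title: string; similarity: number; branchId?: string; }

async function findSimilarByContent(content: string): Promise<SimilarResult[]> {
  const res = await fetch("http://localhost:8080/api/llm/similar", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ content, limit: 5, threshold: 0.7 }),
  });
  return (await res.json()) as SimilarResult[];
}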
@@ -1100,7 +1540,51 @@ async function findSimilarNotes(req: Request, res: Response) {
}
/**
* Generate context for an LLM query
* @swagger
* /api/llm/generate-context:
* post:
* summary: Generate context from similar notes for a query
* operationId: llm-generate-context
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* query:
* type: string
* description: Query to generate context for
* limit:
* type: integer
* description: Maximum number of notes to include
* contextNoteId:
* type: string
* description: Optional note ID to provide additional context
* responses:
* '200':
* description: Generated context and sources
* content:
* application/json:
* schema:
* type: object
* properties:
* context:
* type: string
* sources:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* security:
* - session: []
* tags: ["llm"]
*/
async function generateQueryContext(req: Request, res: Response) {
try {
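A sketch of fetching prompt context for a query; the returned context string is what a caller would prepend to an LLM prompt, with sources available for citation:

interface GeneratedContext {
  context: string;
  sources: { noteId: string; title: string; similarity: number }[];
}

async function contextFor(query: string): Promise<GeneratedContext> {
  const res = await fetch("http://localhost:8080/api/llm/generate-context", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query, limit: 5 }),
  });
  return (await res.json()) as GeneratedContext;
}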
@@ -1131,7 +1615,44 @@ async function generateQueryContext(req: Request, res: Response) {
}
/**
* Index a specific note
* @swagger
* /api/llm/index/note/{noteId}:
* post:
* summary: Index or reindex a specific note
* operationId: llm-index-note
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* force:
* type: boolean
* description: Whether to force reindexing even if already indexed
* responses:
* '200':
* description: Note indexing result
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* '404':
* description: Note not found
* security:
* - session: []
* tags: ["llm"]
*/
async function indexNote(req: Request, res: Response) {
try {
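A sketch that forces a reindex of one note:

async function reindexNote(noteId: string): Promise<string> {
  const res = await fetch(
    `http://localhost:8080/api/llm/index/note/${encodeURIComponent(noteId)}`,
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ force: true }), // reindex even if already indexed
    }
  );
  if (res.status === 404) throw new Error(`no such note: ${noteId}`);
  const body = (await res.json()) as { success: boolean; message: string };
  return body.message;
}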

View File

@@ -4,7 +4,40 @@ import log from "../../services/log.js";
import type { Request, Response } from "express";
/**
* List available models from Ollama
* @swagger
* /api/ollama/models:
* post:
* summary: List available models from Ollama
* operationId: ollama-list-models
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* baseUrl:
* type: string
* description: Optional custom Ollama API base URL
* responses:
* '200':
* description: List of available Ollama models
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* models:
* type: array
* items:
* type: object
* '500':
* description: Error listing models
* security:
* - session: []
* tags: ["llm"]
*/
async function listModels(req: Request, res: Response) {
try {

View File

@@ -4,7 +4,58 @@ import log from "../../services/log.js";
import type { Request, Response } from "express";
/**
* List available models from OpenAI
* @swagger
* /api/openai/models:
* post:
* summary: List available models from OpenAI
* operationId: openai-list-models
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* baseUrl:
* type: string
* description: Optional custom OpenAI API base URL
* responses:
* '200':
* description: List of available OpenAI models
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* chatModels:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* type:
* type: string
* embeddingModels:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* type:
* type: string
* '500':
* description: Error listing models
* security:
* - session: []
* tags: ["llm"]
*/
async function listModels(req: Request, res: Response) {
try {