well this works for tool calling the "readNote" func

perf3ct
2025-04-08 22:08:52 +00:00
parent 683d1a5481
commit 7373249dee
2 changed files with 25 additions and 22 deletions


@@ -91,25 +91,14 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
                         if (model) {
                             defaultModel = `ollama:${model}`;
-                            // Special configuration for Ollama
-                            // Since Ollama models have different requirements for tool calling,
-                            // configure based on the model being used
-                            const modelLower = model.toLowerCase();
-                            if (modelLower.includes('llama3') ||
-                                modelLower.includes('mistral') ||
-                                modelLower.includes('dolphin') ||
-                                modelLower.includes('neural') ||
-                                modelLower.includes('mist') ||
-                                modelLower.includes('wizard')) {
-                                // These models are known to support tool calling
-                                log.info(`Using Ollama model ${model} with tool calling support`);
+                            // Enable tools for all Ollama models
+                            // The Ollama API will handle models that don't support tool calling
+                            log.info(`Using Ollama model ${model} with tool calling enabled`);
                             updatedOptions.enableTools = true;
-                            }
                         }
                     }
                 }
             }
         }
     } catch (error) {
         // If any error occurs, use the fallback default
         log.error(`Error determining default model: ${error}`);
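
For context, enabling tools for an Ollama model ultimately means sending the chat request with an OpenAI-style tools array that the model may or may not act on. The sketch below is illustrative only: the read_note schema, model name, and endpoint URL are assumptions for the example, not the project's actual readNote tool or configuration.

// Hedged sketch only: the read_note schema below is hypothetical, not the
// project's real readNote tool definition.
async function sketchToolCall(): Promise<void> {
    const requestBody = {
        model: "llama3.1",   // assumed model name for illustration
        stream: false,
        messages: [{ role: "user", content: "Read the note titled 'Meeting notes'" }],
        tools: [{
            type: "function",
            function: {
                name: "read_note",
                description: "Read the content of a note by its identifier",
                parameters: {
                    type: "object",
                    properties: {
                        noteId: { type: "string", description: "Identifier of the note to read" }
                    },
                    required: ["noteId"]
                }
            }
        }]
    };

    const response = await fetch("http://localhost:11434/api/chat", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(requestBody)
    });
    const data = await response.json();
    // A model that supports tool calling typically returns message.tool_calls here;
    // others just reply with plain text content.
    console.log(data.message?.tool_calls ?? data.message?.content);
}

sketchToolCall().catch(console.error);

This is also why the model-name whitelist could be dropped: whether a model honors the tools field is decided on the Ollama side, as the new comment in the hunk notes.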


@@ -200,9 +200,23 @@ export class OllamaService extends BaseAIService {
             });
         }
-        // Log full request body (this will create large logs but is helpful for debugging)
+        // Log full request body (with improved logging for debug purposes)
         const requestStr = JSON.stringify(requestBody);
-        log.info(`Full Ollama request (truncated): ${requestStr.substring(0, 1000)}...`);
+        log.info(`========== FULL OLLAMA REQUEST ==========`);
+        // Log request in manageable chunks
+        const maxChunkSize = 4000;
+        if (requestStr.length > maxChunkSize) {
+            let i = 0;
+            while (i < requestStr.length) {
+                const chunk = requestStr.substring(i, i + maxChunkSize);
+                log.info(`Request part ${Math.floor(i/maxChunkSize) + 1}/${Math.ceil(requestStr.length/maxChunkSize)}: ${chunk}`);
+                i += maxChunkSize;
+            }
+        } else {
+            log.info(`Full request: ${requestStr}`);
+        }
+        log.info(`========== END FULL OLLAMA REQUEST ==========`);
+        log.info(`========== END OLLAMA REQUEST ==========`);
         // Make API request
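
The chunking logic added above can also be read as a small reusable helper. The sketch below restates the same splitting arithmetic under an assumed name (logInChunks) and logger shape; it is not code from the repository.

// Sketch of the same chunking idea as a helper. The name logInChunks and the
// logger shape are assumptions, not part of the repository.
function logInChunks(
    log: { info: (msg: string) => void },
    label: string,
    text: string,
    maxChunkSize: number = 4000
): void {
    if (text.length <= maxChunkSize) {
        // Short payloads fit in a single log line
        log.info(`${label}: ${text}`);
        return;
    }
    const totalParts = Math.ceil(text.length / maxChunkSize);
    for (let i = 0; i < text.length; i += maxChunkSize) {
        const part = Math.floor(i / maxChunkSize) + 1;
        log.info(`${label} part ${part}/${totalParts}: ${text.substring(i, i + maxChunkSize)}`);
    }
}

// Example usage with a console-backed logger and a dummy request body:
logInChunks({ info: (msg) => console.log(msg) }, "Full Ollama request", JSON.stringify({ model: "llama3.1", messages: [] }));

Chunking at a fixed size keeps individual log lines at a manageable length, and the part X/Y prefix makes the full request easy to reassemble when debugging.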