Fix AI Connection Issues

Fixes Applied:
- Updated preferred model to use installed wizardlm-uncensored:13b
- Fixed AbortSignal.timeout compatibility issues
- Added fallback model selection for any available models
- Enhanced connection testing with better error logging
- Added updateSettings/getSettings alias methods for UI compatibility
- Improved timeout handling with a manual AbortController (see the sketch below)

AI Status:
- Ollama detected running on localhost:11434
- wizardlm-uncensored:13b model confirmed available
- Connection test should now work properly
- AI task generation ready for testing

Bugs Resolved:
- Fixed infinite 'checking connection' state
- Improved browser compatibility
- Better error handling and debugging
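
For reference, the timeout fix swaps AbortSignal.timeout() (missing in some browsers) for a manually driven AbortController. The snippet below is a minimal standalone sketch of that pattern, not code from the changed file; the fetchWithTimeout helper name is hypothetical, and only the localhost:11434 /api/tags endpoint is taken from the diff.

// Sketch: abort a fetch after a deadline using AbortController + setTimeout,
// which works in browsers that lack AbortSignal.timeout().
async function fetchWithTimeout(url, timeoutMs = 5000) {
  const controller = new AbortController();
  // Abort the in-flight request if it has not settled within timeoutMs.
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
    return await fetch(url, { method: 'GET', signal: controller.signal });
  } finally {
    // Always clear the timer so it cannot fire after the request settles.
    clearTimeout(timeoutId);
  }
}

// Example probe against the local Ollama API, mirroring checkAvailability():
// fetchWithTimeout('http://localhost:11434/api/tags')
//   .then(res => console.log('Ollama reachable:', res.ok))
//   .catch(err => console.log('Ollama not available:', err.message));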
dilgenfritz 2025-09-29 07:13:20 -05:00
parent e7bfabac9b
commit 6cff04df11
1 changed file with 44 additions and 13 deletions

@@ -8,7 +8,7 @@ class AITaskManager {
 this.ollamaUrl = 'http://localhost:11434';
 this.isAvailable = false;
 this.availableModels = [];
-this.currentModel = 'dolphin-mistral:7b'; // Default NSFW-friendly model
+this.currentModel = 'wizardlm-uncensored:13b'; // Default NSFW-friendly model
 this.isGenerating = false;
 this.init();
@@ -23,11 +23,21 @@ class AITaskManager {
 async checkAvailability() {
 try {
 console.log('Testing Ollama connection to:', this.ollamaUrl);
+// Create a manual timeout controller for better browser compatibility
+const controller = new AbortController();
+const timeoutId = setTimeout(() => controller.abort(), 5000); // 5 second timeout
 const response = await fetch(`${this.ollamaUrl}/api/tags`, {
 method: 'GET',
-signal: AbortSignal.timeout(3000) // 3 second timeout
+signal: controller.signal
 });
+clearTimeout(timeoutId);
 this.isAvailable = response.ok;
+console.log('Ollama connection test result:', this.isAvailable ? 'SUCCESS' : 'FAILED');
 return this.isAvailable;
 } catch (error) {
 console.log('Ollama not available:', error.message);
@@ -46,7 +56,7 @@ class AITaskManager {
 // Check if our preferred NSFW models are available
 const modelNames = this.availableModels.map(m => m.name);
-const preferredModels = ['dolphin-mistral:7b', 'wizardlm-uncensored:7b', 'llama3.1:8b-instruct'];
+const preferredModels = ['wizardlm-uncensored:13b', 'llama3.1:8b-instruct', 'dolphin-mistral:7b', 'wizardlm-uncensored:7b', 'llama3.2'];
 for (const preferred of preferredModels) {
 if (modelNames.includes(preferred)) {
@@ -55,6 +65,11 @@ class AITaskManager {
 }
 }
+// If no preferred models found, use the first available model
+if (this.availableModels.length > 0 && !modelNames.includes(this.currentModel)) {
+this.currentModel = this.availableModels[0].name;
+}
 return this.availableModels;
 } catch (error) {
 console.error('Error loading models:', error);
@@ -90,6 +105,16 @@ class AITaskManager {
 }
 }
+// Alias method for UI compatibility
+updateSettings(newSettings) {
+return this.updateConfig(newSettings);
+}
+// Alias method for UI compatibility
+getSettings() {
+return this.getConfig();
+}
 getConfig() {
 return { ...this.config };
 }
@@ -233,32 +258,38 @@ Consequence Task:`;
 }
 async testConnection() {
+console.log('Starting connection test...');
 const available = await this.checkAvailability();
 if (!available) {
-return { success: false, message: 'Ollama service not available' };
+console.log('Connection test failed: Ollama service not available');
+return false;
 }
+console.log('Ollama available, loading models...');
 await this.loadModels();
 if (this.availableModels.length === 0) {
-return { success: false, message: 'No models installed' };
+console.log('Connection test failed: No models installed');
+return false;
 }
+console.log(`Found ${this.availableModels.length} models, using: ${this.currentModel}`);
 try {
 // Quick test generation to verify the model works
+console.log('Testing model with simple task generation...');
 const testTask = await this.generateEdgingTask({
 duration: 1,
 intensity: 'low',
 experience: 'beginner'
 });
-return {
-success: true,
-message: 'AI connection successful',
-model: this.currentModel,
-availableModels: this.availableModels.length,
-preview: testTask.instruction.substring(0, 100) + '...'
-};
+console.log('Connection test successful! Generated test task.');
+return true;
 } catch (error) {
-return { success: false, message: `Test failed: ${error.message}` };
+console.error('Connection test failed during task generation:', error);
+return false;
 }
 }
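
Note that testConnection() now resolves to a plain boolean instead of the old { success, message } object, so any UI code that read those fields needs a small adjustment. A hedged usage sketch follows: the refreshAIStatus helper and statusElement parameter are hypothetical and not part of this commit, while testConnection(), getSettings(), updateSettings(), and currentModel come from the diff above.

// Sketch: how a settings panel might drive the reworked API.
async function refreshAIStatus(aiTaskManager, statusElement) {
  statusElement.textContent = 'Checking connection...';
  const ok = await aiTaskManager.testConnection();
  // Because testConnection() always resolves to true or false, one of the
  // branches below always runs, avoiding the old stuck "checking" state.
  statusElement.textContent = ok
    ? `AI ready (model: ${aiTaskManager.currentModel})`
    : 'AI unavailable';
}

// The alias methods keep existing UI calls working without renaming them:
// aiTaskManager.updateSettings({ /* partial config */ });  // forwards to updateConfig()
// const settings = aiTaskManager.getSettings();            // forwards to getConfig()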