/**
 * AI Task Manager - Ollama Integration for Gooner Training Academy
 * Generates NSFW edging tasks using local AI models.
 */
class AITaskManager {
  /**
   * @param {object} dataManager - Persistence layer exposing get(key) / set(key, value).
   */
  constructor(dataManager) {
    this.dataManager = dataManager;
    this.ollamaUrl = 'http://localhost:11434';
    this.isAvailable = false;
    this.availableModels = [];
    this.currentModel = 'wizardlm-uncensored:13b'; // Default NSFW-friendly model
    this.isGenerating = false;
    // Fire-and-forget: init() catches all of its own errors internally.
    this.init();
  }

  /**
   * Probes the Ollama service, loads the installed models, and restores any
   * saved configuration. Never throws; connectivity failures simply leave
   * `isAvailable` false. (The previous duplicate hard-coded-URL probe here
   * ignored `this.ollamaUrl` and repeated what checkAvailability() already does.)
   */
  async init() {
    console.log('AITaskManager initializing...');
    await this.checkAvailability();
    await this.loadModels();
    this.loadConfig();
    console.log('AITaskManager initialized:', this.isAvailable ? 'Ollama available' : 'Ollama not available');
  }

  /**
   * Checks whether the Ollama HTTP API is reachable (5 second timeout).
   * @returns {Promise<boolean>} true when GET /api/tags answered OK.
   */
  async checkAvailability() {
    // Manual timeout via AbortController for better browser compatibility.
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), 5000);
    try {
      console.log('Testing Ollama connection to:', this.ollamaUrl);
      const response = await fetch(`${this.ollamaUrl}/api/tags`, {
        method: 'GET',
        signal: controller.signal,
        mode: 'cors',
        headers: {
          'Accept': 'application/json',
          'Content-Type': 'application/json'
        }
      });
      console.log('Response status:', response.status);
      console.log('Response ok:', response.ok);
      this.isAvailable = response.ok;
      if (response.ok) {
        const data = await response.json();
        console.log('Available models from API:', data.models?.map(m => m.name));
      }
      console.log('Ollama connection test result:', this.isAvailable ? 'SUCCESS' : 'FAILED');
      return this.isAvailable;
    } catch (error) {
      console.error('Ollama connection error details:', error);
      console.log('Error name:', error.name);
      console.log('Error message:', error.message);
      this.isAvailable = false;
      return false;
    } finally {
      // Always clear the timer; it previously leaked on the error path,
      // keeping the abort timer alive for the full 5 seconds.
      clearTimeout(timeoutId);
    }
  }

  /**
   * Fetches the list of installed models and selects the best match:
   * the highest-priority preferred model that is installed, otherwise
   * the first installed model.
   * @returns {Promise<Array>} installed model descriptors ([] on failure).
   */
  async loadModels() {
    if (!this.isAvailable) {
      console.log('Skipping model loading - Ollama not available');
      return [];
    }
    try {
      console.log('Loading models from Ollama...');
      const response = await fetch(`${this.ollamaUrl}/api/tags`, {
        method: 'GET',
        mode: 'cors',
        headers: {
          'Accept': 'application/json',
          'Content-Type': 'application/json'
        }
      });
      if (!response.ok) {
        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
      }
      const data = await response.json();
      this.availableModels = data.models || [];
      console.log('Loaded models:', this.availableModels.map(m => m.name));

      // Check if our preferred NSFW models are available (priority order).
      const modelNames = this.availableModels.map(m => m.name);
      const preferredModels = ['wizardlm-uncensored:13b', 'llama3.1:8b-instruct', 'dolphin-mistral:7b', 'wizardlm-uncensored:7b', 'llama3.2'];
      console.log('Checking preferred models:', preferredModels);
      console.log('Available model names:', modelNames);

      const preferred = preferredModels.find(name => modelNames.includes(name));
      if (preferred) {
        console.log('Found preferred model:', preferred);
        this.currentModel = preferred;
      } else if (this.availableModels.length > 0 && !modelNames.includes(this.currentModel)) {
        // No preferred model installed: fall back to the first available one.
        this.currentModel = this.availableModels[0].name;
        console.log('Using first available model:', this.currentModel);
      }
      console.log('Selected model:', this.currentModel);
      return this.availableModels;
    } catch (error) {
      console.error('Error loading models:', error);
      this.availableModels = [];
      return [];
    }
  }

  /**
   * Restores configuration from persistence, layering saved values over
   * defaults. `userPreferences` is deep-merged so that a partially-saved
   * preferences object no longer wipes out the remaining defaults.
   */
  loadConfig() {
    const savedConfig = this.dataManager.get('aiTaskConfig') ?? {};
    const defaultPreferences = {
      experience: 'intermediate', // beginner, intermediate, advanced
      intensity: 'medium', // low, medium, high, extreme
      duration: 5, // minutes
      style: 'instructional', // instructional, descriptive, commanding
      kinks: [], // user-selected interests
      limits: [] // user-defined hard limits
    };
    this.config = {
      enabled: false,
      model: this.currentModel,
      temperature: 0.8,
      maxTokens: 300,
      ...savedConfig,
      userPreferences: { ...defaultPreferences, ...savedConfig.userPreferences }
    };
  }

  /**
   * Merges new settings into the config (deep-merging userPreferences),
   * persists the result, and tracks any model change.
   * @param {object} newConfig - Partial config overrides.
   */
  updateConfig(newConfig) {
    this.config = {
      ...this.config,
      ...newConfig,
      userPreferences: { ...this.config.userPreferences, ...newConfig.userPreferences }
    };
    this.dataManager.set('aiTaskConfig', this.config);
    if (newConfig.model) {
      this.currentModel = newConfig.model;
    }
  }

  // Alias method for UI compatibility
  updateSettings(newSettings) {
    return this.updateConfig(newSettings);
  }

  // Alias method for UI compatibility
  getSettings() {
    return this.getConfig();
  }

  /** @returns {object} a shallow copy of the current configuration. */
  getConfig() {
    return { ...this.config };
  }

  /**
   * Generates an edging task via the Ollama /api/generate endpoint.
   * @param {object} [customPrefs] - Per-call overrides for user preferences.
   * @returns {Promise<object>} task object compatible with the game system.
   * @throws {Error} when Ollama is unavailable, a generation is already in
   *   flight, the API errors, or the generated text is too short.
   */
  async generateEdgingTask(customPrefs = {}) {
    if (!this.isAvailable) {
      throw new Error('AI not available. Please ensure Ollama is running and models are installed.');
    }
    if (this.isGenerating) {
      throw new Error('Already generating a task. Please wait...');
    }
    this.isGenerating = true;
    try {
      const prefs = { ...this.config.userPreferences, ...customPrefs };
      const prompt = this.buildEdgingPrompt(prefs);
      console.log('Generating AI task with model:', this.currentModel);
      const response = await fetch(`${this.ollamaUrl}/api/generate`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          model: this.currentModel,
          prompt: prompt,
          stream: false,
          options: {
            temperature: this.config.temperature,
            num_predict: this.config.maxTokens,
            stop: ['User:', 'Human:', 'Assistant:']
          }
        })
      });
      if (!response.ok) {
        throw new Error(`Ollama API error: ${response.status}`);
      }
      const data = await response.json();
      // Guard: a missing `response` field previously caused a TypeError here.
      const taskText = (data.response ?? '').trim();
      if (!taskText || taskText.length < 20) {
        throw new Error('Generated task was too short or empty');
      }
      // Create task object compatible with existing game system.
      const aiTask = {
        id: `ai-task-${Date.now()}`,
        type: 'ai-generated',
        category: 'edging',
        instruction: taskText,
        duration: prefs.duration * 60000, // Convert to milliseconds
        difficulty: prefs.intensity,
        source: 'ollama',
        model: this.currentModel,
        generated: new Date().toISOString(),
        preferences: prefs
      };
      console.log('AI Task Generated:', aiTask.instruction.substring(0, 100) + '...');
      return aiTask;
    } catch (error) {
      console.error('Error generating AI task:', error);
      throw error;
    } finally {
      this.isGenerating = false;
    }
  }

  /**
   * Builds the generation prompt from the user's preferences.
   * @param {object} prefs - Resolved preferences (duration, intensity, etc.).
   * @returns {string} the complete prompt text.
   */
  buildEdgingPrompt(prefs) {
    const basePrompt = `You are an expert in creating edging challenges. Generate a detailed ${prefs.duration}-minute edging task with ${prefs.intensity} intensity for someone with ${prefs.experience} experience.

The task should include:
- Clear step-by-step instructions
- Specific timing and rhythm guidance
- Techniques for building arousal without climax
- Commands for start, stop, and pause moments
- Breathing and focus instructions
- Progressive intensity building

Style: ${prefs.style}
Duration: ${prefs.duration} minutes exactly
Intensity: ${prefs.intensity}
Experience Level: ${prefs.experience}

Generate only the task instructions, no introduction or explanation:

Task Instructions:`;
    return basePrompt;
  }

  /**
   * Generates a consequence task after a skipped challenge.
   * Best-effort: returns null when Ollama is unavailable or the call fails.
   * @param {object} skippedTask - The task that was skipped (reserved for future prompt context).
   * @param {string} [severity='medium'] - Severity level for the consequence.
   * @returns {Promise<object|null>} consequence task object, or null on failure.
   */
  async generateConsequenceTask(skippedTask, severity = 'medium') {
    if (!this.isAvailable) return null;
    const prompt = `Create a consequence task for someone who skipped an edging challenge. This should be a punishment task with ${severity} severity that teaches discipline and makes them regret skipping.

The consequence should:
- Be more challenging than the original task
- Include elements of denial or frustration
- Have a longer duration (at least 7-10 minutes)
- Include specific punishments or restrictions
- Make the user understand the cost of skipping

Generate only the consequence task instructions:

Consequence Task:`;
    try {
      const response = await fetch(`${this.ollamaUrl}/api/generate`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          model: this.currentModel,
          prompt: prompt,
          stream: false,
          options: {
            temperature: 0.9,
            num_predict: this.config.maxTokens
          }
        })
      });
      // The original parsed the body without checking the HTTP status.
      if (!response.ok) {
        throw new Error(`Ollama API error: ${response.status}`);
      }
      const data = await response.json();
      return {
        id: `ai-consequence-${Date.now()}`,
        type: 'ai-consequence',
        instruction: (data.response ?? '').trim(),
        duration: 600000, // 10 minutes
        difficulty: 'punishment',
        source: 'ollama',
        isConsequence: true
      };
    } catch (error) {
      console.error('Error generating consequence task:', error);
      return null;
    }
  }

  /**
   * End-to-end smoke test: availability, model listing, and one tiny
   * low-intensity generation.
   * @returns {Promise<boolean>} true when every step succeeded.
   */
  async testConnection() {
    console.log('Starting connection test...');
    const available = await this.checkAvailability();
    if (!available) {
      console.log('Connection test failed: Ollama service not available');
      return false;
    }
    console.log('Ollama available, loading models...');
    await this.loadModels();
    if (this.availableModels.length === 0) {
      console.log('Connection test failed: No models installed');
      return false;
    }
    console.log(`Found ${this.availableModels.length} models, using: ${this.currentModel}`);
    try {
      // Quick test generation to verify the model works.
      console.log('Testing model with simple task generation...');
      const testTask = await this.generateEdgingTask({ duration: 1, intensity: 'low', experience: 'beginner' });
      console.log('Connection test successful! Generated test task.');
      return true;
    } catch (error) {
      console.error('Connection test failed during task generation:', error);
      return false;
    }
  }

  /** @returns {Array} the cached list of installed model descriptors. */
  getAvailableModels() {
    return this.availableModels;
  }

  /** @returns {boolean} true when Ollama is reachable and models are installed. */
  isReady() {
    return this.isAvailable && this.availableModels.length > 0;
  }

  /** @returns {object} a snapshot of availability, generation state, and config. */
  getStatus() {
    return {
      available: this.isAvailable,
      generating: this.isGenerating,
      model: this.currentModel,
      modelCount: this.availableModels.length,
      config: this.config
    };
  }
}