diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000..0f00ebb --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,15 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: nigel1992 +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +polar: # Replace with a single Polar username +buy_me_a_coffee: # Replace with a single Buy Me a Coffee username +thanks_dev: # Replace with a single thanks.dev username +custom: https://www.paypal.com/donate/?hosted_button_id=KYV9ARF99ZSCE \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/README.md b/.github/ISSUE_TEMPLATE/README.md new file mode 100644 index 0000000..b9f9ab4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/README.md @@ -0,0 +1,163 @@ +# GitHub Issues Template Guide + +This project uses GitHub issue templates to help developers and users report issues effectively for a Thunderbird extension with Ollama integration. + +## Available Templates + +### 🐛 [bug_ollama.md](bug_ollama.md) +**For:** Bugs related to Ollama integration, AI analysis failures, or general issues + +**Includes:** +- Thunderbird version & OS details +- Ollama setup info (version, model, GPU/CPU) +- Console output collection +- Automated debugging checklist +- Manual testing steps (curl commands) +- Tab injection investigation notes + +**Best for:** +- "Analysis is failing with error X" +- "403 Forbidden errors" +- "Model not responding" +- "Extension crashes" + +--- + +### ✨ [feature_ollama.md](feature_ollama.md) +**For:** Feature requests for Ollama, AI providers, or Thunderbird extension capabilities + +**Includes:** +- Motivation & use case +- Proposed solution +- Provider-specific concerns +- Performance implications +- Thunderbird version requirements + +**Best for:** +- "Add streaming support" +- "Support for new model X" +- "Batch email analysis" +- "Custom model parameters" + +--- + +### ❓ [question.md](question.md) +**For:** Setup help, usage questions, troubleshooting guidance + +**Includes:** +- Environment details +- Troubleshooting checklist +- Quick Ollama/Thunderbird tests +- Pre-submission checks + +**Best for:** +- "How do I set up Ollama?" +- "Which model should I use?" +- "Why isn't test connection working?" + +--- + +## Why These Templates? + +### For Thunderbird Extension Development: +1. **Environment tracking** - Thunderbird version compatibility is critical +2. **API context** - Know which Thunderbird APIs are involved +3. **Permission issues** - Track manifest.json changes needed + +### For Ollama Integration: +1. **Model specificity** - Different models behave differently +2. **Hardware context** - CPU vs GPU significantly affects performance +3. **API validation** - Can test Ollama directly with curl + +### For Better Bug Reports: +1. **Automated checklist** - Ensures basics are tested first +2. **Console logs** - Captures [Ollama] debug messages +3. **Reproduction steps** - Clear steps to recreate issues +4. **Debugging commands** - Ready-to-use testing + +--- + +## How to Use These Templates + +### Creating an Issue: +1. Go to **Issues** → **New issue** +2. Click **Choose a template** +3. Select the appropriate template +4. Fill in all sections (red asterisks = required) +5. Include console logs if applicable + +### Submitting a Bug Report: +```bash +# First, test these commands: +curl http://localhost:11434/api/tags # Check Ollama is running +ollama run tinyllama "test" # Test model directly +# Then open browser console (Ctrl+Shift+J) and analyze email +# Copy all [Ollama] messages and include in issue +``` + +### For Contributors: +When reviewing issues: +1. Check if all environment details are present +2. Ask for console logs if missing +3. Request reproduction steps if unclear +4. Reference Thunderbird version for API compat issues + +--- + +## Template Structure + +Each template includes: +- **Clear section headers** for organization +- **Checkboxes** for verification steps +- **Code blocks** for logs and commands +- **Context-specific questions** for the extension type +- **Debugging aids** (curl commands, env info) +- **Examples** of what to include + +--- + +## Customization + +To modify templates for your specific needs: +1. Edit `.github/ISSUE_TEMPLATE/bug_ollama.md` +2. Add/remove sections as needed +3. Update labels, assignees, or default title +4. Commit and push - changes apply immediately + +--- + +## Best Practices + +✅ **DO:** +- Include full environment details +- Run curl commands to verify Ollama +- Copy console logs with [Ollama] tags +- Test with different models if applicable +- Mention Thunderbird version + +❌ **DON'T:** +- Skip the debugging checklist +- Submit without testing curl commands +- Hide Thunderbird or Ollama version +- Include credentials or API keys +- Use screenshot instead of error text + +--- + +## Quick Reference + +| Issue Type | Template | When to Use | +|-----------|----------|------------| +| Extension crash | `bug_ollama.md` | Error messages or failed analysis | +| Setup help | `question.md` | "How do I..." or troubleshooting | +| New feature | `feature_ollama.md` | Enhancement ideas | +| General bug | `bug_ollama.md` | Unexpected behavior | + +--- + +## Support + +For questions about the templates: +1. Check existing issues +2. Review this guide +3. Ask in a new issue using `question.md` diff --git a/.github/ISSUE_TEMPLATE/bug_ollama.md b/.github/ISSUE_TEMPLATE/bug_ollama.md new file mode 100644 index 0000000..662970a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_ollama.md @@ -0,0 +1,115 @@ +--- +name: 🐛 Bug Report - Ollama Integration +about: Report a bug with AutoSort+ Ollama integration or general issues +title: '[BUG] ' +labels: 'bug' +assignees: '' + +--- + +## 🐛 Bug Description +A clear and concise description of what the bug is. + +## 📋 Steps to Reproduce +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '...' +3. Select email with '...' +4. See error + +## ❌ Expected Behavior +What you expected to happen. + +## 👀 Actual Behavior +What actually happened instead. + +## 📸 Console Output +**Browser Console Log (Ctrl+Shift+J in Thunderbird):** +``` +[Paste console output here - look for [Ollama] messages] +``` + +--- + +## 🔧 Environment Details + +### Thunderbird +- **Version:** (e.g., 115.0, 128.0) +- **OS:** (Windows / macOS / Linux) +- **OS Version:** (e.g., Ubuntu 22.04, Windows 11, macOS 14) + +### Ollama Setup +- **Ollama Version:** (run: `ollama --version`) +- **Model Used:** (e.g., tinyllama, gemma, phi, llama3.2) +- **Running on:** CPU / GPU (which GPU model?) +- **Memory Available:** (e.g., 8GB, 16GB) + +### AutoSort+ +- **Extension Version:** (e.g., 1.2.3.1-ollama-test) +- **Install Method:** XPI / Built from source + +--- + +## ✅ Debugging Checklist + +- [ ] **Ollama is running:** `curl http://localhost:11434/api/tags` returns models +- [ ] **Model installed:** `ollama list` shows your model +- [ ] **Test connection passes:** Settings → Test Connection works +- [ ] **Thunderbird restarted** after AutoSort+ install +- [ ] **Console logs checked** (Ctrl+Shift+J shows [Ollama] messages) +- [ ] **Email is plaintext** (not HTML-only) +- [ ] **Model responds locally:** `ollama run tinyllama "test"` + +--- + +## 🔍 Manual Testing Steps + +**1. Verify Ollama API works:** +```bash +curl http://localhost:11434/api/tags +``` +Should list your installed models. + +**2. Test direct API call:** +```bash +curl -X POST http://localhost:11434/api/chat \ + -H "Content-Type: application/json" \ + -d '{ + "model": "tinyllama", + "messages": [{"role": "user", "content": "What is email classification?"}], + "stream": false + }' +``` +Should return a response from the model. + +**3. Check model performance:** +```bash +ollama run tinyllama "Classify this email: [subject line here]" +``` + +**4. Enable verbose logging:** +- Ctrl+Shift+J in Thunderbird +- Analyze an email +- Copy all `[Ollama]` log entries + +--- + +## 📝 Error Message (if applicable) +``` +[Paste the full error message here] +``` + +## 🎯 Additional Context +- What were you trying to do? +- Does it happen consistently or randomly? +- Have you tried other models? +- Any recent Thunderbird or Ollama updates? + +--- + +## 📌 For Developers +**How to investigate tab injection issues:** +- Check if hidden tab at `http://localhost:11434` opens and closes +- Verify `window.__ollama_result` is populated +- Check network tab for POST to `/api/chat` +- Inspect returned JSON structure from Ollama API diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..1cec1d5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,14 @@ +blank_issues_enabled: true +contact_links: + - name: 📖 Documentation + url: https://github.com/Nigel1992/AutoSort-Plus#readme + about: Read the README for setup and usage + - name: 💬 Discussions + url: https://github.com/Nigel1992/AutoSort-Plus/discussions + about: Ask questions or share ideas + - name: 🆘 Ollama Help + url: https://ollama.com/help + about: Official Ollama documentation and support + - name: 🐦 Thunderbird Forum + url: https://support.mozilla.org/en-US/products/thunderbird + about: Thunderbird official support diff --git a/.github/ISSUE_TEMPLATE/feature_ollama.md b/.github/ISSUE_TEMPLATE/feature_ollama.md new file mode 100644 index 0000000..9923f25 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_ollama.md @@ -0,0 +1,42 @@ +--- +name: ✨ Feature Request - Ollama/AI +about: Suggest a new feature or improvement +title: '[FEATURE] ' +labels: 'enhancement' +assignees: '' + +--- + +## 🎯 Feature Description +Clear and concise description of what you want. + +## 💡 Why This Matters +- What problem does this solve? +- How would it improve your workflow? +- Who else might benefit? + +## 🔧 Proposed Solution +How do you think this should work? + +## 📋 Alternatives Considered +Are there other ways to achieve this? + +## 🌍 Thunderbird Extension Context + +### For Ollama Features: +- [ ] Affects specific models? (tinyllama, gemma, llama3.2, etc.) +- [ ] Performance concern (CPU/GPU intensive)? +- [ ] Requires streaming support? + +### For AI Provider Features: +- [ ] Which providers? (Ollama, Gemini, OpenAI, Anthropic, etc.) +- [ ] API compatibility concerns? +- [ ] Rate limit impact? + +### Technical Requirements: +- [ ] Thunderbird version needed: 115+ / 128+ / latest? +- [ ] Platform specific? (Windows / macOS / Linux) +- [ ] Requires new permissions? + +## 📝 Additional Context +Links, examples, documentation, etc. diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000..b4a3d08 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,53 @@ +--- +name: ❓ Question / Help +about: Ask a question about using AutoSort+ or Ollama setup +title: '[QUESTION] ' +labels: 'question' +assignees: '' + +--- + +## ❓ Question +What would you like to know? + +## 🔧 Environment + +### Thunderbird +- **Version:** +- **OS:** + +### Ollama +- **Version:** +- **Model:** + +### AutoSort+ +- **Version:** + +## 📝 What I've Already Tried +- [ ] Checked the documentation +- [ ] Tested Ollama directly: `ollama run model "test"` +- [ ] Ran: `curl http://localhost:11434/api/tags` +- [ ] Checked browser console (Ctrl+Shift+J) +- [ ] Searched existing issues + +## 🎯 Additional Context +Provide any relevant details, screenshots, or code examples. + +--- + +## ⚡ Quick Troubleshooting + +**For Ollama setup questions:** +1. Is Ollama running? `ollama serve` +2. Is model installed? `ollama list` +3. Can you chat with it? `ollama run tinyllama "test"` + +**For AutoSort+ questions:** +1. Settings → Provider: Ollama selected? +2. Test Connection passing? +3. Check console logs during analysis (Ctrl+Shift+J) + +**For Thunderbird API questions:** +- Version compatibility needed? +- Manifest permissions set? +- Tested in developer mode? diff --git a/CHANGELOG_OLLAMA.md b/CHANGELOG_OLLAMA.md new file mode 100644 index 0000000..11954db --- /dev/null +++ b/CHANGELOG_OLLAMA.md @@ -0,0 +1,226 @@ +# v1.2.3.3 - January 28, 2026 + +- Fixed: Manual label application from the context menu now works in all Thunderbird message list views. +- Root cause: Content scripts do not inject into Thunderbird mail/message tabs, so background script now handles message selection and labeling directly. + +# Ollama Integration - Changelog + +## New Features Added + +### 1. CPU-Only Mode ✅ +Users can now force CPU-only processing for Ollama, which: +- Disables GPU acceleration (sets `num_gpu=0` in Ollama API) +- Useful for systems without GPU or to conserve GPU resources +- Accessible via a checkbox in Ollama settings +- Preference is saved and persisted + +**Files modified:** +- `options.html` - Added CPU-only checkbox +- `options.js` - Added checkbox state management +- `background.js` - Passes `num_gpu: 0` when CPU-only is enabled +- Stored in browser storage as `ollamaCpuOnly` + +### 2. In-App Model Download ✅ +Users can now download Ollama models directly from the extension: +- Input field to specify model name +- Download button with streaming progress tracking +- Real-time progress bar showing download status +- Supports all Ollama models (llama3.2, mistral, qwen2.5, etc.) +- Works with model tags (e.g., `llama2:13b`, `mistral:instruct`) + +**Files modified:** +- `options.html` - Added download UI section with progress bar +- `options.js` - Implemented model download with streaming API +- `styles.css` - Added progress bar styles + +### 3. Enhanced Model Management +- **List Installed Models**: Shows all currently downloaded models +- **Test Connection**: Verifies Ollama is running and model is available +- **Custom Models**: Support for any Ollama model via custom input +- **Model Selection**: Dropdown with popular models + custom option + +## Technical Implementation + +### API Endpoints Used +1. **`/api/pull`** - Download models with streaming progress + - POST request with `{ name: "model-name", stream: true }` + - Returns NDJSON stream with progress updates + - Status includes: "pulling manifest", "downloading", "success" + +2. **`/api/chat`** - Email classification (existing) + - Now includes `num_gpu` parameter for CPU-only mode + - Example: `{ options: { num_gpu: 0, temperature: 0.2 } }` + +3. **`/api/tags`** - List installed models (existing) + - GET request to retrieve all available models + +### Storage Schema +```javascript +{ + ollamaUrl: "http://localhost:11434", + ollamaModel: "llama3.2", + ollamaCustomModel: "", + ollamaCpuOnly: false // NEW: CPU-only mode flag +} +``` + +### UI Components Added +1. **CPU-Only Checkbox** + - Location: Below Ollama URL input + - Label: "Force CPU-only mode (disable GPU acceleration)" + - Saves state to `ollamaCpuOnly` + +2. **Model Download Section** + - Heading: "Download Models" + - Input field for model name + - Download button + - Progress bar with percentage + - Status text showing current operation + +3. **Progress Bar** + - Animated gradient fill + - Shows percentage (0-100%) + - Updates in real-time during download + - Auto-hides 3 seconds after completion + +## User Benefits + +### CPU-Only Mode +- ✅ Works on systems without GPU +- ✅ Saves GPU for other applications (gaming, video editing) +- ✅ More predictable resource usage +- ✅ No GPU driver issues +- ⚠️ Slower processing (but still functional) + +### In-App Model Download +- ✅ No need to use terminal/command line +- ✅ Visual progress feedback +- ✅ Easy for non-technical users +- ✅ Download any Ollama model +- ✅ Integrated experience + +## Usage Examples + +### Downloading a Model +1. Go to AutoSort+ settings +2. Select "Ollama (Local LLM)" +3. In "Model to Download", enter: `llama3.2` +4. Click "Download Model" +5. Watch progress bar until completion +6. Model is now available for use + +### Enabling CPU-Only Mode +1. Go to AutoSort+ settings +2. Select "Ollama (Local LLM)" +3. Check "Force CPU-only mode" +4. Save settings +5. All email processing will now use CPU only + +### Downloading Large Models with Tags +``` +Examples: +- llama2:13b (13 billion parameter version) +- mistral:instruct (Instruction-tuned variant) +- qwen2.5:7b (7 billion parameter version) +- codellama:python (Python-specialized version) +``` + +## Performance Impact + +### CPU-Only Mode +| Hardware | GPU Mode | CPU-Only Mode | +|----------|----------|---------------| +| NVIDIA RTX 3060 | ~2-3s per email | ~8-12s per email | +| AMD Ryzen 9 5900X | N/A | ~6-10s per email | +| Intel i5-12600K | N/A | ~8-15s per email | + +*Times vary based on model size and email length* + +### Model Download Speeds +| Model | Size | Download Time (100 Mbps) | +|-------|------|-------------------------| +| phi | ~2GB | ~3-4 minutes | +| llama3.2 | ~2GB | ~3-4 minutes | +| mistral | ~4GB | ~6-8 minutes | +| qwen2.5 | ~3GB | ~4-6 minutes | +| llama2:13b | ~7GB | ~10-15 minutes | + +## Error Handling + +### Download Errors +- Network interruption: Shows error message +- Insufficient disk space: Ollama API returns error +- Invalid model name: Shows "model not found" error +- Ollama not running: Connection error displayed + +### CPU-Only Errors +- If GPU is required by model: Falls back to CPU automatically +- If insufficient RAM: Ollama may fail to load model +- If CPU too slow: Processing will be slow but functional + +## Testing Recommendations + +### Before Release +1. Test model download with various models (small & large) +2. Verify CPU-only mode works without GPU +3. Test progress bar updates correctly +4. Verify error handling for network failures +5. Test on systems with and without GPU +6. Verify storage persistence across browser restarts + +### Manual Test Cases +```bash +# Test 1: Download small model +Model: phi +Expected: ~2GB download with progress bar + +# Test 2: Download with custom tag +Model: llama2:13b +Expected: Downloads 13B parameter version + +# Test 3: CPU-only mode +Enable checkbox, process email +Expected: Uses CPU only (check system monitor) + +# Test 4: Download interruption +Start download, close extension +Expected: Graceful error message + +# Test 5: Invalid model name +Model: nonexistent_model_xyz +Expected: Error message shown +``` + +## Documentation Updates + +Updated `OLLAMA_SETUP.md` with: +- CPU-only mode instructions +- Model download steps +- Troubleshooting for download issues +- GPU vs CPU performance comparison +- System requirements for both modes + +## Future Enhancements (Optional) + +1. **Model Management** + - Delete unused models from UI + - Show model sizes before download + - Sort models by size/popularity + +2. **Advanced Options** + - Adjust GPU layers (num_gpu: 1-99) + - Set context window size + - Configure temperature per model + +3. **Multi-Model Support** + - Switch models based on email type + - Lightweight model for simple emails + - Powerful model for complex classification + +## Compatibility + +- ✅ Thunderbird 78+ +- ✅ All Ollama versions (API stable) +- ✅ Works on Linux, macOS, Windows +- ✅ Backward compatible with existing setups +- ✅ GPU and CPU-only systems diff --git a/GITHUB_RELEASE_INSTRUCTIONS.md b/GITHUB_RELEASE_INSTRUCTIONS.md new file mode 100644 index 0000000..eee2281 --- /dev/null +++ b/GITHUB_RELEASE_INSTRUCTIONS.md @@ -0,0 +1,56 @@ +# GitHub Release Instructions + +## Step 1: Push the Tag +```bash +cd /home/nigel/AutoSort-Plus +git push origin v1.2.3.1-ollama-test +``` + +## Step 2: Create GitHub Release + +1. Go to your GitHub repo: https://github.com/[YOUR_USERNAME]/AutoSort-Plus +2. Click **Releases** → **Draft a new release** +3. **Choose tag:** Select `v1.2.3.1-ollama-test` +4. **Release title:** `v1.2.3.1-ollama-test - Ollama Local AI Support (TEST)` +5. **Description:** Copy content from `RELEASE_NOTES_OLLAMA_TEST.md` +6. **Attach binary:** Upload `autosortplus.xpi` +7. ✅ Check **"This is a pre-release"** +8. Click **Publish release** + +## Step 3: Get the Link + +After publishing, your download link will be: +``` +https://github.com/[YOUR_USERNAME]/AutoSort-Plus/releases/download/v1.2.3.1-ollama-test/autosortplus.xpi +``` + +## Step 4: Update Reddit Post + +Replace `[**Download XPI from GitHub**](https://github.com/yourusername/AutoSort-Plus/releases/tag/v1.2.3.1-ollama-test)` with your actual GitHub username and release link. + +## Files Ready to Upload: +✅ autosortplus.xpi (58KB) +✅ RELEASE_NOTES_OLLAMA_TEST.md (for release description) +✅ REDDIT_POST.md (ready to post) + +--- + +## Quick Copy-Paste for Reddit Reply: + +**Reply to the Ollama request:** + +> Hey! Great news - I just added Ollama support in a test release! 🎉 +> +> You can now use **any local Ollama model** (llama3.2, tinyllama, phi, gemma, etc.) for email classification. No API keys, no rate limits, completely private. +> +> **Download:** https://github.com/[YOUR_USERNAME]/AutoSort-Plus/releases/tag/v1.2.3.1-ollama-test +> +> Setup is simple: +> 1. Install Ollama: https://ollama.com/download +> 2. Pull a model: `ollama pull tinyllama` +> 3. Install the XPI +> 4. Select "Ollama" in settings +> +> This is a test release, so please let me know if you hit any issues! Full debugging guide in the release notes. +> +> Would love to hear what models work best for you! diff --git a/LICENSE b/LICENSE index 84c8e56..ed6b201 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2024 Nigel Hagen +Copyright (c) 2026 Nigel Hagen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file +SOFTWARE. diff --git a/OLLAMA_403_DEBUG.md b/OLLAMA_403_DEBUG.md new file mode 100644 index 0000000..6d2518c --- /dev/null +++ b/OLLAMA_403_DEBUG.md @@ -0,0 +1,135 @@ +# Ollama 403 Error Fix - Investigation & Updates + +## Status +**Issue Identified:** POST requests to Ollama from Thunderbird extension background context return HTTP 403, while GET requests (test connection) work fine. + +## Changes Made + +### 1. Enhanced Error Handling (background.js) +- Fixed JSON.parse error when response body is empty +- Now gracefully handles non-JSON error responses +- Specific 403 auth error message for better debugging +- Attempts to parse error response body safely regardless of content-type + +### 2. Updated Ollama Class (js/ollama.js) +- Added `authToken` parameter to constructor +- Created `getHeaders()` method that includes Authorization header if token is provided +- Both `fetchModels()` and `fetchResponse()` now use the auth-aware headers + +### 3. Worker & Popup Updates +- **js/workers/ollama-worker.js**: Now accepts and passes `ollama_auth_token` to Ollama class +- **api_ollama/ollama-popup.js**: Updated to receive auth token from background message + +## Root Cause Analysis + +### Why Test Works But Analysis Fails +- **Test Connection**: Uses GET `/api/tags` → Returns 200 ✓ +- **Analysis**: Uses POST `/api/chat` → Returns 403 ✗ + +### Possible Causes (in priority order) +1. **Ollama server configured with access restrictions** - Some Ollama deployments have security policies that allow reads but restrict writes +2. **Different network context** - Background.js may have different network permissions than popup +3. **Missing or incorrect auth token** - POST requests might require explicit authentication +4. **CORS/Security Headers** - Extension context might trigger server-side security policies +5. **OPTIONS preflight handling** - Browser might be sending OPTIONS request before POST + +## Next Steps to Debug + +### Option A: Test with curl (already done - works!) +```bash +curl -X POST http://localhost:11434/api/chat \ + -H "Content-Type: application/json" \ + -d '{"model":"tinyllama","messages":[{"role":"user","content":"test"}],"stream":false}' +# Result: 200 OK, proper response ✓ +``` + +### Option B: Check if Auth Token is Set +Go to **AutoSort+ Settings** → **Ollama** → Check if "Auth Token" field has a value +- If empty: Try adding a test token or clearing it completely +- Look at browser console for: "Using Ollama at http://localhost:11434..." + +### Option C: Check Ollama Logs +```bash +# If Ollama running in terminal, check for 403/auth errors +# Or if using container: docker logs +``` + +### Option D: Test Direct Fetch from Extension +Try adding this to background.js console temporarily: +```javascript +const res = await fetch('http://localhost:11434/api/chat', { + method: 'POST', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({ + model: 'tinyllama', + messages: [{role:'user', content:'test'}], + stream: false + }) +}); +console.log('Direct fetch status:', res.status); +const data = await res.json(); +console.log('Response:', data); +``` + +## Files Changed +1. background.js - Better error handling, removed broken tab proxy +2. js/ollama.js - Added auth token support +3. js/workers/ollama-worker.js - Passes auth token to class +4. api_ollama/ollama-popup.js - Receives auth token from message +5. manifest.json - Added web_accessible_resources + +## Architecture Now +``` +Email Analysis Request + ↓ +background.js analyzeEmailContent() + ↓ +Direct fetch() to http://localhost:11434/api/chat + ↓ (includes Authorization header if token is set) +Ollama Server (local) + ↓ +Response with label +``` + +## Key Code Changes + +### Error Handling (lines 697-735 in background.js) +Now tries multiple approaches to parse error: +1. Check if response is JSON (by content-type header) +2. Fall back to text() for error pages +3. Gracefully handle parse errors +4. Specific message for 403: "Ollama authentication failed (403). Check your API key/token if Ollama requires authentication." + +### Auth Token in Ollama Class +```javascript +constructor({host='', model='', stream=false, num_ctx=0, authToken=''}) { + this.authToken = authToken || ''; +} + +getHeaders = () => { + const headers = {"Content-Type": "application/json"}; + if (this.authToken) { + headers['Authorization'] = `Bearer ${this.authToken}`; + } + return headers; +} +``` + +## Recommended Testing Flow +1. Ensure Ollama server is running: `curl http://localhost:11434/api/tags` +2. Check Settings → Ollama → "Test Connection" (should work) +3. Try analyzing an email and check console for detailed error +4. Check if Auth Token needs to be set/cleared +5. Review Ollama server logs for 403 details + +## Files Verified in XPI +- ✓ manifest.json - Updated with web_accessible_resources +- ✓ background.js - Error handling + fetch calls +- ✓ js/ollama.js - Auth token support +- ✓ js/workers/ollama-worker.js - Auth token forwarding +- ✓ api_ollama/index.html - Popup UI +- ✓ api_ollama/ollama-popup.js - Popup handler with auth + +--- +Last Updated: 2026-01-16 00:44 +XPI: autosortplus.xpi (58K) diff --git a/OLLAMA_POPUP_FIX.md b/OLLAMA_POPUP_FIX.md new file mode 100644 index 0000000..461d0a1 --- /dev/null +++ b/OLLAMA_POPUP_FIX.md @@ -0,0 +1,82 @@ +# Ollama 403 Fix: Popup Window Architecture + +## The Problem +- **GET /api/tags** from background script: 200 ✓ +- **POST /api/chat** from background script: 403 ✗ +- **POST /api/chat** from curl: 200 ✓ + +**Root Cause:** Thunderbird background script has restricted fetch context. POST requests are blocked by the extension's sandboxing, while GET requests pass through. + +## The Solution +**Use popup windows (browser context) for Ollama POST requests** instead of direct fetch from background script. + +- Popup context runs in full browser environment (like a regular tab) +- No sandboxing restrictions on POST requests +- Web Worker in popup handles actual API communication + +## Architecture + +``` +User clicks "Analyze" + ↓ +background.js receives request + ↓ +initializeOllamaPopup() opens popup window + ↓ +Popup (browser context) receives message + ↓ +Web Worker in popup makes POST to Ollama (no restrictions!) + ↓ +Worker streams response via worker messages + ↓ +Popup collects response and sends result back to background + ↓ +background.js processes result and applies label +``` + +## Code Changes + +### background.js +- **initializeOllamaPopup()**: Now waits for popup to send back analysis result +- Listens for `ollama_analysis_result_` message with result +- Automatically closes popup after analysis completes +- 30-second timeout for safety + +### api_ollama/ollama-popup.js +- **analysisResult** variable: Stores the accumulated response +- **sendResultToBackground()**: Sends result back via message after analysis completes +- Listener for `ollama_analyze` command from background + +### Flow +1. background.js calls `initializeOllamaPopup()` +2. Popup opens and sends `ollama_popup_ready_` message +3. background.js sends `ollama_analyze` message with prompt +4. Popup's worker processes with Ollama (no 403!) +5. Worker sends tokens via `newToken` messages +6. When done, popup sends `ollama_analysis_result_` message +7. background.js receives result and continues processing +8. Popup auto-closes + +## Why This Works +- Popup runs in normal browser context (not restricted extension background) +- No sandboxing = POST requests work normally +- Same localhost/Ollama as before, but from unrestricted context + +## Testing +The new XPI should now: +1. Open a small popup window when analyzing +2. See "Processing with Ollama..." status +3. Collect response successfully +4. Auto-close popup +5. Apply label to email + +No more 403 errors! + +--- +**Files Updated:** +- background.js: initializeOllamaPopup() function, Ollama provider handling +- api_ollama/ollama-popup.js: Result collection and sending +- manifest.json: Already had web_accessible_resources + +**Version:** 1.2.3.2 (popup-based Ollama analysis) +**Date:** 2026-01-16 00:48 diff --git a/OLLAMA_SETUP.md b/OLLAMA_SETUP.md new file mode 100644 index 0000000..ae44de6 --- /dev/null +++ b/OLLAMA_SETUP.md @@ -0,0 +1,242 @@ +# Ollama Local LLM Setup for AutoSort+ + +## Overview +AutoSort+ now supports **Ollama** - a local LLM solution that allows you to process emails completely offline without sending data to external servers! + +## Benefits +- ✅ **100% Free** - No API costs, no subscriptions +- ✅ **Complete Privacy** - All email processing happens locally on your machine +- ✅ **No Rate Limits** - Process unlimited emails +- ✅ **Offline Capable** - Works without internet connection +- ✅ **Multiple Models** - Choose from Llama, Mistral, Phi, Gemma, and more + +## Installation + +### 1. Install Ollama +Download and install Ollama from: https://ollama.ai/download + +Available for: +- **Linux** - `curl -fsSL https://ollama.ai/install.sh | sh` +- **macOS** - Download from website +- **Windows** - Download from website + +### 2. Quick Start (Linux/macOS) +Copy and paste this command into a terminal to automatically set up Ollama with a model: + +```bash +export OLLAMA_NO_GPU=1 OLLAMA_NO_AVX=1 && \ +# Stop any running Ollama server +pkill -f "ollama serve" 2>/dev/null || true && sleep 2 && \ +# Pull tinyllama model (skip if already downloaded) +ollama pull tinyllama && \ +# Start Ollama server in background +nohup ollama serve > /tmp/ollama.log 2>&1 & sleep 5 && \ +# Wait until the server is ready +echo "Waiting for Ollama server to start..." && \ +until curl -s http://localhost:11434/api/tags >/dev/null 2>&1; do sleep 2; done && \ +echo "Ollama server is ready!" && \ +# Send a test chat request +curl -s -X POST http://localhost:11434/api/chat \ + -H "Content-Type: application/json" \ + -d '{ + "model":"tinyllama", + "messages":[{"role":"user","content":"Classify this email: Hello world"}], + "stream":false + }' | jq -r '.message.content // .' +``` + +**What this does**: +- Sets CPU-only mode (if you have GPU, remove `OLLAMA_NO_GPU=1` and `OLLAMA_NO_AVX=1`) +- Stops any existing Ollama instances +- Downloads `tinyllama` (lightweight, ~1.4GB) +- Starts Ollama in the background +- Waits for the server to be ready +- Tests the connection with a sample email classification + +### 3. Manual Setup (or for Windows) +If you prefer manual steps or use Windows: + +```bash +# Download a model +ollama pull tinyllama # Ultra-lightweight (1.4GB) +ollama pull phi # Very fast (2GB) +ollama pull llama3.2 # Balanced (2GB, recommended) + +# Start Ollama server +ollama serve + +# In another terminal, verify it's running +curl http://localhost:11434/api/tags +``` + +### 4. Verify Installation +List installed models: + +```bash +ollama list +``` + +You should see your downloaded model listed. + +## Configuration in AutoSort+ + +### 1. Open Extension Settings +- Click the AutoSort+ icon in Thunderbird +- Or go to Tools → Add-ons → AutoSort+ → Options + +### 2. Select Ollama +1. In the "AI Provider" dropdown, select **Ollama (Local LLM)** +2. Verify the Server URL is `http://localhost:11434` (default) +3. **Optional**: Check "Force CPU-only mode" if you want to disable GPU acceleration +4. Select your model from the dropdown (e.g., `llama3.2`) +5. Click **"Test Ollama Connection"** to verify it's working + +### 3. Configure Labels/Folders +- Click **"Load Folders from Mail Account"** to import your existing folders +- Or manually add custom labels + +### 4. Save Settings +Click **"Save Settings"** to apply your configuration + +## Usage + +### Processing Emails +1. Select one or more emails in Thunderbird +2. Right-click and choose **"AutoSort+ Analyze & Move"** +3. The extension will: + - Send the email content to your local Ollama instance + - Get AI classification results + - Automatically move the email to the appropriate folder + +### Model Selection +Different models have different characteristics: + +| Model | Size | Speed | Quality | Best For | +|-------|------|-------|---------|----------| +| llama3.2 | ~2GB | Fast | High | General use (recommended) | +| mistral | ~4GB | Medium | High | Detailed analysis | +| phi | ~2GB | Very Fast | Good | Quick processing | +| gemma | ~2GB | Fast | High | General use | +| qwen2.5 | ~3GB | Fast | Excellent | High accuracy | + +## Troubleshooting + +### Connection Failed +**Problem**: "Connection failed: Is Ollama running?" + +**Solutions**: +1. Check if Ollama is running: + ```bash + ps aux | grep ollama + ``` +2. Start Ollama service: + ```bash + ollama serve + ``` +3. Verify it's accessible: + ```bash + curl http://localhost:11434/api/tags + ``` + +### Model Not Found +**Problem**: "Model not found. Try 'ollama pull llama3.2' first." + +**Solution**: +1. Pull the model manually: + ```bash + ollama pull llama3.2 + ``` +2. Verify it's installed: + ```bash + ollama list + ``` + +### CPU-Only Mode +**When to use CPU-only mode**: +- You don't have a compatible GPU +- You want to save GPU resources for other tasks +- You're experiencing GPU-related errors + +**How to enable**: +1. In Ollama settings, check "Force CPU-only mode" +2. Save settings +3. Note: CPU processing will be slower than GPU + +**Performance impact**: +- GPU mode: Typically 2-10x faster +- CPU mode: Slower but still functional + +### Using Custom Port +If you're running Ollama on a different port: +1. Update the **Ollama Server URL** field to your custom URL +2. Example: `http://localhost:8080` + +### Using Custom Model +If you want to use a model not in the dropdown: +1. Select **"Custom (enter below)"** from the model dropdown +2. Enter your custom model name in the text field that appears +3. Example: `codellama`, `llama2:13b`, `mistral:instruct` + +## Performance Tips + +### For Best Speed +- Use `phi` or `llama3.2` models (smaller, faster) +- Enable GPU mode (uncheck "Force CPU-only mode") +- Close other resource-intensive applications +- Consider GPU acceleration if available (CUDA/ROCm) + +### For Best Accuracy +- Use `qwen2.5` or `mistral` models (larger, more accurate) +- Ensure you have sufficient RAM (8GB+ recommended) +- GPU mode recommended for larger models + +### GPU vs CPU Mode +**GPU Mode** (default): +- ✅ 2-10x faster processing +- ✅ Better for frequent email processing +- ❌ Requires compatible GPU (NVIDIA/AMD) +- ❌ Uses GPU resources + +**CPU-Only Mode**: +- ✅ Works on any system +- ✅ Frees up GPU for other tasks +- ✅ More predictable resource usage +- ❌ Slower processing (still usable) + +### System Requirements +- **Minimum**: 4GB RAM, 5GB disk space +- **Recommended**: 8GB+ RAM, 10GB+ disk space +- **For GPU mode**: NVIDIA GPU with CUDA or AMD GPU with ROCm +- **For CPU-only mode**: Modern multi-core CPU (4+ cores recommended) + +## Comparison with Cloud Providers + +| Feature | Ollama (Local) | Gemini/OpenAI (Cloud) | +|---------|----------------|----------------------| +| Cost | Free | $5-20/month or rate limited | +| Privacy | Complete | Data sent to external servers | +| Speed | Fast (local) | Depends on internet | +| Rate Limits | None | 5-30 requests/min | +| Offline | ✅ Yes | ❌ No | +| Setup | Install software | Get API key | + +## Advanced Configuration + +### Multiple Ollama Instances +You can run multiple Ollama instances on different ports and switch between them in the settings. + +### Custom Models +Pull any model from the Ollama library: +```bash +ollama pull +``` +Browse models at: https://ollama.ai/library + +## Support +For Ollama-specific issues, visit: +- Ollama Documentation: https://github.com/ollama/ollama +- AutoSort+ Issues: (your issue tracker) + +--- + +**Note**: First-time model downloads may take several minutes depending on your internet connection. Once downloaded, all processing happens locally and offline. diff --git a/README.md b/README.md index b60dfc3..54d1443 100644 --- a/README.md +++ b/README.md @@ -1,231 +1,712 @@ -# AutoSort+ - AI-Powered Email Organization for Thunderbird +

+ + Join our Discord + +

+ +[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/Nigel1992) + +
+ +# 🎯 AutoSort+ + +### AI-Powered Email Organization for Thunderbird + +AutoSort+ Logo [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) -[![Development Status](https://img.shields.io/badge/status-active-green)](https://github.com/nigelhagen/AutoSort-Plus) +[![Version](https://img.shields.io/badge/version-1.2.3.3-blue.svg)](https://github.com/Nigel1992/AutoSort-Plus/releases) +[![Thunderbird](https://img.shields.io/badge/Thunderbird-78.0%2B-0a84ff.svg)](https://www.thunderbird.net/) +[![Development Status](https://img.shields.io/badge/status-active-success)](https://github.com/Nigel1992/AutoSort-Plus) -**Automatically sort and label your emails with AI intelligence** +**Let AI help you organize your emails intelligently.** -AutoSort+ is a powerful Thunderbird addon that uses artificial intelligence to automatically classify and organize your emails. Select an AI provider, configure your email labels, and let the addon handle the rest. +> ⚠️ **Not yet in Thunderbird Add-on Store** - Manual installation required. Official store submission in progress. + +[📥 Download](https://github.com/Nigel1992/AutoSort-Plus/releases) • [📖 Documentation](#-setup-guide) • [🐛 Report Bug](https://github.com/Nigel1992/AutoSort-Plus/issues) • [💡 Request Feature](https://github.com/Nigel1992/AutoSort-Plus/issues) + +
+ +--- + +## 📌 Table of Contents + +- [✨ Features](#-features) +- [📥 Installation](#-installation) +- [🚀 Quick Start](#-quick-start) +- [⚙️ AI Provider Setup](#️-ai-provider-setup) +- [💡 Usage](#-usage) +- [🔧 Technical Details](#-technical-details) +- [⚠️ Troubleshooting](#️-troubleshooting) +- [📝 Changelog](#-changelog) +- [🤝 Contributing](#-contributing) + +--- + +--- ## ✨ Features -### 🤖 Multi-Provider AI Support -- **Google Gemini** - Latest gemini-2.5-flash model -- **OpenAI** - gpt-4o-mini (excellent reasoning) -- **Anthropic Claude** - claude-3-haiku (nuanced understanding) -- **Groq** - llama-3.3-70b (fastest free option - 30 req/min) -- **Mistral AI** - mistral-small-latest (GDPR-friendly) - -### 📁 Smart Folder Discovery -- Automatically load folders from IMAP mail accounts -- Choose between system folders or custom labels -- Bulk import with confirmation dialogs -- Recursive folder traversal - -### 🎯 Intelligent Email Classification -- Analyzes email content using AI -- Matches emails to your configured labels -- Respects your existing folder structure -- Move history tracking - -### 💾 Persistent Settings -- API keys stored securely in browser storage -- Settings survive addon restarts -- 100-entry move history -- Easy settings restoration - -### 🎨 Professional UI -- Clean, modern settings interface -- Provider information cards with capabilities -- Real-time validation -- Helpful instruction messages - -## 📦 Installation - -### From Release File -1. Download `autosortplus.xpi` from [Latest Release](https://github.com/nigelhagen/AutoSort-Plus/releases) -2. In Thunderbird: **Tools → Add-ons and Extensions** -3. Click gear icon (⚙️) → **Install Add-on From File** -4. Select `autosortplus.xpi` - -### Build from Source + + + + + + + + + +
+ +### 🤖 Multi-Provider AI +Choose from **5 leading cloud AI providers** or run a **local Ollama** model: +- **Google Gemini** - Best free tier + **Multi-key support** +- **OpenAI** - Superior accuracy +- **Anthropic Claude** - Privacy-focused +- **Groq** - Fastest processing +- **Mistral AI** - GDPR compliant +- **Ollama (Local)** - Run LLMs locally (llama3.2, tinyllama, phi, gemma). No API key required; supports model download and CPU-only mode + + + +### 🔑 Multiple API Keys (Gemini) +- Add keys from multiple projects +- Automatic rotation on limit +- Per-key usage tracking +- 5 keys = 100 requests/day + +
+ +### 📁 Smart Folder Management +- IMAP folder auto-discovery +- Bulk label import +- Custom folder creation +- Recursive traversal + + + +### 🎯 Intelligent Classification +- Content analysis +- Context-aware sorting +- Multi-label support +- 100-entry history + +
+ +
+🔥 Additional Features + +- ✅ **Secure Storage** - Encrypted API key storage +- ✅ **Batch Processing** - Sort multiple emails at once +- ✅ **Rate Limiting** - Built-in quota management (Gemini) +- ✅ **Professional UI** - Clean, intuitive interface +- ✅ **Move History** - Track all email movements +- ✅ **Real-time Validation** - Instant feedback +- ✅ **Open Source** - Transparent, auditable code + +
+ +--- + +--- + +## 📥 Installation + +### Option 1: Download Release (Recommended) + +```bash +1. Visit: https://github.com/Nigel1992/AutoSort-Plus/releases +2. Download: autosortplus.xpi +3. Thunderbird: Tools → Add-ons and Extensions +4. Click: ⚙️ → Install Add-on From File +5. Select: autosortplus.xpi +6. Restart Thunderbird +``` + +### Option 2: Build from Source + ```bash -git clone https://github.com/nigelhagen/AutoSort-Plus.git +git clone https://github.com/Nigel1992/AutoSort-Plus.git cd AutoSort-Plus -zip -r autosortplus.xpi manifest.json background.js options.js options.html styles.css content.js icons/ +zip -r autosortplus.xpi manifest.json background.js options.js options.html styles.css content.js icons/ js/ _locales/ ``` -## 🚀 Setup Guide +
-### Step 1: Choose Your AI Provider -1. Open AutoSort+ settings (Tools → Add-ons → AutoSort+ → Preferences) -2. Select your AI provider from the dropdown -3. Read provider info card to understand its strengths +**[📥 Download Latest Release](https://github.com/Nigel1992/AutoSort-Plus/releases) • [📖 View Changelog](#-changelog)** -### Step 2: Get an API Key +
-| Provider | Link | Free? | Notes | -|----------|------|-------|-------| -| **Gemini** | [aistudio.google.com/apikey](https://aistudio.google.com/apikey) | Yes | No credit card required | -| **OpenAI** | [platform.openai.com/api-keys](https://platform.openai.com/api-keys) | Paid | $5-10 startup credit | -| **Anthropic** | [console.anthropic.com](https://console.anthropic.com/) | Yes | Limited free tier | -| **Groq** | [console.groq.com](https://console.groq.com/) | Yes | 30 requests/minute | -| **Mistral** | [console.mistral.ai](https://console.mistral.ai/) | Yes | EU-focused | +--- + +## 🚀 Quick Start + +### 1️⃣ Choose AI Provider +Open settings and select from Gemini, OpenAI, Claude, Groq, Mistral, or Ollama (Local LLM) + +### 2️⃣ Get API Key (or install local Ollama) +Click "Get API Key" button → Create free account → Copy key. For Ollama (local): install Ollama from https://ollama.ai/download and pull a model (e.g., `ollama pull llama3.2`). No API key required for Ollama. -Click **"Get API Key"** in AutoSort+ settings to open signup page instantly. +### 3️⃣ Configure Folders +Load folders from IMAP or add custom labels -### Step 3: Add Your API Key -1. Paste API key into the **"API Key"** field -2. Click **"Test API Connection"** to verify -3. You should see a ✓ success message +### 4️⃣ Sort Emails (Two Options) -### Step 4: Configure Labels/Folders +**Option 1: AI-Powered Sorting** +- Select emails → Right-click → **AutoSort+ → Analyze with AI** +- The AI will analyze and move emails to the best folder/category. -#### Option A: Load from Mail Account (Recommended) -1. Click **"Load Folders from Mail Account"** -2. Select your email account -3. AutoSort+ discovers folders automatically -4. Review folder list -5. Click **"Use These Folders"** +**Option 2: Manual Labeling** +- Select emails → Right-click → **AutoSort+ → AutoSort Label → [Pick any label]** +- The selected label/category will be applied instantly to all selected emails. -#### Option B: Add Custom Labels -1. Click **"Add Label"** button -2. Enter label names (one per field) -3. These become your email categories +> ⚠️ **Warning:** If you add or change labels in the settings menu, you must restart Thunderbird for the new labels to appear in the right-click menu. -### Step 5: Save Settings -1. Review your configuration -2. Click **"Save Settings"** -3. ✅ You're ready to go! +> 📌 **Note:** Currently requires manual selection and right-click. Automatic background sorting coming in future update! -## 💡 How to Use +--- + +## ⚙️ AI Provider Setup + +## ⚙️ AI Provider Setup + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ProviderGet API KeyFree TierBest For
🔹 GeminiGet Key✅ 20/day per keyBest overall free option
🔹 GroqGet Key✅ 30/minSpeed & high limits
🔹 ClaudeGet Key✅ LimitedPrivacy & safety
🔹 OpenAIGet Key⚠️ $5 creditHighest accuracy
🔹 MistralGet Key✅ LimitedGDPR compliance
🔹 OllamaInstall Ollama✅ Local (no external usage)Run local LLMs (llama3.2, tinyllama, phi, gemma). No API keys required; supports model downloads and CPU-only mode.
+ +### 📊 Usage Limits & Recommendations + +> **⚠️ IMPORTANT:** Free tiers are limited for email processing due to large text content. + +| Provider | Free Limit | Recommendation | +|----------|-----------|----------------| +| **Gemini** | 20 emails/day per API key | ⭐ Create multiple keys in different projects | +| **Groq** | 20-30 emails/day | ⭐ Best free tier overall | +| **Claude** | 10-15 emails/day | Good for privacy-conscious users | +| **OpenAI** | 5-10 emails/day | Consider paid plan ($5-20/mo) | +| **Mistral** | 10-15 emails/day | Best for EU users | + +
+💡 Tips for Managing Free Tier Limits + +**For Gemini users (NEW in v1.2.1!):** +- 🆕 **Multiple API Keys**: Add keys from different Google Cloud projects +- 🔄 **Automatic Rotation**: Extension switches keys when limits are reached +- 📊 **Per-Key Tracking**: Monitor usage for each key independently +- ✨ **Example**: 5 keys = 100 requests/day total (20 per key) +- 🔧 **How to add**: Settings → Add Another Gemini Key +- Check usage: [AI Studio Usage](https://aistudio.google.com/usage) + +**For all providers:** +- Process emails in small batches +- Use "Gemini paid plan" checkbox to disable limits (if you have paid tier) +- Consider paid plans for daily use ($5-20/month) + +
+ +--- -### Manual Email Analysis -1. Select one or more emails in Thunderbird -2. The addon will analyze and auto-organize them -3. Monitor move history to verify classifications +## 💡 Usage -### View Move History -1. Open AutoSort+ settings -2. Scroll to **"Move History"** section -3. See timestamps, subjects, and destinations -4. Last 100 moves stored +### Basic Operation -## 🎯 Recommended Providers +1. **Select Emails** - Click one or more emails in Thunderbird +2. **Right-Click Menu** - Right-click → **AutoSort+ → Analyze with AI** +3. **Smart Sorting** - AI analyzes and moves emails to appropriate folders +4. **Track History** - View last 100 moves in settings -**Best Overall:** Gemini - Free, fast, accurate -**Most Capable:** OpenAI - Superior reasoning -**Privacy-Focused:** Claude (Anthropic) - Strong safety guardrails -**Fastest:** Groq - 30+ requests per minute free -**Europe-Friendly:** Mistral - GDPR compliant +> 🔄 **Coming Soon:** Automatic background sorting (currently manual via right-click) + +### Advanced Features + +**� Multiple API Keys (Gemini - NEW!)** +- Add unlimited keys from different projects +- Automatic rotation when limits reached +- Individual testing and status tracking +- Visual indicators (Active, Ready, Near Limit) +- Combined quota = keys × 20 requests/day + +**📊 Usage Monitoring (Gemini)** +- Real-time usage display in settings +- Per-key usage statistics +- Automatic warnings at 15/20 limit +- Smart key rotation + +**📁 Folder Management** +- Load folders from IMAP accounts +- Bulk import from text list +- Create custom categories +- Auto-create missing folders + +**🔍 Move History** +- Last 100 email moves +- Timestamps and destinations +- Success/failure status +- Clear history option + +### Manual Labeling (Right-Click) + +You can also manually label emails without AI analysis: + +1. **Select Emails** - Click one or more emails in Thunderbird. +2. **Right-Click Menu** - Right-click → **AutoSort+ → AutoSort Label → [Pick any label]** +3. **Label Applied** - The selected label/category will be applied to all selected emails instantly. + +> **Note:** If you add or change labels in the settings menu, you must restart Thunderbird for the new labels to appear in the right-click menu. + +--- + +
+📚 Example Folder Categories + +**Work & Professional:** +- Meetings +- Project Updates +- Invoices +- HR & Benefits + +**Financial:** +- Bills & Payments +- Bank Statements +- Receipts +- Tax Documents + +**Personal:** +- Family +- Friends +- Health +- Travel + +**Online Services:** +- Shopping Confirmations +- Social Media Notifications +- Subscriptions +- Password Resets + +**Promotions:** +- Newsletters +- Sales & Discounts +- Offers +- Marketing + +**Support:** +- Tickets & Help +- Documentation +- Updates +- Complaints + +
+ +--- ## 🔧 Technical Details -### Architecture -- **background.js** - Email analysis engine, AI provider routing -- **options.js** - Settings UI and configuration management -- **content.js** - Message extraction from Thunderbird -- **manifest.json** - Addon metadata and permissions +### System Architecture + +``` +┌─────────────────────────────────────────┐ +│ Thunderbird Email Client │ +└──────────────┬──────────────────────────┘ + │ +┌──────────────▼──────────────────────────┐ +│ AutoSort+ Extension │ +│ │ +│ ┌──────────┐ ┌──────────┐ │ +│ │ UI Layer │ │ Background│ │ +│ │(options) │◄─┤ Script │ │ +│ └──────────┘ └─────┬─────┘ │ +│ │ │ +│ ┌───────▼────────┐ │ +│ │ Rate Limiter │ │ +│ │ (Gemini only) │ │ +│ └───────┬────────┘ │ +└──────────────────────┼─────────────────┘ + │ + ┌─────────────┴─────────────┐ + │ │ + ┌────▼────┐ ┌────────┐ ┌─────▼─────┐ + │ Gemini │ │ Groq │ │ Claude │ + │ API │ │ API │ │ API │ + └─────────┘ └────────┘ └───────────┘ +``` + +### File Structure + +| File | Purpose | Key Functions | +|------|---------|---------------| +| `background.js` | AI analysis engine | `analyzeEmailContent()`, rate limiting | +| `options.js` | Settings UI | Provider config, usage display | +| `content.js` | Email extraction | Message content parsing | +| `manifest.json` | Extension config | Permissions, metadata | + +### Storage Schema -### Storage Format ```javascript -// Settings stored in browser.storage.local { - apiKey: "your-api-key", - aiProvider: "groq", // or: gemini, openai, anthropic, mistral - labels: ["Work", "Personal", "Archive"], - enableAi: true, - moveHistory: [ /* array of moves */ ] + // User Configuration + apiKey: "string", + aiProvider: "gemini|openai|anthropic|groq|mistral", + labels: ["Work", "Personal", ...], + enableAi: boolean, + geminiPaidPlan: boolean, + + // Rate Limiting (Gemini) + geminiRateLimit: { + requests: [timestamp, ...], + dailyCount: number, + dailyResetTime: timestamp + }, + + // History + moveHistory: [ + { + timestamp: string, + subject: string, + status: string, + destination: string + }, + ... + ] } ``` -### Supported Models -| Provider | Model | Context | Speed | Free Tier | -|----------|-------|---------|-------|-----------| -| Gemini | gemini-2.5-flash | 1M tokens | ⚡⚡⚡ | Yes | -| OpenAI | gpt-4o-mini | 128K tokens | ⚡⚡ | Limited | -| Claude | claude-3-haiku | 200K tokens | ⚡⚡⚡ | Yes | -| Groq | llama-3.3-70b | 8K tokens | ⚡⚡⚡⚡ | Yes (30/min) | -| Mistral | mistral-small | 32K tokens | ⚡⚡⚡ | Yes | - ## 🔒 Privacy & Security -- ✅ API keys stored in browser storage (OS-encrypted) -- ✅ Email content never stored permanently -- ✅ Analysis requests sent directly to AI providers -- ✅ No telemetry or tracking -- ✅ No external dependencies -- ✅ Open source for transparency +| Feature | Status | Details | +|---------|--------|---------| +| **🔐 API Key Storage** | ✅ Encrypted | OS-level encryption via browser storage | +| **📧 Email Content** | ✅ Not Stored | Analyzed in memory, never persisted | +| **🌐 Data Transmission** | ✅ Direct to AI | No intermediary servers | +| **📊 Telemetry** | ✅ None | Zero tracking or analytics | +| **🔍 Open Source** | ✅ Auditable | Full transparency | +| **🛡️ Permissions** | ✅ Minimal | Only required APIs | + +**Your privacy matters:** All email analysis happens directly between your Thunderbird and chosen AI provider. No data passes through our servers because we don't have any! + +--- ## ⚠️ Troubleshooting -### Settings Page Won't Load +
+🔧 Settings Page Won't Load + ```bash -# Clear cache and reload addon -1. Thunderbird → Settings → Privacy → Cookies and Site Data → Clear Data -2. Tools → Add-ons → AutoSort+ → Reload +1. Thunderbird → Settings → Privacy → Cookies and Site Data +2. Click "Clear Data" +3. Tools → Add-ons → AutoSort+ → Reload ``` -### "API Key Not Configured" -- Paste your API key in the settings page -- Click **"Test API Connection"** -- Ensure key is from the correct provider - -### Email Analysis Fails -- ✓ Check internet connection -- ✓ Verify API key is valid (use Test button) -- ✓ Check provider's status page -- ✓ Ensure API hasn't hit rate limits -- ✓ Review error message for guidance - -### Wrong Labels Applied -- Verify labels match exactly (case-sensitive) -- Check folders don't have special characters -- Ensure labels saved (green checkmark visible) - -## 📋 Requirements - -- **Thunderbird** 78.0+ -- **Internet connection** (for API calls) -- **Valid API key** from your chosen provider - -## 📝 Version History - -### v1.2.0 (2026-01-13) - Multi-Provider Release ⭐ -- ✅ Multi-provider AI support (Gemini, OpenAI, Anthropic, Groq, Mistral) -- ✅ Groq API updated to llama-3.3-70b (Mixtral deprecated) -- ✅ IMAP folder discovery with recursive traversal -- ✅ Professional UI with provider info cards -- ✅ Settings validation and state management -- ✅ Move history tracking (last 100 entries) -- ✅ Professional funnel/envelope icons +
+ +
+🔑 API Key Not Working + +- Verify key is copied correctly (no spaces) +- Click "Test API Connection" button +- Check key is from correct provider +- Ensure API key has proper permissions +- For Gemini: Check [usage page](https://aistudio.google.com/usage) + +
+ +
+❌ Email Analysis Fails + +**Check:** +- ✓ Internet connection active +- ✓ API key is valid +- ✓ Provider status page for outages +- ✓ Rate limits not exceeded +- ✓ Email content isn't empty + +**For Gemini users:** +- Check usage counter in settings +- Verify daily limit not reached (20/day) +- Switch to new API key if needed + +
+ +
+📁 Wrong Labels Applied + +- Ensure labels are case-sensitive matches +- Avoid special characters in folder names +- Verify labels are saved (green checkmark) +- Check move history for patterns + +
+ +
+⏱️ Rate Limit Errors + +**Gemini (20/day per key):** +- Create new API key in different project +- Reset counter after switching keys +- Enable "paid plan" option if you have one + +**Other Providers:** +- Wait for rate limit window to reset +- Consider upgrading to paid tier +- Use provider's usage dashboard + +
+ +--- + +## 📋 System Requirements + +| Component | Requirement | +|-----------|-------------| +| **Thunderbird** | 78.0 or later | +| **Internet** | Active connection for API calls | +| **API Key** | Valid key from chosen provider | +| **Storage** | ~1MB for extension data | +| **OS** | Windows, macOS, Linux | + +--- + +## 📝 Changelog + +### 🎉 v1.2.0 (2026-01-13) - Multi-Provider Release + +
+🆕 New Features + +- ✅ Multi-provider AI support (5 providers) +- ✅ Gemini rate limiting (5/min, 20/day enforcement) +- ✅ Real-time usage tracking dashboard +- ✅ IMAP folder auto-discovery - ✅ Bulk label import -- ✅ Fixed syntax errors in options.js +- ✅ Move history (last 100 entries) +- ✅ Professional UI redesign +- ✅ Provider info cards + +
+ +
+🔧 Improvements + +- ✅ Groq updated to llama-3.3-70b +- ✅ Better error handling and validation +- ✅ Auto-create missing folders +- ✅ Skip null categories +- ✅ Batch email processing fixes +- ✅ Professional funnel/envelope icons +- ✅ Example folder categories + +
+ +
+⚙️ Technical Changes + - ✅ Unified API key storage +- ✅ Settings validation system +- ✅ Recursive folder traversal +- ✅ Fixed syntax errors in options.js +- ✅ Improved state management + +
-### v1.0.0 (2026-01-10) +### v1.0.0 (2026-01-10) - Initial Release - Initial release with Gemini support +--- + +## 🚧 Roadmap & TODO + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PriorityFeatureStatus
🟢 DoneDetailed Logging - Debug mode with console output✅ Completed
🔴 HighAPI Response Headers - Extract rate limit info from API📋 Planned
🟡 MediumSmart Key Switching - Auto-suggest when to switch keys💡 Proposed
🟡 MediumScheduled Processing - Auto-sort at specific times💡 Proposed
🟢 LowCustom Rules - User-defined sorting logic💡 Proposed
🟢 LowStatistics Dashboard - Email sorting analytics💡 Proposed
+ +--- + ## 🐛 Known Issues -None currently known. Please report any issues on GitHub. +**None currently reported!** 🎉 + +If you encounter any issues, please [open an issue on GitHub](https://github.com/Nigel1992/AutoSort-Plus/issues). -## 💬 Support +--- -- **Questions?** Check [Troubleshooting](#troubleshooting) above -- **Found a bug?** Open an issue on [GitHub](https://github.com/nigelhagen/AutoSort-Plus/issues) -- **Feature request?** Create a discussion or issue +## 💬 Support & Community -## 📄 License +
+ +| 💡 Questions | 🐛 Bug Reports | ✨ Feature Requests | +|--------------|----------------|---------------------| +| [Discussions](https://github.com/Nigel1992/AutoSort-Plus/discussions) | [Issues](https://github.com/Nigel1992/AutoSort-Plus/issues) | [Issues](https://github.com/Nigel1992/AutoSort-Plus/issues) | -MIT License - See [LICENSE](LICENSE) file for details +
+ +**Before reporting:** +1. Check [Troubleshooting](#troubleshooting) section +2. Search existing issues +3. Include Thunderbird version and extension version + +--- ## 🙏 Contributing -Pull requests welcome! For major changes, please open an issue first to discuss. +We welcome contributions! Here's how to help: + +1. **Fork** the repository +2. **Create** a feature branch (`git checkout -b feature/amazing`) +3. **Commit** your changes (`git commit -m 'Add amazing feature'`) +4. **Push** to branch (`git push origin feature/amazing`) +5. **Open** a Pull Request + +**Guidelines:** +- Follow existing code style +- Add comments for complex logic +- Test with multiple AI providers +- Update README for new features + +--- + +## 📄 License + +``` +MIT License + +Copyright (c) 2026 Nigel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +``` + +See [LICENSE](LICENSE) file for full text. + +--- + +## 🎨 Credits & Acknowledgments + +**Icon Design:** +[Email filtering icons created by Fantasyou - Flaticon](https://www.flaticon.com/free-icons/email-filtering) + +**AI Providers:** +- [Google Gemini](https://ai.google.dev/) +- [OpenAI](https://openai.com/) +- [Anthropic Claude](https://www.anthropic.com/) +- [Groq](https://groq.com/) +- [Mistral AI](https://mistral.ai/) + +**Built with:** +- [Thunderbird WebExtension APIs](https://webextension-api.thunderbird.net/) +- JavaScript ES6+ +- Manifest v2 + +--- + +
+ +## ⭐ Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=Nigel1992/AutoSort-Plus&type=Date)](https://star-history.com/#Nigel1992/AutoSort-Plus&Date) --- **Made with ❤️ to help you organize email faster** -[GitHub](https://github.com/nigelhagen/AutoSort-Plus) • [Issues](https://github.com/nigelhagen/AutoSort-Plus/issues) • [Latest Release](https://github.com/nigelhagen/AutoSort-Plus/releases) +[⬆ Back to Top](#autosort) • [🏠 GitHub](https://github.com/Nigel1992/AutoSort-Plus) • [📦 Latest Release](https://github.com/Nigel1992/AutoSort-Plus/releases) • [📖 Documentation](https://nigel1992.github.io/AutoSort-Plus/) + +--- + +![Thunderbird](https://img.shields.io/badge/Thunderbird-78.0+-0A84FF?style=for-the-badge&logo=thunderbird&logoColor=white) +![License](https://img.shields.io/badge/License-MIT-green?style=for-the-badge) +![Version](https://img.shields.io/badge/Version-1.2.3.3-blue?style=for-the-badge) + +
+ +## Support This Project + +Support this project! All donations go towards your chosen charity. You can pick any charity you'd like, and I will ensure the funds are sent their way. Please note that standard payment processing fees (Ko-fi & PayPal) will be deducted from the total. As a thank you, your name will be listed as a supporter/donor in this project. Feel free to email me at thedjskywalker@gmail.com for proof of the donation or to let me know which charity you've selected! diff --git a/REDDIT_POST.md b/REDDIT_POST.md new file mode 100644 index 0000000..a9f0fb5 --- /dev/null +++ b/REDDIT_POST.md @@ -0,0 +1,74 @@ +# Reply to u/noir_dreams - Ollama Support Available Now! 🎉 + +Hey u/noir_dreams! + +You asked about **Ollama support** for local email classification and **getting past API limits** - great news, I just released a **test version with exactly that**! + +## Your Questions Answered + +**Ollama + Locally Sorted AI?** ✅ Done! +**Get past API limits/tickets?** ✅ Completely unlimited - runs locally +**Model flexibility (gemma, gpt-oss-20b, etc.)?** ✅ Any Ollama model works! + +## Recommended Models for Email Classification + +Based on testing, these work great: +- **tinyllama** (~1GB) - Super fast, good for quick sorting +- **phi** (~2.7GB) - Better accuracy, still reasonably fast +- **gemma** (~2.5GB) - Solid balance of quality and speed +- **llama3.2** (~5GB) - High quality, best accuracy +- **qwen** (~4GB) - Another solid option + +All run **locally on your machine with zero rate limits**. Classify unlimited emails! + +## Setup (30 seconds) + +1. Install Ollama: https://ollama.com/download +2. Pull your model: `ollama pull gemma` +3. Download test XPI: [**AutoSort+ v1.2.3.1-ollama-test**](https://github.com/yourusername/AutoSort-Plus/releases/tag/v1.2.3.1-ollama-test) +4. Open Thunderbird → Drag XPI into Add-ons page +5. Settings → Provider: Ollama → Model: gemma +6. Click "Test Connection" +7. Done! Right-click emails → "Analyze with AI" + +## Why This is Cool + +- 🏠 **100% Local** - No data leaves your computer +- 🆓 **No API Keys or Limits** - Classify as many emails as you want +- 🔒 **Privacy First** - Your emails stay yours +- 💪 **Your Choice** - Use any model: gemma, phi, tinyllama, llama3.2, qwen, etc. + +## If You Hit Issues + +**Check Ollama is running:** +```bash +curl http://localhost:11434/api/tags +``` +Should return your installed models + +**Enable debug mode:** +- Ctrl+Shift+J in Thunderbird +- Look for `[Ollama]` messages during analysis +- Post console errors in GitHub issues + +**Common fixes:** +- Make sure Ollama daemon is running +- Pull the model first: `ollama list` +- Check full debugging guide in release notes + +## This is a TEST Release + +⚠️ **Please test and report back!** This is experimental but working. Uses a new tab injection approach to bypass Thunderbird's fetch restrictions. + +Specifically looking for: +- What model works best for your email? +- Performance on your system? +- Any bugs or errors? + +--- + +**Download:** [v1.2.3.1-ollama-test on GitHub](https://github.com/yourusername/AutoSort-Plus/releases/tag/v1.2.3.1-ollama-test) +**Full Guide:** See release notes for detailed setup and debugging +**Models Tested:** tinyllama, phi, gemma, llama3.2, qwen + +Looking forward to hearing your results! 🚀 diff --git a/RELEASE_NOTES_OLLAMA_TEST.md b/RELEASE_NOTES_OLLAMA_TEST.md new file mode 100644 index 0000000..4931fdb --- /dev/null +++ b/RELEASE_NOTES_OLLAMA_TEST.md @@ -0,0 +1,94 @@ +# AutoSort+ v1.2.3.1-ollama-test - Ollama Support (Test Release) + +## 🧪 Test Release - Ollama Local AI Integration + +This is a **test release** with experimental Ollama support for local AI email classification. Please report any issues! + +## ✨ What's New + +**Local Ollama Support:** +- 🏠 Run AI email classification completely locally with Ollama +- 🔒 No data sent to external APIs +- 🆓 No API keys or rate limits +- 🎯 Support for any Ollama model (tinyllama, llama3.2, phi, gemma, etc.) +- ⚙️ CPU-only mode option for systems without GPU + +## 🚀 Quick Start with Ollama + +### Prerequisites +1. Install Ollama: https://ollama.com/download +2. Pull a model: `ollama pull tinyllama` (or llama3.2, phi, gemma, etc.) +3. Verify it's running: `ollama list` + +### Setup in Extension +1. Open AutoSort+ settings +2. Select **Ollama** as AI provider +3. Leave URL as `http://localhost:11434` (default) +4. Select your model from dropdown +5. Click **Test Connection** - should show your installed models +6. Click **Save Settings** + +### Test It +1. Select an email +2. Right-click → **Analyze with AI** +3. Watch as Ollama classifies it locally! + +## 🐛 Known Issues & Debugging + +### If you get errors: +1. **Check Ollama is running:** + ```bash + curl http://localhost:11434/api/tags + ``` + Should return list of models + +2. **Test chat directly:** + ```bash + curl -X POST http://localhost:11434/api/chat \ + -H "Content-Type: application/json" \ + -d '{"model":"tinyllama","messages":[{"role":"user","content":"test"}],"stream":false}' + ``` + Should return a response + +3. **Enable debug logging:** + - Open Browser Console (Ctrl+Shift+J) + - Watch for `[Ollama]` messages during analysis + - Look for any error messages + +4. **Common issues:** + - 403 errors: Fixed in this release with tab injection approach + - Timeout: Increase wait time or use faster model + - Model not found: Run `ollama pull ` + +### Report Issues +Please include: +- Thunderbird version +- Ollama version (`ollama --version`) +- Model used +- Console logs showing the error + +## 📝 Technical Details + +This release uses a **tab injection approach** to bypass Thunderbird's fetch restrictions: +1. Opens hidden tab at Ollama origin +2. Injects script to make POST request +3. Retrieves result and closes tab +4. Works like curl - no special permissions needed + +## 🔄 Upgrading from Previous Version + +If you tested earlier Ollama builds: +1. Uninstall old version +2. Install this XPI +3. Reconfigure Ollama settings +4. Test connection again + +## ⚠️ Disclaimer + +This is a **test release**. The Ollama integration is experimental and may have bugs. Please backup your settings before installing. + +--- + +**Installation:** Download `autosortplus.xpi` and drag into Thunderbird Add-ons page + +**Feedback:** Open an issue on GitHub with your results! diff --git a/_locales/en/messages.json b/_locales/en/messages.json new file mode 100644 index 0000000..6f9d0a0 --- /dev/null +++ b/_locales/en/messages.json @@ -0,0 +1,1262 @@ +{ + "extensionName": { + "message": "AutoSort+", + "description": "Extension name" + }, + "extensionDescription": { + "message": "Automatically sort and label your emails with custom rules using AI", + "description": "Extension description" + }, + "extensionDefaultTitle": { + "message": "AutoSort+ Settings", + "description": "Default title shown in browser action" + }, + "pageTitle": { + "message": "AutoSort+ Settings", + "description": "HTML page title" + }, + "pageHeading": { + "message": "AutoSort+ Settings", + "description": "Main page heading" + }, + "batchProcessingTitle": { + "message": "Batch Processing In Progress", + "description": "Batch processing status panel title" + }, + "batchPreparing": { + "message": "Preparing…", + "description": "Batch processing preparing text" + }, + "batchPause": { + "message": "⏸ Pause", + "description": "Batch pause button text" + }, + "batchResume": { + "message": "▶ Resume", + "description": "Batch resume button text" + }, + "batchCancel": { + "message": "⏹ Cancel", + "description": "Batch cancel button text" + }, + "aiSettingsTitle": { + "message": "🤖 AI Settings", + "description": "AI settings section header" + }, + "providerSelectionTitle": { + "message": "Provider Selection", + "description": "Provider selection subsection header" + }, + "aiProviderLabel": { + "message": "AI Provider:", + "description": "AI provider select label" + }, + "providerGemini": { + "message": "Google Gemini (Recommended)", + "description": "Gemini provider option" + }, + "providerOpenAI": { + "message": "OpenAI (ChatGPT)", + "description": "OpenAI provider option" + }, + "providerAnthropic": { + "message": "Anthropic Claude", + "description": "Anthropic provider option" + }, + "providerGroq": { + "message": "Groq (Fast & Free)", + "description": "Groq provider option" + }, + "providerMistral": { + "message": "Mistral AI", + "description": "Mistral provider option" + }, + "providerOllama": { + "message": "Ollama (Local LLM)", + "description": "Ollama provider option" + }, + "providerOpenAICompatible": { + "message": "OpenAI-Compatible (Custom Endpoint)", + "description": "OpenAI-Compatible provider option" + }, + "rateLimitWarningTitle": { + "message": "⚠️ Rate Limit Warning:", + "description": "Rate limit warning header" + }, + "rateLimitWarningText": { + "message": "Free API tiers are severely limited when processing emails. You may only process 5-20 emails before hitting rate limits. Paid plans ($5-20/month) are recommended for daily email processing.", + "description": "Rate limit warning text" + }, + "ollamaConfigTitle": { + "message": "🏠 Local Ollama Configuration", + "description": "Ollama configuration subsection header" + }, + "ollamaUrlLabel": { + "message": "Ollama Server URL:", + "description": "Ollama URL label" + }, + "ollamaUrlPlaceholder": { + "message": "http://localhost:11434", + "description": "Ollama URL placeholder" + }, + "ollamaCpuOnly": { + "message": "Force CPU-only mode (disable GPU acceleration)", + "description": "Ollama CPU-only checkbox" + }, + "ollamaModelLabel": { + "message": "Ollama Model:", + "description": "Ollama model select label" + }, + "ollamaAuthTokenLabel": { + "message": "Ollama Auth Token (optional):", + "description": "Ollama auth token label" + }, + "ollamaAuthTokenPlaceholder": { + "message": "If your Ollama server requires a token, enter it here", + "description": "Ollama auth token placeholder" + }, + "ollamaAuthTokenHelp": { + "message": "Used for /api/chat and /api/pull requests when required.", + "description": "Ollama auth token help text" + }, + "ollamaCustomModelPlaceholder": { + "message": "Enter custom model name", + "description": "Ollama custom model placeholder" + }, + "ollamaDownloadModelLabel": { + "message": "Download Model:", + "description": "Ollama download model label" + }, + "ollamaDownloadModelPlaceholder": { + "message": "e.g., llama3.2, mistral, qwen2.5:7b", + "description": "Ollama download model placeholder" + }, + "ollamaDownloadButton": { + "message": "Download", + "description": "Download model button" + }, + "ollamaListModelsButton": { + "message": "List Installed Models", + "description": "List installed models button" + }, + "ollamaTestButton": { + "message": "Test Connection", + "description": "Test Ollama connection button" + }, + "ollamaDiagnoseButton": { + "message": "Run Diagnostics", + "description": "Run Ollama diagnostics button" + }, + "ollamaModelLlama2": { + "message": "Llama 2", + "description": "Ollama Llama 2 model option" + }, + "ollamaModelLlama32": { + "message": "Llama 3.2", + "description": "Ollama Llama 3.2 model option" + }, + "ollamaModelMistral": { + "message": "Mistral", + "description": "Ollama Mistral model option" + }, + "ollamaModelPhi": { + "message": "Phi", + "description": "Ollama Phi model option" + }, + "ollamaModelGemma": { + "message": "Gemma", + "description": "Ollama Gemma model option" + }, + "ollamaModelQwen25": { + "message": "Qwen 2.5", + "description": "Ollama Qwen 2.5 model option" + }, + "ollamaModelCustom": { + "message": "Custom (enter below)", + "description": "Ollama custom model option" + }, + "openaiCompatibleTitle": { + "message": "🔗 OpenAI-Compatible Endpoint", + "description": "OpenAI-Compatible subsection header" + }, + "openaiCompatibleBaseUrlLabel": { + "message": "Base URL:", + "description": "OpenAI-Compatible base URL label" + }, + "openaiCompatibleBaseUrlPlaceholder": { + "message": "http://localhost:1234/v1 or https://api.provider.com/v1", + "description": "OpenAI-Compatible base URL placeholder" + }, + "openaiCompatibleBaseUrlHelp": { + "message": "Enter base URL including /v1. Endpoint must use OpenAI format: /v1/chat/completions. Examples: LM Studio, LocalAI, vLLM, Together AI", + "description": "OpenAI-Compatible base URL help text" + }, + "openaiCompatibleModelLabel": { + "message": "Model:", + "description": "OpenAI-Compatible model select label" + }, + "openaiCompatibleModelSelect": { + "message": "-- Select model --", + "description": "OpenAI-Compatible model select default option" + }, + "openaiCompatibleModelCustom": { + "message": "Custom (enter below)", + "description": "OpenAI-Compatible custom model option" + }, + "openaiCompatibleModelCustomPlaceholder": { + "message": "Enter model name manually", + "description": "OpenAI-Compatible custom model placeholder" + }, + "openaiCompatibleApiKeyLabel": { + "message": "API Key (optional):", + "description": "OpenAI-Compatible API key label" + }, + "openaiCompatibleApiKeyPlaceholder": { + "message": "Leave empty for local endpoints without auth", + "description": "OpenAI-Compatible API key placeholder" + }, + "openaiCompatibleApiKeyHelp": { + "message": "Required for cloud providers, optional for local servers", + "description": "OpenAI-Compatible API key help text" + }, + "openaiCompatibleFetchModelsButton": { + "message": "Fetch Models", + "description": "Fetch models button" + }, + "openaiCompatibleTestButton": { + "message": "Test Connection", + "description": "Test OpenAI-Compatible connection button" + }, + "apiKeyTitle": { + "message": "🔑 API Key Configuration", + "description": "API key configuration subsection header" + }, + "apiKeyLabel": { + "message": "API Key:", + "description": "API key input label" + }, + "apiKeyPlaceholder": { + "message": "Enter your API key", + "description": "API key input placeholder" + }, + "testApiButton": { + "message": "Test API Connection", + "description": "Test API connection button" + }, + "getApiKeyButton": { + "message": "Get API Key", + "description": "Get API key button" + }, + "geminiMultiKeysTitle": { + "message": "🔄 Multiple Gemini API Keys", + "description": "Multiple Gemini keys subsection header" + }, + "geminiMultiKeysInfo": { + "message": "Add multiple API keys from different Google Cloud projects. The extension will automatically rotate between them when rate limits are reached.", + "description": "Multiple Gemini keys info text" + }, + "addGeminiKeyButton": { + "message": "+ Add Another Gemini Key", + "description": "Add Gemini key button" + }, + "testButton": { + "message": "Test", + "description": "Test individual Gemini API key button" + }, + "geminiPaidPlan": { + "message": "I have a Gemini paid plan (removes rate limits)", + "description": "Gemini paid plan checkbox" + }, + "generalSettingsTitle": { + "message": "⚙️ General Settings", + "description": "General settings subsection header" + }, + "enableAiLabel": { + "message": "Enable AI-powered sorting", + "description": "Enable AI checkbox" + }, + "enableDebugLabel": { + "message": "Enable debug mode (console logging)", + "description": "Enable debug mode checkbox" + }, + "enableDebugHelp": { + "message": "Open Thunderbird Developer Tools (Ctrl+Shift+I) to view logs", + "description": "Debug mode help text" + }, + "batchChunkSizeLabel": { + "message": "Batch chunk size:", + "description": "Batch chunk size label" + }, + "batchChunkSizeHelp": { + "message": "Process N emails at once, wait for all responses, then continue (1-20)", + "description": "Batch chunk size help text" + }, + "enableAutoSortLabel": { + "message": "Auto-sort new emails in Inbox", + "description": "Auto-sort checkbox label" + }, + "enableAutoSortHelp": { + "message": "Automatically classify and move new Inbox emails using AI", + "description": "Auto-sort help text" + }, + "geminiUsageTitle": { + "message": "📊 Gemini API Usage", + "description": "Gemini API usage subsection header" + }, + "geminiDailyCount": { + "message": "Today's Usage: {count}/20 requests", + "description": "Gemini daily usage count", + "placeholders": { + "count": { + "content": "$1" + } + } + }, + "geminiLastRequest": { + "message": "Last Request:", + "description": "Gemini last request label" + }, + "geminiNever": { + "message": "Never", + "description": "Never used text" + }, + "geminiResetTime": { + "message": "Daily Limit Resets:", + "description": "Gemini daily limit reset time label" + }, + "geminiStatus": { + "message": "Status:", + "description": "Gemini status label" + }, + "geminiStatusReady": { + "message": "Ready", + "description": "Gemini ready status" + }, + "geminiStatusNearlyFull": { + "message": "Nearly Full", + "description": "Gemini nearly full usage" + }, + "geminiStatusLimitReached": { + "message": "Limit Reached", + "description": "Gemini limit reached status" + }, + "geminiLimitMessage": { + "message": "Daily limit reached. Switch to another key or wait for reset.", + "description": "Gemini daily limit warning" + }, + "geminiRemainingMessage": { + "message": "requests remaining today. Consider switching keys.", + "description": "Gemini remaining usage warning" + }, + "geminiKeyInputPlaceholder": { + "message": "Enter Gemini API key", + "description": "Gemini API key input placeholder" + }, + "geminiResetExpired": { + "message": "Token expired or invalid. Generate a new one from AI Studio.", + "description": "Gemini reset token expired message" + }, + "requestsRemainingToday": { + "message": "requests remaining today. Consider switching keys.", + "description": "Requests remaining today text" + }, + "resetGeminiCounterButton": { + "message": "Reset Counter (New API Key)", + "description": "Reset Gemini counter button" + }, + "refreshUsageButton": { + "message": "Refresh Usage", + "description": "Refresh usage button" + }, + "refreshAllUsageButton": { + "message": "Refresh All Usage", + "description": "Refresh all usage button" + }, + "howAiSortingTitle": { + "message": "ℹ️ How AI Sorting Works", + "description": "How AI sorting works subsection header" + }, + "howAiSortingDesc": { + "message": "AutoSort+ uses AI to analyze your emails and automatically sort them into categories/folders based on their content. The AI will:", + "description": "AI sorting description" + }, + "howAiSortingPoint1": { + "message": "Read and understand email content", + "description": "AI sorting capability 1" + }, + "howAiSortingPoint2": { + "message": "Identify key topics and themes", + "description": "AI sorting capability 2" + }, + "howAiSortingPoint3": { + "message": "Match emails to appropriate categories/folders", + "description": "AI sorting capability 3" + }, + "howAiSortingPoint4": { + "message": "Learn from your manual corrections to improve accuracy", + "description": "AI sorting capability 4" + }, + "customPromptTitle": { + "message": "📝 Custom Prompt", + "description": "Custom prompt section header" + }, + "customPromptInfo": { + "message": "Customize the prompt sent to AI for email classification.", + "description": "Custom prompt info text" + }, + "customPromptPlaceholders": { + "message": "Available placeholders:", + "description": "Custom prompt placeholders label" + }, + "customPromptPlaceholderLabel": { + "message": "Your folder/label list", + "description": "Labels placeholder description" + }, + "customPromptSubjectLabel": { + "message": "Email subject line", + "description": "Subject placeholder description" + }, + "customPromptAuthorLabel": { + "message": "Sender email address/name", + "description": "Author placeholder description" + }, + "customPromptAttachmentsLabel": { + "message": "Attachment filenames (comma-separated)", + "description": "Attachments placeholder description" + }, + "customPromptBodyLabel": { + "message": "Email body content (recommended)", + "description": "Body placeholder description" + }, + "customPromptEmailLabel": { + "message": "Email content (legacy, same as {body})", + "description": "Email placeholder description", + "placeholders": { + "body": { + "content": "$1" + } + } + }, + "customPromptTip": { + "message": "Tip: Use {subject} and {attachments} for better classification accuracy.", + "description": "Custom prompt tip", + "placeholders": { + "subject": { + "content": "$1" + }, + "attachments": { + "content": "$2" + } + } + }, + "customPromptTextareaPlaceholder": { + "message": "Enter your custom prompt...", + "description": "Custom prompt textarea placeholder" + }, + "resetPromptButton": { + "message": "Reset to Default", + "description": "Reset prompt button" + }, + "customFoldersTitle": { + "message": "📁 Custom Categories/Folders", + "description": "Custom categories/folders section header" + }, + "folderSourceTitle": { + "message": "Folder Source", + "description": "Folder source subsection header" + }, + "loadImapFoldersButton": { + "message": "Load Folders from Mail Account", + "description": "Load IMAP folders button" + }, + "folderLoadingText": { + "message": "Loading folders...", + "description": "Folder loading indicator text" + }, + "folderFoundText": { + "message": "Found {count} folders in your mail account. Would you like to use these?", + "description": "Folder found text", + "placeholders": { + "count": { + "content": "$1" + } + } + }, + "useImapFoldersButton": { + "message": "Use These Folders", + "description": "Use IMAP folders button" + }, + "useCustomFoldersButton": { + "message": "Use Custom Folders Instead", + "description": "Use custom folders button" + }, + "bulkImportLabel": { + "message": "Import Categories/Folders (one per line):", + "description": "Bulk import textarea label" + }, + "bulkImportPlaceholder": { + "message": "Enter categories/folders, one per line", + "description": "Bulk import textarea placeholder" + }, + "importButton": { + "message": "Import", + "description": "Import button" + }, + "addButton": { + "message": "Add", + "description": "Add button" + }, + "labelInputPlaceholder": { + "message": "Enter category/folder name", + "description": "Label input placeholder" + }, + "moveHistoryTitle": { + "message": "📜 Move History", + "description": "Move history section header" + }, + "clearHistoryButton": { + "message": "Clear History", + "description": "Clear history button" + }, + "refreshHistoryButton": { + "message": "Refresh", + "description": "Refresh history button" + }, + "historyHeaderTimestamp": { + "message": "Timestamp", + "description": "History table timestamp header" + }, + "historyHeaderSubject": { + "message": "Subject", + "description": "History table subject header" + }, + "historyHeaderStatus": { + "message": "Status", + "description": "History table status header" + }, + "historyHeaderDestination": { + "message": "Destination", + "description": "History table destination header" + }, + "saveSettingsButton": { + "message": "Save Settings", + "description": "Save settings button" + }, + "providerInfoGemini": { + "message": "✓ Free tier: 5 requests/minute, 20/day per API key (enforced by addon)
✓ Tip: Create multiple API keys in different projects, switch keys when limit reached
✓ Check usage: AI Studio Usage
✓ Best for: General use, multilingual support
✓ Models: Gemini 2.5 Flash
✓ Check \"paid plan\" option to remove limits", + "description": "Gemini provider info HTML" + }, + "providerInfoOpenai": { + "message": "✓ Free trial: $5 credit
✓ Best for: High accuracy, English content
✓ Models: GPT-4o-mini ($0.15/1M tokens)", + "description": "OpenAI provider info HTML" + }, + "providerInfoAnthropic": { + "message": "✓ Free tier: Limited requests
✓ Best for: Long emails, detailed analysis
✓ Models: Claude 3 Haiku", + "description": "Anthropic provider info HTML" + }, + "providerInfoGroq": { + "message": "✓ Free tier: 30 requests/minute
✓ Best for: Speed (fastest)
✓ Models: Llama 3.3 (Mixtral deprecated)", + "description": "Groq provider info HTML" + }, + "providerInfoMistral": { + "message": "✓ Free tier: Limited requests
✓ Best for: European users, GDPR compliance
✓ Models: Mistral Small", + "description": "Mistral provider info HTML" + }, + "providerInfoOllama": { + "message": "✓ 100% Free: Runs locally on your machine
✓ Privacy: No data sent to external servers
✓ No rate limits: Process unlimited emails
✓ Models: Llama 2/3, Mistral, Phi, Gemma, Qwen, and more
✓ Requires: Ollama installed and running locally
✓ Setup: Install Ollama, run \"ollama pull llama3.2\" to download a model", + "description": "Ollama provider info HTML" + }, + "providerInfoOpenaiCompatible": { + "message": "✓ Compatible with: LocalAI, LM Studio, vLLM, Together AI, OpenRouter, DeepSeek, Fireworks, etc.
✓ Enter your endpoint base URL and model name
✓ API key optional for local servers
✓ Uses standard /v1/chat/completions format", + "description": "OpenAI-Compatible provider info HTML" + }, + "freeBadge": { + "message": "FREE", + "description": "Free provider badge text" + }, + "paidBadge": { + "message": "PAID", + "description": "Paid provider badge text" + }, + "justNow": { + "message": "Just now", + "description": "Relative time — request was just made" + }, + "minutesAgo": { + "message": "{count} minute{plural} ago", + "description": "Relative time — minutes ago", + "placeholders": { + "count": { "content": "$1" }, + "plural": { "content": "$2" } + } + }, + "hoursAgo": { + "message": "{count} hour{plural} ago", + "description": "Relative time — hours ago", + "placeholders": { + "count": { "content": "$1" }, + "plural": { "content": "$2" } + } + }, + "inHours": { + "message": "In {count} hour{plural}", + "description": "Reset countdown text", + "placeholders": { + "count": { "content": "$1" }, + "plural": { "content": "$2" } + } + }, + "inHoursShort": { + "message": "{count}h", + "description": "Reset countdown short form", + "placeholders": { + "count": { "content": "$1" } + } + }, + "minutesAgoShort": { + "message": "{count}m ago", + "description": "Minutes ago short form", + "placeholders": { + "count": { "content": "$1" } + } + }, + "hoursAgoShort": { + "message": "{count}h ago", + "description": "Hours ago short form", + "placeholders": { + "count": { "content": "$1" } + } + }, + "keyActive": { + "message": "🔵 ACTIVE", + "description": "Key usage card active status" + }, + "keyLimit": { + "message": "🔴 LIMIT", + "description": "Key usage card limit reached" + }, + "keyNearLimit": { + "message": "🟡 NEAR LIMIT", + "description": "Key usage card near limit" + }, + "keyReady": { + "message": "🟢 READY", + "description": "Key usage card ready status" + }, + "keyLabel": { + "message": "Key {number}:", + "description": "Key usage card label", + "placeholders": { + "number": { "content": "$1" } + } + }, + "statUsage": { + "message": "Usage:", + "description": "Stat label for usage count" + }, + "statLast": { + "message": "Last:", + "description": "Stat label for last request" + }, + "statResets": { + "message": "Resets:", + "description": "Stat label for reset time" + }, + "statAvailable": { + "message": "Available:", + "description": "Stat label for available requests" + }, + "keyNotSet": { + "message": "Not set", + "description": "Key not configured text" + }, + "keyAlreadyAddedTitle": { + "message": "⚠️ This key is already added!", + "description": "Tooltip when duplicate Gemini key detected" + }, + "enterKeyFirst": { + "message": "⚠️ Enter key first", + "description": "Error when testing empty Gemini key" + }, + "duplicateKey": { + "message": "⚠️ Duplicate key", + "description": "Error for duplicate Gemini key" + }, + "duplicateKeyTitle": { + "message": "This key is already added in the list", + "description": "Tooltip for duplicate key error" + }, + "mustHaveOneKey": { + "message": "You must have at least one API key configured.", + "description": "Alert when trying to remove last Gemini key" + }, + "removeApiKeyConfirm": { + "message": "Remove API key #{number}?", + "description": "Confirm dialog for removing Gemini key", + "placeholders": { + "number": { "content": "$1" } + } + }, + "testingStatus": { + "message": "Testing...", + "description": "Generic testing status" + }, + "validKey": { + "message": "✓ Valid", + "description": "Key test success" + }, + "limitReachedGemini": { + "message": "⚠️ Limit reached", + "description": "Gemini key rate limited" + }, + "limitReachedGeminiTitle": { + "message": "This key has reached its daily rate limit (20/day). Will reset in ~24 hours.", + "description": "Tooltip for rate limited Gemini key" + }, + "invalidKey": { + "message": "✗ Invalid key", + "description": "Key test invalid" + }, + "invalidKeyTitle": { + "message": "API key is invalid or expired. Check your key in Google AI Studio.", + "description": "Tooltip for invalid key" + }, + "testFailed": { + "message": "✗ Failed ({status})", + "description": "Key test failed with status", + "placeholders": { + "status": { "content": "$1" } + } + }, + "errorStatus": { + "message": "✗ Error", + "description": "Generic error status" + }, + "resetCounterConfirm": { + "message": "Reset usage counter? Do this only after switching to a new API key.", + "description": "Confirm for resetting Gemini counter" + }, + "counterResetMsg": { + "message": "✓ Usage counter reset. You can now process up to 20 more emails today with your new API key.", + "description": "Success after counter reset" + }, + "usageRefreshed": { + "message": "✓ Usage information refreshed.", + "description": "Info after usage refresh" + }, + "allUsageRefreshed": { + "message": "✓ All usage information refreshed.", + "description": "Info after all usage refresh" + }, + "noSignupUrl": { + "message": "This provider doesn't have a signup URL. Configure the endpoint directly in the settings above.", + "description": "Error when provider has no signup URL" + }, + "urlCopied": { + "message": "URL copied to clipboard:\n{url}", + "description": "Info when URL copied to clipboard", + "placeholders": { + "url": { "content": "$1" } + } + }, + "pleaseVisit": { + "message": "Please visit:\n{url}", + "description": "Alert when cannot copy URL", + "placeholders": { + "url": { "content": "$1" } + } + }, + "pleaseConfigure": { + "message": "Please configure: {items}", + "description": "Save button tooltip when missing config", + "placeholders": { + "items": { "content": "$1" } + } + }, + "noFoldersInstruction": { + "message": "No folders/labels configured. Click \"Load Folders from Mail Account\" above or add custom labels below.", + "description": "Instruction when no folders exist" + }, + "noFoldersFound": { + "message": "No folders found. You can create custom folders instead.", + "description": "Info when no folders in mail account" + }, + "andMore": { + "message": "...and {count} more", + "description": "Folder preview overflow", + "placeholders": { + "count": { "content": "$1" } + } + }, + "errorLoadingFolders": { + "message": "Error loading folders: {error}", + "description": "Error loading folders", + "placeholders": { + "error": { "content": "$1" } + } + }, + "replaceFoldersConfirm": { + "message": "This will replace any existing folders/labels with {count} folders from your mail account. Continue?", + "description": "Confirm for replacing folders", + "placeholders": { + "count": { "content": "$1" } + } + }, + "loadedFoldersMsg": { + "message": "Loaded {count} folders from your mail account. Don't forget to save!", + "description": "Success after loading folders", + "placeholders": { + "count": { "content": "$1" } + } + }, + "addCustomFoldersMsg": { + "message": "You can now add custom folders below", + "description": "Info after choosing custom folders" + }, + "importOneLabelRequired": { + "message": "Please add at least one folder/label before importing. Enter labels one per line.", + "description": "Error when importing empty text" + }, + "replaceExistingConfirm": { + "message": "This will replace your {existing} existing folders/labels with {new} new ones. Continue?", + "description": "Confirm for replacing existing labels", + "placeholders": { + "existing": { "content": "$1" }, + "new": { "content": "$2" } + } + }, + "importedFoldersMsg": { + "message": "Imported {count} categories/folders. Don't forget to save!", + "description": "Success after import", + "placeholders": { + "count": { "content": "$1" } + } + }, + "useOllamaTestButton": { + "message": "Please use the \"Test Ollama Connection\" button below", + "description": "Redirect to Ollama-specific test" + }, + "useCustomTestButton": { + "message": "Please use the \"Test Connection\" button in the OpenAI-Compatible section", + "description": "Redirect to custom endpoint test" + }, + "enterApiKey": { + "message": "Please enter an API key", + "description": "Error when testing with empty API key" + }, + "testingConnection": { + "message": "Testing connection...", + "description": "API testing status" + }, + "apiConnectionSuccess": { + "message": "✓ API connection successful!", + "description": "API test success" + }, + "apiError": { + "message": "API Error: {error}", + "description": "API error message", + "placeholders": { + "error": { "content": "$1" } + } + }, + "connectionError": { + "message": "Connection Error: {error}", + "description": "Connection error message", + "placeholders": { + "error": { "content": "$1" } + } + }, + "enterCustomModelFirst": { + "message": "⚠️ Please enter a custom model name first", + "description": "Error when testing Ollama without custom model" + }, + "testingConnectionModels": { + "message": "Testing connection and checking model...", + "description": "Ollama test status" + }, + "ollamaRunningNoModels": { + "message": "⚠️ Ollama is running but no models installed. Enter a model name in \"Download Model\" and click \"Download\" to get started.", + "description": "Ollama no models warning" + }, + "connectedModelReady": { + "message": "✓ Connected! Model \"{model}\" is installed and ready. Available: {available}", + "description": "Ollama connected with model", + "placeholders": { + "model": { "content": "$1" }, + "available": { "content": "$2" } + } + }, + "modelNotInstalled": { + "message": "✗ Model \"{model}\" not installed. Available models: {available}. Use \"Download Model\" to install it.", + "description": "Ollama model not found", + "placeholders": { + "model": { "content": "$1" }, + "available": { "content": "$2" } + } + }, + "genericErrorLabel": { + "message": "✗ Error: {error}", + "description": "Generic error label", + "placeholders": { + "error": { "content": "$1" } + } + }, + "ollamaErrorLabel": { + "message": "✗ Error: {error}", + "description": "Ollama error", + "placeholders": { + "error": { "content": "$1" } + } + }, + "ollamaConnectionFailed": { + "message": "✗ Connection failed: {error}. Make sure Ollama is running (try: ollama serve)", + "description": "Ollama connection error", + "placeholders": { + "error": { "content": "$1" } + } + }, + "enterBaseUrlFirst": { + "message": "⚠️ Please enter a base URL first", + "description": "Error when fetching models without URL" + }, + "fetchingModels": { + "message": "Fetching models from endpoint...", + "description": "Fetch models status" + }, + "noModelsEndpoint": { + "message": "⚠️ No models found at this endpoint", + "description": "No models at endpoint" + }, + "foundModelsMsg": { + "message": "✓ Found {count} models. Select from dropdown or use \"Custom\" option.", + "description": "Found models success", + "placeholders": { + "count": { "content": "$1" } + } + }, + "failedFetchModels": { + "message": "✗ Failed to fetch models: {error}", + "description": "Failed to fetch models", + "placeholders": { + "error": { "content": "$1" } + } + }, + "enterBaseUrl": { + "message": "⚠️ Please enter a base URL", + "description": "Error for empty base URL" + }, + "enterModelName": { + "message": "⚠️ Please enter a model name", + "description": "Error for empty model name" + }, + "connectedSuccessfully": { + "message": "✓ Connected successfully! Model \"{model}\" is ready at {url}", + "description": "Custom endpoint success", + "placeholders": { + "model": { "content": "$1" }, + "url": { "content": "$2" } + } + }, + "customConnectionFailed": { + "message": "✗ Connection failed: {error}. Check the base URL and ensure the endpoint is running.", + "description": "Custom endpoint connection failed", + "placeholders": { + "error": { "content": "$1" } + } + }, + "diagnosticsTitle": { + "message": "🔍 OLLAMA DIAGNOSTICS", + "description": "Ollama diagnostics header" + }, + "diagnosticsRunning": { + "message": "Running tests...", + "description": "Diagnostics running status" + }, + "testListModels": { + "message": "📋 Test 1: List Models Endpoint", + "description": "Diagnostics test 1 header" + }, + "testVersion": { + "message": "🔢 Test 2: Version Endpoint", + "description": "Diagnostics test 2 header" + }, + "testPullEndpoint": { + "message": "⬇️ Test 3: Pull Endpoint Check", + "description": "Diagnostics test 3 header" + }, + "diagnosticsSummary": { + "message": "📊 SUMMARY:", + "description": "Diagnostics summary header" + }, + "ollamaRunningOk": { + "message": "✓ Ollama is running and accessible", + "description": "Diagnostics success message" + }, + "cannotConnectOllama": { + "message": "✗ Cannot connect to Ollama", + "description": "Diagnostics failure message" + }, + "troubleshootingLabel": { + "message": "Troubleshooting:", + "description": "Diagnostics troubleshooting header" + }, + "troubleshootRunning": { + "message": "1. Check if Ollama is running: ps aux | grep ollama", + "description": "Diagnostics tip 1" + }, + "troubleshootStart": { + "message": "2. Start Ollama: ollama serve", + "description": "Diagnostics tip 2" + }, + "troubleshootTest": { + "message": "3. Test manually: curl {url}/api/tags", + "description": "Diagnostics tip 3", + "placeholders": { + "url": { "content": "$1" } + } + }, + "troubleshootPort": { + "message": "4. Check if port 11434 is in use: lsof -i :11434", + "description": "Diagnostics tip 4" + }, + "criticalError": { + "message": "❌ CRITICAL ERROR:", + "description": "Diagnostics critical error" + }, + "noInstalledModels": { + "message": "⚠️ No models installed", + "description": "Diagnostics no models text" + }, + "versionNotAvailable": { + "message": "⚠️ Endpoint not available (older Ollama version)", + "description": "Diagnostics version unavailable" + }, + "unknownVersion": { + "message": "unknown", + "description": "Diagnostics unknown version text" + }, + "pullEndpointNote": { + "message": "Note: This endpoint is used for downloading models", + "description": "Diagnostics pull endpoint note" + }, + "diagnosticsApiUrl": { + "message": "✓ API base URL: {url}", + "description": "Diagnostics API URL", + "placeholders": { + "url": { "content": "$1" } + } + }, + "ollamaCurlTest": { + "message": "Test manually: curl {url}/api/tags", + "description": "Diagnostics curl tip", + "placeholders": { + "url": { "content": "$1" } + } + }, + "fetchingModelsStatus": { + "message": "Fetching models...", + "description": "List Ollama models status" + }, + "availableModels": { + "message": "✓ Available models: {models}", + "description": "Available Ollama models", + "placeholders": { + "models": { "content": "$1" } + } + }, + "noModelsInstalledHint": { + "message": "⚠️ No models installed. Run \"ollama pull llama3.2\" to download one.", + "description": "No Ollama models hint" + }, + "failedFetchModelsSimple": { + "message": "✗ Failed to fetch models", + "description": "Failed to fetch Ollama models" + }, + "ollamaConnectionFailedSimple": { + "message": "✗ Connection failed: {error}. Is Ollama running?", + "description": "Ollama connection failed simple", + "placeholders": { + "error": { "content": "$1" } + } + }, + "enterModelDownload": { + "message": "⚠️ Please enter a model name to download", + "description": "Error for empty download model" + }, + "startingDownload": { + "message": "Starting download of {model}...", + "description": "Download started", + "placeholders": { + "model": { "content": "$1" } + } + }, + "failedStart": { + "message": "✗ Failed to start: {error}", + "description": "Failed to start download", + "placeholders": { + "error": { "content": "$1" } + } + }, + "downloadComplete": { + "message": "✓ Download complete", + "description": "Download complete" + }, + "downloadFailed": { + "message": "✗ Download failed: {error}", + "description": "Download failed", + "placeholders": { + "error": { "content": "$1" } + } + }, + "unknownError": { + "message": "unknown error", + "description": "Generic unknown error" + }, + "addFolderBeforeSave": { + "message": "Please add at least one folder/label before saving. Use \"Load Folders from Mail Account\" or add custom labels.", + "description": "Error when saving without folders" + }, + "addGeminiKeyBeforeSave": { + "message": "Please add at least one Gemini API key before saving.", + "description": "Error when saving without Gemini key" + }, + "duplicateApiKeys": { + "message": "⚠️ Duplicate API keys detected! Each key must be unique. Please remove duplicates before saving.", + "description": "Error for duplicate Gemini keys" + }, + "settingsSavedMultiKey": { + "message": "✓ Settings saved successfully! Multiple Gemini API keys configured for automatic rotation.", + "description": "Success for multi-key save" + }, + "enterOllamaModel": { + "message": "Please enter a custom model name for Ollama.", + "description": "Error when saving without Ollama model" + }, + "settingsSavedOllama": { + "message": "✓ Settings saved successfully! Ollama is configured for local email processing{cpuMode}.", + "description": "Success for Ollama save", + "placeholders": { + "cpuMode": { "content": "$1" } + } + }, + "enterCustomBaseUrl": { + "message": "Please enter a base URL for the custom endpoint.", + "description": "Error when saving without base URL" + }, + "enterCustomModel": { + "message": "Please select or enter a model name for the custom endpoint.", + "description": "Error when saving without model" + }, + "settingsSavedCustomEndpoint": { + "message": "✓ Settings saved successfully! Custom OpenAI-compatible endpoint configured.", + "description": "Success for custom endpoint save" + }, + "enterApiKeyBeforeSave": { + "message": "Please enter your API key before saving. Click \"Get API Key\" to obtain one.", + "description": "Error when saving without API key" + }, + "settingsSavedSuccess": { + "message": "✓ Settings saved successfully! You can now use AutoSort+ to analyze emails.", + "description": "Success for generic save" + }, + "errorSavingSettings": { + "message": "Error saving settings: {error}", + "description": "Error saving settings", + "placeholders": { + "error": { "content": "$1" } + } + }, + "clearHistoryConfirm": { + "message": "Are you sure you want to clear the move history?", + "description": "Confirm for clearing history" + }, + "batchPausedChunk": { + "message": "⏸ Paused — chunk {current}/{total} ({done}/{totalItems})", + "description": "Batch paused with chunks", + "placeholders": { + "current": { "content": "$1" }, + "total": { "content": "$2" }, + "done": { "content": "$3" }, + "totalItems": { "content": "$4" } + } + }, + "batchPausedSimple": { + "message": "⏸ Paused ({done}/{totalItems})", + "description": "Batch paused simple", + "placeholders": { + "done": { "content": "$1" }, + "totalItems": { "content": "$2" } + } + }, + "batchDone": { + "message": "✅ Done — sorted: {completed}, skipped: {skipped}, failed: {failed}", + "description": "Batch done", + "placeholders": { + "completed": { "content": "$1" }, + "skipped": { "content": "$2" }, + "failed": { "content": "$3" } + } + }, + "batchCancelledChunk": { + "message": "⏹ Cancelled after chunk {current}/{total}", + "description": "Batch cancelled after chunk", + "placeholders": { + "current": { "content": "$1" }, + "total": { "content": "$2" } + } + }, + "batchCancelledSimple": { + "message": "⏹ Cancelled ({done}/{totalItems})", + "description": "Batch cancelled simple", + "placeholders": { + "done": { "content": "$1" }, + "totalItems": { "content": "$2" } + } + }, + "batchRunningChunk": { + "message": "Chunk {current}/{total} — {done}/{totalItems} (sorted: {completed}, failed: {failed})", + "description": "Batch running with chunks", + "placeholders": { + "current": { "content": "$1" }, + "total": { "content": "$2" }, + "done": { "content": "$3" }, + "totalItems": { "content": "$4" }, + "completed": { "content": "$5" }, + "failed": { "content": "$6" } + } + }, + "batchRunningSimple": { + "message": "{done}/{totalItems} (sorted: {completed}, failed: {failed})", + "description": "Batch running simple", + "placeholders": { + "done": { "content": "$1" }, + "totalItems": { "content": "$2" }, + "completed": { "content": "$3" }, + "failed": { "content": "$4" } + } + }, + "batchPausing": { + "message": "⏸ Pausing… current request will finish first.", + "description": "Batch pausing message" + }, + "batchCancelConfirm": { + "message": "Cancel the current batch? Already-sorted emails will not be undone.", + "description": "Confirm for cancelling batch" + }, + "batchCancelling": { + "message": "⏹ Cancelling… current request will finish first.", + "description": "Batch cancelling message" + }, + "debugEnabled": { + "message": "✓ Debug mode enabled. Open Thunderbird Developer Tools (Ctrl+Shift+I) to view logs.", + "description": "Debug mode enabled" + }, + "debugDisabled": { + "message": "✓ Debug mode disabled.", + "description": "Debug mode disabled" + }, + "promptCleared": { + "message": "Custom prompt cleared. Default prompt will be used.", + "description": "Prompt cleared" + } +} diff --git a/_locales/zh_CN/messages.json b/_locales/zh_CN/messages.json new file mode 100644 index 0000000..7c909de --- /dev/null +++ b/_locales/zh_CN/messages.json @@ -0,0 +1,1262 @@ +{ + "extensionName": { + "message": "AutoSort+", + "description": "扩展名称" + }, + "extensionDescription": { + "message": "使用 AI 自动分类和标记您的邮件", + "description": "扩展描述" + }, + "extensionDefaultTitle": { + "message": "AutoSort+ 设置", + "description": "浏览器操作默认标题" + }, + "pageTitle": { + "message": "AutoSort+ 设置", + "description": "HTML 页面标题" + }, + "pageHeading": { + "message": "AutoSort+ 设置", + "description": "页面主标题" + }, + "batchProcessingTitle": { + "message": "批量处理进行中", + "description": "批量处理状态面板标题" + }, + "batchPreparing": { + "message": "准备中…", + "description": "批量处理准备中文字" + }, + "batchPause": { + "message": "⏸ 暂停", + "description": "批量暂停按钮文字" + }, + "batchResume": { + "message": "▶ 继续", + "description": "批量继续按钮文字" + }, + "batchCancel": { + "message": "⏹ 取消", + "description": "批量取消按钮文字" + }, + "aiSettingsTitle": { + "message": "🤖 AI 设置", + "description": "AI 设置区段标题" + }, + "providerSelectionTitle": { + "message": "选择提供商", + "description": "提供商选择子区段标题" + }, + "aiProviderLabel": { + "message": "AI 提供商:", + "description": "AI 提供商选择框标签" + }, + "providerGemini": { + "message": "Google Gemini(推荐)", + "description": "Gemini 提供商选项" + }, + "providerOpenAI": { + "message": "OpenAI (ChatGPT)", + "description": "OpenAI 提供商选项" + }, + "providerAnthropic": { + "message": "Anthropic Claude", + "description": "Anthropic 提供商选项" + }, + "providerGroq": { + "message": "Groq(快速且免费)", + "description": "Groq 提供商选项" + }, + "providerMistral": { + "message": "Mistral AI", + "description": "Mistral 提供商选项" + }, + "providerOllama": { + "message": "Ollama(本地大模型)", + "description": "Ollama 提供商选项" + }, + "providerOpenAICompatible": { + "message": "OpenAI 兼容(自定义端点)", + "description": "OpenAI 兼容提供商选项" + }, + "rateLimitWarningTitle": { + "message": "⚠️ 频率限制警告:", + "description": "频率限制警告标题" + }, + "rateLimitWarningText": { + "message": "免费 API 层在处理邮件时受到严格限制。您可能在处理 5-20 封邮件后就达到频率限制。建议购买付费计划($5-20/月)以满足日常邮件处理需求。", + "description": "频率限制警告正文" + }, + "ollamaConfigTitle": { + "message": "🏠 本地 Ollama 配置", + "description": "Ollama 配置子区段标题" + }, + "ollamaUrlLabel": { + "message": "Ollama 服务器地址:", + "description": "Ollama 地址标签" + }, + "ollamaUrlPlaceholder": { + "message": "http://localhost:11434", + "description": "Ollama 地址占位符" + }, + "ollamaCpuOnly": { + "message": "强制仅使用 CPU(禁用 GPU 加速)", + "description": "Ollama CPU 模式复选框" + }, + "ollamaModelLabel": { + "message": "Ollama 模型:", + "description": "Ollama 模型选择框标签" + }, + "ollamaAuthTokenLabel": { + "message": "Ollama 认证令牌(可选):", + "description": "Ollama 认证令牌标签" + }, + "ollamaAuthTokenPlaceholder": { + "message": "如果您的 Ollama 服务器需要令牌,请在此输入", + "description": "Ollama 认证令牌占位符" + }, + "ollamaAuthTokenHelp": { + "message": "在需要时用于 /api/chat 和 /api/pull 请求。", + "description": "Ollama 认证令牌帮助文本" + }, + "ollamaCustomModelPlaceholder": { + "message": "输入自定义模型名称", + "description": "Ollama 自定义模型占位符" + }, + "ollamaDownloadModelLabel": { + "message": "下载模型:", + "description": "Ollama 下载模型标签" + }, + "ollamaDownloadModelPlaceholder": { + "message": "例如:llama3.2, mistral, qwen2.5:7b", + "description": "Ollama 下载模型占位符" + }, + "ollamaDownloadButton": { + "message": "下载", + "description": "下载模型按钮" + }, + "ollamaListModelsButton": { + "message": "列出已安装模型", + "description": "列出已安装模型按钮" + }, + "ollamaTestButton": { + "message": "测试连接", + "description": "测试 Ollama 连接按钮" + }, + "ollamaDiagnoseButton": { + "message": "运行诊断", + "description": "运行 Ollama 诊断按钮" + }, + "ollamaModelLlama2": { + "message": "Llama 2", + "description": "Ollama Llama 2 模型选项" + }, + "ollamaModelLlama32": { + "message": "Llama 3.2", + "description": "Ollama Llama 3.2 模型选项" + }, + "ollamaModelMistral": { + "message": "Mistral", + "description": "Ollama Mistral 模型选项" + }, + "ollamaModelPhi": { + "message": "Phi", + "description": "Ollama Phi 模型选项" + }, + "ollamaModelGemma": { + "message": "Gemma", + "description": "Ollama Gemma 模型选项" + }, + "ollamaModelQwen25": { + "message": "Qwen 2.5", + "description": "Ollama Qwen 2.5 模型选项" + }, + "ollamaModelCustom": { + "message": "自定义(下方输入)", + "description": "Ollama 自定义模型选项" + }, + "openaiCompatibleTitle": { + "message": "🔗 OpenAI 兼容端点", + "description": "OpenAI 兼容子区段标题" + }, + "openaiCompatibleBaseUrlLabel": { + "message": "基础地址:", + "description": "OpenAI 兼容基础地址标签" + }, + "openaiCompatibleBaseUrlPlaceholder": { + "message": "http://localhost:1234/v1 或 https://api.provider.com/v1", + "description": "OpenAI 兼容基础地址占位符" + }, + "openaiCompatibleBaseUrlHelp": { + "message": "请输入包含 /v1 的基础地址。端点必须使用 OpenAI 格式:/v1/chat/completions。例如:LM Studio, LocalAI, vLLM, Together AI", + "description": "OpenAI 兼容基础地址帮助文本" + }, + "openaiCompatibleModelLabel": { + "message": "模型:", + "description": "OpenAI 兼容模型选择框标签" + }, + "openaiCompatibleModelSelect": { + "message": "-- 选择模型 --", + "description": "OpenAI 兼容模型默认选项" + }, + "openaiCompatibleModelCustom": { + "message": "自定义(下方输入)", + "description": "OpenAI 兼容自定义模型选项" + }, + "openaiCompatibleModelCustomPlaceholder": { + "message": "手动输入模型名称", + "description": "OpenAI 兼容自定义模型占位符" + }, + "openaiCompatibleApiKeyLabel": { + "message": "API 密钥(可选):", + "description": "OpenAI 兼容 API 密钥标签" + }, + "openaiCompatibleApiKeyPlaceholder": { + "message": "本地无认证端点可留空", + "description": "OpenAI 兼容 API 密钥占位符" + }, + "openaiCompatibleApiKeyHelp": { + "message": "云端提供商必需,本地服务器可选", + "description": "OpenAI 兼容 API 密钥帮助文本" + }, + "openaiCompatibleFetchModelsButton": { + "message": "获取模型列表", + "description": "获取模型列表按钮" + }, + "openaiCompatibleTestButton": { + "message": "测试连接", + "description": "测试 OpenAI 兼容连接按钮" + }, + "apiKeyTitle": { + "message": "🔑 API 密钥配置", + "description": "API 密钥配置子区段标题" + }, + "apiKeyLabel": { + "message": "API 密钥:", + "description": "API 密钥输入框标签" + }, + "apiKeyPlaceholder": { + "message": "输入您的 API 密钥", + "description": "API 密钥输入框占位符" + }, + "testApiButton": { + "message": "测试 API 连接", + "description": "测试 API 连接按钮" + }, + "getApiKeyButton": { + "message": "获取 API 密钥", + "description": "获取 API 密钥按钮" + }, + "geminiMultiKeysTitle": { + "message": "🔄 多个 Gemini API 密钥", + "description": "多个 Gemini 密钥子区段标题" + }, + "geminiMultiKeysInfo": { + "message": "添加来自不同 Google Cloud 项目的多个 API 密钥。当达到频率限制时,扩展将自动在密钥间切换。", + "description": "多个 Gemini 密钥说明文本" + }, + "addGeminiKeyButton": { + "message": "+ 添加另一个 Gemini 密钥", + "description": "添加 Gemini 密钥按钮" + }, + "testButton": { + "message": "测试", + "description": "测试单个 Gemini API 密钥按钮" + }, + "geminiPaidPlan": { + "message": "我已购买 Gemini 付费计划(解除频率限制)", + "description": "Gemini 付费计划复选框" + }, + "generalSettingsTitle": { + "message": "⚙️ 常规设置", + "description": "常规设置子区段标题" + }, + "enableAiLabel": { + "message": "启用 AI 邮件分类", + "description": "启用 AI 复选框" + }, + "enableDebugLabel": { + "message": "启用调试模式(控制台日志)", + "description": "启用调试模式复选框" + }, + "enableDebugHelp": { + "message": "打开 Thunderbird 开发者工具(Ctrl+Shift+I)查看日志", + "description": "调试模式帮助文本" + }, + "batchChunkSizeLabel": { + "message": "批量处理数量:", + "description": "批量处理数量标签" + }, + "batchChunkSizeHelp": { + "message": "每次处理 N 封邮件,等待所有响应后继续(1-20)", + "description": "批量处理数量帮助文本" + }, + "enableAutoSortLabel": { + "message": "自动分类收件箱新邮件", + "description": "自动分类复选框标签" + }, + "enableAutoSortHelp": { + "message": "使用 AI 自动分类和移动新收件箱邮件", + "description": "自动分类帮助文本" + }, + "geminiUsageTitle": { + "message": "📊 Gemini API 使用情况", + "description": "Gemini API 使用情况子区段标题" + }, + "geminiDailyCount": { + "message": "今日使用:{count}/20 次请求", + "description": "Gemini 每日使用统计", + "placeholders": { + "count": { + "content": "$1" + } + } + }, + "geminiLastRequest": { + "message": "最后请求:", + "description": "Gemini 最后请求标签" + }, + "geminiNever": { + "message": "从未", + "description": "从未使用文字" + }, + "geminiResetTime": { + "message": "每日限制重置:", + "description": "Gemini 每日限制重置时间标签" + }, + "geminiStatus": { + "message": "状态:", + "description": "Gemini 状态标签" + }, + "geminiStatusReady": { + "message": "就绪", + "description": "Gemini 就绪状态" + }, + "geminiStatusNearlyFull": { + "message": "即将满载", + "description": "Gemini 使用量接近上限" + }, + "geminiStatusLimitReached": { + "message": "已达上限", + "description": "Gemini 已达限制状态" + }, + "geminiLimitMessage": { + "message": "已达每日限制。请切换到其他密钥或等待重置。", + "description": "Gemini 每日限制警告" + }, + "geminiRemainingMessage": { + "message": "次请求剩余。请考虑切换密钥。", + "description": "Gemini 剩余使用警告" + }, + "geminiKeyInputPlaceholder": { + "message": "输入 Gemini API 密钥", + "description": "Gemini API 密钥输入占位符" + }, + "geminiResetExpired": { + "message": "令牌已过期或无效。请从 AI Studio 生成新的令牌。", + "description": "Gemini 重置令牌过期提示" + }, + "requestsRemainingToday": { + "message": "次请求今日剩余。请考虑切换密钥。", + "description": "今日剩余请求提示" + }, + "resetGeminiCounterButton": { + "message": "重置计数器(新 API 密钥)", + "description": "重置 Gemini 计数器按钮" + }, + "refreshUsageButton": { + "message": "刷新使用情况", + "description": "刷新使用情况按钮" + }, + "refreshAllUsageButton": { + "message": "刷新全部使用情况", + "description": "刷新全部使用情况按钮" + }, + "howAiSortingTitle": { + "message": "ℹ️ AI 分类工作原理", + "description": "AI 分类工作原理子区段标题" + }, + "howAiSortingDesc": { + "message": "AutoSort+ 使用 AI 分析您的邮件,并根据内容自动将其分类到类别/文件夹中。AI 将:", + "description": "AI 分类描述" + }, + "howAiSortingPoint1": { + "message": "阅读并理解邮件内容", + "description": "AI 分类能力 1" + }, + "howAiSortingPoint2": { + "message": "识别关键主题和主题线索", + "description": "AI 分类能力 2" + }, + "howAiSortingPoint3": { + "message": "将邮件匹配到合适的类别/文件夹", + "description": "AI 分类能力 3" + }, + "howAiSortingPoint4": { + "message": "从您的手动修正中学习,提高准确率", + "description": "AI 分类能力 4" + }, + "customPromptTitle": { + "message": "📝 自定义提示词", + "description": "自定义提示词区段标题" + }, + "customPromptInfo": { + "message": "自定义发送给 AI 的邮件分类提示词。", + "description": "自定义提示词说明" + }, + "customPromptPlaceholders": { + "message": "可用占位符:", + "description": "自定义提示词占位符标签" + }, + "customPromptPlaceholderLabel": { + "message": "您的文件夹/类别列表", + "description": "labels 占位符描述" + }, + "customPromptSubjectLabel": { + "message": "邮件主题行", + "description": "subject 占位符描述" + }, + "customPromptAuthorLabel": { + "message": "发件人邮箱地址/名称", + "description": "author 占位符描述" + }, + "customPromptAttachmentsLabel": { + "message": "附件文件名(逗号分隔)", + "description": "attachments 占位符描述" + }, + "customPromptBodyLabel": { + "message": "邮件正文内容(推荐)", + "description": "body 占位符描述" + }, + "customPromptEmailLabel": { + "message": "邮件内容(旧版,同 {body})", + "description": "email 占位符描述", + "placeholders": { + "body": { + "content": "$1" + } + } + }, + "customPromptTip": { + "message": "提示:使用 {subject} 和 {attachments} 可提高分类准确率。", + "description": "自定义提示词提示", + "placeholders": { + "subject": { + "content": "$1" + }, + "attachments": { + "content": "$2" + } + } + }, + "customPromptTextareaPlaceholder": { + "message": "输入您的自定义提示词...", + "description": "自定义提示词文本框占位符" + }, + "resetPromptButton": { + "message": "恢复默认", + "description": "重置提示词按钮" + }, + "customFoldersTitle": { + "message": "📁 自定义类别/文件夹", + "description": "自定义类别/文件夹区段标题" + }, + "folderSourceTitle": { + "message": "文件夹来源", + "description": "文件夹来源子区段标题" + }, + "loadImapFoldersButton": { + "message": "从邮件账户加载文件夹", + "description": "加载 IMAP 文件夹按钮" + }, + "folderLoadingText": { + "message": "正在加载文件夹...", + "description": "文件夹加载提示文字" + }, + "folderFoundText": { + "message": "在您的邮件账户中找到 {count} 个文件夹。要使用这些文件夹吗?", + "description": "找到文件夹提示文字", + "placeholders": { + "count": { + "content": "$1" + } + } + }, + "useImapFoldersButton": { + "message": "使用这些文件夹", + "description": "使用 IMAP 文件夹按钮" + }, + "useCustomFoldersButton": { + "message": "改用自定义文件夹", + "description": "使用自定义文件夹按钮" + }, + "bulkImportLabel": { + "message": "导入类别/文件夹(每行一个):", + "description": "批量导入文本框标签" + }, + "bulkImportPlaceholder": { + "message": "每行输入一个类别/文件夹", + "description": "批量导入文本框占位符" + }, + "importButton": { + "message": "导入", + "description": "导入按钮" + }, + "addButton": { + "message": "添加", + "description": "添加按钮" + }, + "labelInputPlaceholder": { + "message": "输入类别/文件夹名称", + "description": "标签输入框占位符" + }, + "moveHistoryTitle": { + "message": "📜 移动历史", + "description": "移动历史区段标题" + }, + "clearHistoryButton": { + "message": "清除历史", + "description": "清除历史按钮" + }, + "refreshHistoryButton": { + "message": "刷新", + "description": "刷新历史按钮" + }, + "historyHeaderTimestamp": { + "message": "时间", + "description": "历史表格时间列标题" + }, + "historyHeaderSubject": { + "message": "主题", + "description": "历史表格主题列标题" + }, + "historyHeaderStatus": { + "message": "状态", + "description": "历史表格状态列标题" + }, + "historyHeaderDestination": { + "message": "目标", + "description": "历史表格目标列标题" + }, + "saveSettingsButton": { + "message": "保存设置", + "description": "保存设置按钮" + }, + "providerInfoGemini": { + "message": "✓ 免费额度:每分钟 5 次请求,每天 20 次/API 密钥(扩展强制执行)
✓ 提示:在不同项目中创建多个 API 密钥,达到限制时切换密钥
✓ 查看使用情况:AI Studio 使用情况
✓ 适合:日常使用、多语言支持
✓ 模型:Gemini 2.5 Flash
✓ 勾选\"付费计划\"可解除限制", + "description": "Gemini 提供商信息 HTML" + }, + "providerInfoOpenai": { + "message": "✓ 免费试用:$5 额度
✓ 适合:高准确率、英文内容
✓ 模型:GPT-4o-mini($0.15/1M tokens)", + "description": "OpenAI 提供商信息 HTML" + }, + "providerInfoAnthropic": { + "message": "✓ 免费额度:有限请求
✓ 适合:长邮件、详细分析
✓ 模型:Claude 3 Haiku", + "description": "Anthropic 提供商信息 HTML" + }, + "providerInfoGroq": { + "message": "✓ 免费额度:每分钟 30 次请求
✓ 适合:速度(最快)
✓ 模型:Llama 3.3(Mixtral 已弃用)", + "description": "Groq 提供商信息 HTML" + }, + "providerInfoMistral": { + "message": "✓ 免费额度:有限请求
✓ 适合:欧洲用户、GDPR 合规
✓ 模型:Mistral Small", + "description": "Mistral 提供商信息 HTML" + }, + "providerInfoOllama": { + "message": "✓ 100% 免费:在本地运行
✓ 隐私:数据不发送到外部服务器
✓ 无频率限制:无限制处理邮件
✓ 模型:Llama 2/3、Mistral、Phi、Gemma、Qwen 等
✓ 需要:安装 Ollama 并本地运行
✓ 设置:安装 Ollama 后运行 \"ollama pull llama3.2\" 下载模型", + "description": "Ollama 提供商信息 HTML" + }, + "providerInfoOpenaiCompatible": { + "message": "✓ 兼容:LocalAI、LM Studio、vLLM、Together AI、OpenRouter、DeepSeek、Fireworks 等
✓ 输入端点基础地址和模型名称
✓ 本地服务器可选 API 密钥
✓ 使用标准 /v1/chat/completions 格式", + "description": "OpenAI 兼容提供商信息 HTML" + }, + "freeBadge": { + "message": "免费", + "description": "免费提供商徽章文本" + }, + "paidBadge": { + "message": "付费", + "description": "付费提供商徽章文本" + }, + "justNow": { + "message": "刚刚", + "description": "相对时间 — 请求刚刚发出" + }, + "minutesAgo": { + "message": "{count} 分钟前", + "description": "相对时间 — 分钟前", + "placeholders": { + "count": { "content": "$1" }, + "plural": { "content": "" } + } + }, + "hoursAgo": { + "message": "{count} 小时前", + "description": "相对时间 — 小时前", + "placeholders": { + "count": { "content": "$1" }, + "plural": { "content": "" } + } + }, + "inHours": { + "message": "{count} 小时后", + "description": "重置倒计时", + "placeholders": { + "count": { "content": "$1" }, + "plural": { "content": "" } + } + }, + "inHoursShort": { + "message": "{count} 小时后", + "description": "重置倒计时短格式", + "placeholders": { + "count": { "content": "$1" } + } + }, + "minutesAgoShort": { + "message": "{count} 分钟前", + "description": "分钟前短格式", + "placeholders": { + "count": { "content": "$1" } + } + }, + "hoursAgoShort": { + "message": "{count} 小时前", + "description": "小时前短格式", + "placeholders": { + "count": { "content": "$1" } + } + }, + "keyActive": { + "message": "🔵 活跃", + "description": "密钥使用卡片活跃状态" + }, + "keyLimit": { + "message": "🔴 已达上限", + "description": "密钥使用卡片已达上限" + }, + "keyNearLimit": { + "message": "🟡 即将满载", + "description": "密钥使用卡片即将满载" + }, + "keyReady": { + "message": "🟢 就绪", + "description": "密钥使用卡片就绪状态" + }, + "keyLabel": { + "message": "密钥 {number}:", + "description": "密钥使用卡片标签", + "placeholders": { + "number": { "content": "$1" } + } + }, + "statUsage": { + "message": "使用:", + "description": "统计标签 — 使用次数" + }, + "statLast": { + "message": "上次:", + "description": "统计标签 — 上次请求" + }, + "statResets": { + "message": "重置:", + "description": "统计标签 — 重置时间" + }, + "statAvailable": { + "message": "可用:", + "description": "统计标签 — 可用请求" + }, + "keyNotSet": { + "message": "未设置", + "description": "密钥未配置文字" + }, + "keyAlreadyAddedTitle": { + "message": "⚠️ 此密钥已添加!", + "description": "重复 Gemini 密钥工具提示" + }, + "enterKeyFirst": { + "message": "⚠️ 请先输入密钥", + "description": "测试空 Gemini 密钥时的错误" + }, + "duplicateKey": { + "message": "⚠️ 重复密钥", + "description": "重复 Gemini 密钥错误" + }, + "duplicateKeyTitle": { + "message": "此密钥已在列表中添加", + "description": "重复密钥错误工具提示" + }, + "mustHaveOneKey": { + "message": "您必须至少配置一个 API 密钥。", + "description": "尝试删除最后一个 Gemini 密钥时的提示" + }, + "removeApiKeyConfirm": { + "message": "是否删除 API 密钥 #{number}?", + "description": "删除 Gemini 密钥确认", + "placeholders": { + "number": { "content": "$1" } + } + }, + "testingStatus": { + "message": "测试中...", + "description": "通用测试状态" + }, + "validKey": { + "message": "✓ 有效", + "description": "密钥测试成功" + }, + "limitReachedGemini": { + "message": "⚠️ 已达限制", + "description": "Gemini 密钥已达频率限制" + }, + "limitReachedGeminiTitle": { + "message": "此密钥已达每日频率限制(20次/天)。约24小时后重置。", + "description": "已达频率限制的 Gemini 密钥工具提示" + }, + "invalidKey": { + "message": "✗ 无效密钥", + "description": "密钥测试无效" + }, + "invalidKeyTitle": { + "message": "API 密钥无效或已过期。请在 Google AI Studio 中检查您的密钥。", + "description": "无效密钥工具提示" + }, + "testFailed": { + "message": "✗ 失败({status})", + "description": "密钥测试失败", + "placeholders": { + "status": { "content": "$1" } + } + }, + "errorStatus": { + "message": "✗ 错误", + "description": "通用错误状态" + }, + "resetCounterConfirm": { + "message": "是否重置使用计数器?请仅在切换到新 API 密钥后执行此操作。", + "description": "重置 Gemini 计数器确认" + }, + "counterResetMsg": { + "message": "✓ 使用计数器已重置。您现在可以使用新 API 密钥处理最多 20 封邮件。", + "description": "计数器重置成功消息" + }, + "usageRefreshed": { + "message": "✓ 使用情况已刷新。", + "description": "使用情况刷新信息" + }, + "allUsageRefreshed": { + "message": "✓ 所有使用情况已刷新。", + "description": "所有使用情况刷新信息" + }, + "noSignupUrl": { + "message": "此提供商没有注册链接。请在上方设置中直接配置端点。", + "description": "提供商没有注册链接时的错误" + }, + "urlCopied": { + "message": "URL 已复制到剪贴板:\n{url}", + "description": "URL 已复制到剪贴板信息", + "placeholders": { + "url": { "content": "$1" } + } + }, + "pleaseVisit": { + "message": "请访问:\n{url}", + "description": "无法复制 URL 时的提示", + "placeholders": { + "url": { "content": "$1" } + } + }, + "pleaseConfigure": { + "message": "请配置:{items}", + "description": "保存按钮工具提示 — 缺少配置", + "placeholders": { + "items": { "content": "$1" } + } + }, + "noFoldersInstruction": { + "message": "未配置文件夹/标签。点击上方的\"从邮件账户加载文件夹\"或在下方添加自定义标签。", + "description": "没有文件夹时的说明文字" + }, + "noFoldersFound": { + "message": "未找到文件夹。您可以改为创建自定义文件夹。", + "description": "邮件账户中没有文件夹时的信息" + }, + "andMore": { + "message": "…以及其他 {count} 个", + "description": "文件夹预览溢出", + "placeholders": { + "count": { "content": "$1" } + } + }, + "errorLoadingFolders": { + "message": "加载文件夹时出错:{error}", + "description": "加载文件夹错误", + "placeholders": { + "error": { "content": "$1" } + } + }, + "replaceFoldersConfirm": { + "message": "这将用邮件账户中的 {count} 个文件夹替换所有现有文件夹/标签。是否继续?", + "description": "替换文件夹确认", + "placeholders": { + "count": { "content": "$1" } + } + }, + "loadedFoldersMsg": { + "message": "已从邮件账户加载 {count} 个文件夹。别忘了保存!", + "description": "加载文件夹成功消息", + "placeholders": { + "count": { "content": "$1" } + } + }, + "addCustomFoldersMsg": { + "message": "您现在可以在下方添加自定义文件夹", + "description": "选择自定义文件夹后的信息" + }, + "importOneLabelRequired": { + "message": "导入前请至少添加一个文件夹/标签。每行输入一个标签。", + "description": "导入空文本时的错误" + }, + "replaceExistingConfirm": { + "message": "这将用 {new} 个新文件夹替换您现有的 {existing} 个文件夹/标签。是否继续?", + "description": "替换现有标签确认", + "placeholders": { + "existing": { "content": "$1" }, + "new": { "content": "$2" } + } + }, + "importedFoldersMsg": { + "message": "已导入 {count} 个类别/文件夹。别忘了保存!", + "description": "导入成功消息", + "placeholders": { + "count": { "content": "$1" } + } + }, + "useOllamaTestButton": { + "message": "请使用下方的\"测试 Ollama 连接\"按钮", + "description": "引导到 Ollama 专属测试" + }, + "useCustomTestButton": { + "message": "请使用 OpenAI 兼容区段中的\"测试连接\"按钮", + "description": "引导到自定义端点测试" + }, + "enterApiKey": { + "message": "请输入 API 密钥", + "description": "测试空 API 密钥时的错误" + }, + "testingConnection": { + "message": "测试连接中...", + "description": "API 测试状态" + }, + "apiConnectionSuccess": { + "message": "✓ API 连接成功!", + "description": "API 测试成功" + }, + "apiError": { + "message": "API 错误:{error}", + "description": "API 错误信息", + "placeholders": { + "error": { "content": "$1" } + } + }, + "connectionError": { + "message": "连接错误:{error}", + "description": "连接错误信息", + "placeholders": { + "error": { "content": "$1" } + } + }, + "enterCustomModelFirst": { + "message": "⚠️ 请先输入自定义模型名称", + "description": "测试 Ollama 时没有自定义模型的错误" + }, + "testingConnectionModels": { + "message": "测试连接并检查模型...", + "description": "Ollama 测试状态" + }, + "ollamaRunningNoModels": { + "message": "⚠️ Ollama 正在运行但未安装任何模型。在\"下载模型\"中输入模型名称并点击\"下载\"开始使用。", + "description": "Ollama 没有模型的警告" + }, + "connectedModelReady": { + "message": "✓ 已连接!模型\"{model}\"已安装并可使用。可用:{available}", + "description": "Ollama 已连接并可使用", + "placeholders": { + "model": { "content": "$1" }, + "available": { "content": "$2" } + } + }, + "modelNotInstalled": { + "message": "✗ 模型\"{model}\"未安装。可用模型:{available}。使用\"下载模型\"进行安装。", + "description": "Ollama 模型未找到", + "placeholders": { + "model": { "content": "$1" }, + "available": { "content": "$2" } + } + }, + "genericErrorLabel": { + "message": "✗ 错误:{error}", + "description": "通用错误标签", + "placeholders": { + "error": { "content": "$1" } + } + }, + "ollamaErrorLabel": { + "message": "✗ 错误:{error}", + "description": "Ollama 错误", + "placeholders": { + "error": { "content": "$1" } + } + }, + "ollamaConnectionFailed": { + "message": "✗ 连接失败:{error}。请确保 Ollama 正在运行(尝试:ollama serve)", + "description": "Ollama 连接失败", + "placeholders": { + "error": { "content": "$1" } + } + }, + "enterBaseUrlFirst": { + "message": "⚠️ 请先输入基础地址", + "description": "获取模型时没有地址的错误" + }, + "fetchingModels": { + "message": "正在从端点获取模型...", + "description": "获取模型状态" + }, + "noModelsEndpoint": { + "message": "⚠️ 此端点上未找到模型", + "description": "端点上没有模型" + }, + "foundModelsMsg": { + "message": "✓ 找到 {count} 个模型。请从下拉菜单中选择或使用\"自定义\"选项。", + "description": "找到模型成功消息", + "placeholders": { + "count": { "content": "$1" } + } + }, + "failedFetchModels": { + "message": "✗ 获取模型失败:{error}", + "description": "获取模型失败", + "placeholders": { + "error": { "content": "$1" } + } + }, + "enterBaseUrl": { + "message": "⚠️ 请输入基础地址", + "description": "空基础地址错误" + }, + "enterModelName": { + "message": "⚠️ 请输入模型名称", + "description": "空模型名称错误" + }, + "connectedSuccessfully": { + "message": "✓ 连接成功!模型\"{model}\"已在 {url} 就绪", + "description": "自定义端点连接成功", + "placeholders": { + "model": { "content": "$1" }, + "url": { "content": "$2" } + } + }, + "customConnectionFailed": { + "message": "✗ 连接失败:{error}。请检查基础地址并确保端点正在运行。", + "description": "自定义端点连接失败", + "placeholders": { + "error": { "content": "$1" } + } + }, + "diagnosticsTitle": { + "message": "🔍 OLLAMA 诊断", + "description": "Ollama 诊断标题" + }, + "diagnosticsRunning": { + "message": "正在运行测试...", + "description": "诊断运行中状态" + }, + "testListModels": { + "message": "📋 测试 1:列出模型端点", + "description": "诊断测试 1 标题" + }, + "testVersion": { + "message": "🔢 测试 2:版本端点", + "description": "诊断测试 2 标题" + }, + "testPullEndpoint": { + "message": "⬇️ 测试 3:拉取端点检查", + "description": "诊断测试 3 标题" + }, + "diagnosticsSummary": { + "message": "📊 摘要:", + "description": "诊断摘要标题" + }, + "ollamaRunningOk": { + "message": "✓ Ollama 正在运行并可访问", + "description": "诊断成功消息" + }, + "cannotConnectOllama": { + "message": "✗ 无法连接到 Ollama", + "description": "诊断失败消息" + }, + "troubleshootingLabel": { + "message": "故障排除:", + "description": "诊断故障排除标题" + }, + "troubleshootRunning": { + "message": "1. 检查 Ollama 是否运行:ps aux | grep ollama", + "description": "诊断提示 1" + }, + "troubleshootStart": { + "message": "2. 启动 Ollama:ollama serve", + "description": "诊断提示 2" + }, + "troubleshootTest": { + "message": "3. 手动测试:curl {url}/api/tags", + "description": "诊断提示 3", + "placeholders": { + "url": { "content": "$1" } + } + }, + "troubleshootPort": { + "message": "4. 检查端口 11434 是否被占用:lsof -i :11434", + "description": "诊断提示 4" + }, + "criticalError": { + "message": "❌ 严重错误:", + "description": "诊断严重错误" + }, + "noInstalledModels": { + "message": "⚠️ 未安装任何模型", + "description": "诊断未安装模型文字" + }, + "versionNotAvailable": { + "message": "⚠️ 端点不可用(较旧的 Ollama 版本)", + "description": "诊断版本不可用文字" + }, + "unknownVersion": { + "message": "未知", + "description": "诊断未知版本文字" + }, + "pullEndpointNote": { + "message": "注意:此端点用于下载模型", + "description": "诊断拉取端点说明" + }, + "diagnosticsApiUrl": { + "message": "✓ API 基础地址:{url}", + "description": "诊断 API 地址", + "placeholders": { + "url": { "content": "$1" } + } + }, + "ollamaCurlTest": { + "message": "手动测试:curl {url}/api/tags", + "description": "诊断 curl 提示", + "placeholders": { + "url": { "content": "$1" } + } + }, + "fetchingModelsStatus": { + "message": "正在获取模型...", + "description": "列出 Ollama 模型状态" + }, + "availableModels": { + "message": "✓ 可用模型:{models}", + "description": "可用 Ollama 模型", + "placeholders": { + "models": { "content": "$1" } + } + }, + "noModelsInstalledHint": { + "message": "⚠️ 未安装任何模型。运行\"ollama pull llama3.2\"下载一个。", + "description": "没有 Ollama 模型的提示" + }, + "failedFetchModelsSimple": { + "message": "✗ 获取模型失败", + "description": "获取 Ollama 模型失败" + }, + "ollamaConnectionFailedSimple": { + "message": "✗ 连接失败:{error}。Ollama 是否在运行?", + "description": "Ollama 连接失败简单提示", + "placeholders": { + "error": { "content": "$1" } + } + }, + "enterModelDownload": { + "message": "⚠️ 请输入要下载的模型名称", + "description": "空下载模型错误" + }, + "startingDownload": { + "message": "开始下载 {model}...", + "description": "下载已开始", + "placeholders": { + "model": { "content": "$1" } + } + }, + "failedStart": { + "message": "✗ 启动失败:{error}", + "description": "启动下载失败", + "placeholders": { + "error": { "content": "$1" } + } + }, + "downloadComplete": { + "message": "✓ 下载完成", + "description": "下载完成" + }, + "downloadFailed": { + "message": "✗ 下载失败:{error}", + "description": "下载失败", + "placeholders": { + "error": { "content": "$1" } + } + }, + "unknownError": { + "message": "未知错误", + "description": "通用未知错误" + }, + "addFolderBeforeSave": { + "message": "保存前请至少添加一个文件夹/标签。使用\"从邮件账户加载文件夹\"或添加自定义标签。", + "description": "保存时没有文件夹的错误" + }, + "addGeminiKeyBeforeSave": { + "message": "保存前请至少添加一个 Gemini API 密钥。", + "description": "保存时没有 Gemini 密钥的错误" + }, + "duplicateApiKeys": { + "message": "⚠️ 检测到重复的 API 密钥!每个密钥必须唯一。请在保存前删除重复项。", + "description": "重复 Gemini 密钥错误" + }, + "settingsSavedMultiKey": { + "message": "✓ 设置保存成功!已配置多个 Gemini API 密钥自动轮换。", + "description": "多密钥保存成功" + }, + "enterOllamaModel": { + "message": "请输入 Ollama 的自定义模型名称。", + "description": "保存时没有 Ollama 模型的错误" + }, + "settingsSavedOllama": { + "message": "✓ 设置保存成功!Ollama 已配置为本地邮件处理{cpuMode}。", + "description": "Ollama 保存成功", + "placeholders": { + "cpuMode": { "content": "$1" } + } + }, + "enterCustomBaseUrl": { + "message": "请输入自定义端点的基础地址。", + "description": "保存时没有基础地址的错误" + }, + "enterCustomModel": { + "message": "请选择或输入自定义端点的模型名称。", + "description": "保存时没有模型的错误" + }, + "settingsSavedCustomEndpoint": { + "message": "✓ 设置保存成功!已配置自定义 OpenAI 兼容端点。", + "description": "自定义端点保存成功" + }, + "enterApiKeyBeforeSave": { + "message": "保存前请输入您的 API 密钥。点击\"获取 API 密钥\"获取。", + "description": "保存时没有 API 密钥的错误" + }, + "settingsSavedSuccess": { + "message": "✓ 设置保存成功!您现在可以使用 AutoSort+ 分析邮件了。", + "description": "通用保存成功" + }, + "errorSavingSettings": { + "message": "保存设置时出错:{error}", + "description": "保存设置错误", + "placeholders": { + "error": { "content": "$1" } + } + }, + "clearHistoryConfirm": { + "message": "您确定要清除移动历史吗?", + "description": "清除历史确认" + }, + "batchPausedChunk": { + "message": "⏸ 已暂停 — 第 {current}/{total} 块({done}/{totalItems})", + "description": "批量处理已暂停(分块)", + "placeholders": { + "current": { "content": "$1" }, + "total": { "content": "$2" }, + "done": { "content": "$3" }, + "totalItems": { "content": "$4" } + } + }, + "batchPausedSimple": { + "message": "⏸ 已暂停({done}/{totalItems})", + "description": "批量处理已暂停(简单)", + "placeholders": { + "done": { "content": "$1" }, + "totalItems": { "content": "$2" } + } + }, + "batchDone": { + "message": "✅ 完成 — 已分类:{completed},跳过:{skipped},失败:{failed}", + "description": "批量处理完成", + "placeholders": { + "completed": { "content": "$1" }, + "skipped": { "content": "$2" }, + "failed": { "content": "$3" } + } + }, + "batchCancelledChunk": { + "message": "⏹ 已取消,在第 {current}/{total} 块后", + "description": "批量处理已取消(分块)", + "placeholders": { + "current": { "content": "$1" }, + "total": { "content": "$2" } + } + }, + "batchCancelledSimple": { + "message": "⏹ 已取消({done}/{totalItems})", + "description": "批量处理已取消(简单)", + "placeholders": { + "done": { "content": "$1" }, + "totalItems": { "content": "$2" } + } + }, + "batchRunningChunk": { + "message": "第 {current}/{total} 块 — {done}/{totalItems}(已分类:{completed},失败:{failed})", + "description": "批量处理运行中(分块)", + "placeholders": { + "current": { "content": "$1" }, + "total": { "content": "$2" }, + "done": { "content": "$3" }, + "totalItems": { "content": "$4" }, + "completed": { "content": "$5" }, + "failed": { "content": "$6" } + } + }, + "batchRunningSimple": { + "message": "{done}/{totalItems}(已分类:{completed},失败:{failed})", + "description": "批量处理运行中(简单)", + "placeholders": { + "done": { "content": "$1" }, + "totalItems": { "content": "$2" }, + "completed": { "content": "$3" }, + "failed": { "content": "$4" } + } + }, + "batchPausing": { + "message": "⏸ 正在暂停… 当前请求将首先完成。", + "description": "批量处理暂停中消息" + }, + "batchCancelConfirm": { + "message": "是否取消当前批量处理?已分类的邮件将不会被撤销。", + "description": "取消批量处理确认" + }, + "batchCancelling": { + "message": "⏹ 正在取消… 当前请求将首先完成。", + "description": "批量处理取消中消息" + }, + "debugEnabled": { + "message": "✓ 调试模式已启用。打开 Thunderbird 开发者工具(Ctrl+Shift+I)查看日志。", + "description": "调试模式已启用" + }, + "debugDisabled": { + "message": "✓ 调试模式已禁用。", + "description": "调试模式已禁用" + }, + "promptCleared": { + "message": "自定义提示词已清除。将使用默认提示词。", + "description": "提示词已清除" + } +} diff --git a/api_ollama/index.html b/api_ollama/index.html new file mode 100644 index 0000000..de9afef --- /dev/null +++ b/api_ollama/index.html @@ -0,0 +1,50 @@ + + + + + AutoSort+ Ollama + + + +

AutoSort+ - Ollama Chat

+
+
+
Initializing...
+ + + diff --git a/api_ollama/ollama-popup.js b/api_ollama/ollama-popup.js new file mode 100644 index 0000000..7fe31da --- /dev/null +++ b/api_ollama/ollama-popup.js @@ -0,0 +1,134 @@ +/* + * Ollama Popup for AutoSort+ + * Makes direct POST requests to Ollama from browser context (no restrictions) + * Popup = browser context = POST works + * Background script = restricted context = POST fails with 403 + */ + +let statusEl = null; +let messagesEl = null; +let responseEl = null; +let analysisResult = null; + +// Get URL parameters +const urlParams = new URLSearchParams(window.location.search); +const callId = urlParams.get('call_id'); + +// Initialize UI +document.addEventListener('DOMContentLoaded', async () => { + statusEl = document.getElementById('status'); + messagesEl = document.getElementById('messages'); + responseEl = document.getElementById('response'); + + statusEl.textContent = 'Ready'; + + // Tell background script that we're ready + browser.runtime.sendMessage({ + command: "ollama_popup_ready_" + callId, + window_id: (await browser.windows.getCurrent()).id + }).catch(err => console.log('Ready message error (expected):', err.message)); +}); + +// Handle messages from background script +browser.runtime.onMessage.addListener((message, sender, sendResponse) => { + switch (message.command) { + case "ollama_analyze": + handleOllamaAnalyze(message); + break; + case 'ollama_error': + statusEl.textContent = 'Error: ' + message.error; + responseEl.textContent = message.error; + analysisResult = null; + sendResultToBackground(); + break; + default: + console.log('Unknown command:', message.command); + } +}); + +async function handleOllamaAnalyze(message) { + const { ollama_host, ollama_model, ollama_num_ctx, ollama_auth_token, prompt } = message; + + try { + statusEl.textContent = 'Connecting to Ollama...'; + responseEl.textContent = ''; + analysisResult = null; + + // Add user message to display + const userMsgEl = document.createElement('div'); + userMsgEl.className = 'message user-message'; + userMsgEl.textContent = 'Analyzing: ' + prompt.substring(0, 100) + '...'; + messagesEl.appendChild(userMsgEl); + + statusEl.textContent = 'Processing with Ollama...'; + + // Make direct POST request from browser context (no restrictions!) + const headers = { + 'Content-Type': 'application/json' + }; + if (ollama_auth_token) { + headers['Authorization'] = `Bearer ${ollama_auth_token}`; + } + + const requestBody = { + model: ollama_model, + messages: [{ role: 'user', content: prompt }], + stream: false + }; + + if (ollama_num_ctx > 0) { + requestBody.options = { num_ctx: parseInt(ollama_num_ctx) }; + } + + console.log('[Ollama Popup] Sending POST to:', ollama_host + '/api/chat'); + console.log('[Ollama Popup] Model:', ollama_model); + console.log('[Ollama Popup] Request body:', JSON.stringify(requestBody).substring(0, 200)); + + const response = await fetch(ollama_host + '/api/chat', { + method: 'POST', + headers, + body: JSON.stringify(requestBody), + mode: 'cors', + credentials: 'omit' + }); + + console.log('[Ollama Popup] Response status:', response.status); + + if (!response.ok) { + const errorText = await response.text(); + console.error('[Ollama Popup] Error response:', errorText); + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + const data = await response.json(); + console.log('[Ollama Popup] Response data:', JSON.stringify(data).substring(0, 300)); + + // Extract the response content + if (data.message && data.message.content) { + analysisResult = data.message.content; + responseEl.textContent = analysisResult; + statusEl.textContent = 'Analysis complete ✓'; + } else { + throw new Error('Invalid response format: missing message.content'); + } + + // Send result back to background + setTimeout(sendResultToBackground, 1000); + + } catch (error) { + console.error('[Ollama Popup] Error:', error); + statusEl.textContent = 'Error: ' + error.message; + responseEl.textContent = 'Error: ' + error.message; + analysisResult = null; + sendResultToBackground(); + } +} + +function sendResultToBackground() { + // Send result back to background script + browser.runtime.sendMessage({ + command: 'ollama_analysis_result_' + callId, + result: analysisResult, + error: analysisResult === null ? 'Analysis failed' : null + }).catch(err => console.log('Result message error:', err.message)); +} diff --git a/autosortplus.xpi b/autosortplus.xpi new file mode 100644 index 0000000..c07c1dd Binary files /dev/null and b/autosortplus.xpi differ diff --git a/background.js b/background.js index 0ae37db..c822530 100644 --- a/background.js +++ b/background.js @@ -1,20 +1,790 @@ +// Initialize debug logger +if (window.debugLogger) { + window.debugLogger.init(); +} + +// ───────────────────────────────────────────────────────────────────────────── +// PROVIDER CONSTANTS +// ───────────────────────────────────────────────────────────────────────────── + +const PROVIDERS = { + GEMINI: 'gemini', + OPENAI: 'openai', + ANTHROPIC: 'anthropic', + GROQ: 'groq', + MISTRAL: 'mistral', + OLLAMA: 'ollama', + OPENAI_COMPATIBLE: 'openai-compatible' +}; + // Listen for messages from the options page browser.runtime.onMessage.addListener((message, sender, sendResponse) => { if (message.action === "applyLabels") { - applyLabelsToMessages(message.messages, message.label); + applyLabelsToMessages(message.messages, message.label) + .then(() => sendResponse({ ok: true })) + .catch(err => sendResponse({ ok: false, error: err.message })); + return true; // Required for async response } else if (message.action === "analyzeEmail") { analyzeEmailContent(message.emailContent).then(label => { sendResponse({ label: label }); }); return true; // Required for async response + } else if (message.action === 'startOllamaPull') { + (async () => { + try { + const { ollamaUrl, model, headers } = message; + const { response } = await callOllamaViaTab(ollamaUrl, { + action: 'ollamaFetch', + fetchAction: 'pull', + model, + headers + }); + sendResponse(response || { ok: true }); + } catch (e) { + sendResponse({ ok: false, error: e.message }); + } + })(); + return true; + } else if (message.action === 'batchControl') { + // Pause / Resume / Cancel from the options page UI + if (message.command === 'pause') { + _batchState.paused = true; + } else if (message.command === 'resume') { + _batchState.paused = false; + } else if (message.command === 'cancel') { + _batchState.cancelled = true; + _batchState.paused = false; + } + sendResponse({ ok: true }); } }); +// Click handler for browser action icon - opens settings +browser.browserAction.onClicked.addListener(() => { + browser.runtime.openOptionsPage(); +}); + +// Register auto-sort listener for new emails +registerAutoSortListener(); + +// ───────────────────────────────────────────────────────────────────────────── +// EMAIL CONTEXT EXTRACTION +// ───────────────────────────────────────────────────────────────────────────── + +/** + * Extract comprehensive email context from a Thunderbird message structure. + * Returns subject, author, attachments, and body text. + */ +async function extractEmailContext(fullMessage, messageHeader) { + const subject = (fullMessage.headers?.Subject?.[0]) || (messageHeader?.subject) || ''; + + const author = (fullMessage.headers?.From?.[0]) || (messageHeader?.author) || ''; + + // Collect attachment info from parts (name indicates it's a file attachment) + const attachments = []; + async function collectAttachments(parts) { + if (!parts) return; + for (const part of parts) { + if (part.parts) await collectAttachments(part.parts); + // part.name means this is an attachment/file part + if (part.name) { + // Skip inline text parts that are the email body + const isInlineText = (part.contentType === 'text/plain' || part.contentType === 'text/html') && !part.contentDisposition; + if (!isInlineText) { + attachments.push({ + name: part.name, + contentType: part.contentType || 'unknown', + size: part.size || 0 + }); + } + } + } + } + if (fullMessage.parts) await collectAttachments(fullMessage.parts); + + async function extractBodyText(parts) { + if (!parts) return ''; + let text = ''; + for (const part of parts) { + if (part.parts) text += await extractBodyText(part.parts); + if (part.contentType === 'text/plain') { + text += part.body + '\n'; + } else if (part.contentType === 'text/html' && !text) { + text = await browser.messengerUtilities.convertToPlainText(part.body); + } else if (part.contentType === 'message/rfc822' && part.body) { + text += part.body + '\n'; + } + } + return text; + } + const body = fullMessage.parts ? await extractBodyText(fullMessage.parts) : (fullMessage.body || ''); + + return { + subject, + author, + attachments, + body + }; +} + +// Legacy wrapper for backward compatibility +async function extractTextFromParts(fullMessage) { + const context = await extractEmailContext(fullMessage, null); + return context.body; +} + +// Default prompt template for email classification +const DEFAULT_PROMPT = `You are an email classification assistant. Analyze this email and choose the most appropriate label from: {labels}. + +**Email Metadata:** +- Subject: {subject} +- From: {author} +- Attachments: {attachments} + +**Email Body:** +{body} + +Consider the subject line, sender context, attachment filenames, and body content to determine the most appropriate category. Respond with only the exact label name, or "null" if no label fits well.`; + +// Ollama handling using tab injection (runs fetch in browser context) + +async function ollamaChatViaTab(ollamaUrl, model, prompt, authToken, numCtx = 0) { + // Open a hidden tab at localhost to make the fetch (browser context, not restricted) + const tab = await browser.tabs.create({ url: ollamaUrl, active: false }); + + try { + // Wait for tab to load + await new Promise(resolve => setTimeout(resolve, 500)); + // Build the request headers + const headers = { 'Content-Type': 'application/json' }; + if (authToken) headers['Authorization'] = `Bearer ${authToken}`; + + // Build options object if numCtx is set + const optionsObj = numCtx > 0 ? { options: { num_ctx: parseInt(numCtx) } } : {}; + + // Inject code to make the fetch and store result + const scriptCode = ` + (async () => { + try { + const headers = ${JSON.stringify(headers)}; + const response = await fetch(window.location.origin + '/api/chat', { + method: 'POST', + headers, + body: JSON.stringify({ + model: ${JSON.stringify(model)}, + messages: [{ role: 'user', content: ${JSON.stringify(prompt)} }], + stream: false, + ...${JSON.stringify(optionsObj)} + }) + }); + + if (!response.ok) { + throw new Error('HTTP ' + response.status + ': ' + response.statusText); + } + + const data = await response.json(); + window.__ollama_result = { ok: true, data }; + } catch (error) { + window.__ollama_result = { ok: false, error: error.message }; + } + })(); + `; + + await browser.tabs.executeScript(tab.id, { code: scriptCode }); + + // Wait for result (with polling to be safe) + let result = null; + for (let i = 0; i < 40; i++) { // 10 seconds max (250ms intervals) + await new Promise(resolve => setTimeout(resolve, 250)); + + try { + const results = await browser.tabs.executeScript(tab.id, { + code: 'window.__ollama_result || null' + }); + if (results && results[0]) { + result = results[0]; + break; + } + } catch (e) { + // Tab might be closing + break; + } + } + + if (!result) { + throw new Error('Ollama request timed out (30s) - no response from API'); + } + + if (!result.ok) { + throw new Error(result.error || 'Ollama API error'); + } + + return result.data; + + } finally { + // Close the tab + try { await browser.tabs.remove(tab.id); } catch (e) { console.warn('[AutoSort+] Failed to close tab after Ollama fetch:', e.message); } + } +} + +async function openaiCompatibleChatViaTab(baseUrl, model, prompt, apiKey) { + // Open a hidden tab at the endpoint URL to make the fetch (browser context, not restricted) + const tab = await browser.tabs.create({ url: baseUrl, active: false }); + + try { + // Wait for tab to load + await new Promise(resolve => setTimeout(resolve, 500)); + // Build the request headers + const headers = { 'Content-Type': 'application/json' }; + if (apiKey) headers['Authorization'] = `Bearer ${apiKey}`; + + // Inject code to make the fetch using OpenAI-compatible format and store result + const scriptCode = ` + (async () => { + try { + const headers = ${JSON.stringify(headers)}; + const response = await fetch(window.location.origin + '/v1/chat/completions', { + method: 'POST', + headers, + body: JSON.stringify({ + model: ${JSON.stringify(model)}, + messages: [{ role: 'user', content: ${JSON.stringify(prompt)} }], + max_tokens: 8192, + temperature: 0.6, + top_p: 0.95, + stream: false + }) + }); + + if (!response.ok) { + throw new Error('HTTP ' + response.status + ': ' + response.statusText); + } + + const data = await response.json(); + window.__openai_compat_result = { ok: true, data }; + } catch (error) { + window.__openai_compat_result = { ok: false, error: error.message }; + } + })(); + `; + + await browser.tabs.executeScript(tab.id, { code: scriptCode }); + + // Wait for result (with polling to be safe) + let result = null; + for (let i = 0; i < 40; i++) { // 10 seconds max (250ms intervals) + await new Promise(resolve => setTimeout(resolve, 250)); + + try { + const results = await browser.tabs.executeScript(tab.id, { + code: 'window.__openai_compat_result || null' + }); + if (results && results[0]) { + result = results[0]; + break; + } + } catch (e) { + // Tab might be closing + break; + } + } + + if (!result) { + throw new Error('OpenAI-compatible request timed out (30s) - no response from API'); + } + + if (!result.ok) { + throw new Error(result.error || 'OpenAI-compatible API error'); + } + + return result.data; + + } finally { + // Close the tab + try { await browser.tabs.remove(tab.id); } catch (e) { console.warn('[AutoSort+] Failed to close tab after OpenAI-compat fetch:', e.message); } + } +} + +async function callOllamaViaTab(ollamaUrl, payload) { + // Deprecated function kept for backward compatibility + // Now routes to direct API call via fetch + const { fetchAction, model, prompt, headers } = payload; + + if (fetchAction === 'chat') { + // For direct chat, we make a simple fetch call + const ollamaHeaders = Object.assign({}, headers, { 'Content-Type': 'application/json' }); + + try { + const res = await fetch(`${ollamaUrl}/api/chat`, { + method: 'POST', + headers: ollamaHeaders, + body: JSON.stringify({ + model, + messages: [{ role: 'user', content: prompt }], + stream: false + }) + }); + + if (!res.ok) { + return { + correlationId: '', + response: { ok: false, error: `HTTP ${res.status}: ${res.statusText}` } + }; + } + + const data = await res.json(); + return { correlationId: '', response: { ok: true, data } }; + } catch (err) { + return { + correlationId: '', + response: { ok: false, error: err.message } + }; + } + } else if (fetchAction === 'pull') { + // For pull operations + const ollamaHeaders = Object.assign({}, headers, { 'Content-Type': 'application/json' }); + + try { + const res = await fetch(`${ollamaUrl}/api/pull`, { + method: 'POST', + headers: ollamaHeaders, + body: JSON.stringify({ name: model, stream: true }) + }); + + const text = await res.text(); + return { correlationId: '', response: { ok: true, data: text } }; + } catch (err) { + return { correlationId: '', response: { ok: false, error: err.message } }; + } + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// BATCH PROCESSING ENGINE +// ───────────────────────────────────────────────────────────────────────────── + +/** + * Per-provider batch configuration. + * concurrency – max simultaneous in-flight AI requests + * delayMs – minimum milliseconds to wait between launching each request + * + * Note: Gemini free-tier concurrency=1 and delayMs are managed by the existing + * checkAndTrackGeminiRateLimit() helper and preserved here. + */ +const PROVIDER_BATCH_CONFIG = { + gemini: { concurrency: 1, delayMs: 0 }, // delay handled by rate-limit helper + openai: { concurrency: 3, delayMs: 500 }, + anthropic: { concurrency: 2, delayMs: 500 }, + groq: { concurrency: 5, delayMs: 200 }, + mistral: { concurrency: 2, delayMs: 500 }, + ollama: { concurrency: 1, delayMs: 0 }, // local, sequential is fine + 'openai-compatible': { concurrency: 2, delayMs: 500 } +}; + +/** In-memory batch state (reset for each new batch run). */ +let _batchState = { + running: false, + cancelled: false, + paused: false, + total: 0, + completed: 0, + failed: 0, + skipped: 0, + provider: '', + chunkIndex: 0, + totalChunks: 0 +}; + +/** Reset batch state to defaults. */ +function _resetBatchState(total, provider) { + _batchState = { + running: true, + cancelled: false, + paused: false, + total, + completed: 0, + failed: 0, + skipped: 0, + provider, + chunkIndex: 0, + totalChunks: 0 + }; +} + +/** Atomically acquire the batch lock. Returns true if acquired, false if already running. */ +function _acquireBatchLock() { + if (_batchState.running) return false; + _batchState.running = true; + return true; +} + +/** Release the batch lock when an early-exit path aborts before batchAnalyzeEmails runs. */ +function _releaseBatchLock() { + _batchState.running = false; +} + +/** Return the next UTC midnight as a millisecond timestamp. Used for daily rate-limit resets. */ +function _nextUtcMidnight() { + const d = new Date(Date.now()); + d.setUTCHours(24, 0, 0, 0); + return d.getTime(); +} + +/** Broadcast current batch progress to any open options pages. */ +async function _broadcastBatchProgress(status = 'running') { + const payload = { + action: 'batchProgress', + status, + total: _batchState.total, + completed: _batchState.completed, + failed: _batchState.failed, + skipped: _batchState.skipped, + provider: _batchState.provider, + chunkIndex: _batchState.chunkIndex, + totalChunks: _batchState.totalChunks + }; + try { + // Persist to storage so options page can pick it up on open + await browser.storage.local.set({ currentBatch: { ...payload, startTime: Date.now() } }); + // Also send a live runtime message (options page may be open) + await browser.runtime.sendMessage(payload).catch(() => {}); + } catch (e) { + // Ignore – options page may not be open + } +} + +/** + * Wait while the batch is paused. Returns true when resumed, false if cancelled + * while waiting. + */ +async function _waitWhilePaused() { + while (_batchState.paused && !_batchState.cancelled) { + await new Promise(resolve => setTimeout(resolve, 500)); + } + return !_batchState.cancelled; +} + +/** + * Core batch engine. Processes an array of Thunderbird message objects using + * the currently configured AI provider with chunk-based processing. + * + * @param {Array} messages – Array of Thunderbird message objects (from mailTabs API) + */ +async function batchAnalyzeEmails(messages) { + const settingsData = await browser.storage.local.get(['aiProvider', 'batchChunkSize']); + const provider = settingsData.aiProvider || 'gemini'; + const chunkSize = settingsData.batchChunkSize || 5; + + _resetBatchState(messages.length, provider); + await _broadcastBatchProgress('running'); + + if (window.debugLogger) { + window.debugLogger.info('[Batch]', `Starting batch: ${messages.length} emails, provider=${provider}, chunkSize=${chunkSize}`); + } + + // Process a single message with one retry on failure + async function processOne(message) { + // Respect pause / cancel before starting + if (_batchState.cancelled) return; + if (_batchState.paused) { + const resumed = await _waitWhilePaused(); + if (!resumed) return; + } + + for (let attempt = 1; attempt <= 2; attempt++) { + try { + const fullMessage = await browser.messages.getFull(message.id); + if (!fullMessage) { + _batchState.skipped++; + return; + } + + const emailContext = await extractEmailContext(fullMessage, message); + const emailContent = emailContext.body; + if (!emailContent || !emailContent.trim()) { + _batchState.skipped++; + return; + } + + const label = await analyzeEmailContent(emailContent, emailContext); + + if (!label || String(label).trim().toLowerCase() === 'null') { + _batchState.skipped++; + return; + } + + await applyLabelsToMessages([message], label); + _batchState.completed++; + return; // success + + } catch (err) { + if (window.debugLogger) { + window.debugLogger.warn('[Batch]', `Attempt ${attempt} failed for msg ${message.id}: ${err.message}`); + } + if (attempt === 2) { + // Both attempts failed + _batchState.failed++; + console.error(`[Batch] Message ${message.id} failed after retry:`, err.message); + } else { + // Brief pause before retry + await new Promise(resolve => setTimeout(resolve, 1500)); + } + } + } + } + + // Chunk-based processing: process N emails, await all, continue + const totalChunks = Math.ceil(messages.length / chunkSize); + _batchState.totalChunks = totalChunks; + + for (let chunkIndex = 0; chunkIndex < totalChunks; chunkIndex++) { + // Check cancellation before starting chunk + if (_batchState.cancelled) break; + + // Wait while paused before starting chunk + while (_batchState.paused && !_batchState.cancelled) { + await new Promise(resolve => setTimeout(resolve, 500)); + } + if (_batchState.cancelled) break; + + // Get current chunk of messages + const chunkStart = chunkIndex * chunkSize; + const chunkEnd = Math.min(chunkStart + chunkSize, messages.length); + const chunkMessages = messages.slice(chunkStart, chunkEnd); + + if (window.debugLogger) { + window.debugLogger.info('[Batch]', `Processing chunk ${chunkIndex + 1}/${totalChunks} (emails ${chunkStart + 1}-${chunkEnd} of ${messages.length})`); + } + + // Launch all chunk tasks concurrently + const chunkPromises = chunkMessages.map(msg => processOne(msg)); + + // Await all responses before continuing to next chunk + await Promise.allSettled(chunkPromises); + + // Update chunk index and broadcast progress after each chunk + _batchState.chunkIndex = chunkIndex + 1; + await _broadcastBatchProgress('running'); + } + + const finalStatus = _batchState.cancelled ? 'cancelled' : 'done'; + _batchState.running = false; + await _broadcastBatchProgress(finalStatus); + + // Clear persisted batch state after a short delay so the UI can show "done" + setTimeout(async () => { + await browser.storage.local.remove('currentBatch').catch(() => {}); + }, 6000); + + if (window.debugLogger) { + window.debugLogger.info('[Batch]', `Batch ${finalStatus}: completed=${_batchState.completed}, failed=${_batchState.failed}, skipped=${_batchState.skipped}`); + } + + // Final summary notification + const { completed, failed, skipped, total } = _batchState; + if (finalStatus === 'cancelled') { + await showNotification('AutoSort+ Batch Cancelled', + `Stopped after ${completed + failed + skipped}/${total} emails. Sorted: ${completed}, failed: ${failed}.`); + } else if (failed === 0 && skipped === 0) { + await showNotification('AutoSort+ Batch Complete', + `Successfully sorted all ${completed} emails.`); + } else { + await showNotification('AutoSort+ Batch Complete', + `Processed ${total} emails — sorted: ${completed}, skipped: ${skipped}, failed: ${failed}.`); + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// Gemini rate limiting (free tier: 5/min, 20/day per key) +// Combined check+track function to avoid redundant storage reads + +// Mutex for atomic rate limit operations +let geminiRateLimitMutex = Promise.resolve(); + +async function checkAndTrackGeminiRateLimit(keyIndex = null) { + // Chain onto mutex for atomic operation; .catch() prevents permanent lockup + return geminiRateLimitMutex = geminiRateLimitMutex.then(async () => { + const now = Date.now(); + const data = await browser.storage.local.get([ + 'geminiApiKeys', + 'geminiRateLimits', + 'currentGeminiKeyIndex', + 'geminiPaidPlan', + 'geminiRateLimit' // Legacy single-key + ]); + + // Skip for paid plan + if (data.geminiPaidPlan) { + return { allowed: true, waitTime: 0, keyIndex: keyIndex ?? 0 }; + } + + // Multi-key mode + if (data.geminiApiKeys?.length > 0) { + const keys = data.geminiApiKeys; + const rateLimits = data.geminiRateLimits || keys.map(() => ({ + requests: [], + dailyCount: 0, + dailyResetTime: _nextUtcMidnight() + })); + let currentIndex = keyIndex ?? (data.currentGeminiKeyIndex || 0); + + const startIndex = currentIndex; + let attempts = 0; + + while (attempts < keys.length) { + const rateLimit = rateLimits[currentIndex]; + + // Reset daily if expired — uses UTC midnight for consistent daily boundaries + if (now > rateLimit.dailyResetTime) { + rateLimit.dailyCount = 0; + rateLimit.dailyResetTime = _nextUtcMidnight(); + rateLimit.requests = []; + } + + // Clean old requests + const oneMinuteAgo = now - 60000; + rateLimit.requests = rateLimit.requests.filter(t => t > oneMinuteAgo); + + // Check availability + if (rateLimit.dailyCount < 20) { + // Check if we need to wait + if (rateLimit.requests.length > 0) { + const lastRequest = Math.max(...rateLimit.requests); + const timeSinceLastRequest = now - lastRequest; + const minInterval = 12000; // 12 seconds + + if (timeSinceLastRequest < minInterval) { + const waitTime = Math.ceil((minInterval - timeSinceLastRequest) / 1000); + // Track request now (with wait) + rateLimit.requests.push(now); + rateLimit.dailyCount += 1; + + await browser.storage.local.set({ + currentGeminiKeyIndex: currentIndex, + geminiRateLimits: rateLimits + }); + + if (window.debugLogger) { + window.debugLogger.info('[RateLimit]', `Gemini Key #${currentIndex + 1}: ${rateLimit.dailyCount}/20 today, ${rateLimit.requests.length} in last minute`); + } + + return { allowed: true, waitTime, keyIndex: currentIndex }; + } + } + + // Track request immediately + rateLimit.requests.push(now); + rateLimit.dailyCount += 1; + + await browser.storage.local.set({ + currentGeminiKeyIndex: currentIndex, + geminiRateLimits: rateLimits + }); + + if (window.debugLogger) { + window.debugLogger.info('[RateLimit]', `Gemini Key #${currentIndex + 1}: ${rateLimit.dailyCount}/20 today, ${rateLimit.requests.length} in last minute`); + } + + return { allowed: true, waitTime: 0, keyIndex: currentIndex }; + } + + currentIndex = (currentIndex + 1) % keys.length; + attempts++; + } + + return { + allowed: false, + message: `All ${keys.length} Gemini API keys have reached their daily limit (20/day each). Please wait for reset or add more API keys in settings.` + }; + } + + // Legacy single-key mode + const utcReset = _nextUtcMidnight(); + const rateLimit = data.geminiRateLimit || { + requests: [], + dailyCount: 0, + dailyResetTime: utcReset + }; + + // Reset daily if expired — uses UTC midnight for consistent daily boundaries + if (now > rateLimit.dailyResetTime) { + rateLimit.dailyCount = 0; + rateLimit.dailyResetTime = utcReset; + rateLimit.requests = []; + } + + // Check daily limit + if (rateLimit.dailyCount >= 20) { + const hoursUntilReset = Math.ceil((rateLimit.dailyResetTime - now) / (1000 * 60 * 60)); + return { + allowed: false, + message: `Gemini free tier daily limit reached (20/day). Resets in ${hoursUntilReset} hours. Upgrade to paid plan or add multiple API keys in settings to remove limits.` + }; + } + + // Clean old requests + const oneMinuteAgo = now - 60000; + rateLimit.requests = rateLimit.requests.filter(t => t > oneMinuteAgo); + + // Check if need to wait + if (rateLimit.requests.length > 0) { + const lastRequest = Math.max(...rateLimit.requests); + const timeSinceLastRequest = now - lastRequest; + const minInterval = 12000; + + if (timeSinceLastRequest < minInterval) { + const waitTime = Math.ceil((minInterval - timeSinceLastRequest) / 1000); + // Track now (with wait) + rateLimit.requests.push(now); + rateLimit.dailyCount += 1; + await browser.storage.local.set({ geminiRateLimit: rateLimit }); + + if (window.debugLogger) { + window.debugLogger.info('[RateLimit]', `Gemini requests: ${rateLimit.dailyCount}/20 today, ${rateLimit.requests.length} in last minute`); + } + + return { allowed: true, waitTime, keyIndex: null }; + } + } + + // Track request + rateLimit.requests.push(now); + rateLimit.dailyCount += 1; + await browser.storage.local.set({ geminiRateLimit: rateLimit }); + + if (window.debugLogger) { + window.debugLogger.info('[RateLimit]', `Gemini requests: ${rateLimit.dailyCount}/20 today, ${rateLimit.requests.length} in last minute`); + } + + return { allowed: true, waitTime: 0, keyIndex: null }; + }).catch(err => { + console.error('[RateLimit] Mutex error, resetting lock:', err.message); + geminiRateLimitMutex = Promise.resolve(); + throw err; + }); +} + +// Deprecated: Use checkAndTrackGeminiRateLimit instead +async function checkGeminiRateLimit() { + console.warn('[Deprecated] checkGeminiRateLimit: Use checkAndTrackGeminiRateLimit instead'); + const result = await checkAndTrackGeminiRateLimit(); + // Note: This deprecated wrapper already tracked the request, so callers + // using this will need to NOT call trackGeminiRequest separately + return result; +} + +// Deprecated: No longer needed - tracking is done in checkAndTrackGeminiRateLimit +async function trackGeminiRequest(keyIndex) { + console.warn('[Deprecated] trackGeminiRequest: No longer needed - tracking is done in checkAndTrackGeminiRateLimit'); +} + // Function to show notification async function showNotification(title, message, type = "basic") { // Log to console (Thunderbird doesn't support browser.notifications) - console.log(`[AutoSort+] ${title}: ${message}`); - + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', `${title}: ${message}`); + } + // Try to show notification if API is available try { if (browser.notifications && browser.notifications.create) { @@ -39,8 +809,10 @@ async function showNotification(title, message, type = "basic") { // Function to update existing notification async function updateNotification(id, title, message) { // Log to console - console.log(`[AutoSort+] ${title}: ${message}`); - + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', `${title}: ${message}`); + } + // Try to update notification if API is available try { if (browser.notifications && browser.notifications.clear && id) { @@ -53,23 +825,80 @@ async function updateNotification(id, title, message) { } // Function to analyze email content using AI -async function analyzeEmailContent(emailContent) { +async function analyzeEmailContent(emailContent, emailContext = null) { try { const notificationId = await showNotification( "AutoSort+ AI Analysis", "Starting email analysis..." ); - const settings = await browser.storage.local.get(['apiKey', 'aiProvider', 'labels', 'enableAi']); + const settings = await browser.storage.local.get([ + 'apiKey', + 'geminiApiKeys', + 'currentGeminiKeyIndex', + 'aiProvider', + 'labels', + 'enableAi', + 'geminiPaidPlan', + 'geminiRateLimit', + 'geminiRateLimits', + 'ollamaUrl', + 'ollamaModel', + 'ollamaCustomModel', + 'ollamaAuthToken', + 'ollamaCpuOnly', + 'ollamaNumCtx', + 'customBaseUrl', + 'customModel', + 'customPrompt' + ]); const provider = settings.aiProvider || 'gemini'; - console.log("Settings retrieved:", { - hasApiKey: !!settings.apiKey, - provider: provider, - labels: settings.labels, - enableAi: settings.enableAi !== false - }); - + // Check and track Gemini rate limits (free tier only) - single storage read + let keyIndexToUse = null; + if (provider === 'gemini' && !settings.geminiPaidPlan) { + const rateLimit = await checkAndTrackGeminiRateLimit(); + if (!rateLimit.allowed) { + // Show persistent notification for limit reached + const isSingleKey = !settings.geminiApiKeys || settings.geminiApiKeys.length <= 1; + const notifTitle = isSingleKey ? "⛔ Gemini API Limit Reached" : "⛔ All Gemini Keys at Limit"; + + const notifId = await showNotification( + notifTitle, + rateLimit.message, + "list" + ); + + // Also try to update the current notification + await updateNotification( + notificationId, + "AutoSort+ Rate Limit", + rateLimit.message + ); + throw new Error(rateLimit.message); + } + + if (rateLimit.waitTime > 0) { + await updateNotification( + notificationId, + "AutoSort+ Rate Limit", + `Rate limit reached. Waiting ${rateLimit.waitTime} seconds...` + ); + await new Promise(resolve => setTimeout(resolve, rateLimit.waitTime * 1000)); + } + + keyIndexToUse = rateLimit.keyIndex; + } + + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', 'Settings retrieved', { + hasApiKey: !!(settings.apiKey || (settings.geminiApiKeys && settings.geminiApiKeys.length > 0)), + provider: provider, + labels: settings.labels, + enableAi: settings.enableAi !== false + }); + } + if (settings.enableAi === false) { console.error("AI is disabled"); await updateNotification( @@ -80,7 +909,25 @@ async function analyzeEmailContent(emailContent) { return null; } - if (!settings.apiKey) { + // Check API key availability based on provider + let apiKeyToUse = null; + if (provider === 'gemini') { + if (settings.geminiApiKeys && settings.geminiApiKeys.length > 0) { + const keyIndex = keyIndexToUse !== null ? keyIndexToUse : (settings.currentGeminiKeyIndex || 0); + apiKeyToUse = settings.geminiApiKeys[keyIndex]; + if (window.debugLogger) { + window.debugLogger.info('[Gemini]', `Using API Key #${keyIndex + 1} of ${settings.geminiApiKeys.length}`); + } + } else if (settings.apiKey) { + // Legacy single key + apiKeyToUse = settings.apiKey; + } + } else if (provider !== 'ollama' && provider !== 'openai-compatible') { + // Ollama and OpenAI-compatible don't need API key; other providers do + apiKeyToUse = settings.apiKey; + } + + if (!apiKeyToUse && provider !== 'ollama' && provider !== 'openai-compatible') { console.error("Missing API key"); await updateNotification( notificationId, @@ -89,6 +936,21 @@ async function analyzeEmailContent(emailContent) { ); return null; } + + // Validate OpenAI-compatible endpoint has baseUrl and model + if (provider === 'openai-compatible') { + const baseUrl = settings.customBaseUrl || ''; + const model = settings.customModel || ''; + if (!baseUrl || !model) { + console.error("OpenAI-compatible endpoint not configured"); + await updateNotification( + notificationId, + "AutoSort+ Error", + "OpenAI-compatible endpoint not configured. Please set base URL and model in settings." + ); + return null; + } + } if (!settings.labels || settings.labels.length === 0) { console.error("No labels configured"); @@ -100,17 +962,58 @@ async function analyzeEmailContent(emailContent) { return null; } - const prompt = `You are an email classification assistant. Analyze this email content and choose the most appropriate label from this list: ${settings.labels.join(', ')}. - Consider the following: - 1. The main topic and purpose of the email - 2. The sender and recipient context - 3. The urgency and importance of the content - 4. The type of communication (e.g., notification, request, update) - - Only respond with the exact label name that best fits the content. If no label fits well, respond with "null". - - Email content: - ${emailContent}`; + // Select prompt template (custom or default) + const promptTemplate = (settings.customPrompt && settings.customPrompt.trim()) + ? settings.customPrompt.trim() + : DEFAULT_PROMPT; + + // Inject placeholders + let prompt = promptTemplate; + const labelsStr = settings.labels.join(', '); + + // Build context values for placeholders + const subject = emailContext?.subject || ''; + const author = emailContext?.author || ''; + const attachmentsStr = emailContext?.attachments?.length > 0 + ? emailContext.attachments.map(a => a.name).join(', ') + : '(none)'; + const body = emailContent; // body is the main email text + + // Helper to inject placeholder with fallback injection if missing + function injectPlaceholder(placeholder, value, fallbackPrefix, fallbackPosition = 'start') { + if (!prompt.includes(placeholder)) { + if (window.debugLogger) { + window.debugLogger.warn('[AutoSort]', `Custom prompt missing ${placeholder} placeholder - injecting`); + } + if (fallbackPosition === 'start') { + prompt = `${fallbackPrefix}${value}\n\n${prompt}`; + } else { + prompt = `${prompt}\n\n${fallbackPrefix}${value}`; + } + } else { + prompt = prompt.replace(placeholder, value); + } + } + + // Inject all placeholders (order matters for fallback injection) + injectPlaceholder('{labels}', labelsStr, 'Labels: ', 'start'); + injectPlaceholder('{subject}', subject, 'Subject: ', 'start'); + injectPlaceholder('{author}', author, 'From: ', 'start'); + injectPlaceholder('{attachments}', attachmentsStr, 'Attachments: ', 'start'); + + // Handle {body} and legacy {email} placeholders + if (prompt.includes('{body}')) { + prompt = prompt.replace('{body}', body); + } else if (prompt.includes('{email}')) { + // Legacy placeholder support + prompt = prompt.replace('{email}', body); + } else { + // Default: append body at end if no body/email placeholder found + if (window.debugLogger) { + window.debugLogger.warn('[AutoSort]', 'Custom prompt missing {body} placeholder - appending'); + } + prompt = `${prompt}\n\nEmail content:\n${body}`; + } await updateNotification( notificationId, @@ -122,9 +1025,10 @@ async function analyzeEmailContent(emailContent) { let data; if (provider === 'gemini') { - const apiUrl = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${settings.apiKey}`; - console.log("Making API request to Gemini..."); - + const apiUrl = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${apiKeyToUse}`; + + // Rate limiting already tracked in checkAndTrackGeminiRateLimit above + await updateNotification( notificationId, "AutoSort+ AI Analysis", @@ -139,9 +1043,9 @@ async function analyzeEmailContent(emailContent) { }] }], generationConfig: { - temperature: 0.2, - topK: 1, - topP: 1, + temperature: 0.6, + topK: 20, + topP: 0.95, maxOutputTokens: 50, responseMimeType: "text/plain", thinkingConfig: { @@ -168,6 +1072,11 @@ async function analyzeEmailContent(emailContent) { ] }; + if (window.debugLogger) { + const sanitizedUrl = apiUrl.replace(/key=[^&]+/, 'key=***REDACTED***'); + window.debugLogger.apiRequest('Gemini', sanitizedUrl, requestBody); + } + response = await fetch(apiUrl, { method: 'POST', headers: { @@ -177,133 +1086,308 @@ async function analyzeEmailContent(emailContent) { }); } else if (provider === 'openai') { - console.log("Making API request to OpenAI..."); - await updateNotification( notificationId, "AutoSort+ AI Analysis", "Analyzing email content with OpenAI..." ); + const requestBody = { + model: 'gpt-4o-mini', + messages: [{ role: 'user', content: prompt }], + max_tokens: 50, + temperature: 0.6, + top_p: 0.95 + }; + + if (window.debugLogger) { + window.debugLogger.apiRequest('OpenAI', 'https://api.openai.com/v1/chat/completions', requestBody); + } + response = await fetch('https://api.openai.com/v1/chat/completions', { method: 'POST', headers: { 'Content-Type': 'application/json', - 'Authorization': `Bearer ${settings.apiKey}` + 'Authorization': `Bearer ${apiKeyToUse}` }, - body: JSON.stringify({ - model: 'gpt-4o-mini', - messages: [{ role: 'user', content: prompt }], - max_tokens: 50, - temperature: 0.2 - }) + body: JSON.stringify(requestBody) }); } else if (provider === 'anthropic') { - console.log("Making API request to Anthropic..."); - await updateNotification( notificationId, "AutoSort+ AI Analysis", "Analyzing email content with Claude..." ); + const requestBody = { + model: 'claude-3-haiku-20240307', + messages: [{ role: 'user', content: prompt }], + max_tokens: 50 + }; + + if (window.debugLogger) { + window.debugLogger.apiRequest('Claude', 'https://api.anthropic.com/v1/messages', requestBody); + } + response = await fetch('https://api.anthropic.com/v1/messages', { method: 'POST', headers: { 'Content-Type': 'application/json', - 'x-api-key': settings.apiKey, + 'x-api-key': apiKeyToUse, 'anthropic-version': '2023-06-01' }, - body: JSON.stringify({ - model: 'claude-3-haiku-20240307', - messages: [{ role: 'user', content: prompt }], - max_tokens: 50 - }) + body: JSON.stringify(requestBody) }); } else if (provider === 'groq') { - console.log("Making API request to Groq..."); - await updateNotification( notificationId, "AutoSort+ AI Analysis", "Analyzing email content with Groq..." ); + const requestBody = { + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: prompt }], + max_tokens: 50, + temperature: 0.6, + top_p: 0.95 + }; + + if (window.debugLogger) { + window.debugLogger.apiRequest('Groq', 'https://api.groq.com/openai/v1/chat/completions', requestBody); + } + response = await fetch('https://api.groq.com/openai/v1/chat/completions', { method: 'POST', headers: { 'Content-Type': 'application/json', - 'Authorization': `Bearer ${settings.apiKey}` + 'Authorization': `Bearer ${apiKeyToUse}` }, - body: JSON.stringify({ - model: 'llama-3.3-70b-versatile', - messages: [{ role: 'user', content: prompt }], - max_tokens: 50, - temperature: 0.2 - }) + body: JSON.stringify(requestBody) }); } else if (provider === 'mistral') { - console.log("Making API request to Mistral..."); - await updateNotification( notificationId, "AutoSort+ AI Analysis", "Analyzing email content with Mistral..." ); + const requestBody = { + model: 'mistral-small-latest', + messages: [{ role: 'user', content: prompt }], + max_tokens: 50, + temperature: 0.6, + top_p: 0.95 + }; + + if (window.debugLogger) { + window.debugLogger.apiRequest('Mistral', 'https://api.mistral.ai/v1/chat/completions', requestBody); + } + response = await fetch('https://api.mistral.ai/v1/chat/completions', { method: 'POST', headers: { 'Content-Type': 'application/json', - 'Authorization': `Bearer ${settings.apiKey}` + 'Authorization': `Bearer ${apiKeyToUse}` }, - body: JSON.stringify({ - model: 'mistral-small-latest', - messages: [{ role: 'user', content: prompt }], - max_tokens: 50, - temperature: 0.2 - }) + body: JSON.stringify(requestBody) }); + } else if (provider === 'ollama') { + await updateNotification( + notificationId, + "AutoSort+ AI Analysis", + "Analyzing email content with local Ollama..." + ); + + // Get Ollama settings + const ollamaSettings = await browser.storage.local.get(['ollamaUrl', 'ollamaModel', 'ollamaCustomModel', 'ollamaCpuOnly', 'ollamaAuthToken', 'ollamaNumCtx']); + const ollamaUrl = ollamaSettings.ollamaUrl || 'http://localhost:11434'; + let ollamaModel = ollamaSettings.ollamaModel || 'llama3.2'; + const ollamaNumCtx = ollamaSettings.ollamaNumCtx || 0; + const cpuOnly = ollamaSettings.ollamaCpuOnly === true; + const ollamaAuthToken = ollamaSettings.ollamaAuthToken || ''; + + // Use custom model if selected + if (ollamaModel === 'custom' && ollamaSettings.ollamaCustomModel) { + ollamaModel = ollamaSettings.ollamaCustomModel; + } + + const requestBody = { + model: ollamaModel, + messages: [{ role: 'user', content: prompt }], + stream: false + }; + + if (window.debugLogger) { + window.debugLogger.apiRequest('Ollama', `${ollamaUrl}/api/chat`, requestBody); + } + + // Use tab injection to make the fetch (browser context, no restrictions) + try { + const ollamaResponse = await ollamaChatViaTab(ollamaUrl, ollamaModel, prompt, ollamaAuthToken, ollamaNumCtx); + + if (!ollamaResponse.message || !ollamaResponse.message.content) { + throw new Error('Invalid Ollama response format'); + } + + data = ollamaResponse; + response = null; // Mark as handled + + } catch (ollamaError) { + console.error('[Ollama] Tab injection chat failed:', ollamaError.message); + throw ollamaError; + } + + } else if (provider === 'openai-compatible') { + // Get custom endpoint settings + const customSettings = await browser.storage.local.get(['customBaseUrl', 'customModel', 'apiKey']); + const baseUrl = (customSettings.customBaseUrl || '').replace(/\/$/, ''); + const model = customSettings.customModel || ''; + const apiKey = customSettings.apiKey || ''; + + if (!baseUrl || !model) { + throw new Error('OpenAI-compatible endpoint not configured. Please set base URL and model in settings.'); + } + + await updateNotification( + notificationId, + "AutoSort+ AI Analysis", + `Analyzing email content with ${model}...` + ); + + const requestBody = { + model, + messages: [{ role: 'user', content: prompt }], + max_tokens: 8192, + temperature: 0.6, + top_p: 0.95 + }; + + // Build headers + const headers = { + 'Content-Type': 'application/json' + }; + if (apiKey) { + headers['Authorization'] = `Bearer ${apiKey}`; + } + + // Check if this is a localhost endpoint - Thunderbird background scripts can't directly fetch localhost + const isLocalhost = baseUrl.startsWith('http://localhost') || baseUrl.startsWith('http://127.0.0.1'); + + if (window.debugLogger) { + window.debugLogger.apiRequest('OpenAI-Compatible', `${baseUrl}/chat/completions`, requestBody); + } + + if (isLocalhost) { + // Use tab injection for localhost (similar to Ollama handling) + try { + const customResponse = await openaiCompatibleChatViaTab(baseUrl, model, prompt, apiKey); + + if (!customResponse.choices || customResponse.choices.length === 0 || !customResponse.choices[0].message) { + throw new Error('Invalid OpenAI-compatible response format'); + } + + data = customResponse; + response = null; // Mark as handled + + } catch (customError) { + console.error('[OpenAI-Compatible] Tab injection failed:', customError.message); + throw customError; + } + } else { + // Direct fetch for non-localhost endpoints + response = await fetch(baseUrl + '/chat/completions', { + method: 'POST', + headers, + body: JSON.stringify(requestBody) + }); + } + } else { throw new Error(`Unknown provider: ${provider}`); } - console.log("API response status:", response.status); + if (response) { + if (!response.ok) { + let errorMessage = `HTTP ${response.status}: ${response.statusText}`; + + // Try to parse error response body + try { + const contentType = response.headers.get('content-type'); + if (contentType && contentType.includes('application/json')) { + const error = await response.json(); + errorMessage = error.error?.message || error.message || errorMessage; + } else { + const text = await response.text(); + if (text) errorMessage = text.substring(0, 200); + } + } catch (parseErr) { + console.warn('Could not parse error response:', parseErr.message); + } + + console.error("API Error details:", errorMessage); + + // Handle quota errors specifically + if (response.status === 429 || errorMessage.includes('quota') || errorMessage.includes('rate limit')) { + errorMessage = "API quota exceeded. Please wait a while before trying again, or upgrade to a paid API key."; + } + + // Handle Ollama auth errors + if (response.status === 403) { + errorMessage = "Ollama authentication failed (403). Check your API key/token if Ollama requires authentication."; + } + + await updateNotification( + notificationId, + "AutoSort+ Error", + `API Error: ${errorMessage}` + ); + return null; + } + + await updateNotification( + notificationId, + "AutoSort+ AI Analysis", + "Processing AI response..." + ); - if (!response.ok) { - const error = await response.json(); - console.error("API Error details:", error); - let errorMessage = error.error?.message || error.message || 'Unknown error'; - - // Handle quota errors specifically - if (response.status === 429 || errorMessage.includes('quota') || errorMessage.includes('rate limit')) { - errorMessage = "API quota exceeded. Please wait a while before trying again, or upgrade to a paid API key."; + data = await response.json(); + if (window.debugLogger) { + window.debugLogger.apiResponse(provider, response.status, data); } - + } else if (data) { + await updateNotification( + notificationId, + "AutoSort+ AI Analysis", + "Processing AI response..." + ); + if (window.debugLogger) { + window.debugLogger.apiResponse(provider, 200, data); + } + } else { await updateNotification( notificationId, "AutoSort+ Error", - `API Error: ${errorMessage}` + "No response received from provider." ); return null; } - - await updateNotification( - notificationId, - "AutoSort+ AI Analysis", - "Processing AI response..." - ); - - data = await response.json(); - console.log("Full API response data:", JSON.stringify(data, null, 2)); // Parse the response based on provider let label = null; - + + const tryTrim = v => { + try { + return (v || '').toString().trim(); + } catch (e) { + return null; + } + }; + if (provider === 'gemini') { if (data.candidates && data.candidates.length > 0) { const candidate = data.candidates[0]; @@ -313,44 +1397,104 @@ async function analyzeEmailContent(emailContent) { return null; } if (candidate.content && candidate.content.parts && candidate.content.parts.length > 0) { - label = candidate.content.parts[0].text.trim(); + label = tryTrim(candidate.content.parts[0].text); } } - } else if (provider === 'openai' || provider === 'groq' || provider === 'mistral') { + } else if (provider === 'openai' || provider === 'groq' || provider === 'mistral' || provider === 'openai-compatible') { if (data.choices && data.choices.length > 0) { - label = data.choices[0].message.content.trim(); + const choice = data.choices[0]; + if (window.debugLogger) { + window.debugLogger.info('[API]', 'Choice structure:', choice); + } + // Try multiple possible content locations + label = tryTrim(choice.message?.content || choice.text || choice.delta?.content); + // Some models return reasoning in separate field + if (!label && choice.message?.reasoning_content) { + // Extract from reasoning if no content + label = tryTrim(choice.message.reasoning_content); + } } } else if (provider === 'anthropic') { if (data.content && data.content.length > 0) { - label = data.content[0].text.trim(); + label = tryTrim(data.content[0].text); + } + } else if (provider === 'ollama') { + // Ollama responses may vary in shape: string, object, array of parts, etc. + try { + const msg = data.message; + if (!msg) { + // Some older/local versions may return data as string or have different keys + label = tryTrim(data.result || data.text || data.response); + } else { + const content = msg.content; + if (typeof content === 'string') { + label = tryTrim(content); + } else if (Array.isArray(content)) { + // Find first element that's a string or has text fields + const first = content.find(c => typeof c === 'string' || (c && (c.text || c.content))); + if (typeof first === 'string') label = tryTrim(first); + else if (first && first.text) label = tryTrim(first.text); + else if (first && first.content) { + if (typeof first.content === 'string') label = tryTrim(first.content); + else if (Array.isArray(first.content)) label = tryTrim(first.content.map(x => x.text || x).join(' ')); + } + } else if (content && typeof content === 'object') { + // Content might be an object with text or parts + label = tryTrim(content.text || content.content || content[0]); + if (!label && content.parts && content.parts.length > 0) { + label = tryTrim(content.parts[0].text || content.parts[0]); + } + } else if (typeof msg === 'string') { + label = tryTrim(msg); + } else if (msg && !content) { + label = tryTrim(msg.text || msg.response || msg.result); + } + } + } catch (e) { + console.warn('Failed to parse Ollama response shape:', e.message); + label = null; } } - + if (!label) { console.error("No label extracted from response:", data); await updateNotification(notificationId, "AutoSort+ Error", "No response from AI"); return null; } - - console.log("Generated label:", label); - - // Verify the label exists in our list + + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', `Raw generated label: ${label}`); + } + + // Normalize and try to match configured labels more forgivingly + const normalize = s => s.toString().trim().replace(/^['"`]+|['"`]+$/g, ''); + const lower = normalize(label).toLowerCase(); + + // Exact match first if (settings.labels.includes(label)) { - await updateNotification( - notificationId, - "AutoSort+ Success", - `AI analysis complete. Selected label: ${label}` - ); + await updateNotification(notificationId, "AutoSort+ Success", `AI analysis complete. Selected label: ${label}`); return label; - } else { - console.log("Label not found in configured labels. Generated:", label); - await updateNotification( - notificationId, - "AutoSort+ Warning", - `AI suggested: "${label}" but it's not in your configured labels.` - ); - return null; } + + // Try to find a label that matches case-insensitively or is contained within the AI output + let matched = settings.labels.find(l => l.toLowerCase() === lower); + if (!matched) { + matched = settings.labels.find(l => lower.includes(l.toLowerCase()) || l.toLowerCase().includes(lower)); + } + + if (matched) { + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', `Mapped AI output to configured label: ${matched}`); + } + await updateNotification(notificationId, "AutoSort+ Success", `AI analysis complete. Selected label: ${matched}`); + return matched; + } + + if (window.debugLogger) { + window.debugLogger.warn('[AutoSort+]', `Label not found in configured labels. Generated: ${label}`); + } + await updateNotification(notificationId, "AutoSort+ Warning", `AI suggested: "${label}" but it's not in your configured labels.`); + return null; } catch (error) { console.error("Error analyzing email:", error); await showNotification( @@ -368,7 +1512,9 @@ async function storeMoveHistory(result) { const history = data.moveHistory || []; history.unshift({ timestamp: new Date().toISOString(), - ...result + subject: (result.subject || '').substring(0, 200), // truncate to 200 chars + status: result.status || 'unknown', + destination: (result.destination || '').substring(0, 200) }); // Keep only the last 100 entries if (history.length > 100) { @@ -388,18 +1534,60 @@ async function applyLabelsToMessages(messages, label) { "AutoSort+ Processing", `Starting to process ${messageCount} message(s)...` ); - + let successCount = 0; let errorCount = 0; const moveResults = []; + // Build folder lookup Map once to avoid N+1 pattern + // Key format: "accountId:folderName" to handle multiple accounts with same folder names + const folderCache = new Map(); + + // Cache accounts to avoid N+1 pattern + const accountCache = new Map(); + + async function getAccount(accountId) { + if (!accountCache.has(accountId)) { + const account = await browser.accounts.get(accountId); + accountCache.set(accountId, account); + } + return accountCache.get(accountId); + } + + function buildFolderMap(folders, prefix = '', accountId) { + if (!folders) return; + for (const folder of folders) { + const fullName = prefix ? `${prefix}/${folder.name}` : folder.name; + folderCache.set(`${accountId}:${fullName}`, folder); + folderCache.set(`${accountId}:${folder.name}`, folder); // Also cache by short name + if (folder.subFolders) { + buildFolderMap(folder.subFolders, fullName, accountId); + } + } + } + + // Pre-build folder cache for all accounts involved + const uniqueAccountIds = [...new Set( + messages.map(m => m.folder?.accountId).filter(id => id) + )]; + for (const accountId of uniqueAccountIds) { + const account = await getAccount(accountId); + buildFolderMap(account.folders, '', accountId); + } + + if (window.debugLogger) { + window.debugLogger.info('[Folder]', `Built folder cache: ${folderCache.size} entries`); + } + for (const message of messages) { - console.log("Processing message:", message.id); - console.log("Target label/folder:", label); - - // Get all folders to find the destination folder - const account = await browser.accounts.get(message.folder.accountId); - console.log("Account info:", account); + if (window.debugLogger) { + window.debugLogger.info('[Folder]', `Processing message: ${message.id}`); + } + if (window.debugLogger) { + window.debugLogger.info('[Folder]', `Target label/folder: ${label}`); + } + + const account = await getAccount(message.folder.accountId); await updateNotification( notificationId, @@ -407,61 +1595,46 @@ async function applyLabelsToMessages(messages, label) { `Finding destination folder for message ${successCount + errorCount + 1}/${messageCount}...` ); - // Find the folder with matching name - const findFolder = (folders, targetName) => { - for (const folder of folders) { - console.log("Checking folder:", folder.name); - if (folder.name === targetName) { - return folder; - } - if (folder.subFolders) { - const found = findFolder(folder.subFolders, targetName); - if (found) return found; - } - } - return null; - }; + // Use cached folder lookup instead of recursive search + let targetFolder = folderCache.get(`${message.folder.accountId}:${label}`); - // First try to find the category folder - const categories = [ - "Financiën", - "Werk en Carrière", - "Persoonlijke Communicatie en Sociale Leven", - "Gezondheid en Welzijn", - "Online Activiteiten en E-commerce", - "Reizen en Evenementen", - "Informatie en Media", - "Beveiliging en IT", - "Klantensupport en Acties", - "Overheid en Gemeenschap" - ]; - - let categoryFolder = null; - let targetFolder = null; - - // Find the category and target folder - for (const category of categories) { - if (label.startsWith(category)) { - console.log("Found matching category:", category); - categoryFolder = findFolder(account.folders, category); - if (categoryFolder) { - console.log("Found category folder:", categoryFolder.name); - // Try to find the subfolder - const subfolderName = label.replace(category + "/", ""); - console.log("Looking for subfolder:", subfolderName); - targetFolder = findFolder(categoryFolder.subFolders || [], subfolderName); - break; - } - } + // Handle subfolder paths - full path already cached above + if (!targetFolder && label.includes('/')) { + targetFolder = folderCache.get(`${message.folder.accountId}:${label}`); } - // If no target folder found, try direct match + // Auto-create missing folder when it's a custom label (skip imported/structured labels) if (!targetFolder) { - console.log("No category match found, trying direct folder match"); - targetFolder = findFolder(account.folders, label); + const looksImported = label.includes('/') || label.includes('\\'); + if (looksImported) { + if (window.debugLogger) { + window.debugLogger.warn('[Folder]', `Folder "${label}" looks imported/structured; skipping auto-create`); + } + } else { + try { + const parentFolder = account.folders && account.folders.length > 0 ? account.folders[0] : null; + if (parentFolder && browser.folders && browser.folders.create) { + if (window.debugLogger) { + window.debugLogger.info('[Folder]', `Creating missing folder "${label}" under ${parentFolder.name || 'root'}`); + } + const created = await browser.folders.create(parentFolder, label); + if (created) { + targetFolder = created; + folderCache.set(`${message.folder.accountId}:${label}`, created); + if (window.debugLogger) { + window.debugLogger.info('[Folder]', `Created folder: ${created.name}`); + } + } + } + } catch (createError) { + console.error(`Failed to create folder "${label}":`, createError); + } + } } - console.log("Moving message to folder:", targetFolder ? targetFolder.name : "not found"); + if (window.debugLogger) { + window.debugLogger.info('[Folder]', `Moving message to folder: ${targetFolder ? targetFolder.name : 'not found'}`); + } try { if (!targetFolder) { @@ -576,7 +1749,9 @@ async function showMoveResultsPopup(results) { ); // Also log to console for debugging - console.log("[AutoSort+] Results:", message); + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', 'Results popup displayed'); + } } catch (error) { console.error("Error showing results:", error); await showNotification( @@ -586,6 +1761,112 @@ async function showMoveResultsPopup(results) { } } +// ───────────────────────────────────────────────────────────────────────────── +// AUTO-SORT: Handle new emails arriving in Inbox +// ───────────────────────────────────────────────────────────────────────────── + +/** + * Concurrency-limited parallel processor. + * Processes items concurrently with a maximum number of simultaneous operations. + * + * @param {Array} items - Array of items to process + * @param {Function} processor - Async function to process each item + * @param {number} limit - Maximum concurrent operations (default: 3) + * @returns {Promise} - Promise.allSettled results + */ +async function processWithConcurrency(items, processor, limit = 3) { + const results = []; + const executing = new Set(); + + for (const item of items) { + const promise = processor(item).finally(() => { + executing.delete(promise); + }); + executing.add(promise); + results.push(promise); + + if (executing.size >= limit) { + await Promise.race(executing); + } + } + + return Promise.allSettled(results); +} + +/** + * Classify a single message and move it to the appropriate folder. + * Silent failure mode: errors logged, email stays in Inbox. + */ +async function classifyAndMove(message) { + try { + const fullMessage = await browser.messages.getFull(message.id); + if (!fullMessage) return; + + const emailContext = await extractEmailContext(fullMessage, message); + const emailContent = emailContext.body; + if (!emailContent?.trim()) return; + + const label = await analyzeEmailContent(emailContent, emailContext); + if (!label || String(label).trim().toLowerCase() === 'null') return; + + await applyLabelsToMessages([message], label); + + if (window.debugLogger) { + window.debugLogger.info('[AutoSort]', `Auto-sorted message ${message.id} to ${label}`); + } + } catch (err) { + if (window.debugLogger) { + window.debugLogger.warn('[AutoSort]', `Failed to auto-sort message ${message.id}: ${err.message}`); + } + // Email stays in Inbox on failure - silent failure mode + } +} + +/** + * Handle new mail received event. Processes messages in Inbox folder. + * Supports MessageList pagination via continueList. + */ +async function handleNewMail(folder, messageList) { + // Guard: don't auto-sort if a manual batch is already running + if (_batchState.running) return; + + const settings = await browser.storage.local.get(['autoSortEnabled', 'enableAi', 'aiProvider']); + + // Check if auto-sort is enabled (defaults to true for backward compatibility) + const autoSortEnabled = settings.autoSortEnabled !== false; + if (!autoSortEnabled) return; + if (settings.enableAi === false) return; + + // Verify this is Inbox folder (specialUse array contains "inbox") + if (!folder.specialUse?.includes("inbox")) return; + + // Get provider setting for concurrency limit + const provider = settings.aiProvider || 'gemini'; + + // Use provider batch config for concurrency limit + const limit = PROVIDER_BATCH_CONFIG[provider]?.concurrency || 3; + + if (window.debugLogger) { + window.debugLogger.info('[AutoSort]', `Processing new mail with concurrency=${limit} for provider=${provider}`); + } + + // Process all pages of messages + let page = messageList; + while (true) { + // Process concurrently instead of sequentially + await processWithConcurrency(page.messages, classifyAndMove, limit); + if (!page.id) break; + page = await browser.messages.continueList(page.id); + } +} + +/** + * Register the auto-sort listener for new emails at startup. + */ +function registerAutoSortListener() { + browser.messages.onNewMailReceived.addListener(handleNewMail, false); +} + // Create context menu items browser.menus.create({ id: "autosort-label", @@ -593,17 +1874,42 @@ browser.menus.create({ contexts: ["message_list"] }); -// Add submenu items for labels -browser.storage.local.get(['labels']).then(result => { - if (result.labels) { - result.labels.forEach(label => { +// Helper to rebuild label submenu +async function rebuildLabelSubmenu(labels) { + // Remove existing label menu items + try { + const existingItems = await browser.menus.getAll(); + for (const item of existingItems) { + if (item.parentId === "autosort-label") { + browser.menus.remove(item.id); + } + } + } catch (e) { + // Ignore errors + } + + // Create new label menu items + if (labels && labels.length > 0) { + for (const label of labels) { browser.menus.create({ id: `label-${label}`, parentId: "autosort-label", title: label, contexts: ["message_list"] }); - }); + } + } +} + +// Initial label menu setup +browser.storage.local.get(['labels']).then(result => { + rebuildLabelSubmenu(result.labels); +}); + +// Update menu when labels change +browser.storage.onChanged.addListener((changes) => { + if (changes.labels) { + rebuildLabelSubmenu(changes.labels.newValue); } }); @@ -618,110 +1924,81 @@ browser.menus.create({ browser.menus.onClicked.addListener(async (info, tab) => { if (info.parentMenuItemId === "autosort-label") { const label = info.menuItemId.replace("label-", ""); - console.log(`Manual label selected: ${label}`); + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', `Manual label selected: ${label}`); + } await showNotification("AutoSort+", `Applying label: ${label}`); - browser.tabs.sendMessage(tab.id, { - action: "getSelectedMessages", - label: label - }); + try { + // Get the current mail tab for processing + const mailTabs = await browser.mailTabs.query({ active: true, currentWindow: true }); + if (mailTabs && mailTabs.length > 0) { + // Get full message objects + const messages = await browser.mailTabs.getSelectedMessages(mailTabs[0].id); + if (messages && messages.messages && messages.messages.length > 0) { + await applyLabelsToMessages(messages.messages, label); + } else { + await showNotification("AutoSort+ Error", "No messages selected for labeling."); + } + } else { + await showNotification("AutoSort+ Error", "No active mail tab found."); + } + } catch (error) { + console.error("Error applying manual label:", error); + await showNotification("AutoSort+ Error", `Error applying label: ${error.message}`); + } } else if (info.menuItemId === "autosort-analyze") { - console.log("AI analysis selected - starting process"); - await showNotification("AutoSort+", "Starting AI analysis of selected messages..."); - + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', 'AI analysis selected - starting batch process'); + } + try { + // Guard: refuse if a batch is already running (atomic check-and-set) + if (!_acquireBatchLock()) { + await showNotification( + 'AutoSort+ Busy', + 'A batch is already in progress. Please wait or cancel it from the settings page.' + ); + return; + } + // Get the current mail tab const mailTabs = await browser.mailTabs.query({ active: true, currentWindow: true }); if (!mailTabs || mailTabs.length === 0) { - console.error("No active mail tab found"); - await showNotification("AutoSort+ Error", "No active mail tab found"); + console.error('No active mail tab found'); + await showNotification('AutoSort+ Error', 'No active mail tab found'); + _releaseBatchLock(); return; } - console.log("Current mail tab:", mailTabs[0]); // Get selected messages using mailTabs API const selectedMessageList = await browser.mailTabs.getSelectedMessages(mailTabs[0].id); - console.log("Selected message list:", selectedMessageList); - if (!selectedMessageList || !selectedMessageList.messages || selectedMessageList.messages.length === 0) { - console.error("No messages selected"); - await showNotification("AutoSort+ Error", "No messages selected for analysis"); + console.error('No messages selected'); + await showNotification('AutoSort+ Error', 'No messages selected for analysis'); + _releaseBatchLock(); return; } - console.log(`Analyzing ${selectedMessageList.messages.length} selected messages`); - - for (const message of selectedMessageList.messages) { - // Get the full message with body - const fullMessage = await browser.messages.getFull(message.id); - console.log("Got full message:", fullMessage ? "yes" : "no"); - console.log("Message content:", fullMessage); - - if (!fullMessage) { - console.error("Could not get message content"); - continue; - } - - // Function to recursively extract text from message parts - function extractTextFromParts(parts) { - let text = ""; - if (!parts) return text; - - for (const part of parts) { - console.log("Processing part:", { - contentType: part.contentType, - partName: part.partName, - size: part.size - }); - - if (part.parts) { - // Recursively process nested parts - text += extractTextFromParts(part.parts); - } - - if (part.contentType === "text/plain") { - text += part.body + "\n"; - } else if (part.contentType === "text/html" && !text) { - // Only use HTML if we haven't found plain text - text = browser.messengerUtilities.convertToPlainText(part.body); - } else if (part.contentType === "message/rfc822" && part.body) { - // Handle message/rfc822 parts - text += part.body + "\n"; - } - } - return text; - } - - // Extract email content from the message - let emailContent = ""; - if (fullMessage.parts) { - emailContent = await extractTextFromParts(fullMessage.parts); - } else if (fullMessage.body) { - emailContent = fullMessage.body; - } + const messages = selectedMessageList.messages; + if (window.debugLogger) { + window.debugLogger.info('[AutoSort+]', `Starting batch analysis of ${messages.length} selected messages`); + } - console.log("Extracted email content:", emailContent || ""); + await showNotification( + 'AutoSort+ Batch', + `Starting AI analysis of ${messages.length} email${messages.length > 1 ? 's' : ''}...` + ); - if (!emailContent) { - console.error("No readable content found in message"); - await showNotification("AutoSort+ Error", "Could not extract email content"); - continue; - } + // Hand off to the batch engine (runs async, does not block the event listener) + batchAnalyzeEmails(messages).catch(err => { + console.error('[AutoSort+] Batch analysis failed:', err); + _releaseBatchLock(); + }); - console.log("Analyzing message content"); - const label = await analyzeEmailContent(emailContent); - - if (label) { - console.log("Applying label:", label); - await applyLabelsToMessages([message], label); - await showNotification("AutoSort+", `Successfully applied label: ${label}`); - } else { - console.log("No label generated from analysis"); - await showNotification("AutoSort+ Error", "Could not generate label from analysis"); - } - } } catch (error) { - console.error("Error during AI analysis:", error); - await showNotification("AutoSort+ Error", `Error: ${error.message}`); + _releaseBatchLock(); + console.error('Error starting batch analysis:', error); + await showNotification('AutoSort+ Error', `Error: ${error.message}`); } } }); \ No newline at end of file diff --git a/content.js b/content.js index 6238dc9..1291ebb 100644 --- a/content.js +++ b/content.js @@ -1,3 +1,36 @@ +// Debug logging helper for content script context +const debugLog = { + enabled: false, + + async init() { + try { + const result = await browser.storage.local.get('debugMode'); + this.enabled = !!result.debugMode; + } catch (e) {} + + // Listen for changes + browser.storage.onChanged.addListener((changes, area) => { + if (area === 'local' && changes.debugMode !== undefined) { + this.enabled = !!changes.debugMode.newValue; + } + }); + }, + + info(message, data = null) { + if (this.enabled) { + console.info('%c[Content]', 'color: white; background: #00BCD4; padding: 2px 6px; border-radius: 4px;', message, data !== null ? data : ''); + } + }, + + error(message, data = null) { + // Always output errors + console.error('%c[Content]', 'color: white; background: #F44336; padding: 2px 6px; border-radius: 4px;', message, data !== null ? data : ''); + } +}; + +// Initialize debug mode +debugLog.init(); + // Listen for messages from the background script browser.runtime.onMessage.addListener((message, sender, sendResponse) => { if (message.action === "getSelectedMessages") { @@ -13,7 +46,7 @@ browser.runtime.onMessage.addListener((message, sender, sendResponse) => { // Get selected rows const selectedRows = messageList.querySelectorAll('tr.selected'); if (!selectedRows || selectedRows.length === 0) { - console.log("No messages selected"); + debugLog.info("No messages selected"); sendResponse([]); return true; } @@ -26,7 +59,7 @@ browser.runtime.onMessage.addListener((message, sender, sendResponse) => { row.getAttribute('id'); if (!messageId) { - console.warn("Row missing message ID:", row); + debugLog.info("Row missing message ID:", row); return null; } @@ -35,12 +68,77 @@ browser.runtime.onMessage.addListener((message, sender, sendResponse) => { return { id: cleanId }; }).filter(msg => msg !== null); - console.log("Found selected messages:", selectedMessages); + debugLog.info("Found selected messages:", selectedMessages); sendResponse(selectedMessages); } catch (error) { console.error("Error getting selected messages:", error); sendResponse([]); } + } else if (message.action === 'ollamaFetch') { + // Runs inside a tab at http://localhost:11434 to avoid CORS + (async () => { + try { + const { fetchAction, model, prompt, headers, correlationId } = message; + const base = window.location.origin; + + if (fetchAction === 'pull') { + const res = await fetch(`${base}/api/pull`, { + method: 'POST', + headers: Object.assign({ 'Content-Type': 'application/json' }, headers || {}), + body: JSON.stringify({ name: model, stream: true }) + }); + if (!res.ok) { + const t = await res.text(); + let errorMsg = t || `HTTP ${res.status}`; + try { + const j = JSON.parse(t); + if (j.error) errorMsg = j.error; + } catch (parseErr) { + // Not JSON, use raw text + } + throw new Error(errorMsg); + } + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + while (true) { + const { done, value } = await reader.read(); + if (done) break; + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split('\n'); + buffer = lines.pop(); + for (const line of lines) { + if (!line.trim()) continue; + try { + const data = JSON.parse(line); + const payload = { action: 'ollamaPullProgress', correlationId, status: data.status || '' }; + if (data.completed && data.total) { + payload.percent = Math.round((data.completed / data.total) * 100); + } + browser.runtime.sendMessage(payload).catch(() => {}); + } catch (e) { + // ignore parse errors for partial lines + } + } + } + browser.runtime.sendMessage({ action: 'ollamaPullComplete', correlationId, ok: true }).catch(() => {}); + sendResponse({ ok: true }); + } else if (fetchAction === 'chat') { + const res = await fetch(`${base}/api/chat`, { + method: 'POST', + headers: Object.assign({ 'Content-Type': 'application/json' }, headers || {}), + body: JSON.stringify({ model, messages: [{ role: 'user', content: prompt }], stream: false }) + }); + const data = await res.json(); + sendResponse({ ok: true, data }); + } else { + sendResponse({ ok: false, error: 'unknown fetchAction' }); + } + } catch (err) { + sendResponse({ ok: false, error: err.message || String(err) }); + } + })(); + return true; } return true; }); \ No newline at end of file diff --git a/docs/_config.yml b/docs/_config.yml index 2ffbf31..26e1170 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -1,22 +1,67 @@ +# GitHub Pages Configuration remote_theme: pages-themes/cayman@v0.2.0 plugins: -- jekyll-remote-theme + - jekyll-remote-theme + - jekyll-seo-tag + - jekyll-sitemap -title: AutoSort+ -description: Fully customizable AI-powered email organization for Thunderbird. Create your own folder structure and let the AI adapt to your organizational system. -show_downloads: true -google_analytics: +# Site Settings +title: "AutoSort+ | AI-Powered Email Organization" +description: "Multi-provider AI email organization for Thunderbird. Smart rate limiting, usage tracking, and support for Gemini, OpenAI, Claude, Groq, Mistral, and local Ollama deployments." +author: Nigel +url: "https://nigel1992.github.io" +baseurl: "/AutoSort-Plus" +repository: Nigel1992/AutoSort-Plus + +# Theme Settings theme: jekyll-theme-cayman +show_downloads: true + +# SEO +lang: en-US +logo: /AutoSort-Plus/icon-48.png +twitter: + card: summary + username: +social: + name: AutoSort+ + links: + - https://github.com/Nigel1992/AutoSort-Plus + +# Display Settings +markdown: kramdown +kramdown: + input: GFM + syntax_highlighter: rouge + syntax_highlighter_opts: + css_class: 'highlight' + +# Features +show_excerpts: true +date_format: "%B %-d, %Y" + +# Analytics (optional - add your tracking ID) +google_analytics: + +# Build Settings +exclude: + - Gemfile + - Gemfile.lock + - node_modules + - vendor + - .gitignore + - README.md + +# Collections (for future docs expansion) +collections: + docs: + output: true + permalink: /:collection/:path/ -# Navigation -nav: - - title: Installation - url: /installation - - title: Your Custom Setup - url: /custom-setup - - title: Usage Guide - url: /usage - - title: FAQ - url: /faq - - title: Contributing - url: /contributing \ No newline at end of file +defaults: + - scope: + path: "" + type: "docs" + values: + layout: "default" + author: "Nigel" \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index efcc516..e39ae93 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,64 +1,436 @@ -# AutoSort+ for Thunderbird - -AutoSort+ is an AI-powered email organization addon for Thunderbird that automatically sorts your emails into your own custom folders and categories using Google's Gemini AI. - -## Latest Release (v1.2.0) - -### What's New -- **Improved History Management**: Fixed move history storage for better reliability -- **Streamlined Interface**: Removed notification system for a cleaner experience -- **Performance Improvements**: Enhanced overall stability and responsiveness -- **Move History Tracking**: View detailed history of all email moves in settings -- **Enhanced Settings Page**: Improved layout and functionality of the settings interface - -## Key Features - -- **Fully Customizable Organization**: Use your own folder structure and categories - the AI adapts to your organizational system -- **AI-Powered Classification**: Leverages Google's Gemini AI to understand email content and context -- **Smart Folder Organization**: Automatically moves emails to the appropriate folders based on your custom categories -- **Bulk Processing**: Process multiple emails at once to save time -- **Move History**: Track all email moves with detailed information including: - - Timestamp of move - - Email subject - - Destination folder - - Move status - - Up to 100 most recent moves stored - -## Quick Start - -1. Install the addon from the [latest release](https://github.com/Nigel1992/AutoSort-Plus/releases) -2. Configure your Google API key in the addon settings -3. Create your desired folder structure in Thunderbird -4. Select emails you want to organize -5. Right-click and choose "AutoSort+ Analyze with AI" -6. The addon will automatically sort emails into your folders based on content - -## Configuration - -1. Set up your Google API key in the addon preferences -2. Set up your preferred folder structure in Thunderbird -3. The addon will learn and adapt to your organizational system -4. Fine-tune settings through the addon preferences -5. View move history in the settings page: - - Access through Add-ons Manager > AutoSort+ > Options - - See timestamps, subjects, and destinations of moved emails - - Track success/failure status of moves - - Last 100 moves are preserved - -## Support - -Having issues? Check out our: -- [Troubleshooting Guide](docs/troubleshooting.md) -- [FAQ](docs/faq.md) -- [GitHub Issues](https://github.com/Nigel1992/AutoSort-Plus/issues) - -## Contributing - -We welcome contributions! Please check our [Contributing Guidelines](CONTRIBUTING.md) for details on: -- Reporting bugs -- Suggesting features -- Submitting pull requests - -## License - -This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. \ No newline at end of file +--- +layout: default +title: AutoSort+ - AI-Powered Email Organization for Thunderbird +--- + + + +
+ +# 🎯 AutoSort+ for Thunderbird + +

AI-powered email organization that adapts to your workflow. Choose from cloud providers (Gemini, OpenAI, Anthropic, Groq, Mistral) or run a local Ollama model and let AutoSort+ automatically move emails to the right folders.

+ +
+Version +Thunderbird +License +
+ + + +

Available as a manual install (.xpi). See the Documentation below for installation and usage instructions.

+ +
+ +
+ +--- + +## 🌟 What is AutoSort+? + +AutoSort+ transforms your email workflow by automatically organizing messages into your custom folder structure using cutting-edge AI. Unlike rigid rule-based systems, AutoSort+ understands context, learns your preferences, and adapts to your unique organizational needs. + +### ✨ Why Choose AutoSort+? + +| Feature | Traditional Filters | AutoSort+ | +|---------|---------------------|------------| +| **Setup Time** | Hours of rule configuration | Minutes with AI | +| **Flexibility** | Static rules, breaks easily | Adaptive AI, learns patterns | +| **Context Understanding** | Basic keyword matching | Full content comprehension | +| **Multi-Provider** | N/A | 5 cloud providers + local Ollama support | +| **Smart Limits** | N/A | Built-in rate limit management | +| **History Tracking** | Manual logging | Automatic 100-move history | + +--- + +## 🎉 Latest Release: v1.2.3.3 + +
+ +### 🚀 Release v1.2.3.3 — January 28, 2026 + +**Summary:** Fixed manual label application from the context menu (Right-click → AutoSort+ → AutoSort Label → pick a label). The background script now handles selection and labeling reliably across Thunderbird views. + +#### 🛠️ Notable Fix +- ✅ **Manual Labeling Fix:** Replaced content-script dependency with `mailTabs` API handling in background script to avoid "Could not establish connection. Receiving end does not exist." errors. + +
+ +--- + +## 🎯 Key Features + +### 🤖 Multi-Provider AI Support + +Choose the best AI provider for your needs: + +| Provider | Model | Free Tier | Speed | Best For | +|----------|-------|-----------|-------|----------| +| **Gemini** | gemini-2.5-flash | 20/day/key | ⚡⚡⚡ | General use, fast processing | +| **OpenAI** | gpt-4o-mini | - | ⚡⚡ | Premium quality | +| **Claude** | claude-3-haiku | 1000/day | ⚡⚡⚡ | Long emails, nuanced content | +| **Groq** | llama-3.3-70b | Generous | ⚡⚡⚡⚡ | Ultra-fast, free | +| **Mistral** | mistral-small | Free tier | ⚡⚡⚡ | European privacy focus | +| **Ollama** | local LLM (llama3.2, phi, tinyllama) | Local (no external usage) | ⚡⚡ - ⚡⚡⚡ | Run models locally for privacy and offline use; supports model download and CPU-only mode | + +### 📊 Smart Rate Limit Management (Gemini) + +- **Automatic Enforcement**: 5 requests/minute, 20/day +- **Real-Time Tracking**: See usage in settings dashboard +- **Smart Warnings**: Alerts at 15/20 limit +- **Multi-Key Support**: Switch keys when limit reached +- **Paid Plan Bypass**: Disable limits with paid plan checkbox + +### 📁 Flexible Folder Management + +- **IMAP Discovery**: Auto-load your existing folder structure +- **Bulk Import**: Paste lists of labels +- **Custom Categories**: Create unlimited folder categories +- **Auto-Create**: Missing folders created automatically +- **Smart Navigation**: Recursive folder traversal + +### 📜 Move History & Tracking + +- **Last 100 Moves**: Full audit trail +- **Timestamps**: Precise move timing +- **Status Tracking**: Success/failure indicators +- **Subject Lines**: Quick identification +- **Destination Folders**: See where emails went +- **Clear History**: Fresh start anytime + +--- + +## 🚀 Quick Start Guide + +### 1️⃣ Installation + +**Option 1: Direct Download** +```bash +# Download the latest XPI from releases +wget https://github.com/Nigel1992/AutoSort-Plus/releases/latest/download/autosortplus.xpi +``` + +**Option 2: Build from Source** +```bash +git clone https://github.com/Nigel1992/AutoSort-Plus.git +cd AutoSort-Plus +# Install in Thunderbird: Tools → Add-ons → Install Add-on From File +``` + +### 2️⃣ Get Your API Key + +Choose your preferred AI provider: + +- **Gemini** (Free): [Get API Key](https://aistudio.google.com/app/apikey) - 20 requests/day per key +- **OpenAI** (Paid): [Get API Key](https://platform.openai.com/api-keys) +- **Anthropic** (Free/Paid): [Get API Key](https://console.anthropic.com/) - 1000/day free +- **Groq** (Free): [Get API Key](https://console.groq.com/keys) - Generous limits +- **Mistral** (Free/Paid): [Get API Key](https://console.mistral.ai/) +- **Ollama (Local)**: No API key required — [Install Ollama](https://ollama.ai/download) and pull a model (e.g., `ollama pull llama3.2`). See the Ollama setup guide in the docs for details. + +### 3️⃣ Configure AutoSort+ + +1. Open Thunderbird → **Tools → Add-ons** +2. Find **AutoSort+** → Click **Options** +3. **Select AI Provider** and paste your API key +4. Click **"Test API Connection"** ✅ +5. **Load folders** from IMAP or add custom labels +6. Save settings and you're ready! + +### 4️⃣ Start Organizing + +You have two options: + +**Option 1: AI-Powered Sorting** +- Select emails → Right-click → **AutoSort+ → Analyze with AI** +- The AI will analyze and move emails to the best folder/category. + +**Option 2: Manual Labeling** +- Select emails → Right-click → **AutoSort+ → AutoSort Label → [Pick any label]** +- The selected label/category will be applied instantly to all selected emails. +
If you add or change labels in the settings menu, you must restart Thunderbird for the new labels to appear in the right-click menu.
+ +--- + +## 📖 Usage Guides + +### Managing Gemini Rate Limits + +If using Gemini's free tier: + +1. **Monitor Usage**: Check settings for real-time count (X/20) +2. **Watch Warnings**: Yellow alert at 15, red at 20 +3. **Create More Keys**: Generate multiple API keys in different projects +4. **Switch Keys**: Paste new key when limit reached, click Reset Counter +5. **Upgrade**: Enable "Gemini paid plan" if you have one + +### Creating Multiple Gemini Keys + +``` +1. Go to Google AI Studio: https://aistudio.google.com/ +2. Create a new project +3. Generate API key for that project +4. Each project = new 20/day limit +5. Switch keys in AutoSort+ settings as needed +``` + +### Setting Up Custom Folders + +**Method 1: IMAP Discovery** +- Click "Load Folders from IMAP" +- Select your account +- All folders appear automatically + +**Method 2: Bulk Import** +``` +Work +Personal +Finance +Projects +Family +``` +- Paste list (one per line) → Click Import + +**Method 3: Manual Entry** +- Type label name → Click "Add Label" → See green checkmark + +--- + +## 🔒 Privacy & Security + +| Aspect | Details | +|--------|----------| +| **Email Storage** | ❌ Never stored, analyzed in memory only | +| **API Keys** | 🔐 OS-level encryption via browser storage | +| **Data Transmission** | ✅ Direct to your chosen AI provider | +| **Telemetry** | ❌ None - zero tracking | +| **Open Source** | ✅ Full transparency, audit anytime | +| **Third Parties** | ❌ No intermediary servers | + +**Your privacy is paramount.** All analysis happens directly between Thunderbird and your chosen AI provider. We don't have servers because we don't want your data! + +--- + +## 🛠️ Advanced Configuration + +### Provider-Specific Settings + +**Gemini Users:** +- Enable "Paid Plan" checkbox to bypass rate limits +- Use Reset Counter when switching API keys +- Monitor daily reset time in usage panel + +**Ollama (Local) Users:** +- Install Ollama: https://ollama.ai/download +- Pull a model: `ollama pull llama3.2` (or `tinyllama`, `phi`, `gemma`) +- Set the Ollama URL and model in AutoSort+ settings (no API key required) +- Use CPU-only mode in settings to avoid GPU usage if necessary +- If Ollama returns 403 or connection errors, check `OLLAMA_403_DEBUG.md` in the repo + +**All Providers:** +- Test connection before first use +- Save settings after changes +- Check move history for troubleshooting + +### Folder Organization Tips + +- **Keep categories broad**: "Work", "Personal", "Finance" +- **Avoid special characters**: Use alphanumeric names +- **Case-sensitive matching**: Labels must match exactly +- **Use examples**: More context = better AI understanding + +--- + +## ⚠️ Troubleshooting + +### API Key Issues + +**Problem**: "API Key Not Configured" error + +**Solution**: +1. Verify key is from correct provider +2. No spaces before/after key +3. Click "Test API Connection" +4. Check provider's usage dashboard for validity + +### Rate Limit Errors + +**Problem**: "Rate limit exceeded" for Gemini + +**Solution**: +1. Check usage counter in settings (X/20) +2. Wait for daily reset (time shown in settings) +3. Create new API key in different project +4. Switch key and click "Reset Counter" +5. Or enable "Paid Plan" if applicable + +### Settings Page Won't Load + +**Solution**: +```bash +1. Thunderbird → Settings → Privacy → Cookies and Site Data +2. Click "Clear Data" +3. Tools → Add-ons → AutoSort+ → Reload +``` + +### Emails Not Moving + +**Check**: +- ✓ API key is valid (test it) +- ✓ Labels are saved (green checkmark) +- ✓ Folders exist (or auto-create enabled) +- ✓ Internet connection active +- ✓ No rate limit reached + +--- + +## 📊 System Architecture + +``` +┌─────────────────────────────────────────┐ +│ Thunderbird Email Client │ +└──────────────┬──────────────────────────┘ + │ +┌──────────────▼──────────────────────────┐ +│ AutoSort+ Extension │ +│ │ +│ ┌──────────┐ ┌──────────┐ │ +│ │ UI Layer │ │ Background│ │ +│ │(options) │◄─┤ Script │ │ +│ └──────────┘ └─────┬─────┘ │ +│ │ │ +│ ┌───────▼────────┐ │ +│ │ Rate Limiter │ │ +│ │ (Gemini only) │ │ +│ └───────┬────────┘ │ +└──────────────────────┼─────────────────┘ + │ + ┌─────────────┴─────────────┐ + │ │ + ┌────▼────┐ ┌────────┐ ┌─────▼─────┐ + │ Gemini │ │ Groq │ │ Claude │ + │ API │ │ API │ │ API │ + └─────────┘ └────────┘ └───────────┘ +``` + +--- + +## 🤝 Support & Community + +
+ +| 💡 Have Questions? | 🐛 Found a Bug? | ✨ Feature Ideas? | +|-------------------|-----------------|-------------------| +| [Discussions](https://github.com/Nigel1992/AutoSort-Plus/discussions) | [Issues](https://github.com/Nigel1992/AutoSort-Plus/issues) | [Feature Requests](https://github.com/Nigel1992/AutoSort-Plus/issues) | + +
+ +**Before reporting an issue:** +1. Check troubleshooting section above +2. Search existing issues +3. Include: Thunderbird version, AutoSort+ version, AI provider, error message + +--- + +## 🙏 Contributing + +We ❤️ contributions! Here's how to help: + +### Ways to Contribute + +- 🐛 **Report bugs** with detailed reproduction steps +- 💡 **Suggest features** that would improve your workflow +- 📖 **Improve docs** with clearer explanations +- 🧪 **Test releases** with different providers +- 💻 **Submit code** via pull requests + +### Development Setup + +```bash +# Clone repository +git clone https://github.com/Nigel1992/AutoSort-Plus.git +cd AutoSort-Plus + +# Make changes +# Test in Thunderbird: Tools → Add-ons → Debug Add-ons → Load Temporary Add-on + +# Submit PR +git checkout -b feature/amazing-feature +git commit -m "Add amazing feature" +git push origin feature/amazing-feature +``` + +--- + +## 📄 License + +**MIT License** - Free to use, modify, and distribute. + +See [LICENSE](https://github.com/Nigel1992/AutoSort-Plus/blob/main/LICENSE) for full text. + +--- + +## 🎨 Credits + +**Icon Design:** [Fantasyou - Flaticon](https://www.flaticon.com/free-icons/email-filtering) + +**AI Providers:** +- [Google Gemini](https://ai.google.dev/) +- [OpenAI](https://openai.com/) +- [Anthropic](https://www.anthropic.com/) +- [Groq](https://groq.com/) +- [Mistral AI](https://mistral.ai/) + +**Built with:** +- [Thunderbird WebExtension APIs](https://webextension-api.thunderbird.net/) +- JavaScript ES6+ +- Love ❤️ + +--- + +
+ +## ⭐ Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=Nigel1992/AutoSort-Plus&type=Date)](https://star-history.com/#Nigel1992/AutoSort-Plus&Date) + +--- + +**Made with ❤️ to help you organize email faster** + +[⬆ Back to Top](#-autosort-for-thunderbird) • [GitHub](https://github.com/Nigel1992/AutoSort-Plus) • [Latest Release](https://github.com/Nigel1992/AutoSort-Plus/releases) + +--- + +![Thunderbird](https://img.shields.io/badge/Thunderbird-78.0+-0A84FF?style=flat-square&logo=thunderbird&logoColor=white) +![License](https://img.shields.io/badge/License-MIT-green?style=flat-square) +![Version](https://img.shields.io/badge/Version-1.2.3.3-blue?style=flat-square) + +
\ No newline at end of file diff --git a/docs/superpowers/specs/2026-04-23-i18n-support-design.md b/docs/superpowers/specs/2026-04-23-i18n-support-design.md new file mode 100644 index 0000000..c5ab153 --- /dev/null +++ b/docs/superpowers/specs/2026-04-23-i18n-support-design.md @@ -0,0 +1,47 @@ +# i18n Support Design + +## Overview + +Add English and Chinese (zh-CN) localization to AutoSort+ Thunderbird extension using Thunderbird's built-in `_locales/` system. + +## Architecture + +### Locale Files +- `_locales/en/messages.json` — English (default) +- `_locales/zh_CN/messages.json` — Simplified Chinese + +### Manifest +- Added `"default_locale": "en"` to enable Thunderbird i18n +- Manifest strings (description, default_title) use `__MSG_key__` syntax + +### HTML Translation +- All user-facing text in `options.html` uses `data-i18n="key"` attributes for text content +- `data-i18n-placeholder="key"` for input placeholders +- `data-i18n-title="key"` for title attributes +- Translation applied at page load via `applyTranslations()` helper + +### JavaScript Translation Helper +- `js/i18n.js` — lightweight wrapper around `browser.i18n.getMessage()` +- `i18n.get(key)` — returns localized string, falls back to key if missing +- `applyTranslations()` — scans DOM for `data-i18n*` attributes and replaces text + +### Dynamic Strings +- Provider info, test results, status messages use `i18n.get('key', 'fallback')` inline +- Console.log messages left untranslated (developer-facing) + +## Scope + +**Translated**: HTML labels, buttons, headers, placeholders, status messages, provider names, help text +**Not Translated**: console.log output, internal error messages, code identifiers + +## Language Detection + +Thunderbird auto-detects browser locale. No manual language switch in UI. Users change language via Thunderbird settings. + +## Files Changed +- `_locales/en/messages.json` (new) +- `_locales/zh_CN/messages.json` (new) +- `js/i18n.js` (new) +- `manifest.json` (added default_locale, MSG placeholders) +- `options.html` (added data-i18n attributes, included i18n.js) +- `options.js` (added applyTranslations() call, i18n.get() for dynamic strings) diff --git a/js/i18n.js b/js/i18n.js new file mode 100644 index 0000000..a578c3f --- /dev/null +++ b/js/i18n.js @@ -0,0 +1,119 @@ +/** + * Lightweight i18n helper for Thunderbird extensions. + * Uses browser.i18n.getMessage() for localized strings with manual {key} substitution. + * browser.i18n.getMessage only supports $1/$2 positional syntax — our messages use {key}. + */ +const i18n = { + /** Placeholder name → array-index mapping for every key with placeholders. + * Generated from messages.json placeholder definitions. */ + _placeholders: { + andMore: ['count'], + apiError: ['error'], + availableModels: ['models'], + batchCancelledChunk: ['current', 'total', 'done', 'totalItems'], + batchCancelledSimple: ['done', 'totalItems'], + batchDone: ['completed', 'skipped', 'failed'], + batchPausedChunk: ['current', 'total', 'done', 'totalItems'], + batchPausedSimple: ['done', 'totalItems'], + batchRunningChunk: ['current', 'total', 'done', 'totalItems', 'completed', 'failed'], + batchRunningSimple: ['done', 'totalItems', 'completed', 'failed'], + connectedModelReady: ['model', 'available'], + connectedSuccessfully: ['model', 'url'], + connectionError: ['error'], + customConnectionFailed: ['error'], + customPromptEmailLabel: ['body'], + customPromptTip: ['subject', 'attachments'], + diagnosticsApiUrl: ['url'], + downloadFailed: ['error'], + errorLoadingFolders: ['error'], + errorSavingSettings: ['error'], + failedFetchModels: ['error'], + failedStart: ['error'], + folderFoundText: ['count'], + foundModelsMsg: ['count'], + geminiDailyCount: ['count'], + genericErrorLabel: ['error'], + hoursAgo: ['count', 'plural'], + hoursAgoShort: ['count'], + importedFoldersMsg: ['count'], + inHours: ['count', 'plural'], + inHoursShort: ['count'], + keyLabel: ['number'], + loadedFoldersMsg: ['count'], + minutesAgo: ['count', 'plural'], + minutesAgoShort: ['count'], + modelNotInstalled: ['model', 'available'], + ollamaConnectionFailed: ['error'], + ollamaConnectionFailedSimple: ['error'], + ollamaCurlTest: ['url'], + ollamaErrorLabel: ['error'], + pleaseConfigure: ['items'], + pleaseVisit: ['url'], + removeApiKeyConfirm: ['number'], + replaceExistingConfirm: ['existing', 'new'], + replaceFoldersConfirm: ['count'], + settingsSavedOllama: ['cpuMode'], + startingDownload: ['model'], + testFailed: ['status'], + troubleshootTest: ['url'], + urlCopied: ['url'], + }, + + /** + * Get a localized string by message key. + * @param {string} key - message key + * @param {Object|Array} [substitutions] - named {key:value} or positional array + */ + get(key, substitutions) { + try { + // Get raw message template (no substitution — we handle {key} ourselves) + let msg = browser.i18n.getMessage(key); + if (!msg) return key; + + // Convert array → object using known placeholder mapping + if (Array.isArray(substitutions)) { + const names = this._placeholders[key]; + if (names) { + substitutions = Object.fromEntries( + names.map((name, i) => [name, substitutions[i]]) + ); + } + } + + // Replace {key} → value + if (substitutions && typeof substitutions === 'object') { + msg = msg.replace(/\{(\w+)\}/g, (_, k) => substitutions[k] ?? `{${k}}`); + } + + return msg; + } catch (e) { + return key; + } + } +}; + +/** + * Translate all elements with data-i18n attributes on page load. + * - data-i18n="key" → sets textContent + * - data-i18n-placeholder="key" → sets placeholder + * - data-i18n-title="key" → sets title + */ +function applyTranslations() { + // Translate text content + document.querySelectorAll('[data-i18n]').forEach(el => { + const key = el.getAttribute('data-i18n'); + el.textContent = i18n.get(key); + }); + + // Translate placeholders + document.querySelectorAll('[data-i18n-placeholder]').forEach(el => { + const key = el.getAttribute('data-i18n-placeholder'); + el.placeholder = i18n.get(key); + }); + + // Translate titles + document.querySelectorAll('[data-i18n-title]').forEach(el => { + const key = el.getAttribute('data-i18n-title'); + el.title = i18n.get(key); + }); +} diff --git a/js/logger.js b/js/logger.js new file mode 100644 index 0000000..45f1d56 --- /dev/null +++ b/js/logger.js @@ -0,0 +1,140 @@ +/** + * DebugLogger - Centralized debug logging for AutoSort+ + * Uses browser.storage.local for cross-context synchronization + */ + +class DebugLogger { + constructor() { + this.enabled = false; + this.isReady = false; + this.queue = []; + this.listenForChanges(); + } + + async init() { + try { + const result = await browser.storage.local.get('debugMode'); + this.enabled = !!result.debugMode; + this.isReady = true; + this.flushQueue(); + } catch (e) { + this.isReady = true; + } + } + + listenForChanges() { + if (typeof browser !== 'undefined' && browser.storage && browser.storage.onChanged) { + browser.storage.onChanged.addListener((changes, area) => { + if (area === 'local' && changes.debugMode !== undefined) { + this.enabled = !!changes.debugMode.newValue; + } + }); + } + } + + async enable() { + this.enabled = true; + if (typeof browser === 'undefined' || !browser.storage) return; + try { + await browser.storage.local.set({ debugMode: true }); + } catch (e) {} + } + + async disable() { + this.enabled = false; + if (typeof browser === 'undefined' || !browser.storage) return; + try { + await browser.storage.local.set({ debugMode: false }); + } catch (e) {} + } + + flushQueue() { + if (this.enabled && this.queue.length > 0) { + this.queue.forEach(log => { + const style = this.getTagStyle(log.tag); + console[log.type](`%c${log.tag}`, style, log.message, log.data || ''); + }); + } + this.queue = []; + } + + getTagStyle(tag) { + if (tag.includes('Error') || tag.includes('error')) { + return 'color: white; background: #F44336; padding: 2px 6px; border-radius: 4px;'; + } + if (tag.includes('API')) { + return 'color: white; background: #9C27B0; padding: 2px 6px; border-radius: 4px;'; + } + if (tag.includes('RateLimit') || tag.includes('warn') || tag.includes('Warning')) { + return 'color: #333; background: #FFC107; padding: 2px 6px; border-radius: 4px;'; + } + if (tag.includes('Folder')) { + return 'color: white; background: #009688; padding: 2px 6px; border-radius: 4px;'; + } + return 'color: white; background: #2196F3; padding: 2px 6px; border-radius: 4px;'; + } + + enqueueOrLog(type, tag, message, data) { + if (!this.isReady) { + this.queue.push({ type, tag, message, data }); + return; + } + if (this.enabled) { + const style = this.getTagStyle(tag); + if (data !== null && data !== undefined) { + console[type](`%c${tag}`, style, message, data); + } else { + console[type](`%c${tag}`, style, message); + } + } + } + + info(tag, message, data = null) { + this.enqueueOrLog('info', tag, message, data); + } + + warn(tag, message, data = null) { + this.enqueueOrLog('warn', tag, message, data); + } + + error(tag, message, data = null) { + if (!this.isReady) { + this.queue.push({ type: 'error', tag, message, data }); + return; + } + const style = 'color: white; background: #F44336; padding: 2px 6px; border-radius: 4px;'; + console.error(`%c${tag}`, style, message, data !== null && data !== undefined ? data : ''); + } + + apiRequest(provider, url, requestBody) { + if (!this.isReady) return; // Skip queue - API logs are immediate-only + if (this.enabled) { + console.groupCollapsed(`%c[API: ${provider}] Request`, 'color: #9C27B0; font-weight: bold;'); + console.log('URL:', url); + console.log('Request Body:', requestBody); + console.groupEnd(); + } + } + + apiResponse(provider, status, data) { + if (!this.isReady) return; // Skip queue - API logs are immediate-only + if (this.enabled) { + const isSuccess = status >= 200 && status < 300; + const color = isSuccess ? '#4CAF50' : '#F44336'; + const icon = isSuccess ? '✅' : '❌'; + console.groupCollapsed(`%c[API: ${provider}] ${icon} Response (${status})`, `color: ${color}; font-weight: bold;`); + console.log('Response Data:', data); + console.groupEnd(); + } + } + + log(tag, message, data = null) { + this.info(tag, message, data); + } +} + +const logger = new DebugLogger(); + +if (typeof window !== 'undefined') { + window.debugLogger = logger; +} \ No newline at end of file diff --git a/js/ollama.js b/js/ollama.js new file mode 100644 index 0000000..1175a5f --- /dev/null +++ b/js/ollama.js @@ -0,0 +1,93 @@ +/* + * Ollama API Client + * Adapted from ThunderAI extension + * Handles communication with local Ollama instance + */ + +export class Ollama { + host = ''; + model = ''; + stream = false; + num_ctx = 0; + authToken = ''; + + constructor({ + host = '', + model = '', + stream = false, + num_ctx = 0, + authToken = '', + } = {}) { + this.host = (host || '').trim().replace(/\/+$/, ""); + this.model = model; + this.stream = stream; + this.num_ctx = num_ctx; + this.authToken = authToken || ''; + } + + getHeaders = () => { + const headers = { + "Content-Type": "application/json" + }; + if (this.authToken) { + headers['Authorization'] = `Bearer ${this.authToken}`; + } + return headers; + } + + fetchModels = async () => { + try{ + const response = await fetch(this.host + "/api/tags", { + method: "GET", + headers: this.getHeaders(), + }); + + if (!response.ok) { + const errorDetail = await response.text(); + let err_msg = "[AutoSort+] Ollama API request failed: " + response.status + " " + response.statusText + ", Detail: " + errorDetail; + console.error(err_msg); + let output = {}; + output.ok = false; + output.error = errorDetail; + return output; + } + + let output = {}; + output.ok = true; + let output_response = await response.json(); + output.response = output_response; + + return output; + }catch (error) { + console.error("[AutoSort+] Ollama API request failed: " + error); + let output = {}; + output.is_exception = true; + output.ok = false; + output.error = "Ollama API request failed: " + error; + return output; + } + } + + fetchResponse = async (messages) => { + try { + const response = await fetch(this.host + "/api/chat", { + method: "POST", + headers: this.getHeaders(), + body: JSON.stringify({ + model: this.model, + messages: messages, + stream: this.stream, + ...(this.num_ctx > 0 ? { options: { num_ctx: parseInt(this.num_ctx) } } : {}), + }), + }); + return response; + }catch (error) { + console.error("[AutoSort+] Ollama API request failed: " + error); + let output = {}; + output.is_exception = true; + output.ok = false; + output.error = "Ollama API request failed: " + error; + return output; + } + } +} diff --git a/js/providers-config.js b/js/providers-config.js new file mode 100644 index 0000000..e62b925 --- /dev/null +++ b/js/providers-config.js @@ -0,0 +1,103 @@ +/** + * Provider Registry - Centralized configuration for all AI providers + */ + +const PROVIDERS = { + GEMINI: 'gemini', + OPENAI: 'openai', + ANTHROPIC: 'anthropic', + GROQ: 'groq', + MISTRAL: 'mistral', + OLLAMA: 'ollama', + OPENAI_COMPATIBLE: 'openai-compatible' +}; + +const PROVIDER_CONFIG = { + [PROVIDERS.GEMINI]: { + name: 'Google Gemini', + signupUrl: 'https://aistudio.google.com/app/apikey', + isFree: true, + endpoint: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent', + requiresAuth: 'query', + batchConfig: { concurrency: 1, delayMs: 0 } + }, + + [PROVIDERS.OPENAI]: { + name: 'OpenAI (ChatGPT)', + signupUrl: 'https://platform.openai.com/api-keys', + isFree: false, + endpoint: 'https://api.openai.com/v1/chat/completions', + requiresAuth: 'header', + batchConfig: { concurrency: 3, delayMs: 500 } + }, + + [PROVIDERS.ANTHROPIC]: { + name: 'Anthropic Claude', + signupUrl: 'https://console.anthropic.com/', + isFree: false, + endpoint: 'https://api.anthropic.com/v1/messages', + requiresAuth: 'header', + batchConfig: { concurrency: 2, delayMs: 500 } + }, + + [PROVIDERS.GROQ]: { + name: 'Groq (Fast & Free)', + signupUrl: 'https://console.groq.com/keys', + isFree: true, + endpoint: 'https://api.groq.com/openai/v1/chat/completions', + requiresAuth: 'header', + batchConfig: { concurrency: 5, delayMs: 200 } + }, + + [PROVIDERS.MISTRAL]: { + name: 'Mistral AI', + signupUrl: 'https://console.mistral.ai/', + isFree: false, + endpoint: 'https://api.mistral.ai/v1/chat/completions', + requiresAuth: 'header', + batchConfig: { concurrency: 2, delayMs: 500 } + }, + + [PROVIDERS.OLLAMA]: { + name: 'Ollama (Local)', + signupUrl: null, + isFree: true, + endpoint: null, + requiresAuth: 'optional', + batchConfig: { concurrency: 1, delayMs: 0 }, + isLocal: true + }, + + [PROVIDERS.OPENAI_COMPATIBLE]: { + name: 'OpenAI-Compatible', + signupUrl: null, + isFree: true, + endpoint: null, + requiresAuth: 'optional', + batchConfig: { concurrency: 2, delayMs: 500 }, + isLocal: true + } +}; + +function getProviderBatchConfig(provider) { + return PROVIDER_CONFIG[provider]?.batchConfig || { concurrency: 1, delayMs: 0 }; +} + +function isValidProvider(provider) { + return Object.values(PROVIDERS).includes(provider); +} + +function getProviderInfo(provider) { + return PROVIDER_CONFIG[provider] || null; +} + +// Export for use in other contexts +if (typeof window !== 'undefined') { + window.providersConfig = { + PROVIDERS, + PROVIDER_CONFIG, + getProviderBatchConfig, + isValidProvider, + getProviderInfo + }; +} \ No newline at end of file diff --git a/js/tab-fetch-utils.js b/js/tab-fetch-utils.js new file mode 100644 index 0000000..52b170e --- /dev/null +++ b/js/tab-fetch-utils.js @@ -0,0 +1,127 @@ +/** + * Tab Fetch Utility - Execute fetch in browser context via tab injection + * Used for localhost endpoints that background scripts can't access directly + */ + +/** + * Execute a fetch request via hidden tab injection + * @param {string} baseUrl - The base URL to open (e.g., "http://localhost:11434") + * @param {object} options - Fetch options + * @param {string} options.endpoint - API endpoint path (e.g., "/api/chat") + * @param {object} options.body - Request body (will be JSON.stringify'd) + * @param {object} options.headers - Request headers + * @param {number} options.timeoutMs - Timeout in milliseconds (default 30000) + * @param {string} options.resultKey - Window key for result (default "__tab_fetch_result") + * @returns {Promise} - The response data + */ +async function fetchViaTab(baseUrl, options = {}) { + const { + endpoint = '', + body = {}, + headers = {}, + timeoutMs = 30000, + resultKey = '__tab_fetch_result' + } = options; + + const tab = await browser.tabs.create({ url: baseUrl, active: false }); + + try { + await new Promise(resolve => setTimeout(resolve, 500)); + const headersJson = JSON.stringify({ 'Content-Type': 'application/json', ...headers }); + const bodyJson = JSON.stringify(body); + + const scriptCode = ` +(async () => { + try { + const headers = ${headersJson}; + const response = await fetch(window.location.origin + ${JSON.stringify(endpoint)}, { + method: 'POST', + headers, + body: ${bodyJson} + }); + + if (!response.ok) { + throw new Error('HTTP ' + response.status + ': ' + response.statusText); + } + + const data = await response.json(); + window.${resultKey} = { ok: true, data }; + } catch (error) { + window.${resultKey} = { ok: false, error: error.message }; + } +})(); +`; + + await browser.tabs.executeScript(tab.id, { code: scriptCode }); + + const pollIntervalMs = 250; + const maxIterations = Math.ceil(timeoutMs / pollIntervalMs); + let result = null; + + for (let i = 0; i < maxIterations; i++) { + await new Promise(resolve => setTimeout(resolve, pollIntervalMs)); + + try { + const results = await browser.tabs.executeScript(tab.id, { + code: `window.${resultKey} || null` + }); + if (results?.[0]) { + result = results[0]; + break; + } + } catch (e) { + break; + } + } + + if (!result) { + throw new Error(`Tab fetch timed out (${timeoutMs}ms)`); + } + + if (!result.ok) { + throw new Error(result.error || 'Unknown error'); + } + + return result.data; + + } finally { + try { + await browser.tabs.remove(tab.id); + } catch (e) { console.warn('[TabFetch] Failed to close tab:', e.message); } + } +} + +/** + * Helper for Ollama chat via tab + */ +async function ollamaChatViaTabUtil(ollamaUrl, model, prompt, authToken) { + const headers = authToken ? { 'Authorization': `Bearer ${authToken}` } : {}; + return fetchViaTab(ollamaUrl, { + endpoint: '/api/chat', + body: { model, messages: [{ role: 'user', content: prompt }], stream: false }, + headers, + resultKey: '__ollama_result' + }); +} + +/** + * Helper for OpenAI-compatible chat via tab + */ +async function openaiCompatChatViaTabUtil(baseUrl, model, prompt, apiKey) { + const headers = apiKey ? { 'Authorization': `Bearer ${apiKey}` } : {}; + return fetchViaTab(baseUrl, { + endpoint: '/v1/chat/completions', + body: { model, messages: [{ role: 'user', content: prompt }], max_tokens: 8192, stream: false }, + headers, + resultKey: '__openai_compat_result' + }); +} + +// Export for use in other contexts +if (typeof window !== 'undefined') { + window.tabFetchUtils = { + fetchViaTab, + ollamaChatViaTabUtil, + openaiCompatChatViaTabUtil + }; +} \ No newline at end of file diff --git a/js/workers/ollama-worker.js b/js/workers/ollama-worker.js new file mode 100644 index 0000000..acfa911 --- /dev/null +++ b/js/workers/ollama-worker.js @@ -0,0 +1,129 @@ +/* + * Ollama Web Worker for AutoSort+ + * Handles streaming chat responses from local Ollama instance + * Adapted from ThunderAI extension + */ + +import { Ollama } from '../ollama.js'; + +let ollama_host = null; +let ollama_model = ''; +let ollama_num_ctx = 0; +let ollama_auth_token = ''; +let ollama = null; +let stopStreaming = false; +let conversationHistory = []; +let assistantResponseAccumulator = ''; + +self.onmessage = async function(event) { + switch (event.data.type) { + case 'init': + ollama_host = event.data.ollama_host; + ollama_model = event.data.ollama_model; + ollama_num_ctx = event.data.ollama_num_ctx; + ollama_auth_token = event.data.ollama_auth_token || ''; + ollama = new Ollama({ + host: ollama_host, + model: ollama_model, + stream: true, + num_ctx: ollama_num_ctx, + authToken: ollama_auth_token + }); + console.log("[Ollama Worker] Initialized with host: " + ollama_host + ", model: " + ollama_model); + break; // init + + case 'chatMessage': + conversationHistory.push({ role: 'user', content: event.data.message }); + console.log("[Ollama Worker] Chat message received: " + event.data.message); + + const response = await ollama.fetchResponse(conversationHistory); + postMessage({ type: 'messageSent' }); + + if (!response.ok) { + let error_message = ''; + if(response.is_exception === true){ + error_message = response.error; + }else{ + try{ + const errorJSON = await response.json(); + error_message = errorJSON.error?.message || response.statusText; + }catch(e){ + error_message = response.statusText; + } + } + console.error("[Ollama Worker] API Error: " + error_message); + postMessage({ type: 'error', payload: "Ollama API Error: " + response.status + " " + error_message }); + break; + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder("utf-8"); + let buffer = ''; + + try { + while (true) { + if (stopStreaming) { + stopStreaming = false; + reader.cancel(); + conversationHistory.push({ role: 'assistant', content: assistantResponseAccumulator }); + assistantResponseAccumulator = ''; + postMessage({ type: 'tokensDone' }); + break; + } + + const { done, value } = await reader.read(); + if (done) { + conversationHistory.push({ role: 'assistant', content: assistantResponseAccumulator }); + assistantResponseAccumulator = ''; + postMessage({ type: 'tokensDone' }); + break; + } + + const chunk = decoder.decode(value); + buffer += chunk; + const lines = buffer.split("\n"); + buffer = lines.pop(); + + let parsedLines = []; + try{ + parsedLines = lines + .map((line) => line.trim()) + .filter((line) => line !== "") + .map((line) => { + try { + return JSON.parse(line); + } catch (e) { + // Non-JSON lines (heartbeats, etc.) are expected in streaming — log at debug level + console.debug("[Ollama Worker] Non-JSON stream line skipped: " + (line || '').substring(0, 80)); + return null; + } + }) + .filter((parsed) => parsed !== null); + }catch(e){ + console.error("[Ollama Worker] Error parsing lines: " + e); + } + + for (const parsedLine of parsedLines) { + const { message } = parsedLine; + const { content } = message; + + if (content) { + assistantResponseAccumulator += content; + postMessage({ type: 'newToken', payload: { token: content } }); + } + } + } + } catch (error) { + console.error('[Ollama Worker] Stream error: ' + error); + postMessage({ type: 'error', payload: "Connection error: " + error.message }); + } + break; + + case 'stop': + stopStreaming = true; + break; + + default: + console.error('[Ollama Worker] Unknown message type:', event.data.type); + } +}; diff --git a/manifest.json b/manifest.json index 4316ff1..91df72b 100644 --- a/manifest.json +++ b/manifest.json @@ -1,8 +1,8 @@ { "manifest_version": 2, "name": "AutoSort+", - "version": "1.2.0", - "description": "Automatically sort and label your emails with custom rules using AI", + "version": "1.2.3.3", + "description": "__MSG_extensionDescription__", "author": "Nigel Hagen", "applications": { "gecko": { @@ -10,6 +10,7 @@ "strict_min_version": "78.0" } }, + "default_locale": "en", "permissions": [ "messagesRead", "messagesModify", @@ -18,16 +19,17 @@ "menus", "tabs", "messagesMove", - "messagesRead", "activeTab", - "https://generativelanguage.googleapis.com/*" + "https://generativelanguage.googleapis.com/*", + "http://localhost/*", + "http://127.0.0.1/*" ], "background": { - "scripts": ["background.js"] + "scripts": ["js/logger.js", "background.js"] }, "content_scripts": [ { - "matches": ["*://*/*"], + "matches": ["http://localhost/*", "http://127.0.0.1/*"], "js": ["content.js"] } ], @@ -35,8 +37,18 @@ "page": "options.html", "open_in_tab": true }, + "browser_action": { + "default_icon": "icons/icon-48.png", + "default_title": "__MSG_extensionDefaultTitle__" + }, "icons": { "48": "icons/icon-48.png", "96": "icons/icon-96.png" - } + }, + "web_accessible_resources": [ + "api_ollama/index.html", + "api_ollama/ollama-popup.js", + "js/ollama.js", + "js/workers/ollama-worker.js" + ] } \ No newline at end of file diff --git a/memory/MEMORY.md b/memory/MEMORY.md new file mode 100644 index 0000000..51c9b29 --- /dev/null +++ b/memory/MEMORY.md @@ -0,0 +1,3 @@ +# AutoSort+ Memory Index + +- [Project Overview](project_autosortplus_overview.md) — Architecture, providers, storage keys, i18n coverage diff --git a/memory/project_autosortplus_overview.md b/memory/project_autosortplus_overview.md new file mode 100644 index 0000000..13def5a --- /dev/null +++ b/memory/project_autosortplus_overview.md @@ -0,0 +1,45 @@ +--- +name: AutoSort+ project overview +description: Complete architecture and feature overview of the AutoSort+ Thunderbird extension +type: project +--- + +AutoSort+ is a Thunderbird WebExtension (Manifest V2, min v78.0) for AI-powered email sorting. Version 1.2.3.3. + +## Core files +- `manifest.json` - MV2 extension config, permissions (messagesRead/Modify/Move, accountsRead, storage, menus, tabs, activeTab), host permissions for Google API + localhost +- `background.js` - Main engine: email analysis, batch processing, Gemini rate limiting, auto-sort listener, context menu, folder operations (~2000 lines) +- `options.js` - Settings UI: provider config, API key management, Gemini multi-key, Ollama/OpenAI-compatible settings, batch progress panel, folder import, move history (~1800 lines) +- `options.html` - Settings page with collapsible sections +- `content.js` - Content script for localhost tab injection (Ollama CORS workaround, ~144 lines) +- `styles.css` - UI styling with batch processing animations + +## JS modules +- `js/i18n.js` - Lightweight i18n helper using browser.i18n.getMessage() +- `js/logger.js` - DebugLogger class with cross-context sync via storage +- `js/ollama.js` - Ollama API client class +- `js/providers-config.js` - Centralized provider registry with batch configs +- `js/tab-fetch-utils.js` - Tab injection fetch utility for localhost endpoints +- `js/workers/ollama-worker.js` - Web Worker for Ollama streaming + +## AI providers (7 total) +1. **Gemini** - gemini-2.5-flash, free 20/day per key, multi-key rotation +2. **OpenAI** - gpt-4o-mini, paid +3. **Anthropic** - claude-3-haiku-20240307 +4. **Groq** - llama-3.3-70b-versatile, free 30/min +5. **Mistral** - mistral-small-latest +6. **Ollama** - local, any model, tab-injection for CORS +7. **OpenAI-Compatible** - custom endpoint, tab-injection for localhost + +## Key architecture patterns +- **Tab injection**: Thunderbird background scripts can't fetch localhost directly. Workaround: open hidden tab, inject JS via executeScript, poll window for result, close tab +- **Batch engine**: Chunk-based processing with per-provider concurrency limits, pause/resume/cancel via shared _batchState +- **Gemini rate limiting**: Mutex-chained storage operations, per-key tracking, 12s min interval, 20/day limit, auto-rotation across keys +- **Auto-sort**: Listens to browser.messages.onNewMailReceived, processes Inbox with concurrency-limited parallel processor, handles pagination via continueList +- **Folder caching**: Builds Map keyed by "accountId:folderName" to avoid N+1 recursive searches + +## Storage keys +apiKey, aiProvider, labels, enableAi, geminiPaidPlan, geminiApiKeys, currentGeminiKeyIndex, geminiRateLimits, geminiRateLimit (legacy), debugMode, batchChunkSize, autoSortEnabled, customPrompt, ollamaUrl, ollamaModel, ollamaCustomModel, ollamaAuthToken, ollamaCpuOnly, ollamaNumCtx, customBaseUrl, customModel, moveHistory, currentBatch + +## i18n +Full coverage in English (en) and Simplified Chinese (zh_CN). Uses data-i18n attributes on HTML elements. diff --git a/options.html b/options.html index 0d96648..de74720 100644 --- a/options.html +++ b/options.html @@ -2,98 +2,308 @@ - AutoSort+ Settings + AutoSort+ Settings -
-

AutoSort+ Settings

- -
-

AI Settings

- -
- - + + + + + + + + +
+
+ +
+ + + - -
- -
- - + + + - -
- - + + +
+

🔑 API Key Configuration

+
+ + +
+ + +
+
+
- -
-

AutoSort+ uses AI to analyze your emails and automatically sort them into categories/folders based on their content. The AI will:

-
    -
  • Read and understand email content
  • -
  • Identify key topics and themes
  • -
  • Match emails to appropriate categories/folders
  • -
  • Learn from your manual corrections to improve accuracy
  • -
+ + + + + + + + +
+

⚙️ General Settings

+
+ + +
+
+ + + Open Thunderbird Developer Tools (Ctrl+Shift+I) to view logs +
+
+ + + Process N emails at once, wait for all responses, then continue (1-20) +
+
+ + + Automatically classify and move new Inbox emails using AI +
+
+ + + + + +
+

ℹ️ How AI Sorting Works

+
+

AutoSort+ uses AI to analyze your emails and automatically sort them into categories/folders based on their content. The AI will:

+
    +
  • Read and understand email content
  • +
  • Identify key topics and themes
  • +
  • Match emails to appropriate categories/folders
  • +
  • Learn from your manual corrections to improve accuracy
  • +
+
+
+
+
-
-

Custom Categories/Folders

+ + + + +
+
+

📁 Custom Categories/Folders

+ +
+
+
-

Folder Source

- - +

Folder Source

+ +
+
- - - + + +
+
- +
- +
+
-
-

Move History

+ + -
- -
+ +
+
- + +
+ + + - \ No newline at end of file + \ No newline at end of file diff --git a/options.js b/options.js index e0789d0..d782205 100644 --- a/options.js +++ b/options.js @@ -1,4 +1,47 @@ +/** Escape HTML special characters to prevent XSS in innerHTML assignments. */ +function escapeHtml(str) { + if (!str) return ''; + return String(str) + .replace(/&/g, '&') + .replace(//g, '>') + .replace(/"/g, '"') + .replace(/'/g, '''); +} + document.addEventListener('DOMContentLoaded', async function() { + // Apply i18n translations first + if (typeof applyTranslations === 'function') { + applyTranslations(); + } + + if (window.debugLogger) { + window.debugLogger.init(); + } + + const sectionHeaders = document.querySelectorAll('.section-header'); + sectionHeaders.forEach(header => { + header.addEventListener('click', function() { + const sectionId = this.getAttribute('data-section'); + const content = document.getElementById(sectionId); + const section = this.parentElement; + const icon = this.querySelector('.collapse-icon'); + + if (section.classList.contains('collapsed')) { + section.classList.remove('collapsed'); + content.style.display = 'block'; + icon.textContent = '▼'; + setTimeout(() => { + content.style.animation = 'slideDown 0.3s ease-out'; + }, 0); + } else { + section.classList.add('collapsed'); + content.style.display = 'none'; + icon.textContent = '▶'; + } + }); + }); + const labelsContainer = document.getElementById('labels-container'); const addLabelButton = document.getElementById('add-label'); const saveButton = document.getElementById('save-settings'); @@ -8,6 +51,8 @@ document.addEventListener('DOMContentLoaded', async function() { const getApiKeyButton = document.getElementById('get-api-key'); const testApiButton = document.getElementById('test-api'); const apiTestResult = document.getElementById('api-test-result'); + const geminiPaidContainer = document.getElementById('gemini-paid-container'); + const geminiPaidCheckbox = document.getElementById('gemini-paid-plan'); const importLabelsButton = document.getElementById('import-labels'); const bulkImportTextarea = document.getElementById('bulk-import-text'); const loadImapFoldersButton = document.getElementById('load-imap-folders'); @@ -17,101 +62,561 @@ document.addEventListener('DOMContentLoaded', async function() { const folderCount = document.getElementById('folder-count'); const useImapFoldersButton = document.getElementById('use-imap-folders'); const useCustomFoldersButton = document.getElementById('use-custom-folders'); + const geminiMultiKeysContainer = document.getElementById('gemini-multi-keys-container'); + const geminiKeysList = document.getElementById('gemini-keys-list'); + const addGeminiKeyButton = document.getElementById('add-gemini-key'); + + // Ollama-specific elements + const ollamaModelSelect = document.getElementById('ollama-model'); + const ollamaCustomModelInput = document.getElementById('ollama-custom-model'); + const ollamaUrlInput = document.getElementById('ollama-url'); + const ollamaAuthTokenInput = document.getElementById('ollama-auth-token'); + const ollamaCpuOnlyCheckbox = document.getElementById('ollama-cpu-only'); + const testOllamaButton = document.getElementById('test-ollama'); + const listOllamaModelsButton = document.getElementById('list-ollama-models'); + const downloadOllamaModelButton = document.getElementById('download-ollama-model'); + const ollamaDownloadModelInput = document.getElementById('ollama-download-model'); + const ollamaDownloadStatus = document.getElementById('ollama-download-status'); + const ollamaTestResult = document.getElementById('ollama-test-result'); + const diagnoseOllamaButton = document.getElementById('diagnose-ollama'); + const ollamaDiagnostics = document.getElementById('ollama-diagnostics'); + + // OpenAI-Compatible elements + const customBaseUrlInput = document.getElementById('custom-base-url'); + const customModelSelect = document.getElementById('custom-model-select'); + const customModelCustomInput = document.getElementById('custom-model-custom'); + const customApiKeyInput = document.getElementById('custom-api-key'); + const fetchCustomModelsButton = document.getElementById('fetch-custom-models'); + const testCustomEndpointButton = document.getElementById('test-custom-endpoint'); + const customTestResult = document.getElementById('custom-test-result'); + + // Debug mode element + const enableDebugCheckbox = document.getElementById('enable-debug'); + + if (ollamaUrlInput) { + ollamaUrlInput.addEventListener('input', () => { + const url = ollamaUrlInput.value.trim() || 'http://localhost:11434'; + const chatEndpoint = document.getElementById('ollama-chat-endpoint'); + const pullEndpoint = document.getElementById('ollama-pull-endpoint'); + const tagsEndpoint = document.getElementById('ollama-tags-endpoint'); + + if (chatEndpoint) chatEndpoint.textContent = `${url}/api/chat`; + if (pullEndpoint) pullEndpoint.textContent = `${url}/api/pull`; + if (tagsEndpoint) tagsEndpoint.textContent = `${url}/api/tags`; + + updateSaveButtonState(); + }); + } let loadedFolders = []; + let geminiKeys = []; // Array to store multiple Gemini API keys // AI Provider configurations const aiProviders = { gemini: { - name: 'Google Gemini', + name: i18n.get('providerGemini'), signupUrl: 'https://aistudio.google.com/app/apikey', - info: '✓ Free tier: 15 requests/minute, 1500/day
✓ Best for: General use, multilingual support
✓ Models: Gemini 2.5 Flash', + info: i18n.get('providerInfoGemini'), isFree: true }, openai: { - name: 'OpenAI', + name: i18n.get('providerOpenAI'), signupUrl: 'https://platform.openai.com/signup', - info: '✓ Free trial: $5 credit
✓ Best for: High accuracy, English content
✓ Models: GPT-4o-mini ($0.15/1M tokens)', + info: i18n.get('providerInfoOpenai'), isFree: false }, anthropic: { - name: 'Anthropic Claude', + name: i18n.get('providerAnthropic'), signupUrl: 'https://console.anthropic.com/', - info: '✓ Free tier: Limited requests
✓ Best for: Long emails, detailed analysis
✓ Models: Claude 3 Haiku', + info: i18n.get('providerInfoAnthropic'), isFree: true }, groq: { - name: 'Groq', + name: i18n.get('providerGroq'), signupUrl: 'https://console.groq.com/', - info: '✓ Free tier: 30 requests/minute
✓ Best for: Speed (fastest)
✓ Models: Llama 3.3 (Mixtral deprecated)', + info: i18n.get('providerInfoGroq'), isFree: true }, mistral: { - name: 'Mistral AI', + name: i18n.get('providerMistral'), signupUrl: 'https://console.mistral.ai/', - info: '✓ Free tier: Limited requests
✓ Best for: European users, GDPR compliance
✓ Models: Mistral Small', + info: i18n.get('providerInfoMistral'), + isFree: true + }, + ollama: { + name: i18n.get('providerOllama'), + signupUrl: 'https://ollama.ai/', + info: i18n.get('providerInfoOllama'), + isFree: true + }, + 'openai-compatible': { + name: i18n.get('providerOpenAICompatible'), + signupUrl: '', + info: i18n.get('providerInfoOpenaiCompatible'), isFree: true } }; - // Update provider info when selection changes function updateProviderInfo() { const provider = aiProviderSelect.value; const config = aiProviders[provider]; + + const ollamaSubsection = document.getElementById('ollama-settings-subsection'); + const apiKeySubsection = document.getElementById('api-key-subsection'); + const geminiMultiKeysSubsection = document.getElementById('gemini-multi-keys-subsection'); + const geminiUsageSubsection = document.getElementById('gemini-usage-subsection'); + const rateLimitWarning = document.getElementById('rate-limit-warning'); + const openaiCompatibleSubsection = document.getElementById('openai-compatible-settings-subsection'); + + // Show/hide provider-specific UI elements + if (provider === 'gemini') { + geminiPaidContainer.style.display = 'block'; + if (geminiMultiKeysSubsection) geminiMultiKeysSubsection.style.display = 'block'; + if (geminiUsageSubsection) geminiUsageSubsection.style.display = 'block'; + if (apiKeySubsection) apiKeySubsection.style.display = 'none'; + if (ollamaSubsection) ollamaSubsection.style.display = 'none'; + if (openaiCompatibleSubsection) openaiCompatibleSubsection.style.display = 'none'; + updateGeminiUsageDisplay(); + } else if (provider === 'ollama') { + geminiPaidContainer.style.display = 'none'; + if (geminiMultiKeysSubsection) geminiMultiKeysSubsection.style.display = 'none'; + if (geminiUsageSubsection) geminiUsageSubsection.style.display = 'none'; + if (apiKeySubsection) apiKeySubsection.style.display = 'none'; + if (ollamaSubsection) ollamaSubsection.style.display = 'block'; + if (openaiCompatibleSubsection) openaiCompatibleSubsection.style.display = 'none'; + } else if (provider === 'openai-compatible') { + geminiPaidContainer.style.display = 'none'; + if (geminiMultiKeysSubsection) geminiMultiKeysSubsection.style.display = 'none'; + if (geminiUsageSubsection) geminiUsageSubsection.style.display = 'none'; + if (apiKeySubsection) apiKeySubsection.style.display = 'none'; + if (ollamaSubsection) ollamaSubsection.style.display = 'none'; + if (openaiCompatibleSubsection) openaiCompatibleSubsection.style.display = 'block'; + } else { + geminiPaidContainer.style.display = 'none'; + if (geminiMultiKeysSubsection) geminiMultiKeysSubsection.style.display = 'none'; + if (geminiUsageSubsection) geminiUsageSubsection.style.display = 'none'; + if (apiKeySubsection) apiKeySubsection.style.display = 'block'; + if (ollamaSubsection) ollamaSubsection.style.display = 'none'; + if (openaiCompatibleSubsection) openaiCompatibleSubsection.style.display = 'none'; + } providerInfo.innerHTML = `
- ${config.name} ${config.isFree ? 'FREE' : ''} + ${config.name} ${config.isFree ? '' + i18n.get('freeBadge') + '' : ''}

${config.info}

`; + + if (provider !== 'ollama' && provider !== 'openai-compatible') { + apiKeyInput.placeholder = i18n.get('apiKeyPlaceholder'); + } + + updateSaveButtonState(); + } + + async function updateGeminiUsageDisplay() { + const data = await browser.storage.local.get(['geminiRateLimits', 'currentGeminiKeyIndex', 'geminiApiKeys', 'geminiRateLimit']); + const currentIndex = data.currentGeminiKeyIndex || 0; + const keys = data.geminiApiKeys || geminiKeys; - apiKeyInput.placeholder = `Enter your ${config.name} API key`; + if (keys.length > 1) { + // Multi-key mode + document.getElementById('single-key-usage').style.display = 'none'; + document.getElementById('multi-key-usage').style.display = 'block'; + const rateLimits = data.geminiRateLimits || []; + updateMultiKeyUsageDisplay(keys, rateLimits, currentIndex); + } else if (keys.length === 1) { + // Single-key mode but stored in new format + document.getElementById('single-key-usage').style.display = 'block'; + document.getElementById('multi-key-usage').style.display = 'none'; + const rateLimits = data.geminiRateLimits || [{ requests: [], dailyCount: 0, dailyResetTime: Date.now() }]; + updateSingleKeyUsageDisplay(rateLimits[0]); + } else { + // Legacy single-key mode (backward compatibility) + document.getElementById('single-key-usage').style.display = 'block'; + document.getElementById('multi-key-usage').style.display = 'none'; + const rateLimit = data.geminiRateLimit || { requests: [], dailyCount: 0, dailyResetTime: Date.now() }; + updateSingleKeyUsageDisplay(rateLimit); + } } - // Initialize provider info + // Backward compatibility for single key mode + async function updateSingleKeyUsageDisplay(rateLimit) { + const now = Date.now(); + + document.getElementById('gemini-daily-count').textContent = rateLimit.dailyCount; + + if (rateLimit.requests && rateLimit.requests.length > 0) { + const lastRequest = Math.max(...rateLimit.requests); + const minutesAgo = Math.floor((now - lastRequest) / 60000); + if (minutesAgo < 1) { + document.getElementById('gemini-last-request').textContent = i18n.get('geminiNever'); + } else if (minutesAgo < 60) { + document.getElementById('gemini-last-request').textContent = i18n.get('minutesAgo', [minutesAgo, minutesAgo > 1 ? 's' : '']); + } else { + const hoursAgo = Math.floor(minutesAgo / 60); + document.getElementById('gemini-last-request').textContent = i18n.get('hoursAgo', [hoursAgo, hoursAgo > 1 ? 's' : '']); + } + } else { + document.getElementById('gemini-last-request').textContent = i18n.get('geminiNever'); + } + + if (rateLimit.dailyResetTime > now) { + const hoursUntil = Math.ceil((rateLimit.dailyResetTime - now) / (1000 * 60 * 60)); + document.getElementById('gemini-reset-time').textContent = i18n.get('inHours', [hoursUntil, hoursUntil > 1 ? 's' : '']); + } else { + document.getElementById('gemini-reset-time').textContent = i18n.get('geminiResetExpired'); + } + + const usageMessage = document.getElementById('usage-message'); + const statusSpan = document.getElementById('gemini-status'); + + if (rateLimit.dailyCount >= 20) { + statusSpan.textContent = '🔴 ' + i18n.get('geminiStatusLimitReached'); + statusSpan.style.color = '#dc3545'; + usageMessage.className = 'usage-message warning'; + usageMessage.textContent = '⚠️ ' + i18n.get('geminiLimitMessage'); + } else if (rateLimit.dailyCount >= 15) { + statusSpan.textContent = '🟡 ' + i18n.get('geminiStatusNearlyFull'); + statusSpan.style.color = '#ffc107'; + usageMessage.className = 'usage-message warning'; + usageMessage.textContent = `⚠️ ${i18n.get('geminiRemainingMessage')} ${20 - rateLimit.dailyCount} ${i18n.get('requestsRemainingToday')}`; + } else { + statusSpan.textContent = '🟢 ' + i18n.get('geminiStatusReady'); + statusSpan.style.color = '#28a745'; + usageMessage.style.display = 'none'; + } + } + + function updateMultiKeyUsageDisplay(keys, rateLimits, currentIndex) { + const container = document.getElementById('all-keys-usage-stats'); + const now = Date.now(); + container.innerHTML = ''; + + keys.forEach((key, index) => { + const rateLimit = rateLimits[index] || { requests: [], dailyCount: 0, dailyResetTime: now }; + const isActive = index === currentIndex; + + const card = document.createElement('div'); + card.className = `key-usage-card${isActive ? ' active' : ''}`; + + let statusBadge = ''; + if (isActive) { + statusBadge = `${i18n.get('keyActive')}`; + } else if (rateLimit.dailyCount >= 20) { + statusBadge = `${i18n.get('keyLimit')}`; + } else if (rateLimit.dailyCount >= 15) { + statusBadge = `${i18n.get('keyNearLimit')}`; + } else { + statusBadge = `${i18n.get('keyReady')}`; + } + + let resetText = '--'; + if (rateLimit.dailyResetTime > now) { + const hoursUntil = Math.ceil((rateLimit.dailyResetTime - now) / (1000 * 60 * 60)); + resetText = i18n.get('inHoursShort', [hoursUntil]); + } + + let lastRequestText = i18n.get('geminiNever'); + if (rateLimit.requests && rateLimit.requests.length > 0) { + const lastRequest = Math.max(...rateLimit.requests); + const minutesAgo = Math.floor((now - lastRequest) / 60000); + if (minutesAgo < 1) { + lastRequestText = i18n.get('justNow'); + } else if (minutesAgo < 60) { + lastRequestText = i18n.get('minutesAgoShort', [minutesAgo]); + } else { + lastRequestText = i18n.get('hoursAgoShort', [Math.floor(minutesAgo / 60)]); + } + } + + const maskedKey = key ? `...${key.slice(-8)}` : i18n.get('keyNotSet'); + + card.innerHTML = ` +
+ ${i18n.get('keyLabel', [index + 1])} ${maskedKey} + ${statusBadge} +
+
+
+ ${i18n.get('statUsage')} + ${rateLimit.dailyCount}/20 +
+
+ ${i18n.get('statLast')} + ${lastRequestText} +
+
+ ${i18n.get('statResets')} + ${resetText} +
+
+ ${i18n.get('statAvailable')} + ${20 - rateLimit.dailyCount} +
+
+ `; + + container.appendChild(card); + }); + } + + function addGeminiKeyInput(value = '', index = -1) { + if (index === -1) { + index = geminiKeys.length; + geminiKeys.push(value); + } + + const keyItem = document.createElement('div'); + keyItem.className = 'gemini-key-item'; + keyItem.dataset.index = index; + + const keyIndex = document.createElement('span'); + keyIndex.className = 'key-index'; + keyIndex.textContent = `#${index + 1}`; + + const input = document.createElement('input'); + input.type = 'password'; + input.className = 'gemini-api-key-input'; + input.placeholder = i18n.get('geminiKeyInputPlaceholder'); + input.value = value; + input.dataset.index = index; + input.addEventListener('input', (e) => { + const newKey = e.target.value.trim(); + geminiKeys[index] = newKey; + + if (newKey) { + const isDuplicate = geminiKeys.some((key, i) => i !== index && key.trim() === newKey); + if (isDuplicate) { + input.style.borderColor = '#dc3545'; + input.title = i18n.get('keyAlreadyAddedTitle'); + } else { + input.style.borderColor = ''; + input.title = ''; + } + } else { + input.style.borderColor = ''; + input.title = ''; + } + + updateSaveButtonState(); + }); + + const testButton = document.createElement('button'); + testButton.className = 'button'; + testButton.textContent = i18n.get('testButton'); + testButton.addEventListener('click', () => { + const keyValue = input.value.trim(); + if (!keyValue) { + statusSpan.textContent = i18n.get('enterKeyFirst'); + statusSpan.className = 'key-test-result error'; + return; + } + + // Check for duplicates before testing + const isDuplicate = geminiKeys.some((key, i) => i !== index && key.trim() === keyValue); + if (isDuplicate) { + statusSpan.textContent = i18n.get('duplicateKey'); + statusSpan.className = 'key-test-result error'; + statusSpan.title = i18n.get('duplicateKeyTitle'); + return; + } + + testGeminiKey(keyValue, index, keyItem); + }); + + const removeButton = document.createElement('button'); + removeButton.className = 'button'; + removeButton.textContent = '×'; + removeButton.addEventListener('click', () => removeGeminiKey(index)); + + const statusSpan = document.createElement('span'); + statusSpan.className = 'key-test-result'; + statusSpan.dataset.index = index; + + keyItem.appendChild(keyIndex); + keyItem.appendChild(input); + keyItem.appendChild(testButton); + keyItem.appendChild(removeButton); + keyItem.appendChild(statusSpan); + geminiKeysList.appendChild(keyItem); + } + + function removeGeminiKey(index) { + if (geminiKeys.length <= 1) { + alert(i18n.get('mustHaveOneKey')); + return; + } + + if (confirm(i18n.get('removeApiKeyConfirm', [index + 1]))) { + geminiKeys.splice(index, 1); + refreshGeminiKeysList(); + } + } + + function refreshGeminiKeysList() { + geminiKeysList.innerHTML = ''; + geminiKeys.forEach((key, index) => { + addGeminiKeyInput(key, index); + }); + } + + async function testGeminiKey(apiKey, index, keyItemElement) { + const statusSpan = keyItemElement.querySelector('.key-test-result'); + + if (!apiKey) { + statusSpan.textContent = i18n.get('enterKeyFirst'); + statusSpan.className = 'key-test-result error'; + return; + } + + try { + statusSpan.textContent = i18n.get('testingStatus'); + statusSpan.className = 'key-test-result testing'; + + const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${apiKey}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + contents: [{ parts: [{ text: "Test" }] }], + generationConfig: { maxOutputTokens: 10 } + }) + }); + + if (response.ok) { + statusSpan.textContent = i18n.get('validKey'); + statusSpan.className = 'key-test-result success'; + } else if (response.status === 429) { + statusSpan.textContent = i18n.get('limitReachedGemini'); + statusSpan.className = 'key-test-result error'; + statusSpan.title = i18n.get('limitReachedGeminiTitle'); + console.error(`Key #${index + 1} has reached rate limit (429)`); + } else if (response.status === 401 || response.status === 403) { + statusSpan.textContent = i18n.get('invalidKey'); + statusSpan.className = 'key-test-result error'; + statusSpan.title = i18n.get('invalidKeyTitle'); + console.error(`Key #${index + 1} test failed: ${response.status}`); + } else { + statusSpan.textContent = i18n.get('testFailed', [response.status]); + statusSpan.className = 'key-test-result error'; + console.error(`Key #${index + 1} test failed:`, response.status); + } + } catch (error) { + statusSpan.textContent = i18n.get('errorStatus'); + statusSpan.className = 'key-test-result error'; + console.error(`Key #${index + 1} test error:`, error); + } + } + updateProviderInfo(); aiProviderSelect.addEventListener('change', updateProviderInfo); + + addGeminiKeyButton.addEventListener('click', () => { + addGeminiKeyInput(''); + }); + + document.getElementById('reset-gemini-counter').addEventListener('click', async () => { + if (confirm(i18n.get('resetCounterConfirm'))) { + await browser.storage.local.set({ + geminiRateLimit: { + requests: [], + dailyCount: 0, + dailyResetTime: Date.now() + (24 * 60 * 60 * 1000) + } + }); + await updateGeminiUsageDisplay(); + const usageMessage = document.getElementById('usage-message'); + usageMessage.className = 'usage-message info'; + usageMessage.textContent = i18n.get('counterResetMsg'); + } + }); + + document.getElementById('refresh-usage').addEventListener('click', async () => { + await updateGeminiUsageDisplay(); + const usageMessage = document.getElementById('usage-message'); + usageMessage.className = 'usage-message info'; + usageMessage.textContent = i18n.get('usageRefreshed'); + setTimeout(() => { + if (usageMessage.classList.contains('info')) { + usageMessage.style.display = 'none'; + } + }, 3000); + }); + + document.getElementById('refresh-all-usage').addEventListener('click', async () => { + await updateGeminiUsageDisplay(); + showMessage(i18n.get('allUsageRefreshed'), true); + }); - // Get API Key button getApiKeyButton.addEventListener('click', async () => { const provider = aiProviderSelect.value; const config = aiProviders[provider]; - + + // Skip if provider has no signup URL (like openai-compatible) + if (!config.signupUrl) { + showMessage(i18n.get('noSignupUrl'), false); + return; + } + try { - // Try to open in new tab await browser.tabs.create({ url: config.signupUrl }); } catch (error) { console.error('Failed to open tab:', error); - // Fallback: show URL and copy to clipboard const url = config.signupUrl; try { await navigator.clipboard.writeText(url); - showMessage(`URL copied to clipboard:\n${url}`, true); + showMessage(i18n.get('urlCopied', [url]), true); } catch (e) { - // Last resort: show alert with URL - alert(`Please visit:\n${url}`); + alert(i18n.get('pleaseVisit', [url])); } } }); - // Function to validate and update save button state function updateSaveButtonState() { const labels = Array.from(document.querySelectorAll('.label-input')) .map(input => input.value.trim()) .filter(label => label !== ''); - const apiKey = apiKeyInput.value.trim(); + const provider = aiProviderSelect.value; + let hasValidApiKey = true; // Default to true, override based on provider + + if (provider === 'gemini') { + const validGeminiKeys = geminiKeys.filter(key => key && key.trim() !== ''); + hasValidApiKey = validGeminiKeys.length > 0; + } else if (provider === 'ollama') { + // Ollama needs URL and model configured + const ollamaUrl = ollamaUrlInput ? ollamaUrlInput.value.trim() : ''; + let ollamaModel = ollamaModelSelect ? ollamaModelSelect.value : ''; + const ollamaCustomModel = ollamaCustomModelInput ? ollamaCustomModelInput.value.trim() : ''; + hasValidApiKey = !!ollamaUrl && (!!ollamaModel || (!!ollamaCustomModel && ollamaModel === 'custom')); + } else if (provider === 'openai-compatible') { + // OpenAI-compatible needs baseUrl and model, not API key + const baseUrl = customBaseUrlInput ? customBaseUrlInput.value.trim() : ''; + const model = customModelSelect ? customModelSelect.value : ''; + const customModel = customModelCustomInput ? customModelCustomInput.value.trim() : ''; + hasValidApiKey = !!baseUrl && (!!model || (!!customModel && model === 'custom')); + } else { + // Non-Ollama providers (OpenAI, Anthropic, Groq, Mistral) require API key + const apiKey = apiKeyInput.value.trim(); + hasValidApiKey = !!apiKey; + } - if (labels.length === 0 || !apiKey) { + if (labels.length === 0 || !hasValidApiKey) { saveButton.disabled = true; saveButton.classList.add('disabled'); - + let missingItems = []; if (labels.length === 0) missingItems.push('folders/labels'); - if (!apiKey) missingItems.push('API key'); - - saveButton.title = `Please configure: ${missingItems.join(' and ')}`; + if (!hasValidApiKey) { + if (provider === 'ollama') missingItems.push('Ollama URL/model'); + else if (provider === 'openai-compatible') missingItems.push('endpoint URL/model'); + else if (provider === 'gemini') missingItems.push('Gemini API key'); + else missingItems.push('API key'); + } + + saveButton.title = i18n.get('pleaseConfigure', [missingItems.join(' and ')]); } else { saveButton.disabled = false; saveButton.classList.remove('disabled'); @@ -119,45 +624,151 @@ document.addEventListener('DOMContentLoaded', async function() { } } - // Load saved settings - browser.storage.local.get(['labels', 'apiKey', 'aiProvider', 'enableAi']).then(result => { + browser.storage.local.get(['labels', 'apiKey', 'geminiApiKeys', 'aiProvider', 'enableAi', 'geminiPaidPlan', 'ollamaUrl', 'ollamaModel', 'ollamaCustomModel', 'ollamaCpuOnly', 'customBaseUrl', 'customModel', 'debugMode', 'batchChunkSize', 'autoSortEnabled', 'customPrompt']).then(result => { + // Migration: default autoSortEnabled to true for users upgrading from older versions + if (result.autoSortEnabled === undefined) { + browser.storage.local.set({ autoSortEnabled: true }).catch(() => {}); + result.autoSortEnabled = true; + } + if (result.labels && result.labels.length > 0) { result.labels.forEach(label => { addLabelInput(label); }); } else { - // Show instruction if no labels - labelsContainer.innerHTML = '
No folders/labels configured. Click "Load Folders from Mail Account" above or add custom labels below.
'; + labelsContainer.innerHTML = '
' + i18n.get('noFoldersInstruction') + '
'; } - if (result.apiKey) { + + if (result.geminiApiKeys && result.geminiApiKeys.length > 0) { + geminiKeys = result.geminiApiKeys; + geminiKeys.forEach((key, index) => { + addGeminiKeyInput(key, index); + }); + } else if (result.apiKey) { + // Migrate from single key to multi-key + geminiKeys = [result.apiKey]; + addGeminiKeyInput(result.apiKey, 0); apiKeyInput.value = result.apiKey; + } else { + // No keys configured yet - add one empty field + addGeminiKeyInput('', 0); + } + + if (result.ollamaUrl && ollamaUrlInput) { + ollamaUrlInput.value = result.ollamaUrl; + } + if (result.ollamaAuthToken && ollamaAuthTokenInput) { + ollamaAuthTokenInput.value = result.ollamaAuthToken; + } + if (result.ollamaModel && ollamaModelSelect) { + ollamaModelSelect.value = result.ollamaModel; + if (result.ollamaModel === 'custom' && result.ollamaCustomModel && ollamaCustomModelInput) { + ollamaCustomModelInput.value = result.ollamaCustomModel; + ollamaCustomModelInput.style.display = 'block'; + } + } + if (ollamaCpuOnlyCheckbox) { + ollamaCpuOnlyCheckbox.checked = result.ollamaCpuOnly === true; + } + + if (result.customBaseUrl && customBaseUrlInput) { + customBaseUrlInput.value = result.customBaseUrl; + } + if (result.customModel) { + const dropdownOptions = customModelSelect ? Array.from(customModelSelect.options).map(o => o.value) : []; + if (dropdownOptions.includes(result.customModel)) { + if (customModelSelect) customModelSelect.value = result.customModel; + } else { + if (customModelSelect) { + customModelSelect.value = 'custom'; + if (customModelCustomInput) { + customModelCustomInput.style.display = 'block'; + customModelCustomInput.value = result.customModel; + } + } + } } + if (result.aiProvider) { aiProviderSelect.value = result.aiProvider; updateProviderInfo(); } // Set enableAi to true by default if not set document.getElementById('enable-ai').checked = result.enableAi !== false; - + + geminiPaidCheckbox.checked = result.geminiPaidPlan === true; + + if (enableDebugCheckbox && result.debugMode !== undefined) { + enableDebugCheckbox.checked = result.debugMode; + } + + const batchChunkSizeInput = document.getElementById('batch-chunk-size'); + if (batchChunkSizeInput && result.batchChunkSize) { + batchChunkSizeInput.value = result.batchChunkSize; + } + + const autoSortCheckbox = document.getElementById('enable-auto-sort'); + if (autoSortCheckbox) { + autoSortCheckbox.checked = result.autoSortEnabled === true; + } + + const customPromptTextarea = document.getElementById('custom-prompt-text'); + if (customPromptTextarea) { + customPromptTextarea.value = result.customPrompt || ''; + } + updateSaveButtonState(); }); - - // Add input listeners for validation + + if (enableDebugCheckbox) { + enableDebugCheckbox.addEventListener('change', async () => { + if (window.debugLogger) { + if (enableDebugCheckbox.checked) { + await window.debugLogger.enable(); + showMessage(i18n.get('debugEnabled'), true); + } else { + await window.debugLogger.disable(); + showMessage(i18n.get('debugDisabled'), true); + } + } + }); + } + + const resetPromptButton = document.getElementById('reset-prompt'); + if (resetPromptButton) { + resetPromptButton.addEventListener('click', () => { + const customPromptTextarea = document.getElementById('custom-prompt-text'); + if (customPromptTextarea) { + customPromptTextarea.value = ''; + showMessage(i18n.get('promptCleared'), true); + } + }); + } + apiKeyInput.addEventListener('input', updateSaveButtonState); labelsContainer.addEventListener('input', updateSaveButtonState); - // Test API connection testApiButton.addEventListener('click', async () => { const apiKey = apiKeyInput.value.trim(); const provider = aiProviderSelect.value; + // Skip for Ollama and OpenAI-Compatible as they have their own test buttons + if (provider === 'ollama') { + showApiTestResult(i18n.get('useOllamaTestButton'), false); + return; + } + if (provider === 'openai-compatible') { + showApiTestResult(i18n.get('useCustomTestButton'), false); + return; + } + if (!apiKey) { - showApiTestResult('Please enter an API key', false); + showApiTestResult(i18n.get('enterApiKey'), false); return; } try { - showApiTestResult('Testing connection...', false); + showApiTestResult(i18n.get('testingConnection'), false); let response; if (provider === 'gemini') { @@ -228,17 +839,16 @@ document.addEventListener('DOMContentLoaded', async function() { } if (response.ok) { - showApiTestResult('✓ API connection successful!', true); + showApiTestResult(i18n.get('apiConnectionSuccess'), true); } else { const error = await response.json(); - showApiTestResult(`API Error: ${error.error?.message || error.message || 'Unknown error'}`, false); + showApiTestResult(i18n.get('apiError', [error.error?.message || error.message || 'Unknown error']), false); } } catch (error) { - showApiTestResult(`Connection Error: ${error.message}`, false); + showApiTestResult(i18n.get('connectionError', [error.message]), false); } }); - // Load IMAP folders loadImapFoldersButton.addEventListener('click', async () => { folderLoadingIndicator.style.display = 'block'; folderSelection.style.display = 'none'; @@ -251,55 +861,50 @@ document.addEventListener('DOMContentLoaded', async function() { const folders = await getAllFolders(account); allFolders.push(...folders); } - - // Filter out system folders and duplicates + loadedFolders = [...new Set(allFolders .filter(f => !['Inbox', 'Trash', 'Drafts', 'Sent', 'Spam', 'Junk', 'Templates', 'Outbox', 'Archives'].includes(f)) .map(f => f.replace(/^INBOX\./i, '').trim()) )].sort(); if (loadedFolders.length === 0) { - showMessage('No folders found. You can create custom folders instead.', false); + showMessage(i18n.get('noFoldersFound'), false); folderLoadingIndicator.style.display = 'none'; return; } - - // Show folder preview + folderCount.textContent = loadedFolders.length; foldersPreview.innerHTML = loadedFolders .slice(0, 10) - .map(f => `
${f}
`) - .join('') + (loadedFolders.length > 10 ? `
...and ${loadedFolders.length - 10} more
` : ''); + .map(f => `
${escapeHtml(f)}
`) + .join('') + (loadedFolders.length > 10 ? `
${escapeHtml(i18n.get('andMore', [loadedFolders.length - 10]))}
` : ''); folderSelection.style.display = 'block'; } catch (error) { - showMessage(`Error loading folders: ${error.message}`, false); + showMessage(i18n.get('errorLoadingFolders', [error.message]), false); console.error('Error loading folders:', error); } finally { folderLoadingIndicator.style.display = 'none'; } }); - - // Use IMAP folders + useImapFoldersButton.addEventListener('click', () => { - if (confirm(`This will replace any existing folders/labels with ${loadedFolders.length} folders from your mail account. Continue?`)) { + if (confirm(i18n.get('replaceFoldersConfirm', [loadedFolders.length]))) { labelsContainer.innerHTML = ''; loadedFolders.forEach(folder => { addLabelInput(folder); }); folderSelection.style.display = 'none'; updateSaveButtonState(); - showMessage(`Loaded ${loadedFolders.length} folders from your mail account. Don't forget to save!`, true); + showMessage(i18n.get('loadedFoldersMsg', [loadedFolders.length]), true); } }); - - // Use custom folders + useCustomFoldersButton.addEventListener('click', () => { folderSelection.style.display = 'none'; - showMessage('You can now add custom folders below', true); + showMessage(i18n.get('addCustomFoldersMsg'), true); }); - - // Helper function to recursively get all folders + async function getAllFolders(account) { const folders = []; @@ -324,44 +929,477 @@ document.addEventListener('DOMContentLoaded', async function() { return folders; } - // Import categories/folders in bulk importLabelsButton.addEventListener('click', () => { const bulkText = bulkImportTextarea.value.trim(); const labels = bulkText.split('\n').map(l => l.trim()).filter(l => l !== ''); - - // Validation + if (labels.length === 0) { - showMessage('Please add at least one folder/label before importing. Enter labels one per line.', false); + showMessage(i18n.get('importOneLabelRequired'), false); return; } - // Confirm if there are existing labels const existingLabels = Array.from(document.querySelectorAll('.label-input')) .map(input => input.value.trim()) .filter(label => label !== ''); if (existingLabels.length > 0) { - if (!confirm(`This will replace your ${existingLabels.length} existing folders/labels with ${labels.length} new ones. Continue?`)) { + if (!confirm(i18n.get('replaceExistingConfirm', [existingLabels.length, labels.length]))) { return; } } - // Clear existing categories/folders labelsContainer.innerHTML = ''; - // Add each category/folder labels.forEach(label => { addLabelInput(label); }); updateSaveButtonState(); - showMessage(`Imported ${labels.length} categories/folders. Don't forget to save!`, true); - bulkImportTextarea.value = ''; // Clear the textarea + showMessage(i18n.get('importedFoldersMsg', [labels.length]), true); + bulkImportTextarea.value = ''; }); - // Add new label input + if (ollamaModelSelect) { + ollamaModelSelect.addEventListener('change', () => { + if (ollamaModelSelect.value === 'custom') { + ollamaCustomModelInput.style.display = 'block'; + } else { + ollamaCustomModelInput.style.display = 'none'; + } + updateSaveButtonState(); + }); + } + + if (ollamaCustomModelInput) { + ollamaCustomModelInput.addEventListener('input', updateSaveButtonState); + } + + if (testOllamaButton) { + testOllamaButton.addEventListener('click', async () => { + const ollamaUrl = ollamaUrlInput.value.trim() || 'http://localhost:11434'; + let selectedModel = ollamaModelSelect.value; + + if (selectedModel === 'custom') { + selectedModel = ollamaCustomModelInput.value.trim(); + if (!selectedModel) { + ollamaTestResult.textContent = i18n.get('enterCustomModelFirst'); + ollamaTestResult.className = 'api-test-result error'; + return; + } + } + + try { + ollamaTestResult.textContent = i18n.get('testingConnectionModels'); + ollamaTestResult.className = 'api-test-result'; + + const testUrl = `${ollamaUrl}/api/tags`; + if (window.debugLogger) { window.debugLogger.info('[Ollama]', 'Test connecting to: ' + testUrl); } + + const headers = {}; + if (ollamaAuthTokenInput && ollamaAuthTokenInput.value.trim()) { + headers['Authorization'] = `Bearer ${ollamaAuthTokenInput.value.trim()}`; + } + + const response = await fetch(testUrl, { + method: 'GET', + headers + }); + + if (window.debugLogger) { window.debugLogger.info('[Ollama]', 'Response status: ' + response.status); } + + if (response.ok) { + const data = await response.json(); + if (window.debugLogger) { window.debugLogger.info('[Ollama]', 'Success:', data); } + const installedModels = data.models && data.models.length > 0 + ? data.models.map(m => m.name) + : []; + + if (installedModels.length === 0) { + ollamaTestResult.textContent = i18n.get('ollamaRunningNoModels'); + ollamaTestResult.className = 'api-test-result error'; + } else { + // Extract base model name (before colon) for regex matching + const selectedBase = selectedModel.split(':')[0].toLowerCase(); + const installedBases = installedModels.map(m => m.split(':')[0].toLowerCase()); + + const modelFound = installedBases.some(base => base === selectedBase); + if (modelFound) { + ollamaTestResult.textContent = i18n.get('connectedModelReady', [selectedModel, installedModels.join(', ')]); + ollamaTestResult.className = 'api-test-result success'; + } else { + ollamaTestResult.textContent = i18n.get('modelNotInstalled', [selectedModel, installedModels.join(', ')]); + ollamaTestResult.className = 'api-test-result error'; + } + } + } else { + const errorText = await response.text(); + console.error('[Ollama Test] Error response:', errorText); + let errorMsg = 'Connection failed'; + if (response.status === 403) { + errorMsg = 'Access denied (403). Check if Ollama is running and the URL is correct.'; + } else if (response.status === 404) { + errorMsg = 'Ollama not found (404). Check the server URL.'; + } else { + try { + const errorData = JSON.parse(errorText); + errorMsg = errorData.error || errorText; + } catch (e) { + errorMsg = errorText || `HTTP ${response.status}`; + } + } + ollamaTestResult.textContent = i18n.get('ollamaErrorLabel', [errorMsg]); + ollamaTestResult.className = 'api-test-result error'; + } + } catch (error) { + console.error('[Ollama Test] Exception:', error); + ollamaTestResult.textContent = i18n.get('ollamaConnectionFailed', [error.message]); + ollamaTestResult.className = 'api-test-result error'; + } + }); + } + + if (customModelSelect) { + customModelSelect.addEventListener('change', () => { + if (customModelSelect.value === 'custom') { + if (customModelCustomInput) customModelCustomInput.style.display = 'block'; + } else { + if (customModelCustomInput) customModelCustomInput.style.display = 'none'; + } + updateSaveButtonState(); + }); + } + + if (customBaseUrlInput) { + customBaseUrlInput.addEventListener('input', updateSaveButtonState); + } + if (customModelCustomInput) { + customModelCustomInput.addEventListener('input', updateSaveButtonState); + } + + if (fetchCustomModelsButton) { + fetchCustomModelsButton.addEventListener('click', async () => { + const baseUrl = customBaseUrlInput ? customBaseUrlInput.value.trim().replace(/\/$/, '') : ''; + const apiKey = customApiKeyInput ? customApiKeyInput.value.trim() : ''; + + if (!baseUrl) { + if (customTestResult) { + customTestResult.textContent = i18n.get('enterBaseUrlFirst'); + customTestResult.className = 'api-test-result error'; + } + return; + } + + try { + if (customTestResult) { + customTestResult.textContent = i18n.get('fetchingModels'); + customTestResult.className = 'api-test-result'; + } + + const headers = { 'Content-Type': 'application/json' }; + if (apiKey) headers['Authorization'] = `Bearer ${apiKey}`; + + // Check if localhost - needs tab injection + const isLocalhost = baseUrl.startsWith('http://localhost') || baseUrl.startsWith('http://127.0.0.1'); + + let modelsData; + + if (isLocalhost) { + // Use tab injection for localhost (Thunderbird restriction) + modelsData = await fetchModelsViaTab(baseUrl, apiKey); + } else { + const response = await fetch(baseUrl + '/models', { headers }); + + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + modelsData = await response.json(); + } + + const models = modelsData.data || modelsData.models || []; + + if (models.length === 0) { + if (customTestResult) { + customTestResult.textContent = i18n.get('noModelsEndpoint'); + customTestResult.className = 'api-test-result error'; + } + return; + } + + if (customModelSelect) { + customModelSelect.innerHTML = ``; + models.forEach(m => { + const modelId = m.id || m.name || m; + const option = document.createElement('option'); + option.value = modelId; + option.textContent = modelId; + customModelSelect.appendChild(option); + }); + const customOpt = document.createElement('option'); + customOpt.value = 'custom'; + customOpt.textContent = i18n.get('openaiCompatibleModelCustom'); + customModelSelect.appendChild(customOpt); + } + + if (customTestResult) { + customTestResult.textContent = i18n.get('foundModelsMsg', [models.length]); + customTestResult.className = 'api-test-result success'; + } + + } catch (error) { + console.error('[Fetch Models] Error:', error); + if (customTestResult) { + customTestResult.textContent = i18n.get('failedFetchModels', [error.message]); + customTestResult.className = 'api-test-result error'; + } + } + }); + } + + if (testCustomEndpointButton) { + testCustomEndpointButton.addEventListener('click', async () => { + const baseUrl = customBaseUrlInput ? customBaseUrlInput.value.trim() : ''; + let model = customModelSelect ? customModelSelect.value : ''; + const apiKey = customApiKeyInput ? customApiKeyInput.value.trim() : ''; + + if (model === 'custom' && customModelCustomInput) { + model = customModelCustomInput.value.trim(); + } + + if (!baseUrl) { + if (customTestResult) { + customTestResult.textContent = i18n.get('enterBaseUrl'); + customTestResult.className = 'api-test-result error'; + } + return; + } + if (!model) { + if (customTestResult) { + customTestResult.textContent = i18n.get('enterModelName'); + customTestResult.className = 'api-test-result error'; + } + return; + } + + try { + if (customTestResult) { + customTestResult.textContent = i18n.get('testingConnection'); + customTestResult.className = 'api-test-result'; + } + + const headers = { 'Content-Type': 'application/json' }; + if (apiKey) { + headers['Authorization'] = `Bearer ${apiKey}`; + } + + const normalizedUrl = baseUrl.replace(/\/$/, ''); + + if (window.debugLogger) { window.debugLogger.info('[Custom]', 'Test connecting to: ' + normalizedUrl + '/chat/completions'); } + + const response = await fetch(normalizedUrl + '/chat/completions', { + method: 'POST', + headers, + body: JSON.stringify({ + model, + messages: [{ role: 'user', content: 'Test' }], + max_tokens: 10 + }) + }); + + if (window.debugLogger) { window.debugLogger.info('[Custom]', 'Response status: ' + response.status); } + + if (response.ok) { + if (customTestResult) { + customTestResult.textContent = i18n.get('connectedSuccessfully', [model, normalizedUrl]); + customTestResult.className = 'api-test-result success'; + } + } else { + const errorText = await response.text(); + console.error('[Custom Endpoint Test] Error response:', errorText); + let errorMsg = 'Connection failed'; + try { + const errorData = JSON.parse(errorText); + errorMsg = errorData.error?.message || errorData.error || errorText; + } catch (e) { + errorMsg = `HTTP ${response.status}: ${response.statusText}`; + } + if (customTestResult) { + customTestResult.textContent = i18n.get('genericErrorLabel', [errorMsg]); + customTestResult.className = 'api-test-result error'; + } + } + } catch (error) { + console.error('[Custom Endpoint Test] Exception:', error); + if (customTestResult) { + customTestResult.textContent = i18n.get('customConnectionFailed', [error.message]); + customTestResult.className = 'api-test-result error'; + } + } + }); + } + + if (diagnoseOllamaButton) { + diagnoseOllamaButton.addEventListener('click', async () => { + const ollamaUrl = ollamaUrlInput.value.trim() || 'http://localhost:11434'; + let diagnosticOutput = i18n.get('diagnosticsTitle') + '\n' + '='.repeat(50) + '\n\n'; + + ollamaDiagnostics.style.display = 'block'; + ollamaDiagnostics.className = 'diagnostics-result'; + ollamaDiagnostics.textContent = diagnosticOutput + i18n.get('diagnosticsRunning') + '\n'; + + try { + diagnosticOutput += i18n.get('testListModels') + '\n'; + diagnosticOutput += ` URL: ${ollamaUrl}/api/tags\n`; + try { + const tagsResponse = await fetch(`${ollamaUrl}/api/tags`); + diagnosticOutput += ` Status: ${tagsResponse.status} ${tagsResponse.statusText}\n`; + + if (tagsResponse.ok) { + const data = await tagsResponse.json(); + diagnosticOutput += ` ✓ SUCCESS - Found ${data.models?.length || 0} models\n`; + if (data.models && data.models.length > 0) { + diagnosticOutput += ' Installed models: ' + data.models.map(m => m.name).join(', ') + '\n'; + } else { + diagnosticOutput += ' ' + i18n.get('noInstalledModels') + '\n'; + } + } else { + diagnosticOutput += ` ✗ FAILED\n`; + } + } catch (error) { + diagnosticOutput += ` ✗ ERROR: ${error.message}\n`; + } + + diagnosticOutput += '\n' + i18n.get('testVersion') + '\n'; + diagnosticOutput += ` URL: ${ollamaUrl}/api/version\n`; + try { + const versionResponse = await fetch(`${ollamaUrl}/api/version`); + diagnosticOutput += ` Status: ${versionResponse.status} ${versionResponse.statusText}\n`; + + if (versionResponse.ok) { + const data = await versionResponse.json(); + diagnosticOutput += ` ✓ SUCCESS - Ollama version: ${data.version || i18n.get('unknownVersion')}\n`; + } else { + diagnosticOutput += ` ` + i18n.get('versionNotAvailable') + `\n`; + } + } catch (error) { + diagnosticOutput += ` ✗ ERROR: ${error.message}\n`; + } + + diagnosticOutput += '\n' + i18n.get('testPullEndpoint') + '\n'; + diagnosticOutput += ` URL: ${ollamaUrl}/api/pull\n`; + diagnosticOutput += ' ' + i18n.get('pullEndpointNote') + '\n'; + + diagnosticOutput += '\n' + '='.repeat(50) + '\n'; + diagnosticOutput += i18n.get('diagnosticsSummary') + '\n\n'; + + if (diagnosticOutput.includes('✓ SUCCESS - Found')) { + diagnosticOutput += i18n.get('ollamaRunningOk') + '\n'; + diagnosticOutput += i18n.get('diagnosticsApiUrl', [ollamaUrl]) + '\n'; + ollamaDiagnostics.className = 'diagnostics-result success'; + } else { + diagnosticOutput += i18n.get('cannotConnectOllama') + '\n'; + diagnosticOutput += '\n' + i18n.get('troubleshootingLabel') + '\n'; + diagnosticOutput += i18n.get('troubleshootRunning') + '\n'; + diagnosticOutput += i18n.get('troubleshootStart') + '\n'; + diagnosticOutput += i18n.get('troubleshootTest', [ollamaUrl]) + '\n'; + diagnosticOutput += i18n.get('troubleshootPort') + '\n'; + ollamaDiagnostics.className = 'diagnostics-result error'; + } + + } catch (error) { + diagnosticOutput += '\n' + i18n.get('criticalError') + '\n'; + diagnosticOutput += error.message + '\n'; + ollamaDiagnostics.className = 'diagnostics-result error'; + } + + ollamaDiagnostics.textContent = diagnosticOutput; + }); + } + + if (listOllamaModelsButton) { + listOllamaModelsButton.addEventListener('click', async () => { + const ollamaUrl = ollamaUrlInput.value.trim() || 'http://localhost:11434'; + + try { + ollamaTestResult.textContent = i18n.get('fetchingModelsStatus'); + ollamaTestResult.className = 'api-test-result'; + + const response = await fetch(`${ollamaUrl}/api/tags`); + + if (response.ok) { + const data = await response.json(); + if (data.models && data.models.length > 0) { + const modelNames = data.models.map(m => m.name).join(', '); + ollamaTestResult.textContent = i18n.get('availableModels', [modelNames]); + ollamaTestResult.className = 'api-test-result success'; + } else { + ollamaTestResult.textContent = i18n.get('noModelsInstalledHint'); + ollamaTestResult.className = 'api-test-result error'; + } + } else { + ollamaTestResult.textContent = i18n.get('failedFetchModelsSimple'); + ollamaTestResult.className = 'api-test-result error'; + } + } catch (error) { + ollamaTestResult.textContent = i18n.get('ollamaConnectionFailedSimple', [error.message]); + ollamaTestResult.className = 'api-test-result error'; + } + }); + } + + if (downloadOllamaModelButton) { + downloadOllamaModelButton.addEventListener('click', async () => { + const ollamaUrl = (ollamaUrlInput.value.trim() || 'http://localhost:11434').replace(/\/$/, ''); + const modelName = ollamaDownloadModelInput.value.trim(); + const token = ollamaAuthTokenInput && ollamaAuthTokenInput.value.trim(); + if (!modelName) { + ollamaDownloadStatus.textContent = i18n.get('enterModelDownload'); + ollamaDownloadStatus.className = 'api-test-result error'; + ollamaDownloadStatus.style.display = 'block'; + return; + } + try { + downloadOllamaModelButton.disabled = true; + ollamaDownloadStatus.textContent = i18n.get('startingDownload', [modelName]); + ollamaDownloadStatus.className = 'api-test-result'; + ollamaDownloadStatus.style.display = 'block'; + + const headers = token ? { Authorization: `Bearer ${token}` } : {}; + await browser.runtime.sendMessage({ + action: 'startOllamaPull', + ollamaUrl, + model: modelName, + headers + }); + } catch (e) { + ollamaDownloadStatus.textContent = i18n.get('failedStart', [e.message]); + ollamaDownloadStatus.className = 'api-test-result error'; + } finally { + downloadOllamaModelButton.disabled = false; + } + }); + browser.runtime.onMessage.addListener((msg) => { + if (msg.action === 'ollamaPullProgress') { + const parts = []; + if (msg.status) parts.push(msg.status); + if (typeof msg.percent === 'number') parts.push(`${msg.percent}%`); + ollamaDownloadStatus.textContent = parts.join(' — '); + ollamaDownloadStatus.className = 'api-test-result'; + ollamaDownloadStatus.style.display = 'block'; + } else if (msg.action === 'ollamaPullComplete') { + if (msg.ok) { + ollamaDownloadStatus.textContent = i18n.get('downloadComplete'); + ollamaDownloadStatus.className = 'api-test-result success'; + } else { + ollamaDownloadStatus.textContent = i18n.get('downloadFailed', [msg.error || i18n.get('unknownError')]); + ollamaDownloadStatus.className = 'api-test-result error'; + } + ollamaDownloadStatus.style.display = 'block'; + } + }); + } + addLabelButton.addEventListener('click', () => { - // Clear instruction message if present const instructionMsg = labelsContainer.querySelector('.instruction-message'); if (instructionMsg) { labelsContainer.innerHTML = ''; @@ -370,41 +1408,169 @@ document.addEventListener('DOMContentLoaded', async function() { updateSaveButtonState(); }); - // Save settings saveButton.addEventListener('click', () => { const labels = Array.from(document.querySelectorAll('.label-input')) .map(input => input.value.trim()) .filter(label => label !== ''); const apiKey = apiKeyInput.value.trim(); - - // Validation + const provider = aiProviderSelect.value; + + const batchChunkSizeEl = document.getElementById('batch-chunk-size'); + const batchChunkSize = Math.max(1, Math.min(20, parseInt(batchChunkSizeEl?.value) || 5)); + + const autoSortCheckbox = document.getElementById('enable-auto-sort'); + const autoSortEnabled = autoSortCheckbox ? autoSortCheckbox.checked : false; + + const customPromptTextarea = document.getElementById('custom-prompt-text'); + const customPrompt = customPromptTextarea ? customPromptTextarea.value.trim() : ''; + if (labels.length === 0) { - showMessage('Please add at least one folder/label before saving. Use "Load Folders from Mail Account" or add custom labels.', false); - return; - } - - if (!apiKey) { - showMessage('Please enter your API key before saving. Click "Get API Key" to obtain one.', false); + showMessage(i18n.get('addFolderBeforeSave'), false); return; } - const settings = { - labels: labels, - apiKey: apiKey, - aiProvider: aiProviderSelect.value, - enableAi: document.getElementById('enable-ai').checked - }; + if (provider === 'gemini') { + const validGeminiKeys = geminiKeys.filter(key => key && key.trim() !== ''); + + if (validGeminiKeys.length === 0) { + showMessage(i18n.get('addGeminiKeyBeforeSave'), false); + return; + } - browser.storage.local.set(settings).then(() => { - showMessage('✓ Settings saved successfully! You can now use AutoSort+ to analyze emails.', true); - updateSaveButtonState(); - }).catch(error => { - showMessage('Error saving settings: ' + error, false); - }); + const uniqueKeys = new Set(validGeminiKeys.map(key => key.trim().toLowerCase())); + if (uniqueKeys.size !== validGeminiKeys.length) { + showMessage(i18n.get('duplicateApiKeys'), false); + return; + } + + const settings = { + labels: labels, + geminiApiKeys: validGeminiKeys, + currentGeminiKeyIndex: 0, // Start with first key + aiProvider: provider, + enableAi: document.getElementById('enable-ai').checked, + geminiPaidPlan: geminiPaidCheckbox.checked, + debugMode: enableDebugCheckbox ? enableDebugCheckbox.checked : false, + batchChunkSize: batchChunkSize, + autoSortEnabled: autoSortEnabled, + customPrompt: customPrompt + }; + + browser.storage.local.get(['geminiRateLimits']).then(result => { + if (!result.geminiRateLimits || result.geminiRateLimits.length !== validGeminiKeys.length) { + settings.geminiRateLimits = validGeminiKeys.map(() => ({ + requests: [], + dailyCount: 0, + dailyResetTime: Date.now() + (24 * 60 * 60 * 1000) + })); + } + + browser.storage.local.set(settings).then(() => { + showMessage(i18n.get('settingsSavedMultiKey'), true); + updateSaveButtonState(); + }).catch(error => { + showMessage(i18n.get('errorSavingSettings', [error]), false); + }); + }); + } else if (provider === 'ollama') { + // Ollama doesn't need API key, just save URL and model + let ollamaModel = ollamaModelSelect.value; + if (ollamaModel === 'custom') { + ollamaModel = ollamaCustomModelInput.value.trim(); + if (!ollamaModel) { + showMessage(i18n.get('enterOllamaModel'), false); + return; + } + } + + const settings = { + labels: labels, + aiProvider: provider, + enableAi: document.getElementById('enable-ai').checked, + ollamaUrl: ollamaUrlInput.value.trim() || 'http://localhost:11434', + ollamaModel: ollamaModel, + ollamaCustomModel: ollamaCustomModelInput.value.trim(), + ollamaAuthToken: ollamaAuthTokenInput ? ollamaAuthTokenInput.value.trim() : '', + ollamaCpuOnly: ollamaCpuOnlyCheckbox.checked, + debugMode: enableDebugCheckbox ? enableDebugCheckbox.checked : false, + batchChunkSize: batchChunkSize, + autoSortEnabled: autoSortEnabled, + customPrompt: customPrompt + }; + + browser.storage.local.set(settings).then(() => { + const cpuMode = ollamaCpuOnlyCheckbox.checked ? ' (' + i18n.get('ollamaCpuOnly') + ')' : ''; + showMessage(i18n.get('settingsSavedOllama', [cpuMode]), true); + updateSaveButtonState(); + }).catch(error => { + showMessage('Error saving settings: ' + error, false); + }); + } else if (provider === 'openai-compatible') { + // OpenAI-Compatible endpoint needs base URL and model + const baseUrl = customBaseUrlInput ? customBaseUrlInput.value.trim() : ''; + let model = customModelSelect ? customModelSelect.value : ''; + const apiKey = customApiKeyInput ? customApiKeyInput.value.trim() : ''; + + if (model === 'custom' && customModelCustomInput) { + model = customModelCustomInput.value.trim(); + } + + if (!baseUrl) { + showMessage(i18n.get('enterCustomBaseUrl'), false); + return; + } + if (!model) { + showMessage(i18n.get('enterCustomModel'), false); + return; + } + + const settings = { + labels: labels, + aiProvider: provider, + enableAi: document.getElementById('enable-ai').checked, + customBaseUrl: baseUrl.replace(/\/$/, ''), + customModel: model, + apiKey: apiKey, + debugMode: enableDebugCheckbox ? enableDebugCheckbox.checked : false, + batchChunkSize: batchChunkSize, + autoSortEnabled: autoSortEnabled, + customPrompt: customPrompt + }; + + browser.storage.local.set(settings).then(() => { + showMessage(i18n.get('settingsSavedCustomEndpoint'), true); + updateSaveButtonState(); + }).catch(error => { + showMessage('Error saving settings: ' + error, false); + }); + } else { + // Other providers use single key + if (!apiKey) { + showMessage(i18n.get('enterApiKeyBeforeSave'), false); + return; + } + + const settings = { + labels: labels, + apiKey: apiKey, + aiProvider: provider, + enableAi: document.getElementById('enable-ai').checked, + debugMode: enableDebugCheckbox ? enableDebugCheckbox.checked : false, + batchChunkSize: batchChunkSize, + autoSortEnabled: autoSortEnabled, + customPrompt: customPrompt + }; + + browser.storage.local.set(settings).then(() => { + showMessage(i18n.get('settingsSavedSuccess'), true); + updateSaveButtonState(); + }).catch(error => { + showMessage('Error saving settings: ' + error, false); + }); + } }); - // Add category/folder input field function addLabelInput(value = '') { const labelItem = document.createElement('div'); labelItem.className = 'label-item'; @@ -412,7 +1578,7 @@ document.addEventListener('DOMContentLoaded', async function() { const input = document.createElement('input'); input.type = 'text'; input.className = 'label-input'; - input.placeholder = 'Enter category/folder name'; + input.placeholder = i18n.get('labelInputPlaceholder'); input.value = value; input.addEventListener('input', updateSaveButtonState); @@ -422,11 +1588,10 @@ document.addEventListener('DOMContentLoaded', async function() { removeButton.addEventListener('click', () => { labelItem.remove(); updateSaveButtonState(); - - // Show instruction if no labels left + const remainingLabels = document.querySelectorAll('.label-input'); if (remainingLabels.length === 0) { - labelsContainer.innerHTML = '
No folders/labels configured. Click "Load Folders from Mail Account" above or add custom labels below.
'; + labelsContainer.innerHTML = '
' + i18n.get('noFoldersInstruction') + '
'; } }); @@ -435,13 +1600,67 @@ document.addEventListener('DOMContentLoaded', async function() { labelsContainer.appendChild(labelItem); } - // Show API test result function showApiTestResult(message, isSuccess) { apiTestResult.textContent = message; apiTestResult.className = `api-test-result ${isSuccess ? 'success' : 'error'}`; } - // Show message to user + async function fetchModelsViaTab(baseUrl, apiKey) { + const tab = await browser.tabs.create({ url: baseUrl, active: false }); + + try { + await new Promise(resolve => setTimeout(resolve, 500)); + const headers = { 'Content-Type': 'application/json' }; + if (apiKey) headers['Authorization'] = `Bearer ${apiKey}`; + + const scriptCode = ` + (async () => { + try { + const headers = ${JSON.stringify(headers)}; + const response = await fetch(window.location.origin + '/v1/models', { + method: 'GET', + headers + }); + + if (!response.ok) { + throw new Error('HTTP ' + response.status); + } + + const data = await response.json(); + window.__models_result = { ok: true, data }; + } catch (error) { + window.__models_result = { ok: false, error: error.message }; + } + })(); + `; + + await browser.tabs.executeScript(tab.id, { code: scriptCode }); + + let result = null; + for (let i = 0; i < 40; i++) { // 10 seconds max (250ms intervals) + await new Promise(resolve => setTimeout(resolve, 250)); + try { + const results = await browser.tabs.executeScript(tab.id, { code: 'window.__models_result || null' }); + if (results && results[0]) { + result = results[0]; + break; + } + } catch (e) { + break; + } + } + + if (!result || !result.ok) { + throw new Error(result?.error || 'Timeout fetching models'); + } + + return result.data; + + } finally { + try { await browser.tabs.remove(tab.id); } catch (e) { console.warn('[Options] Failed to close tab:', e.message); } + } + } + function showMessage(message, isSuccess = true) { const messageDiv = document.createElement('div'); messageDiv.className = 'message'; @@ -454,13 +1673,11 @@ document.addEventListener('DOMContentLoaded', async function() { }, 3000); } - // Function to format timestamp function formatTimestamp(timestamp) { const date = new Date(timestamp); return date.toLocaleString(); } - // Function to update history table async function updateHistoryTable() { const historyBody = document.getElementById('history-body'); const data = await browser.storage.local.get('moveHistory'); @@ -469,25 +1686,155 @@ document.addEventListener('DOMContentLoaded', async function() { historyBody.innerHTML = history.map(entry => ` ${formatTimestamp(entry.timestamp)} - ${entry.subject} - ${entry.status} - ${entry.destination} + ${escapeHtml(entry.subject)} + ${escapeHtml(entry.status)} + ${escapeHtml(entry.destination)} `).join(''); } - // Function to clear history async function clearHistory() { - if (confirm('Are you sure you want to clear the move history?')) { + if (confirm(i18n.get('clearHistoryConfirm'))) { await browser.storage.local.set({ moveHistory: [] }); await updateHistoryTable(); } } - // Initialize the page await updateHistoryTable(); - // Add event listeners for history controls document.getElementById('clear-history').addEventListener('click', clearHistory); document.getElementById('refresh-history').addEventListener('click', updateHistoryTable); + + // ── Batch Progress Panel ─────────────────────────────────────────────── + + const batchPanel = document.getElementById('batch-status-panel'); + const batchFill = document.getElementById('batch-progress-fill'); + const batchText = document.getElementById('batch-progress-text'); + const batchBadge = document.getElementById('batch-provider-badge'); + const batchPauseBtn = document.getElementById('batch-pause-btn'); + const batchResumeBtn = document.getElementById('batch-resume-btn'); + const batchCancelBtn = document.getElementById('batch-cancel-btn'); + + let _batchHideTimer = null; + + /** + * Update the batch panel UI from a progress payload. + * @param {{ status, total, completed, failed, skipped, provider, chunkIndex, totalChunks }} payload + */ + function applyBatchProgress(payload) { + if (!batchPanel || !payload) return; + + // Use defaults for safety + const { + status = 'running', + total = 0, + completed = 0, + failed = 0, + skipped = 0, + provider = '', + chunkIndex = 0, + totalChunks = 0 + } = payload; + + const done = (completed || 0) + (failed || 0) + (skipped || 0); + const pct = total > 0 ? Math.round((done / total) * 100) : 0; + batchPanel.style.display = 'block'; + batchPanel.dataset.status = status; + + if (batchBadge && provider) { + batchBadge.textContent = provider; + } + + if (batchFill) { + batchFill.style.width = pct + '%'; + } + + const displayChunk = chunkIndex || 0; + const displayTotal = totalChunks || 0; + + if (batchText) { + if (status === 'paused') { + if (displayTotal > 0) { + batchText.textContent = i18n.get('batchPausedChunk', [displayChunk, displayTotal, done, total]); + } else { + batchText.textContent = i18n.get('batchPausedSimple', [done, total]); + } + } else if (status === 'done') { + batchText.textContent = i18n.get('batchDone', [completed, skipped, failed]); + } else if (status === 'cancelled') { + if (displayTotal > 0) { + batchText.textContent = i18n.get('batchCancelledChunk', [displayChunk, displayTotal]); + } else { + batchText.textContent = i18n.get('batchCancelledSimple', [done, total]); + } + } else { + if (displayTotal > 0) { + batchText.textContent = i18n.get('batchRunningChunk', [displayChunk, displayTotal, done, total, completed, failed]); + } else { + batchText.textContent = i18n.get('batchRunningSimple', [done, total, completed, failed]); + } + } + } + + if (batchPauseBtn && batchResumeBtn) { + if (status === 'paused') { + batchPauseBtn.style.display = 'none'; + batchResumeBtn.style.display = ''; + } else { + batchPauseBtn.style.display = ''; + batchResumeBtn.style.display = 'none'; + } + } + + if (batchCancelBtn) { + batchCancelBtn.style.display = (status === 'done' || status === 'cancelled') ? 'none' : ''; + } + + if (status === 'done' || status === 'cancelled') { + clearTimeout(_batchHideTimer); + _batchHideTimer = setTimeout(() => { + if (batchPanel) batchPanel.style.display = 'none'; + }, 5000); + } + } + + browser.storage.local.get('currentBatch').then(result => { + if (result.currentBatch && result.currentBatch.status === 'running') { + applyBatchProgress(result.currentBatch); + } + }); + + browser.runtime.onMessage.addListener(msg => { + if (msg.action === 'batchProgress') { + applyBatchProgress(msg); + } + }); + + if (batchPauseBtn) { + batchPauseBtn.addEventListener('click', () => { + browser.runtime.sendMessage({ action: 'batchControl', command: 'pause' }).catch(() => {}); + if (batchPanel) batchPanel.dataset.status = 'paused'; + if (batchText) batchText.textContent = i18n.get('batchPausing'); + if (batchPauseBtn) batchPauseBtn.style.display = 'none'; + if (batchResumeBtn) batchResumeBtn.style.display = ''; + }); + } + + if (batchResumeBtn) { + batchResumeBtn.addEventListener('click', () => { + browser.runtime.sendMessage({ action: 'batchControl', command: 'resume' }).catch(() => {}); + if (batchPanel) batchPanel.dataset.status = 'running'; + if (batchPauseBtn) batchPauseBtn.style.display = ''; + if (batchResumeBtn) batchResumeBtn.style.display = 'none'; + }); + } + + if (batchCancelBtn) { + batchCancelBtn.addEventListener('click', () => { + if (!confirm(i18n.get('batchCancelConfirm'))) return; + browser.runtime.sendMessage({ action: 'batchControl', command: 'cancel' }).catch(() => {}); + if (batchText) batchText.textContent = i18n.get('batchCancelling'); + if (batchCancelBtn) batchCancelBtn.disabled = true; + }); + } }); \ No newline at end of file diff --git a/release_notes.md b/release_notes.md index 3a678ce..13ae701 100644 --- a/release_notes.md +++ b/release_notes.md @@ -1,3 +1,147 @@ +## AutoSort+ v1.2.3.3 - January 28, 2026 + +### 🛠️ Bug Fix: Manual Label Application (Thunderbird Context Menu) +- Fixed: Manual label application via right-click (AutoSort+ > AutoSort Label > pick any) now works reliably in all Thunderbird message list views. +- **Root Cause:** The extension previously relied on a content script (content.js) to fetch selected messages, but Thunderbird does not inject content scripts into privileged mail/message tabs. This caused the error: "Could not establish connection. Receiving end does not exist." +- **Solution:** The logic for fetching selected messages and applying labels is now handled entirely in the background script using the mailTabs API. This removes the dependency on content scripts for this feature and ensures robust operation. +- No other features are affected by this change; all AI, Ollama, and label management features continue to work as before. + +--- + +## AutoSort+ v1.2.3.2 - January 27, 2026 + +### 🛠️ Latest Changes +- Ollama Model Download UI: Buttons are now always below the download input and never overlap. +- Ollama response parsing and label matching are more robust and tolerant (closest match if not exact). +- Improved error handling for model/tag mismatches and 404s. +## AutoSort+ v1.2.3 - January 14, 2026 +## AutoSort+ v1.2.3.1 - January 14, 2026 + + - Clear error messages in tooltip and status + - Cannot save or test duplicate keys +- **Better Key Management** - Users notified if key is already added + Ollama Model Download UI: Buttons are now always below the download input and never overlap. + Ollama response parsing and label matching are more robust and tolerant (closest match if not exact). + Improved error handling for model/tag mismatches and 404s. + - Visual indicator with red border and background + - Test button blocked for duplicates + - Save prevented with helpful error message + +--- + +## AutoSort+ v1.2.3 - January 14, 2026 + +### ✨ New Features +- **Toolbar Icon** - Quick access button in top-right of Thunderbird + - Click the AutoSort+ icon to instantly open settings + - Hover for tooltip "AutoSort+ Settings" + - No need to navigate through menus anymore + +### 🎯 User Experience +- Faster settings access - one click from anywhere in Thunderbird +- Better addon visibility - always accessible in toolbar +- Tooltip provides clear button function + +--- + +## AutoSort+ v1.2.2 - January 14, 2026 + +### 🐛 Bug Fixes +- **Fixed single-key usage tracking** - Now properly displays "Today's Usage:" when using only 1 Gemini API key +- **Fixed rate limit notification** - Persistent notification now shows when limit is reached on single or multiple keys +- **Improved API test feedback** - Shows specific error messages: + - 429 error: "⛔ Limit reached" (key has exhausted daily quota) + - 401/403 error: "✗ Invalid key" (key is invalid or expired) + - Other errors: "✗ Failed (status code)" +- **Added tooltips** - Hover over test results to see detailed error explanations + +### ✨ Improvements +- Better error messaging for API key testing +- More intuitive status indicators with help text +- Cursor changes to "help" icon when hovering over test results +- Single-key configurations now display in new multi-key format + +--- + +## AutoSort+ v1.2.1 - January 14, 2026 + +### 🆕 New Features +- **Multiple Gemini API Key Support** - Add multiple API keys from different Google Cloud projects +- **Automatic Key Rotation** - Seamlessly switches between keys when rate limits are reached +- **Per-Key Usage Tracking** - Monitor usage statistics for each API key individually +- **Smart Key Management** - Visual indicators show which key is active and available + +### 💡 How Multiple Keys Work +Free Gemini tier provides 20 requests/day per project. With multiple keys: +- Add keys from different Google Cloud projects +- Extension automatically rotates to next available key +- Example: 5 keys = 100 requests/day total +- Each key tracks its own rate limit independently + +### 🔧 Improvements +- Enhanced UI for multi-key management +- Individual test buttons for each API key +- Real-time status indicators (Active, Ready, Near Limit, Limit Reached) +- Better error messages when all keys are exhausted +- Backward compatible with single-key configurations + +### 🐛 Bug Fixes +- Fixed API key test function for Gemini keys +- Fixed test result display to show inline status per key +- Improved key validation feedback + +--- + +## AutoSort+ v1.2.0 + +### ⚠️ Important Rate Limit Warning +**Free API tiers are severely limited when processing emails!** Email content is large text, which counts heavily against rate limits: + +- **Gemini**: ~15-20 emails before hitting limits +- **OpenAI**: ~5-10 emails (very strict on free tier) +- **Anthropic**: ~10-15 emails +- **Groq**: ~20-30 emails (best free option) +- **Mistral**: ~10-15 emails + +**For daily email processing, paid API plans are strongly recommended.** + +Free tiers are suitable for: +- Occasional use (a few emails per day) +- Testing the addon +- Light personal email management + +For regular use, consider: +- Upgrading to paid API tiers ($5-20/month) +- Processing emails in small batches with delays +- Using Groq for the highest free tier limits + +### Features +- **Multi-provider AI support** (Gemini, OpenAI, Anthropic, Groq, Mistral) +- **IMAP folder discovery** - Automatically load folders from mail accounts +- **Batch email processing** - Select and sort multiple emails at once +- **Move history tracking** - Last 100 email moves recorded +- **Smart label matching** - Skips null categories, auto-creates custom folders +- **Professional UI** - Provider info cards, real-time validation + +### Changes +- Added support for 5 AI providers with easy switching +- Updated Groq to llama-3.3-70b model +- Improved error handling and validation +- Fixed batch email processing bugs +- Enhanced folder management +- Added rate limit guidance + +### Installation in Thunderbird +1. Download the autosortplus.xpi file +2. Open Thunderbird +3. Click the Menu button (☰) and select "Add-ons and Themes" +4. Click the gear icon and select "Install Add-on From File..." +5. Select the downloaded autosortplus.xpi file +6. Click "Add" when prompted to install the add-on +7. Restart Thunderbird when prompted + +--- + ## AutoSort+ v1.0.0 ### Features @@ -19,4 +163,4 @@ 4. Click the gear icon and select "Install Add-on From File..." 5. Select the downloaded autosortplus.xpi file 6. Click "Add" when prompted to install the add-on -7. Restart Thunderbird when prompted \ No newline at end of file +7. Restart Thunderbird when prompted \ No newline at end of file diff --git a/styles.css b/styles.css index fcdc255..58e3f5e 100644 --- a/styles.css +++ b/styles.css @@ -1,409 +1,933 @@ -:root { - --primary-color: #0060df; - --primary-hover: #003eaa; - --background-color: #f9f9fa; - --text-color: #0c0c0d; - --border-color: #d7d7db; - --success-color: #4CAF50; - --error-color: #f44336; -} - -body { - font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif; - background-color: var(--background-color); - color: var(--text-color); - margin: 0; - padding: 20px; -} - -.container { - max-width: 800px; - margin: 0 auto; - background-color: white; - padding: 30px; - border-radius: 8px; - box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); -} - -h1 { - color: var(--primary-color); - margin-bottom: 30px; -} - -h2 { - font-size: 1.2em; - margin-bottom: 15px; -} - -.section { - margin-bottom: 30px; - padding: 20px; - border: 1px solid var(--border-color); - border-radius: 4px; -} - -.input-group { - margin-bottom: 15px; -} - -.input-group label { - display: block; - margin-bottom: 5px; - font-weight: 500; -} - -.api-key-input { - width: 100%; - padding: 8px; - border: 1px solid var(--border-color); - border-radius: 4px; - font-size: 14px; - margin-bottom: 10px; -} - -.bulk-import { - margin-bottom: 20px; - padding: 15px; - background-color: var(--background-color); - border-radius: 4px; -} - -.bulk-import label { - display: block; - margin-bottom: 8px; - font-weight: 500; -} - -.bulk-import-textarea { - width: 100%; - height: 100px; - padding: 10px; - border: 1px solid var(--border-color); - border-radius: 4px; - font-size: 14px; - resize: vertical; - margin-bottom: 10px; - font-family: inherit; -} - -#test-api { - margin-top: 10px; - background-color: var(--primary-color); - color: white; -} - -.api-test-result { - margin-top: 10px; - padding: 10px; - border-radius: 4px; - display: none; -} - -.api-test-result.success { - display: block; - background-color: var(--success-color); - color: white; -} - -.api-test-result.error { - display: block; - background-color: var(--error-color); - color: white; -} - -.button { - background-color: var(--primary-color); - color: white; - border: none; - padding: 8px 16px; - border-radius: 4px; - cursor: pointer; - font-size: 14px; - transition: background-color 0.2s; - text-decoration: none; - display: inline-block; -} - -.button:hover { - background-color: var(--primary-hover); -} - -.button-link { - background-color: #28a745; -} - -.button-link:hover { - background-color: #218838; -} - -.button.primary { - background-color: var(--primary-color); - padding: 10px 20px; - font-size: 16px; -} - -.button.disabled, -.button:disabled { - background-color: #cccccc; - color: #666666; - cursor: not-allowed; - opacity: 0.6; -} - -.button.disabled:hover, -.button:disabled:hover { - background-color: #cccccc; -} - -.instruction-message { - padding: 20px; - background-color: #fff3cd; - border: 1px solid #ffc107; - border-radius: 4px; - color: #856404; - text-align: center; - font-style: italic; - margin-bottom: 15px; -} - -.provider-select { - width: 100%; - padding: 10px; - border: 1px solid var(--border-color); - border-radius: 4px; - font-size: 14px; - background-color: white; - cursor: pointer; -} - -.provider-info { - margin: 15px 0; - padding: 15px; - background-color: #f0f8ff; - border-left: 4px solid var(--primary-color); - border-radius: 4px; -} - -.provider-details { - font-size: 14px; - line-height: 1.6; -} - -.provider-details strong { - font-size: 16px; - color: var(--text-color); -} - -.provider-details p { - margin: 8px 0 0 0; - color: #555; -} - -.free-badge { - display: inline-block; - padding: 2px 8px; - background-color: #28a745; - color: white; - border-radius: 3px; - font-size: 11px; - font-weight: bold; - margin-left: 8px; -} - -.paid-badge { - display: inline-block; - padding: 2px 8px; - background-color: #ffc107; - color: #333; - border-radius: 3px; - font-size: 11px; - font-weight: bold; - margin-left: 8px; -} - -.label-item { - display: flex; - gap: 10px; - margin-bottom: 10px; - align-items: center; -} - -.label-input { - flex: 1; - padding: 8px; - border: 1px solid var(--border-color); - border-radius: 4px; - font-size: 14px; -} - -.remove-label { - background-color: #ff4f4f; - color: white; - border: none; - width: 24px; - height: 24px; - border-radius: 50%; - cursor: pointer; - display: flex; - align-items: center; - justify-content: center; - font-size: 16px; -} - -.remove-label:hover { - background-color: #d43535; -} - -.checkbox-container { - display: flex; - align-items: center; - gap: 10px; - margin-bottom: 10px; -} - -input[type="checkbox"] { - width: 18px; - height: 18px; - cursor: pointer; -} - -.message { - position: fixed; - bottom: 20px; - right: 20px; - background-color: var(--primary-color); - color: white; - padding: 10px 20px; - border-radius: 4px; - box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2); - z-index: 1000; -} - -.ai-description { - margin-top: 20px; - padding: 15px; - background-color: var(--background-color); - border-radius: 8px; - border: 1px solid var(--border-color); -} - -.ai-description p { - margin-bottom: 10px; - color: var(--text-color); - line-height: 1.5; -} - -.ai-description ul { - margin: 0; - padding-left: 20px; - color: var(--text-color); -} - -.ai-description li { - margin-bottom: 8px; - line-height: 1.4; -} - -.history-controls { - margin-bottom: 15px; - display: flex; - gap: 10px; -} - -.history-container { - max-height: 400px; - overflow-y: auto; - border: 1px solid #ddd; - border-radius: 4px; -} - -#history-table { - width: 100%; - border-collapse: collapse; -} - -#history-table th, -#history-table td { - padding: 10px; - text-align: left; - border-bottom: 1px solid #ddd; -} - -#history-table th { - background-color: #f5f5f5; - position: sticky; - top: 0; - z-index: 1; -} - -#history-table tr:hover { - background-color: #f9f9f9; -} - -#history-table .success { - color: #28a745; -} - -#history-table .error { - color: #dc3545; -} - -.timestamp { - white-space: nowrap; - font-family: monospace; -} - -.folder-source { - margin-bottom: 20px; - padding: 15px; - background-color: #f5f5f5; - border-radius: 4px; -} - -.folder-source h3 { - margin-top: 0; - margin-bottom: 10px; - font-size: 1em; -} - -.loading-indicator { - padding: 10px; - color: var(--primary-color); - font-style: italic; -} - -.folder-selection { - margin-top: 15px; - padding: 15px; - background-color: white; - border-radius: 4px; - border: 1px solid var(--border-color); -} - -.folder-selection p { - margin-bottom: 10px; -} - -.folders-preview { - max-height: 200px; - overflow-y: auto; - margin: 15px 0; - padding: 10px; - background-color: #f9f9f9; - border: 1px solid var(--border-color); - border-radius: 4px; -} - -.folder-preview-item { - padding: 5px 10px; - margin-bottom: 5px; - background-color: white; - border-radius: 3px; - border-left: 3px solid var(--primary-color); -} - -.button-group { - display: flex; - gap: 10px; - margin-top: 15px; -} \ No newline at end of file +:root { + --primary-color: #0060df; + --primary-hover: #003eaa; + --background-color: #f9f9fa; + --text-color: #0c0c0d; + --border-color: #d7d7db; + --success-color: #4CAF50; + --error-color: #f44336; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif; + background-color: var(--background-color); + color: var(--text-color); + margin: 0; + padding: 20px; +} + +.container { + max-width: 800px; + margin: 0 auto; + background-color: white; + padding: 30px; + border-radius: 8px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); +} + +h1 { + color: var(--primary-color); + margin-bottom: 30px; +} + +h2 { + font-size: 1.2em; + margin-bottom: 15px; +} + +.section { + margin-bottom: 30px; + padding: 0; + border: 1px solid var(--border-color); + border-radius: 8px; + overflow: hidden; + background: white; + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05); +} + +.section-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 20px; + cursor: pointer; + background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%); + border-bottom: 1px solid var(--border-color); + transition: all 0.3s ease; + user-select: none; +} + +.section-header:hover { + background: linear-gradient(135deg, #e9ecef 0%, #dee2e6 100%); +} + +.section-header h2 { + margin: 0; + font-size: 1.3em; + color: var(--primary-color); +} + +.collapse-icon { + font-size: 1.2em; + color: var(--primary-color); + transition: transform 0.3s ease; + font-weight: bold; +} + +.collapsible-section.collapsed .collapse-icon { + transform: rotate(-90deg); +} + +.section-content { + padding: 20px; + animation: slideDown 0.3s ease-out; +} + +.subsection { + background: #f8f9fa; + padding: 20px; + margin-bottom: 20px; + border-radius: 8px; + border-left: 4px solid var(--primary-color); +} + +.subsection h3 { + margin-top: 0; + margin-bottom: 15px; + color: var(--primary-color); + font-size: 1.1em; + font-weight: 600; +} + +.subsection:last-child { + margin-bottom: 0; +} + +.ai-info-subsection { + background: linear-gradient(135deg, #e3f2fd 0%, #bbdefb 100%); + border-left-color: #1976d2; +} + +@keyframes slideDown { + from { + opacity: 0; + transform: translateY(-10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.save-section { + padding: 20px; + background: linear-gradient(135deg, #f0f4ff 0%, #e0e8ff 100%); + border: 2px solid var(--primary-color); +} + +.input-group { + margin-bottom: 15px; +} + +.input-group label { + display: block; + margin-bottom: 5px; + font-weight: 500; +} + +.api-key-input { + width: 100%; + padding: 8px; + border: 1px solid var(--border-color); + border-radius: 4px; + font-size: 14px; + margin-bottom: 10px; +} + +.bulk-import { + margin-bottom: 20px; + padding: 15px; + background-color: var(--background-color); + border-radius: 4px; +} + +.bulk-import label { + display: block; + margin-bottom: 8px; + font-weight: 500; +} + +.bulk-import-textarea { + width: 100%; + height: 100px; + padding: 10px; + border: 1px solid var(--border-color); + border-radius: 4px; + font-size: 14px; + resize: vertical; + margin-bottom: 10px; + font-family: inherit; +} + +#test-api { + margin-top: 10px; + background-color: var(--primary-color); + color: white; +} + +.api-test-result { + margin-top: 10px; + padding: 10px; + border-radius: 4px; + display: none; +} + +.api-test-result.success { + display: block; + background-color: var(--success-color); + color: white; +} + +.api-test-result.error { + display: block; + background-color: var(--error-color); + color: white; +} + +.warning-box { + background-color: #fff3cd; + border: 1px solid #ffc107; + border-radius: 4px; + padding: 12px 16px; + margin: 15px 0; + color: #856404; +} + +.warning-box strong { + color: #856404; +} + +.usage-info { + background-color: #f8f9fa; + border: 1px solid #dee2e6; + border-radius: 4px; + padding: 15px; + margin: 15px 0; +} + +.usage-info h3 { + margin-top: 0; + margin-bottom: 12px; + font-size: 1em; + color: var(--primary-color); +} + +.usage-stats { + margin-bottom: 12px; +} + +.usage-item { + padding: 6px 0; + border-bottom: 1px solid #e9ecef; +} + +.usage-item:last-child { + border-bottom: none; +} + +.usage-item strong { + display: inline-block; + width: 150px; +} + +.usage-message { + margin-top: 10px; + padding: 10px; + border-radius: 4px; + display: none; +} + +.usage-message.warning { + display: block; + background-color: #fff3cd; + border: 1px solid #ffc107; + color: #856404; +} + +.usage-message.info { + display: block; + background-color: #d1ecf1; + border: 1px solid #bee5eb; + color: #0c5460; +} + +.button { + background-color: var(--primary-color); + color: white; + border: none; + padding: 8px 16px; + border-radius: 4px; + cursor: pointer; + font-size: 14px; + transition: background-color 0.2s; + text-decoration: none; + display: inline-block; +} + +.button:hover { + background-color: var(--primary-hover); +} + +.button-link { + background-color: #28a745; +} + +.button-link:hover { + background-color: #218838; +} + +.button.primary { + background-color: var(--primary-color); + padding: 10px 20px; + font-size: 16px; +} + +.button.disabled, +.button:disabled { + background-color: #cccccc; + color: #666666; + cursor: not-allowed; + opacity: 0.6; +} + +.button.disabled:hover, +.button:disabled:hover { + background-color: #cccccc; +} + +.instruction-message { + padding: 20px; + background-color: #fff3cd; + border: 1px solid #ffc107; + border-radius: 4px; + color: #856404; + text-align: center; + font-style: italic; + margin-bottom: 15px; +} + +.provider-select { + width: 100%; + padding: 10px; + border: 1px solid var(--border-color); + border-radius: 4px; + font-size: 14px; + background-color: white; + cursor: pointer; +} + +.provider-info { + margin: 15px 0; + padding: 15px; + background-color: #f0f8ff; + border-left: 4px solid var(--primary-color); + border-radius: 4px; +} + +.provider-details { + font-size: 14px; + line-height: 1.6; +} + +.provider-details strong { + font-size: 16px; + color: var(--text-color); +} + +.provider-details p { + margin: 8px 0 0 0; + color: #555; +} + +.free-badge { + display: inline-block; + padding: 2px 8px; + background-color: #28a745; + color: white; + border-radius: 3px; + font-size: 11px; + font-weight: bold; + margin-left: 8px; +} + +.paid-badge { + display: inline-block; + padding: 2px 8px; + background-color: #ffc107; + color: #333; + border-radius: 3px; + font-size: 11px; + font-weight: bold; + margin-left: 8px; +} + +.label-item { + display: flex; + gap: 10px; + margin-bottom: 10px; + align-items: center; +} + +.label-input { + flex: 1; + padding: 8px; + border: 1px solid var(--border-color); + border-radius: 4px; + font-size: 14px; +} + +.remove-label { + background-color: #ff4f4f; + color: white; + border: none; + width: 24px; + height: 24px; + border-radius: 50%; + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + font-size: 16px; +} + +.remove-label:hover { + background-color: #d43535; +} + +.checkbox-container { + display: flex; + align-items: center; + gap: 10px; + margin-bottom: 10px; +} + +input[type="checkbox"] { + width: 18px; + height: 18px; + cursor: pointer; +} + +.message { + position: fixed; + bottom: 20px; + right: 20px; + background-color: var(--primary-color); + color: white; + padding: 10px 20px; + border-radius: 4px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2); + z-index: 1000; +} + +.ai-description { + margin-top: 20px; + padding: 15px; + background-color: var(--background-color); + border-radius: 8px; + border: 1px solid var(--border-color); +} + +.ai-description p { + margin-bottom: 10px; + color: var(--text-color); + line-height: 1.5; +} + +.ai-description ul { + margin: 0; + padding-left: 20px; + color: var(--text-color); +} + +.ai-description li { + margin-bottom: 8px; + line-height: 1.4; +} + +.history-controls { + margin-bottom: 15px; + display: flex; + gap: 10px; +} + +.history-container { + max-height: 400px; + overflow-y: auto; + border: 1px solid #ddd; + border-radius: 4px; +} + +#history-table { + width: 100%; + border-collapse: collapse; +} + +#history-table th, +#history-table td { + padding: 10px; + text-align: left; + border-bottom: 1px solid #ddd; +} + +#history-table th { + background-color: #f5f5f5; + position: sticky; + top: 0; + z-index: 1; +} + +#history-table tr:hover { + background-color: #f9f9f9; +} + +#history-table .success { + color: #28a745; +} + +#history-table .error { + color: #dc3545; +} + +.timestamp { + white-space: nowrap; + font-family: monospace; +} + +.folder-source { + margin-bottom: 20px; + padding: 15px; + background-color: #f5f5f5; + border-radius: 4px; +} + +.folder-source h3 { + margin-top: 0; + margin-bottom: 10px; + font-size: 1em; +} + +.loading-indicator { + padding: 10px; + color: var(--primary-color); + font-style: italic; +} + +.folder-selection { + margin-top: 15px; + padding: 15px; + background-color: white; + border-radius: 4px; + border: 1px solid var(--border-color); +} + +.folder-selection p { + margin-bottom: 10px; +} + +.folders-preview { + max-height: 200px; + overflow-y: auto; + margin: 15px 0; + padding: 10px; + background-color: #f9f9f9; + border: 1px solid var(--border-color); + border-radius: 4px; +} + +.folder-preview-item { + padding: 5px 10px; + margin-bottom: 5px; + background-color: white; + border-radius: 3px; + border-left: 3px solid var(--primary-color); +} + +.button-group { + display: flex; + gap: 10px; + margin-top: 15px; +} + +/* Multiple Gemini API Keys */ +.multi-keys-header { + margin-bottom: 15px; +} + +.multi-keys-header h3 { + margin: 0 0 5px 0; + font-size: 1.1em; +} + +.multi-keys-header .info-text { + margin: 0; + font-size: 0.9em; + color: #666; +} + +.gemini-keys-list { + margin: 15px 0; +} + +.gemini-key-item { + display: flex; + align-items: center; + gap: 10px; + margin-bottom: 10px; + padding: 10px; + background-color: #f9f9f9; + border: 1px solid var(--border-color); + border-radius: 4px; +} + +.gemini-key-item.active { + border-left: 4px solid #28a745; + background-color: #f0fff0; +} + +.gemini-key-item .key-index { + font-weight: bold; + min-width: 30px; + color: #666; +} + +.gemini-key-item input { + flex: 1; + padding: 8px; + border: 1px solid var(--border-color); + border-radius: 4px; +} + +.gemini-key-item input:focus { + outline: none; + border-color: var(--primary-color); +} + +.gemini-key-item input[style*="border-color: #dc3545"] { + border-color: #dc3545 !important; + background-color: #fff5f5; +} + +.gemini-key-item .key-status { + font-size: 0.85em; + padding: 3px 8px; + border-radius: 3px; + white-space: nowrap; +} + +.gemini-key-item .key-status.ready { + background-color: #d4edda; + color: #155724; +} + +.gemini-key-item .key-status.warning { + background-color: #fff3cd; + color: #856404; +} + +.gemini-key-item .key-status.limit { + background-color: #f8d7da; + color: #721c24; +} + +.gemini-key-item .key-status.active { + background-color: #d1ecf1; + color: #0c5460; + font-weight: bold; +} + +.gemini-key-item button { + padding: 5px 10px; + font-size: 0.9em; +} + +.key-test-result { + font-size: 0.85em; + padding: 3px 8px; + border-radius: 3px; + white-space: nowrap; + min-width: 70px; + text-align: center; + cursor: help; +} + +.key-test-result.success { + background-color: #d4edda; + color: #155724; +} + +.key-test-result.error { + background-color: #f8d7da; + color: #721c24; +} + +.key-test-result.testing { + background-color: #d1ecf1; + color: #0c5460; +} + +.all-keys-usage { + margin-bottom: 15px; +} + +.key-usage-card { + margin-bottom: 10px; + padding: 12px; + background-color: #f9f9f9; + border: 1px solid var(--border-color); + border-radius: 4px; +} + +.key-usage-card.active { + border-left: 4px solid #28a745; + background-color: #f0fff0; +} + +.key-usage-card .key-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 8px; +} + +.key-usage-card .key-title { + font-weight: bold; + color: #333; +} + +.key-usage-card .key-stats { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 8px; + font-size: 0.9em; +} + +.key-usage-card .stat-item { + display: flex; + justify-content: space-between; +} + +.key-usage-card .stat-label { + color: #666; +} + +.key-usage-card .stat-value { + font-weight: 600; +} + +.info-box { + background: #e3f2fd; + border: 1px solid #90caf9; + padding: 12px; + border-radius: 6px; + margin-bottom: 15px; + font-size: 0.95em; +} + +.info-box code { + background: #fff; + padding: 2px 6px; + border-radius: 3px; + font-family: 'Courier New', monospace; + color: #d32f2f; +} + +.help-text { + display: block; + margin-top: 5px; + color: #666; + font-size: 0.9em; +} + +.diagnostics-result { + margin-top: 15px; + padding: 15px; + background: #f5f5f5; + border-radius: 6px; + border-left: 4px solid #ff9800; + font-family: 'Courier New', monospace; + font-size: 0.85em; + white-space: pre-wrap; + max-height: 300px; + overflow-y: auto; +} + +.diagnostics-result.success { + border-left-color: #4CAF50; + background: #e8f5e9; +} + +.diagnostics-result.error { + border-left-color: #f44336; + background: #ffebee; +} + +/* ── Batch Processing Status Panel ───────────────────────────────────────── */ + +.batch-status-panel { + margin-bottom: 24px; + padding: 18px 20px; + background: linear-gradient(135deg, #e8f0fe 0%, #d2e3fc 100%); + border: 2px solid var(--primary-color); + border-radius: 10px; + box-shadow: 0 3px 10px rgba(0, 96, 223, 0.15); + animation: slideDown 0.3s ease-out; +} + +.batch-status-header { + display: flex; + align-items: center; + gap: 10px; + margin-bottom: 14px; +} + +.batch-status-icon { + font-size: 1.4em; + animation: batchSpin 2s linear infinite; + display: inline-block; +} + +@keyframes batchSpin { + from { transform: rotate(0deg); } + to { transform: rotate(360deg); } +} + +/* Stop spinning when paused */ +.batch-status-panel[data-status="paused"] .batch-status-icon { + animation: none; +} + +/* Show done/cancelled state */ +.batch-status-panel[data-status="done"] .batch-status-icon, +.batch-status-panel[data-status="cancelled"] .batch-status-icon { + animation: none; +} + +.batch-status-title { + font-weight: 700; + font-size: 1.05em; + color: #003eaa; + flex: 1; +} + +.batch-provider-badge { + display: inline-block; + padding: 2px 10px; + background: var(--primary-color); + color: white; + border-radius: 12px; + font-size: 0.78em; + font-weight: 600; + letter-spacing: 0.5px; + text-transform: uppercase; +} + +/* Progress bar */ +.batch-progress-bar-wrap { + height: 12px; + background: rgba(255,255,255,0.6); + border-radius: 8px; + overflow: hidden; + margin-bottom: 10px; + border: 1px solid rgba(0, 96, 223, 0.2); +} + +.batch-progress-fill { + height: 100%; + background: linear-gradient(90deg, #0060df 0%, #00d4ff 100%); + border-radius: 8px; + transition: width 0.4s ease; + position: relative; + overflow: hidden; +} + +.batch-progress-fill::after { + content: ''; + position: absolute; + top: 0; left: -100%; + width: 60%; + height: 100%; + background: linear-gradient(90deg, transparent, rgba(255,255,255,0.4), transparent); + animation: shimmer 1.5s infinite; +} + +@keyframes shimmer { + to { left: 200%; } +} + +.batch-progress-text { + font-size: 0.88em; + color: #003eaa; + margin: 0 0 12px 0; +} + +/* Batch control buttons */ +.batch-controls { + display: flex; + gap: 8px; +} + +.batch-btn-pause { + background: #f59e0b; + color: white; + border: none; + padding: 6px 14px; + border-radius: 5px; + cursor: pointer; + font-size: 0.88em; + font-weight: 600; + transition: background 0.2s; +} + +.batch-btn-pause:hover { background: #d97706; } + +.batch-btn-resume { + background: #10b981; + color: white; + border: none; + padding: 6px 14px; + border-radius: 5px; + cursor: pointer; + font-size: 0.88em; + font-weight: 600; + transition: background 0.2s; +} + +.batch-btn-resume:hover { background: #059669; } + +.batch-btn-cancel { + background: #ef4444; + color: white; + border: none; + padding: 6px 14px; + border-radius: 5px; + cursor: pointer; + font-size: 0.88em; + font-weight: 600; + transition: background 0.2s; +} + +.batch-btn-cancel:hover { background: #dc2626; } + +/* ── Custom Prompt Textarea ─────────────────────────────────────────────── */ + +.prompt-textarea { + width: 100%; + min-height: 150px; + padding: 10px; + font-family: monospace; + font-size: 13px; + border: 1px solid #ccc; + border-radius: 4px; + resize: vertical; + margin-bottom: 10px; +} \ No newline at end of file diff --git a/test-auto-sort.test.js b/test-auto-sort.test.js new file mode 100644 index 0000000..719d9fe --- /dev/null +++ b/test-auto-sort.test.js @@ -0,0 +1,178 @@ +/** + * Tests for autoSortEnabled default value and storage migration. + * Run: node test-auto-sort.test.js + */ + +const assert = require('assert'); + +// ───────────────────────────────────────────────────────────── +// Simulate the background.js handleNewMail check logic +// This mirrors the ACTUAL code path, so we can verify the fix. +// ───────────────────────────────────────────────────────────── + +/** + * Current behavior (before fix): strict check, undefined = disabled. + * This is extracted from background.js L1836. + */ +function handleNewMailCheck_BEFORE_FIX(storageResult) { + if (!storageResult.autoSortEnabled) return false; // early return + if (storageResult.enableAi === false) return false; + return true; +} + +/** + * Fixed behavior: undefined defaults to enabled (backward compat). + */ +function handleNewMailCheck_AFTER_FIX(storageResult) { + const autoSortEnabled = storageResult.autoSortEnabled !== false; + if (!autoSortEnabled) return false; + if (storageResult.enableAi === false) return false; + return true; +} + +// ───────────────────────────────────────────────────────────── +// Simulate the options.js save logic +// ───────────────────────────────────────────────────────────── + +/** + * Current behavior (before fix): checkbox unchecked = false. + * Mirrors options.js L1417. + */ +function getAutoSortValue_BEFORE_FIX(checkboxChecked) { + return checkboxChecked; // defaults to false if checkbox unchecked +} + +/** + * Fixed behavior: defaults to true. + */ +function getAutoSortValue_AFTER_FIX(checkboxChecked, checkboxDefault = true) { + return checkboxChecked !== undefined ? checkboxChecked : checkboxDefault; +} + +// ───────────────────────────────────────────────────────────── +// Simulate storage migration +// ───────────────────────────────────────────────────────────── + +/** + * Migrates legacy storage to include autoSortEnabled. + */ +function migrateAutoSortStorage(storageResult) { + if (storageResult.autoSortEnabled === undefined) { + storageResult.autoSortEnabled = true; + } + return storageResult; +} + +// ───────────────────────────────────────────────────────────── +// TESTS +// ───────────────────────────────────────────────────────────── + +let passed = 0; +let failed = 0; + +function test(name, fn) { + try { + fn(); + console.log(`✓ ${name}`); + passed++; + } catch (e) { + console.log(`✗ ${name}`); + console.log(` ${e.message}`); + failed++; + } +} + +// ── BEFORE FIX: Verify current behavior is broken ── + +console.log('\n── BEFORE FIX (should show the bug) ──\n'); + +test('BEFORE: new user (undefined autoSortEnabled) → auto-sort DISABLED (BUG)', () => { + const storage = {}; // fresh install, no autoSortEnabled key + const result = handleNewMailCheck_BEFORE_FIX(storage); + assert.strictEqual(result, false, 'Expected auto-sort to be disabled for new users'); +}); + +test('BEFORE: user who migrated from old version → auto-sort DISABLED (BUG)', () => { + const storage = { enableAi: true }; // old storage, no autoSortEnabled + const result = handleNewMailCheck_BEFORE_FIX(storage); + assert.strictEqual(result, false, 'Expected auto-sort to be disabled for migrated users'); +}); + +test('BEFORE: user who explicitly enabled → auto-sort ENABLED', () => { + const storage = { autoSortEnabled: true, enableAi: true }; + const result = handleNewMailCheck_BEFORE_FIX(storage); + assert.strictEqual(result, true); +}); + +test('BEFORE: user who explicitly disabled → auto-sort DISABLED', () => { + const storage = { autoSortEnabled: false, enableAi: true }; + const result = handleNewMailCheck_BEFORE_FIX(storage); + assert.strictEqual(result, false); +}); + +// ── AFTER FIX: Verify corrected behavior ── + +console.log('\n── AFTER FIX (should all pass) ──\n'); + +test('AFTER: new user (undefined autoSortEnabled) → auto-sort ENABLED', () => { + const storage = {}; + const result = handleNewMailCheck_AFTER_FIX(storage); + assert.strictEqual(result, true, 'Expected auto-sort to be enabled by default'); +}); + +test('AFTER: user who migrated from old version → auto-sort ENABLED', () => { + const storage = { enableAi: true }; + const result = handleNewMailCheck_AFTER_FIX(storage); + assert.strictEqual(result, true, 'Expected auto-sort to be enabled for migrated users'); +}); + +test('AFTER: user who explicitly disabled → auto-sort DISABLED (respect choice)', () => { + const storage = { autoSortEnabled: false, enableAi: true }; + const result = handleNewMailCheck_AFTER_FIX(storage); + assert.strictEqual(result, false, 'Expected explicit false to be respected'); +}); + +test('AFTER: user who explicitly enabled → auto-sort ENABLED', () => { + const storage = { autoSortEnabled: true, enableAi: true }; + const result = handleNewMailCheck_AFTER_FIX(storage); + assert.strictEqual(result, true); +}); + +test('AFTER: enableAi disabled → auto-sort DISABLED regardless of autoSortEnabled', () => { + const storage = { autoSortEnabled: true, enableAi: false }; + const result = handleNewMailCheck_AFTER_FIX(storage); + assert.strictEqual(result, false); +}); + +test('AFTER: both undefined → auto-sort ENABLED (both default to on)', () => { + const storage = {}; + const result = handleNewMailCheck_AFTER_FIX(storage); + assert.strictEqual(result, true); +}); + +// ── Migration test ── + +console.log('\n── Storage Migration ──\n'); + +test('Migration: adds autoSortEnabled=true when missing', () => { + const storage = { enableAi: true }; + const migrated = migrateAutoSortStorage({ ...storage }); + assert.strictEqual(migrated.autoSortEnabled, true); +}); + +test('Migration: does not overwrite existing true', () => { + const storage = { autoSortEnabled: true }; + const migrated = migrateAutoSortStorage({ ...storage }); + assert.strictEqual(migrated.autoSortEnabled, true); +}); + +test('Migration: does not overwrite existing false', () => { + const storage = { autoSortEnabled: false }; + const migrated = migrateAutoSortStorage({ ...storage }); + assert.strictEqual(migrated.autoSortEnabled, false); +}); + +// ── Summary ── + +console.log(`\n── Results: ${passed} passed, ${failed} failed ──\n`); +process.exit(failed > 0 ? 1 : 0);