AgentOp

Preview: Resume Optimizer

Transform your resume into an ATS-winning document. AI-powered analysis extracts keywords from job descriptions, optimizes bullet points, fixes formatting issues, and boosts your ATS compatibility score.

Preview Mode

This is a preview with sample data. The template uses placeholders such as `{{ agent_name }}` and `{{ css_code }}`, which will be replaced with actual agent data.

Template Preview

Template Metadata

Slug
resume-optimizer
Created By
ozzo
Created
Feb 28, 2026
Usage Count
0

Tags

resume career ats job-search optimization

Code Statistics

HTML Lines
278
CSS Lines
714
JS Lines
994
Python Lines
599

Source Code

<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta http-equiv="X-UA-Compatible" content="IE=edge" />
  <meta name="viewport" content="width=device-width,initial-scale=1" />
  <title>{{ agent_name }} - Resume Optimizer</title>

  <style>
    {{ css_code }}
  </style>

  <!-- Conditional Script Imports -->
  {% if needs_pyodide %}
  <script src="https://cdn.jsdelivr.net/pyodide/v{{ pyodide_version }}/full/pyodide.js"></script>
  {% endif %}

  <!-- jsPDF for PDF export -->
  <script src="https://cdnjs.cloudflare.com/ajax/libs/jspdf/2.5.1/jspdf.umd.min.js"></script>

  <script>
    const PROVIDER = "{{ embedded_provider }}";
    const API_KEY = "{{ embedded_api_key }}";
    const AGENT_CONFIG = {{ default_config|safe }};
    const NEEDS_PYODIDE = {{ needs_pyodide|lower }};
    const PYODIDE_VERSION = "{{ pyodide_version }}";
  </script>
</head>
<body>
<div class="resume-optimizer-dashboard">
  <!-- Left Panel: Input & Settings -->
  <div class="left-panel">
    <div class="panel-header">
      <div class="icon-badge">📄</div>
      <div>
        <h2>Resume Optimizer</h2>
        <p class="tagline">Transform your resume into an ATS-winning document</p>
      </div>
    </div>
    
    <!-- Resume Upload Section -->
    <div class="section">
      <h3>📄 Upload Your Resume</h3>
      <div class="upload-area" id="resume-upload-area">
        <div class="upload-content">
          <div class="upload-icon">📤</div>
          <p class="upload-text">Drag & drop your resume<br>or click to browse</p>
          <p class="upload-hint">Supported: PDF, DOCX, TXT • Max 10 MB</p>
        </div>
        <input type="file" id="resume-file-input" accept=".pdf,.docx,.txt" hidden>
      </div>
      
      <button class="secondary-btn" id="paste-resume-btn" onclick="toggleResumeTextarea()">
        📋 Or Paste Resume Text
      </button>
      
      <textarea id="resume-text-input" 
                class="text-input hidden" 
                placeholder="Paste your resume text here..."
                rows="6"></textarea>
    </div>
    
    <!-- Job Description Section -->
    <div class="section">
      <h3>🎯 Target Job Description</h3>
      <textarea id="job-description-input" 
                class="text-input" 
                placeholder="Paste the job posting you're applying for..."
                rows="8"></textarea>
      <div class="char-count" id="jd-char-count">0 / 5000 characters</div>
      
      <div class="button-group">
        <button class="secondary-btn" onclick="loadSampleJob()">📂 Load Sample</button>
      </div>
    </div>
    
    <!-- Settings Section -->
    <div class="section">
      <h3>⚙️ Optimization Settings</h3>
      
      <label class="form-label">Target Industry</label>
      <select id="industry-select" class="select-input">
        <option value="technology">Technology</option>
        <option value="finance">Finance</option>
        <option value="healthcare">Healthcare</option>
        <option value="marketing">Marketing & Sales</option>
        <option value="engineering">Engineering</option>
        <option value="consulting">Consulting</option>
        <option value="other">Other</option>
      </select>
      
      <label class="form-label">Target Role Level</label>
      <div class="radio-group">
        <label class="radio-label">
          <input type="radio" name="level" value="entry" checked>
          <span>Entry-level</span>
        </label>
        <label class="radio-label">
          <input type="radio" name="level" value="mid">
          <span>Mid-level</span>
        </label>
        <label class="radio-label">
          <input type="radio" name="level" value="senior">
          <span>Senior</span>
        </label>
        <label class="radio-label">
          <input type="radio" name="level" value="executive">
          <span>Executive</span>
        </label>
      </div>
      
      <label class="form-label">Optimization Options</label>
      <div class="checkbox-group">
        <label class="checkbox-label">
          <input type="checkbox" checked>
          <span>Extract keywords from job</span>
        </label>
        <label class="checkbox-label">
          <input type="checkbox" checked>
          <span>Optimize bullet points</span>
        </label>
        <label class="checkbox-label">
          <input type="checkbox" checked>
          <span>Fix ATS formatting</span>
        </label>
        <label class="checkbox-label">
          <input type="checkbox" checked>
          <span>Add missing keywords</span>
        </label>
        <label class="checkbox-label">
          <input type="checkbox" checked>
          <span>Highlight achievements</span>
        </label>
        <label class="checkbox-label">
          <input type="checkbox" checked>
          <span>Suggest action verbs</span>
        </label>
      </div>
      
      <button class="primary-btn" id="optimize-btn" onclick="optimizeResume()">
        <span class="btn-icon">🚀</span>
        <span>Optimize Resume</span>
      </button>
    </div>
  </div>
  
  <!-- Center Panel: Preview & Editor -->
  <div class="center-panel">
    <div class="panel-header">
      <h2>Resume Preview</h2>
      <div class="view-toggle">
        <button class="toggle-btn active" data-view="original" onclick="switchView('original')">Original</button>
        <button class="toggle-btn" data-view="optimized" onclick="switchView('optimized')">Optimized</button>
        <button class="toggle-btn" data-view="comparison" onclick="switchView('comparison')">Compare</button>
      </div>
    </div>
    
    <div class="preview-container" id="preview-container">
      <div class="welcome-state" id="welcome-state">
        <div class="welcome-icon">📄</div>
        <h3>Ready to Optimize</h3>
        <p>Upload your resume and paste a job description to get started</p>
        <div class="feature-list">
          <div class="feature-item">✨ AI-Powered Analysis</div>
          <div class="feature-item">📊 ATS Compatibility Score</div>
          <div class="feature-item">🎯 Keyword Optimization</div>
          <div class="feature-item">💡 Smart Recommendations</div>
        </div>
      </div>
      
      <!-- Original View -->
      <div class="resume-view hidden" id="original-view">
        <div class="resume-document">
          <div id="original-content"></div>
        </div>
      </div>
      
      <!-- Optimized View -->
      <div class="resume-view hidden" id="optimized-view">
        <div class="resume-document">
          <div id="optimized-content"></div>
        </div>
      </div>
      
      <!-- Comparison View -->
      <div class="comparison-view hidden" id="comparison-view">
        <div class="comparison-split">
          <div class="comparison-side">
            <div class="comparison-label">Original</div>
            <div class="resume-document">
              <div id="comparison-original"></div>
            </div>
          </div>
          <div class="comparison-divider"></div>
          <div class="comparison-side">
            <div class="comparison-label">Optimized</div>
            <div class="resume-document">
              <div id="comparison-optimized"></div>
            </div>
          </div>
        </div>
      </div>
    </div>
    
    <!-- Export Section -->
    <div class="export-section hidden" id="export-section">
      <h3>📤 Export & Download</h3>
      <div class="button-group">
        <button class="primary-btn" onclick="downloadResume('pdf')">
          📄 Download as PDF
        </button>
        <button class="primary-btn" onclick="downloadResume('docx')">
          📋 Download as DOCX
        </button>
        <button class="secondary-btn" onclick="downloadResume('txt')">
          📝 Download as TXT
        </button>
        <button class="secondary-btn" onclick="copyToClipboard()">
          📋 Copy to Clipboard
        </button>
      </div>
    </div>
  </div>
  
  <!-- Right Panel: Analysis -->
  <div class="right-panel">
    <!-- ATS Score -->
    <div class="analysis-card">
      <h3>🎯 ATS Compatibility Score</h3>
      <div id="ats-score-display" class="score-display">
        <div class="score-placeholder">
          <div class="placeholder-icon">📊</div>
          <p>Upload resume to see score</p>
        </div>
      </div>
    </div>
    
    <!-- Keyword Analysis -->
    <div class="analysis-card">
      <h3>🔑 Keyword Analysis</h3>
      <div id="keyword-analysis-display" class="keyword-display">
        <div class="score-placeholder">
          <div class="placeholder-icon">🔍</div>
          <p>Analyzing keywords...</p>
        </div>
      </div>
    </div>
    
    <!-- Issues & Recommendations -->
    <div class="analysis-card">
      <h3>⚠️ Issues & Recommendations</h3>
      <div id="recommendations-display" class="recommendations-display">
        <div class="score-placeholder">
          <div class="placeholder-icon">💡</div>
          <p>Recommendations will appear here</p>
        </div>
      </div>
    </div>
  </div>
  
  <!-- Loading Overlay -->
  <div class="loading-overlay hidden" id="loading-overlay">
    <div class="loading-spinner"></div>
    <p class="loading-text">Analyzing your resume...</p>
  </div>
</div>

<script>
{{ js_code }}
</script>

<!-- Hidden Python code -->
<script type="text/python" id="python-code">
{{ python_code }}
</script>

</body>
</html>
:root {
  --primary-blue: #1E40AF;
  --accent-green: #10B981;
  --bg-gray: #F3F4F6;
  --text-dark: #1F2937;
  --text-muted: #6B7280;
  --border: #E5E7EB;
  --white: #FFFFFF;
  --red: #EF4444;
  --yellow: #FBBF24;
  --success: #10B981;
  --shadow: 0 1px 3px rgba(0,0,0,0.1);
  --shadow-lg: 0 10px 25px rgba(0,0,0,0.1);
}

* {
  box-sizing: border-box;
}

body {
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Inter', 'Poppins', sans-serif;
  background: linear-gradient(135deg, #EFF6FF 0%, #F9FAFB 100%);
  color: var(--text-dark);
  line-height: 1.6;
}

.resume-optimizer-dashboard {
  max-width: 1800px;
  margin: 0 auto;
  padding: 1.5rem;
  display: grid;
  grid-template-columns: 380px 1fr 380px;
  gap: 1.5rem;
  min-height: 100vh;
}

/* Panel Styles */
.left-panel, .center-panel, .right-panel {
  background: var(--white);
  border-radius: 12px;
  padding: 1.5rem;
  box-shadow: var(--shadow);
  border: 1px solid var(--border);
  height: fit-content;
}

.center-panel {
  min-height: 600px;
}

.right-panel {
  display: flex;
  flex-direction: column;
  gap: 1rem;
}

/* Panel Headers */
.panel-header {
  display: flex;
  align-items: center;
  gap: 1rem;
  margin-bottom: 1.5rem;
  padding-bottom: 1rem;
  border-bottom: 2px solid var(--border);
}

.icon-badge {
  width: 48px;
  height: 48px;
  background: linear-gradient(135deg, var(--primary-blue), #3B82F6);
  border-radius: 12px;
  display: flex;
  align-items: center;
  justify-content: center;
  font-size: 1.5rem;
}

.panel-header h2 {
  margin: 0;
  font-size: 1.25rem;
  color: var(--text-dark);
  font-weight: 700;
}

.tagline {
  margin: 0;
  font-size: 0.875rem;
  color: var(--text-muted);
}

/* Sections */
.section {
  margin-bottom: 1.5rem;
  padding-bottom: 1.5rem;
  border-bottom: 1px solid var(--border);
}

.section:last-child {
  border-bottom: none;
}

.section h3 {
  margin: 0 0 1rem 0;
  font-size: 1rem;
  font-weight: 600;
  color: var(--text-dark);
}

/* Upload Area */
.upload-area {
  border: 2px dashed var(--border);
  border-radius: 8px;
  padding: 2rem;
  text-align: center;
  cursor: pointer;
  transition: all 0.3s ease;
  margin-bottom: 1rem;
  background: var(--bg-gray);
}

.upload-area:hover, .upload-area.drag-over {
  border-color: var(--primary-blue);
  background: #EFF6FF;
}

.upload-area.has-file {
  border-color: var(--accent-green);
  background: #F0FDF4;
}

.upload-icon {
  font-size: 2.5rem;
  margin-bottom: 0.5rem;
}

.upload-text {
  margin: 0.5rem 0;
  font-weight: 500;
  color: var(--text-dark);
}

.upload-hint {
  margin: 0;
  font-size: 0.75rem;
  color: var(--text-muted);
}

/* Form Elements */
.text-input {
  width: 100%;
  padding: 0.75rem;
  border: 1px solid var(--border);
  border-radius: 8px;
  font-family: inherit;
  font-size: 0.875rem;
  resize: vertical;
  transition: all 0.2s;
}

.text-input:focus {
  outline: none;
  border-color: var(--primary-blue);
  box-shadow: 0 0 0 3px rgba(30, 64, 175, 0.1);
}

.select-input {
  width: 100%;
  padding: 0.75rem;
  border: 1px solid var(--border);
  border-radius: 8px;
  font-family: inherit;
  font-size: 0.875rem;
  background: var(--white);
  cursor: pointer;
  transition: all 0.2s;
  margin-bottom: 1rem;
}

.select-input:focus {
  outline: none;
  border-color: var(--primary-blue);
  box-shadow: 0 0 0 3px rgba(30, 64, 175, 0.1);
}

.form-label {
  display: block;
  font-size: 0.875rem;
  font-weight: 500;
  margin-bottom: 0.5rem;
  color: var(--text-dark);
}

.char-count {
  font-size: 0.75rem;
  color: var(--text-muted);
  text-align: right;
  margin-top: 0.25rem;
}

/* Radio & Checkbox Groups */
.radio-group, .checkbox-group {
  display: flex;
  flex-direction: column;
  gap: 0.75rem;
  margin-bottom: 1rem;
}

.radio-label, .checkbox-label {
  display: flex;
  align-items: center;
  gap: 0.5rem;
  cursor: pointer;
  font-size: 0.875rem;
}

.radio-label input, .checkbox-label input {
  cursor: pointer;
}

/* Buttons */
.primary-btn, .secondary-btn {
  padding: 0.75rem 1.25rem;
  border-radius: 8px;
  font-weight: 600;
  font-size: 0.875rem;
  cursor: pointer;
  transition: all 0.2s;
  border: none;
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 0.5rem;
  width: 100%;
}

.primary-btn {
  background: var(--primary-blue);
  color: var(--white);
}

.primary-btn:hover:not(:disabled) {
  background: #1E3A8A;
  transform: translateY(-1px);
  box-shadow: var(--shadow-lg);
}

.secondary-btn {
  background: var(--bg-gray);
  color: var(--text-dark);
  border: 1px solid var(--border);
}

.secondary-btn:hover:not(:disabled) {
  background: #E5E7EB;
}

.primary-btn:disabled, .secondary-btn:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}

.btn-icon {
  font-size: 1.125rem;
}

.button-group {
  display: flex;
  gap: 0.5rem;
  flex-wrap: wrap;
}

.button-group button {
  flex: 1;
}

/* View Toggle */
.view-toggle {
  display: flex;
  gap: 0.5rem;
  margin-left: auto;
}

.toggle-btn {
  padding: 0.5rem 1rem;
  border: 1px solid var(--border);
  background: var(--white);
  border-radius: 6px;
  font-size: 0.875rem;
  cursor: pointer;
  transition: all 0.2s;
  font-weight: 500;
}

.toggle-btn:hover {
  background: var(--bg-gray);
}

.toggle-btn.active {
  background: var(--primary-blue);
  color: var(--white);
  border-color: var(--primary-blue);
}

/* Preview Container */
.preview-container {
  min-height: 500px;
  background: var(--bg-gray);
  border-radius: 8px;
  padding: 1.5rem;
  position: relative;
}

/* Welcome State */
.welcome-state {
  text-align: center;
  padding: 3rem 1rem;
}

.welcome-icon {
  font-size: 4rem;
  margin-bottom: 1rem;
}

.welcome-state h3 {
  font-size: 1.5rem;
  margin: 0 0 0.5rem 0;
  color: var(--text-dark);
}

.welcome-state p {
  color: var(--text-muted);
  margin: 0 0 2rem 0;
}

.feature-list {
  display: grid;
  grid-template-columns: repeat(2, 1fr);
  gap: 1rem;
  max-width: 400px;
  margin: 0 auto;
}

.feature-item {
  background: var(--white);
  padding: 0.75rem;
  border-radius: 8px;
  font-size: 0.875rem;
  box-shadow: var(--shadow);
}

/* Resume Views */
.resume-view, .comparison-view {
  animation: fadeIn 0.3s ease;
}

@keyframes fadeIn {
  from { opacity: 0; transform: translateY(10px); }
  to { opacity: 1; transform: translateY(0); }
}

.resume-document {
  background: var(--white);
  border-radius: 8px;
  padding: 2rem;
  box-shadow: var(--shadow);
  min-height: 400px;
  font-size: 0.875rem;
  line-height: 1.6;
}

.resume-document iframe {
  width: 100%;
  height: 900px;
  border: 0;
  border-radius: 8px;
  background: var(--white);
}

.resume-pre {
  white-space: pre-wrap;
  word-break: break-word;
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
  font-size: 0.9rem;
  line-height: 1.6;
  color: var(--text-dark);
}

/* Comparison View */
.comparison-split {
  display: flex;
  flex-direction: column;
  gap: 2rem;
}

.comparison-divider {
  height: 2px;
  background: linear-gradient(to right, transparent, var(--border), transparent);
  margin: 1rem 0;
  border-radius: 1px;
}

.comparison-label {
  font-weight: 700;
  margin-bottom: 1rem;
  color: var(--primary-blue);
  font-size: 1.1rem;
  padding: 0.5rem 0.75rem;
  background: linear-gradient(135deg, #EFF6FF, #F0F9FF);
  border-left: 4px solid var(--primary-blue);
  border-radius: 4px;
}

/* Export Section */
.export-section {
  margin-top: 1rem;
  padding-top: 1rem;
  border-top: 2px solid var(--border);
}

.export-section h3 {
  font-size: 1rem;
  margin: 0 0 1rem 0;
}

/* Analysis Cards */
.analysis-card {
  background: var(--white);
  border-radius: 12px;
  padding: 1.25rem;
  box-shadow: var(--shadow);
  border: 1px solid var(--border);
}

.analysis-card h3 {
  margin: 0 0 1rem 0;
  font-size: 0.95rem;
  font-weight: 600;
  color: var(--text-dark);
}

/* Score Display */
.score-display, .keyword-display, .recommendations-display {
  min-height: 150px;
}

.score-placeholder {
  text-align: center;
  padding: 2rem 1rem;
}

.placeholder-icon {
  font-size: 2.5rem;
  margin-bottom: 0.5rem;
}

.score-placeholder p {
  margin: 0;
  color: var(--text-muted);
  font-size: 0.875rem;
}

/* Score Visualization */
.score-comparison {
  display: flex;
  justify-content: space-around;
  align-items: center;
  margin: 1.5rem 0;
}

.score-item {
  text-align: center;
}

.score-label {
  font-size: 0.75rem;
  color: var(--text-muted);
  text-transform: uppercase;
  margin-bottom: 0.5rem;
}

.score-value {
  font-size: 2rem;
  font-weight: 700;
  color: var(--primary-blue);
}

.score-arrow {
  font-size: 2rem;
  color: var(--accent-green);
}

.score-breakdown {
  background: var(--bg-gray);
  border-radius: 8px;
  padding: 1rem;
  margin-top: 1rem;
}

.score-breakdown-item {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 0.75rem;
  font-size: 0.875rem;
}

.score-breakdown-item:last-child {
  margin-bottom: 0;
}

.score-bar {
  flex: 1;
  height: 8px;
  background: #E5E7EB;
  border-radius: 4px;
  margin: 0 0.75rem;
  overflow: hidden;
}

.score-bar-fill {
  height: 100%;
  background: var(--accent-green);
  border-radius: 4px;
  transition: width 0.5s ease;
}

.score-badge {
  padding: 0.25rem 0.5rem;
  border-radius: 4px;
  font-size: 0.75rem;
  font-weight: 600;
}

.badge-green {
  background: #D1FAE5;
  color: #065F46;
}

.badge-yellow {
  background: #FEF3C7;
  color: #92400E;
}

.badge-red {
  background: #FEE2E2;
  color: #991B1B;
}

/* Keyword Analysis */
.keyword-grid {
  display: grid;
  grid-template-columns: repeat(2, 1fr);
  gap: 0.5rem;
  margin-bottom: 1rem;
}

.keyword-tag {
  padding: 0.5rem;
  border-radius: 6px;
  font-size: 0.75rem;
  display: flex;
  align-items: center;
  gap: 0.5rem;
}

.keyword-tag.matched {
  background: #D1FAE5;
  color: #065F46;
}

.keyword-tag.missing {
  background: #FEE2E2;
  color: #991B1B;
}

.keyword-density {
  background: var(--bg-gray);
  padding: 1rem;
  border-radius: 8px;
  margin-top: 1rem;
  font-size: 0.875rem;
}

/* Recommendations */
.recommendation-item {
  background: var(--bg-gray);
  border-left: 3px solid var(--border);
  padding: 1rem;
  border-radius: 6px;
  margin-bottom: 0.75rem;
  font-size: 0.875rem;
}

.recommendation-item:last-child {
  margin-bottom: 0;
}

.recommendation-item.critical {
  border-left-color: var(--red);
  background: #FEF2F2;
}

.recommendation-item.warning {
  border-left-color: var(--yellow);
  background: #FFFBEB;
}

.recommendation-item.info {
  border-left-color: var(--primary-blue);
  background: #EFF6FF;
}

.recommendation-header {
  display: flex;
  align-items: center;
  gap: 0.5rem;
  margin-bottom: 0.5rem;
  font-weight: 600;
}

.recommendation-text {
  color: var(--text-dark);
  line-height: 1.5;
}

/* Loading Overlay */
.loading-overlay {
  position: fixed;
  inset: 0;
  background: rgba(0, 0, 0, 0.5);
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  z-index: 1000;
  backdrop-filter: blur(4px);
}

.loading-spinner {
  width: 50px;
  height: 50px;
  border: 4px solid rgba(255, 255, 255, 0.3);
  border-top: 4px solid var(--white);
  border-radius: 50%;
  animation: spin 1s linear infinite;
}

@keyframes spin {
  0% { transform: rotate(0deg); }
  100% { transform: rotate(360deg); }
}

.loading-text {
  color: var(--white);
  margin-top: 1rem;
  font-size: 1rem;
  font-weight: 500;
}

/* Utility Classes */
.hidden {
  display: none !important;
}

/* Responsive Design */
@media (max-width: 1400px) {
  .resume-optimizer-dashboard {
    grid-template-columns: 350px 1fr 350px;
    gap: 1rem;
  }
}

/* Tablet and below: collapse the three-column dashboard into a single
   column and surface the analysis panel above the editor. */
@media (max-width: 1200px) {
  .resume-optimizer-dashboard {
    grid-template-columns: 1fr;
  }
  
  .right-panel {
    order: -1; /* analysis cards first on narrow screens */
  }
  
  .comparison-split {
    /* NOTE(review): .comparison-split is a flex column in the base rule, so
       grid-template-columns has no effect here — presumably left over from an
       earlier grid-based layout; confirm before removing. */
    grid-template-columns: 1fr;
  }
  
  .comparison-divider {
    height: 2px;
    width: 100%;
  }
}

@media (max-width: 768px) {
  .resume-optimizer-dashboard {
    padding: 1rem;
  }
  
  .feature-list {
    grid-template-columns: 1fr;
  }
  
  .keyword-grid {
    grid-template-columns: 1fr;
  }
  
  .button-group {
    flex-direction: column;
  }
  
  .view-toggle {
    flex-wrap: wrap;
  }
}
// Global state shared by the upload, optimize, and preview flows.
let resumeText = '';        // normalized text of the uploaded/pasted resume
let jobDescription = '';    // target job posting text
let optimizedResume = '';   // result produced by the optimizer
let analysisData = {};      // ATS/keyword analysis results

// Uploaded file state (for proper PDF preview)
let uploadedResumeFileType = null;   // MIME type of the last uploaded file
let uploadedResumePdfUrl = null;     // blob: URL for framing the original PDF
let uploadedResumePdfDataUrl = null; // data: URL fallback when on a file:// origin

/**
 * Normalize line endings and drop control characters.
 * Keeps \n and \t (PDF extraction frequently embeds stray controls).
 *
 * @param {string} text  Raw extracted text.
 * @returns {string} Text with CRLF/CR unified to \n and other controls removed.
 */
function _stripControlCharsKeepNewlines(text) {
  // \r\n and bare \r both become \n in a single pass.
  const unified = String(text).replace(/\r\n?/g, '\n');
  // Strip every C0 control (and DEL) except newline and tab.
  return unified.replace(/[\u0000-\u0008\u000B\u000C\u000E-\u001F\u007F]/g, '');
}

/**
 * Repair common spacing damage from PDF text extraction.
 * The replacement order matters: dash collapsing must run before dash
 * expansion, and space collapsing must run last.
 *
 * @param {string} text  Extracted text.
 * @returns {string} Text with punctuation spacing normalized.
 */
function _fixCommonSpacing(text) {
  let result = String(text);
  // Sentence ends stuck together: "word.Word" -> "word. Word" (URLs like
  // "github.com" are lowercase on both sides, so they are untouched).
  result = result.replace(/([a-z])\.([A-Z])/g, '$1. $2');
  // Commas, semicolons, and colons glued to the next word.
  result = result.replace(/([a-zA-Z]),([a-zA-Z])/g, '$1, $2');
  result = result.replace(/([a-zA-Z]);([a-zA-Z])/g, '$1; $2');
  result = result.replace(/([a-zA-Z]):([a-zA-Z])/g, '$1: $2');
  // En dashes always get one space on each side.
  result = result.replace(/\s*–\s*/g, ' – ');
  // Hyphenated lowercase words: "ozgun - ay" -> "ozgun-ay".
  result = result.replace(/([a-z])\s+-\s+([a-z])/g, '$1-$2');
  // Date-style ranges: "July-Present" -> "July - Present".
  result = result.replace(/([A-Z0-9][a-z]*)\s*-\s*([A-Z0-9])/g, '$1 - $2');
  // Non-breaking spaces become plain spaces, then runs collapse to one.
  result = result.replace(/\u00A0/g, ' ');
  result = result.replace(/[ \t]{2,}/g, ' ');
  return result;
}

/**
 * Collapse letter-spaced runs such as "J u l y 2 0 2 2" back into words.
 * Heuristic: only rewrites a line with at least 8 space-separated tokens of
 * which 75%+ are single characters; any other line is returned unchanged.
 *
 * @param {string} line  One line of extracted resume text.
 * @returns {string} The line with spaced-out letters rejoined.
 */
function _collapseSpacedLettersInLine(line) {
  // Detect cases like "J u l y 2 0 2 2" and collapse them.
  const parts = line.split(' ').filter(Boolean);
  if (parts.length < 8) return line;

  const singleCharParts = parts.filter(p => p.length === 1).length;
  const ratio = singleCharParts / parts.length;
  if (ratio < 0.75) return line;

  // Join single-character tokens; keep any longer tokens separated.
  // Example: "J u l y 2 0 2 2 - P r e s e n t" -> "July2022 - Present"
  let joined = '';
  let buffer = '';
  for (const p of parts) {
    if (p.length === 1) {
      // Accumulate consecutive single characters into one word.
      buffer += p;
    } else {
      // A longer token ends the current run: flush it, then emit the token
      // with a separating space on each side.
      if (buffer) {
        joined += buffer;
        buffer = '';
      }
      if (joined && !joined.endsWith(' ')) joined += ' ';
      joined += p;
      joined += ' ';
    }
  }
  // Flush a trailing run of single characters.
  if (buffer) joined += buffer;
  joined = joined.trim();

  // Fix MonthYear like July2022 -> July 2022
  joined = joined.replace(/\b(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)(\d{4})\b/gi, '$1 $2');
  // Fix 2020–February -> 2020 – February
  joined = joined.replace(/\b(\d{4})([A-Za-z])/g, '$1 $2');
  return joined;
}

/**
 * Clean text extracted from an uploaded PDF/DOCX resume: repair known
 * encoding artifacts, strip control characters, normalize bullets, and fix
 * spacing damaged by extraction.
 *
 * @param {string} rawText  Text exactly as produced by the extractor.
 * @returns {string} Normalized, trimmed resume text ('' for empty input).
 */
function normalizeExtractedResumeText(rawText) {
  if (!rawText) return '';
  let text = rawText;

  // 1. Fix specific PDF encoding artifacts (Turkish chars & stars) BEFORE
  //    stripping control characters, which would otherwise delete them.
  text = text.replace(/\x01/g, 'ı'); // \x01 -> ı
  text = text.replace(/\x1F/g, 'ğ'); // \x1F -> ğ
  // NOTE(review): this maps EVERY '&' to a filled star, which also mangles
  // legitimate ampersands (e.g. "R&D") — confirm the artifact glyph. A second
  // chained .replace(/&/g, '☆') ("star rating empty") could never match after
  // this pass consumed all '&', so that dead replace has been removed; the
  // original distinct glyph for the empty star was evidently lost in encoding.
  text = text.replace(/&/g, '★');   // Star rating filled

  // 2. Strip remaining control characters (keeps \n and \t)
  text = _stripControlCharsKeepNewlines(text);

  // 3. Fix PDF bullet artifacts -> canonical "• "
  text = text
    .replace(/%Ï\s*/g, '• ')
    .replace(/%[IÏ]\s*/g, '• ')
    .replace(/\uf0b7\s*/g, '• ')
    .replace(/\u2022\s*/g, '• ')
    .replace(/\s*•\s*/g, '• ');

  // 4. Fix specific spacing issues
  text = text.replace(/(github|linkedin)\.\s+com/gi, '$1.com');
  text = text.replace(/([a-z])([A-Z]{2,})/g, '$1 $2'); // likeGDB -> like GDB
  text = text.replace(/([a-z])([0-9])/g, '$1 $2'); // first5G -> first 5G
  text = text.replace(/\b(I)(have|am|was|will|would|can|could)\b/g, '$1 $2'); // Ihave -> I have

  // 5. Line-by-line cleanup for letter-spaced runs ("J u l y 2 0 2 2")
  const lines = text.split('\n').map(l => _collapseSpacedLettersInLine(l));
  text = lines.join('\n');

  // 6. General punctuation/whitespace fixes
  text = _fixCommonSpacing(text);

  // Re-anchor bullets at the start of their own line
  text = text.replace(/\n\s*•\s*/g, '\n• ');

  return text.trim();
}

/**
 * Lighter-weight cleanup for optimizer output: repair any PDF artifacts that
 * survived the round-trip, then strip control characters and fix spacing.
 *
 * @param {string} rawText  Optimized resume text.
 * @returns {string} Normalized, trimmed text ('' for empty input).
 */
function normalizeOptimizedResumeText(rawText) {
  if (!rawText) return '';
  let text = rawText;

  // Fix artifacts if they survived
  text = text.replace(/%Ï\s*/g, '• ');
  // NOTE(review): the original chained a second .replace(/&/g, '☆') that could
  // never match — the '★' pass already consumed every '&' — so the dead
  // replace has been removed (behavior unchanged).
  text = text.replace(/&/g, '★');

  text = _stripControlCharsKeepNewlines(text);
  text = _fixCommonSpacing(text);
  return text.trim();
}

// File upload handlers
/**
 * Wire up the resume drop zone and the hidden file picker.
 * Clicking the zone opens the picker; drag/drop and picker selection both
 * route the first file to handleFileUpload().
 */
function setupFileUpload() {
  const dropZone = document.getElementById('resume-upload-area');
  const picker = document.getElementById('resume-file-input');

  // The visible zone proxies clicks to the hidden <input type="file">.
  dropZone.addEventListener('click', () => picker.click());

  dropZone.addEventListener('dragover', (event) => {
    event.preventDefault(); // required so the drop event fires
    dropZone.classList.add('drag-over');
  });

  dropZone.addEventListener('dragleave', () => {
    dropZone.classList.remove('drag-over');
  });

  dropZone.addEventListener('drop', (event) => {
    event.preventDefault();
    dropZone.classList.remove('drag-over');
    const dropped = event.dataTransfer.files;
    if (dropped.length > 0) handleFileUpload(dropped[0]);
  });

  picker.addEventListener('change', (event) => {
    const chosen = event.target.files;
    if (chosen.length > 0) handleFileUpload(chosen[0]);
  });
}

/**
 * Validate, read, and preview an uploaded resume file.
 * Side effects: mutates the resumeText / uploadedResume* globals, rewrites the
 * upload area's innerHTML (progress → success/failure), and refreshes the
 * original-resume preview via updateOriginalView().
 *
 * @param {File} file  File chosen via the picker or dropped on the zone.
 */
async function handleFileUpload(file) {
  const uploadArea = document.getElementById('resume-upload-area');
  const maxSize = 10 * 1024 * 1024; // 10MB
  
  // Reject oversized files up front (matches the "Max 10 MB" hint in the UI)
  if (file.size > maxSize) {
    alert('File size exceeds 10 MB limit');
    return;
  }
  
  // Only MIME types the extractor can handle
  const allowedTypes = [
    'application/pdf',
    'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    'application/msword',
    'text/plain'
  ];
  if (!allowedTypes.includes(file.type)) {
    alert('Unsupported file type. Please upload PDF, DOCX, DOC, or TXT');
    return;
  }
  
  // Show loading state
  uploadArea.innerHTML = `
    <div class="upload-content">
      <div class="upload-icon">⏳</div>
      <p class="upload-text">Processing ${file.name}...</p>
    </div>
  `;
  
  try {
    // Keep a blob URL for PDF preview (show actual PDF instead of extracted text).
    // Revoke the URL from any previous upload first so it isn't leaked.
    if (uploadedResumePdfUrl) {
      try { URL.revokeObjectURL(uploadedResumePdfUrl); } catch (e) {}
      uploadedResumePdfUrl = null;
    }
    uploadedResumeFileType = file.type;
    uploadedResumePdfDataUrl = null;
    if (file.type === 'application/pdf') {
      // On a file:// origin, fall back to a data: URL for the preview —
      // presumably because blob URLs can't be framed there; confirm.
      const isFileOrigin = (window.location.protocol === 'file:' || window.location.origin === 'null');
      if (isFileOrigin) {
        uploadedResumePdfUrl = null;
        uploadedResumePdfDataUrl = await new Promise((resolve, reject) => {
          const r = new FileReader();
          r.onload = () => resolve(r.result);
          r.onerror = () => reject(new Error('Failed to read PDF for preview'));
          r.readAsDataURL(file);
        });
      } else {
        uploadedResumePdfUrl = URL.createObjectURL(file);
      }
    }

    // Pull plain text out of the file, then normalize extraction artifacts
    const text = await extractTextFromFile(file);
    resumeText = normalizeExtractedResumeText(text);
    
    // Update UI
    uploadArea.classList.add('has-file');
    uploadArea.innerHTML = `
      <div class="upload-content">
        <div class="upload-icon">✅</div>
        <p class="upload-text">${file.name}</p>
        <p class="upload-hint">${formatFileSize(file.size)} • Click to change</p>
      </div>
    `;
    
    // Update preview
    updateOriginalView(resumeText);
    
  } catch (error) {
    // Surface the failure inside the upload area itself
    console.error('File upload error:', error);
    uploadArea.innerHTML = `
      <div class="upload-content">
        <div class="upload-icon">❌</div>
        <p class="upload-text">Error processing file</p>
        <p class="upload-hint">${error.message}</p>
      </div>
    `;
  }
}

/**
 * Extract plain text from an uploaded resume file.
 * - text/plain: read directly via File.text().
 * - DOCX/DOC:   parsed in-browser via Pyodide + python-docx.
 * - PDF:        parsed in-browser via Pyodide + pypdf (layout mode when available).
 *
 * Bug fix: both FileReader paths now wire up `onerror`. Previously only
 * `onload` was handled, so a failed read left the returned promise pending
 * forever. The `async` promise-executor antipattern was also removed (only
 * the onload callback needs to be async).
 *
 * @param {File} file  The uploaded file.
 * @returns {Promise<string>} Extracted text.
 * @throws {Error} For unsupported types, Pyodide not ready, read failures,
 *                 or files from which no text could be extracted.
 */
async function extractTextFromFile(file) {
  if (file.type === 'text/plain') {
    return await file.text();
  } else if (file.type === 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' || file.type === 'application/msword') {
    // Extract text from DOCX/DOC using python-docx
    return new Promise((resolve, reject) => {
      const reader = new FileReader();
      // Settle the promise if the read itself fails (was missing before).
      reader.onerror = () => reject(new Error('Failed to read document file'));
      reader.onload = async () => {
        try {
          const bytes = new Uint8Array(reader.result);

          // Pyodide must already have been initialized by the host page
          if (!window.pyodide) {
            reject(new Error('Pyodide not initialized yet. Please wait and try again.'));
            return;
          }

          // Install python-docx if not already installed (best-effort)
          try {
            await window.pyodide.loadPackage(['micropip']);
            await window.pyodide.runPythonAsync(`
import micropip
await micropip.install('python-docx')
            `);
          } catch (e) {
            console.log('python-docx already installed or error:', e);
          }

          // Extract text using python-docx
          window.pyodide.globals.set('docx_bytes', bytes);
          const extractedText = await window.pyodide.runPythonAsync(`
import io
from docx import Document

# Read DOCX from bytes
docx_file = io.BytesIO(bytes(docx_bytes.to_py()))
doc = Document(docx_file)

# Extract text from all paragraphs
text_content = []
for paragraph in doc.paragraphs:
    if paragraph.text.strip():
        text_content.append(paragraph.text)

# Join all paragraphs
full_text = "\\n".join(text_content)
full_text
          `);

          if (!extractedText || extractedText.trim().length === 0) {
            reject(new Error('Could not extract text from document. The file may be empty or corrupted.'));
            return;
          }

          resolve(extractedText);
        } catch (error) {
          console.error('DOCX extraction error:', error);
          reject(new Error('Failed to extract document text: ' + error.message));
        }
      };
      reader.readAsArrayBuffer(file);
    });
  } else if (file.type === 'application/pdf') {
    // Extract text from PDF using pypdf (layout mode preserves spaces better)
    return new Promise((resolve, reject) => {
      const reader = new FileReader();
      // Settle the promise if the read itself fails (was missing before).
      reader.onerror = () => reject(new Error('Failed to read PDF file'));
      reader.onload = async () => {
        try {
          const bytes = new Uint8Array(reader.result);

          if (!window.pyodide) {
            reject(new Error('Pyodide not initialized yet. Please wait and try again.'));
            return;
          }

          // Install pypdf if not already installed (best-effort)
          try {
            await window.pyodide.loadPackage(['micropip']);
            await window.pyodide.runPythonAsync(`
import micropip
await micropip.install('pypdf')
            `);
          } catch (e) {
            console.log('pypdf already installed or error:', e);
          }

          // Extract text using pypdf
          window.pyodide.globals.set('pdf_bytes', bytes);
          const extractedText = await window.pyodide.runPythonAsync(`
import io
from pypdf import PdfReader

# Read PDF from bytes
pdf_file = io.BytesIO(bytes(pdf_bytes.to_py()))
pdf_reader = PdfReader(pdf_file)

# Extract text from all pages
text_content = []
for page in pdf_reader.pages:
    try:
        # layout mode tends to keep spaces/columns better when available
        page_text = page.extract_text(extraction_mode="layout")
    except TypeError:
        page_text = page.extract_text()
    except Exception:
        page_text = page.extract_text()
    if page_text:
        text_content.append(page_text)

# Join all pages
full_text = "\\n\\n".join(text_content)
full_text
          `);

          if (!extractedText || extractedText.trim().length === 0) {
            reject(new Error('Could not extract text from PDF. The file may be scanned or image-based.'));
            return;
          }

          resolve(extractedText);
        } catch (error) {
          console.error('PDF extraction error:', error);
          reject(new Error('Failed to extract PDF text: ' + error.message));
        }
      };
      reader.readAsArrayBuffer(file);
    });
  } else {
    throw new Error('Unsupported file type');
  }
}

function escapeHtml(text) {
  // Escape the five HTML-significant characters in a single regex pass so
  // the result is safe to interpolate into innerHTML.
  const entities = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&#039;'
  };
  return String(text).replace(/[&<>"']/g, (ch) => entities[ch]);
}

function formatFileSize(bytes) {
  // Human-readable, base-1024 file size (e.g. 1536 -> "1.5 KB").
  // The unit index is clamped: previously sizes >= 1 TB indexed past the
  // end of `sizes` (yielding "undefined") and fractional byte counts
  // produced a negative index.
  if (bytes === 0) return '0 Bytes';
  const k = 1024;
  const sizes = ['Bytes', 'KB', 'MB', 'GB'];
  let i = Math.floor(Math.log(bytes) / Math.log(k));
  i = Math.min(Math.max(i, 0), sizes.length - 1);
  return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}

// Text input handlers
function toggleResumeTextarea() {
  // Flip the paste-resume textarea between hidden and visible, keeping
  // the toggle button's label in sync with the new state.
  const textarea = document.getElementById('resume-text-input');
  const btn = document.getElementById('paste-resume-btn');

  // classList.toggle returns true when 'hidden' is now present.
  const nowHidden = textarea.classList.toggle('hidden');
  btn.textContent = nowHidden
    ? '📋 Or Paste Resume Text'
    : '📄 Use File Upload Instead';
}

function setupTextInputs() {
  // Wire the two paste-in textareas to application state.
  const resumeBox = document.getElementById('resume-text-input');
  const jobBox = document.getElementById('job-description-input');
  const counter = document.getElementById('jd-char-count');

  // Resume text: mirror into the global and refresh the preview pane.
  resumeBox.addEventListener('input', (event) => {
    resumeText = event.target.value;
    updateOriginalView(resumeText);
  });

  // Job description: mirror into the global and keep the character
  // counter in sync, turning it red past the 5000-character soft limit.
  jobBox.addEventListener('input', (event) => {
    jobDescription = event.target.value;
    const length = jobDescription.length;
    counter.textContent = `${length} / 5000 characters`;
    counter.style.color = length > 5000 ? 'var(--red)' : 'var(--text-muted)';
  });
}

// View switching
function switchView(viewName) {
  // Highlight the toggle button matching the requested view.
  for (const btn of document.querySelectorAll('.toggle-btn')) {
    btn.classList.toggle('active', btn.dataset.view === viewName);
  }

  // Hide every pane, then reveal the one requested (if it maps to a pane).
  const allPanes = ['welcome-state', 'original-view', 'optimized-view', 'comparison-view'];
  for (const id of allPanes) {
    document.getElementById(id)?.classList.add('hidden');
  }

  const paneByView = {
    original: 'original-view',
    optimized: 'optimized-view',
    comparison: 'comparison-view'
  };
  const targetId = paneByView[viewName];
  if (targetId) {
    document.getElementById(targetId)?.classList.remove('hidden');
  }
}

function updateOriginalView(text) {
  // Render the uploaded/pasted resume into the "original" pane and the
  // left half of the comparison pane, then jump to the original view.
  // No-op for empty/whitespace-only input.
  if (!text || !text.trim()) return;

  document.getElementById('welcome-state')?.classList.add('hidden');
  document.getElementById('original-view')?.classList.remove('hidden');

  const originalContent = document.getElementById('original-content');
  const comparisonOriginal = document.getElementById('comparison-original');

  // PDFs are previewed directly in an iframe for an accurate rendering;
  // everything else is shown as escaped, formatted text.
  const pdfSrc = uploadedResumePdfDataUrl || uploadedResumePdfUrl;
  let markup;
  if (uploadedResumeFileType === 'application/pdf' && pdfSrc) {
    markup = `<iframe src="${pdfSrc}" title="Resume PDF Preview"></iframe>`;
  } else {
    markup = formatResumeText(text);
  }
  originalContent.innerHTML = markup;
  comparisonOriginal.innerHTML = markup;

  // Auto-switch to the original view.
  switchView('original');
}

function formatResumeText(text) {
  // Wrap in <pre> so the resume's exact spacing and newlines survive
  // rendering; escape first so the text cannot inject markup.
  const safe = escapeHtml(text);
  return '<pre class="resume-pre">' + safe + '</pre>';
}

// Optimization
/**
 * Run the end-to-end optimization flow:
 *   1. validate that a resume and a job description are present and that
 *      the local WebLLM agent has been loaded,
 *   2. build a prompt instructing the model to call the optimize_resume
 *      Python tool exactly once and answer with STRICT JSON,
 *   3. execute it through the Pyodide `process_user_query` pipeline and
 *      parse the model's JSON reply,
 *   4. refresh the optimized/comparison views and the analysis panel.
 *
 * Reads globals: resumeText, jobDescription, window.agentManager,
 * window.pyodide. Writes globals: optimizedResume, analysisData.
 */
async function optimizeResume() {
  if (!resumeText || !resumeText.trim()) {
    alert('Please upload or paste your resume first');
    return;
  }
  
  if (!jobDescription.trim()) {
    alert('Please paste the job description');
    return;
  }

  // For local provider, optimization is driven by the in-browser WebLLM agent.
  // The model MUST be loaded first via the model selector.
  if (!window.agentManager || !window.agentManager.agent) {
    alert('Please load an AI model first (top bar) before optimizing.');
    return;
  }
  
  // Busy state: disable the button and show the overlay for the duration.
  const optimizeBtn = document.getElementById('optimize-btn');
  const loadingOverlay = document.getElementById('loading-overlay');
  
  optimizeBtn.disabled = true;
  optimizeBtn.innerHTML = '<span class="btn-icon">⏳</span><span>Optimizing...</span>';
  loadingOverlay.classList.remove('hidden');
  
  try {
    // Get settings
    const industry = document.getElementById('industry-select').value;
    const level = document.querySelector('input[name="level"]:checked').value;

    // Use cleaned text for best results (PDF extraction can introduce artifacts)
    const resumeForOptimization = normalizeExtractedResumeText(resumeText);

    // Route through the unified agent pipeline so WebLLM decides tool calls.
    // We instruct the model to call optimize_resume (Python tool) and then
    // synthesize a STRICT JSON response for the UI.
    const query = `You are a resume optimization assistant.\n\n` +
      `You have access to Python tools. You MUST call the tool optimize_resume exactly once to compute ATS/keyword analysis and recommendations.\n` +
      `After the tool returns, you MUST respond with ONLY a valid JSON object (no markdown, no backticks, no extra text).\n\n` +
      `JSON schema required:\n` +
      `{\n` +
      `  "optimized_resume": string,\n` +
      `  "ats_score": {"before": number, "after": number, "breakdown": object},\n` +
      `  "keyword_analysis": {"matched": array, "missing": array, "total": number, "density": number},\n` +
      `  "recommendations": [{"type": "critical"|"warning"|"info", "title": string, "text": string}]\n` +
      `}\n\n` +
      `Input parameters for optimize_resume:\n` +
      `- resume_text: ${JSON.stringify(resumeForOptimization)}\n` +
      `- job_description: ${JSON.stringify(jobDescription)}\n` +
      `- industry: ${JSON.stringify(industry)}\n` +
      `- experience_level: ${JSON.stringify(level)}\n\n` +
      `Optimization requirements for optimized_resume:\n` +
      `- Keep the original structure and headings\n` +
      `- Strengthen action verbs and bullet points\n` +
      `- Incorporate missing keywords naturally (do not keyword-stuff)\n` +
      `- Keep it ATS-friendly and professional\n` +
      `- Remove duplicated lines/headings (e.g., repeated name/contact)\n` +
      `- Fix spacing/line breaks; do not leave dangling words on their own line\n` +
      `- Use consistent bullet formatting and avoid incomplete bullets\n` +
      `- Replace placeholders/typos (e.g., 'Matched') with correct, meaningful wording\n`;

    // Tolerant JSON parse: models sometimes wrap the JSON in extra prose,
    // so fall back to the outermost {...} slice before giving up.
    const parseMaybeJson = (text) => {
      try {
        return JSON.parse(text);
      } catch (_) {
        const start = text.indexOf('{');
        const end = text.lastIndexOf('}');
        if (start >= 0 && end > start) {
          const slice = text.slice(start, end + 1);
          return JSON.parse(slice);
        }
        throw new Error('Model did not return valid JSON');
      }
    };

    window.pyodide.globals.set('user_query', query);
    const resultText = await window.pyodide.runPythonAsync(`process_user_query(user_query)`);

    // Parse model result (must be JSON)
    const data = parseMaybeJson(resultText);
    if (data && typeof data.optimized_resume === 'string') {
      data.optimized_resume = normalizeOptimizedResumeText(data.optimized_resume);
    }
    
    optimizedResume = data.optimized_resume;
    analysisData = data;
    
    // Update views
    updateOptimizedView(data);
    updateAnalysisPanel(data);
    
    // Show export section
    document.getElementById('export-section').classList.remove('hidden');
    
    // Switch to comparison view
    switchView('comparison');
    
  } catch (error) {
    console.error('Optimization error:', error);
    alert('Error optimizing resume: ' + error.message);
  } finally {
    // Always restore the button and overlay, on success or failure.
    optimizeBtn.disabled = false;
    optimizeBtn.innerHTML = '<span class="btn-icon">🚀</span><span>Optimize Resume</span>';
    loadingOverlay.classList.add('hidden');
  }
}

function updateOptimizedView(data) {
  // Push the optimized resume text into both the standalone "optimized"
  // pane and the right half of the comparison pane.
  const html = formatResumeText(normalizeOptimizedResumeText(data.optimized_resume));
  for (const id of ['optimized-content', 'comparison-optimized']) {
    document.getElementById(id).innerHTML = html;
  }
}

function updateAnalysisPanel(data) {
  // Fan the model's analysis payload out to the three side-panel widgets.
  const { ats_score, keyword_analysis, recommendations } = data;
  updateATSScore(ats_score);
  updateKeywordAnalysis(keyword_analysis);
  updateRecommendations(recommendations);
}

/**
 * Render before/after ATS scores plus the per-category breakdown bars.
 * scoreData: { before, after, breakdown: {label: 0-100} }.
 *
 * Breakdown labels come from model output (untrusted text) and are now
 * escaped before being interpolated into innerHTML; previously they were
 * injected raw, allowing markup injection.
 */
function updateATSScore(scoreData) {
  const scoreDisplay = document.getElementById('ats-score-display');
  
  scoreDisplay.innerHTML = `
    <div class="score-comparison">
      <div class="score-item">
        <div class="score-label">Before</div>
        <div class="score-value" style="color: var(--yellow)">${scoreData.before}%</div>
      </div>
      <div class="score-arrow">→</div>
      <div class="score-item">
        <div class="score-label">After</div>
        <div class="score-value" style="color: var(--accent-green)">${scoreData.after}%</div>
      </div>
    </div>
    
    <div class="score-breakdown">
      <h4 style="margin: 0 0 0.75rem 0; font-size: 0.875rem;">Score Breakdown</h4>
      ${Object.entries(scoreData.breakdown).map(([key, value]) => `
        <div class="score-breakdown-item">
          <span>${escapeHtml(key)}</span>
          <div class="score-bar">
            <div class="score-bar-fill" style="width: ${value}%"></div>
          </div>
          <span class="score-badge ${value >= 80 ? 'badge-green' : value >= 60 ? 'badge-yellow' : 'badge-red'}">
            ${value}/100
          </span>
        </div>
      `).join('')}
    </div>
  `;
}

/**
 * Render matched/missing keyword chips and the density summary.
 * keywordData: { matched: [], missing: [], total, density }.
 *
 * Keyword strings originate from model output (untrusted text) and are
 * now escaped before being interpolated into innerHTML; previously they
 * were injected raw, allowing markup injection.
 */
function updateKeywordAnalysis(keywordData) {
  const keywordDisplay = document.getElementById('keyword-analysis-display');
  
  keywordDisplay.innerHTML = `
    <p style="margin: 0 0 1rem 0; font-size: 0.875rem;">
      <strong>Matched Keywords:</strong> ${keywordData.matched.length}/${keywordData.total}
    </p>
    
    <div class="keyword-grid">
      ${keywordData.matched.map(kw => `
        <div class="keyword-tag matched">
          <span>✅</span>
          <span>${escapeHtml(kw)}</span>
        </div>
      `).join('')}
      ${keywordData.missing.map(kw => `
        <div class="keyword-tag missing">
          <span>❌</span>
          <span>${escapeHtml(kw)}</span>
        </div>
      `).join('')}
    </div>
    
    <div class="keyword-density">
      <strong>Keyword Density:</strong> ${keywordData.density}%
      <br>
      <span style="color: var(--text-muted); font-size: 0.875rem;">
        ${keywordData.density >= 2 && keywordData.density <= 3 ? '✅ Optimal (2-3%)' : '⚠️ Needs adjustment'}
      </span>
    </div>
  `;
}

/**
 * Render the recommendation cards (critical/warning/info).
 *
 * Titles and body text come from model output (untrusted text) and are
 * now escaped before being interpolated into innerHTML; previously they
 * were injected raw, allowing markup injection.
 */
function updateRecommendations(recommendations) {
  const recDisplay = document.getElementById('recommendations-display');
  
  recDisplay.innerHTML = recommendations.map(rec => `
    <div class="recommendation-item ${rec.type}">
      <div class="recommendation-header">
        <span>${rec.type === 'critical' ? '🔴' : rec.type === 'warning' ? '🟡' : '💡'}</span>
        <span>${escapeHtml(rec.title)}</span>
      </div>
      <div class="recommendation-text">${escapeHtml(rec.text)}</div>
    </div>
  `).join('');
}

// Export functions
async function downloadResume(format) {
  // Export the optimized resume as 'pdf', 'docx', or plain text
  // (any other format string falls through to the text download).
  if (!optimizedResume) {
    alert('Please optimize your resume first');
    return;
  }

  switch (format) {
    case 'pdf':
      await downloadAsPDF();
      return;
    case 'docx':
      await downloadAsDOCX();
      return;
  }

  // Plain-text download via a temporary object URL and synthetic click.
  const blob = new Blob([optimizedResume], { type: 'text/plain' });
  const url = URL.createObjectURL(blob);
  const link = document.createElement('a');
  link.href = url;
  link.download = `optimized_resume.${format}`;
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
  URL.revokeObjectURL(url);
}

/**
 * Export the optimized resume as a styled A4 PDF via jsPDF.
 *
 * Layout is heuristic, driven purely by the plain-text structure:
 *   - early short capitalized lines          -> name/title (16pt bold)
 *   - ALL-CAPS or colon-terminated lines     -> section headings (12pt blue)
 *   - lines starting with a bullet character -> indented bullets
 *   - everything else                        -> 10pt body text
 * Tries to embed Roboto so non-ASCII characters render; falls back to
 * Helvetica, and falls back to a TXT download if PDF generation fails.
 */
async function downloadAsPDF() {
  try {
    const { jsPDF } = window.jspdf;
    const doc = new jsPDF({
      unit: 'mm',
      format: 'a4'
    });
    
    // PDF dimensions
    const pageWidth = doc.internal.pageSize.getWidth();
    const pageHeight = doc.internal.pageSize.getHeight();
    const margin = 15;
    const maxLineWidth = pageWidth - (margin * 2);
    const lineHeight = 6;
    let yPosition = margin;
    
    // Set default font
    doc.setFont('helvetica', 'normal');
    doc.setFontSize(10);
    
    const lines = optimizedResume.split('\n');
    
    // Load Unicode font (Roboto) to support Turkish characters and bullets
    let fontName = 'helvetica';
    try {
      const fontUrlReg = 'https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.66/fonts/Roboto/Roboto-Regular.ttf';
      const fontUrlBold = 'https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.66/fonts/Roboto/Roboto-Medium.ttf';
      
      const [bufReg, bufBold] = await Promise.all([
        fetch(fontUrlReg).then(res => res.arrayBuffer()),
        fetch(fontUrlBold).then(res => res.arrayBuffer())
      ]);
      
      // jsPDF's VFS expects base64; convert the fetched ArrayBuffers.
      const toBase64 = (buffer) => {
        let binary = '';
        const bytes = new Uint8Array(buffer);
        const len = bytes.byteLength;
        for (let i = 0; i < len; i++) {
          binary += String.fromCharCode(bytes[i]);
        }
        return window.btoa(binary);
      };

      doc.addFileToVFS('Roboto-Regular.ttf', toBase64(bufReg));
      doc.addFileToVFS('Roboto-Bold.ttf', toBase64(bufBold));
      
      doc.addFont('Roboto-Regular.ttf', 'Roboto', 'normal');
      doc.addFont('Roboto-Bold.ttf', 'Roboto', 'bold');
      
      fontName = 'Roboto';
      doc.setFont(fontName, 'normal');
    } catch (e) {
      console.warn('Could not load Unicode font, falling back to Helvetica', e);
    }

    for (let i = 0; i < lines.length; i++) {
      const line = lines[i];
      const trimmedLine = line.trim();
      
      // Skip empty lines but add spacing
      if (!trimmedLine) {
        yPosition += lineHeight * 0.5;
        continue;
      }
      
      // Name/Title (first few lines, large text)
      if (i < 3 && trimmedLine.length < 50 && trimmedLine.match(/^[A-Z]/)) {
        doc.setFontSize(16);
        doc.setFont(fontName, 'bold');
        
        // Page break when the next line would run past the bottom margin.
        if (yPosition + lineHeight > pageHeight - margin) {
          doc.addPage();
          yPosition = margin;
        }
        
        const textLines = doc.splitTextToSize(trimmedLine, maxLineWidth);
        textLines.forEach((textLine, index) => {
          doc.text(textLine, margin, yPosition + (index * (lineHeight + 2)));
        });
        yPosition += textLines.length * (lineHeight + 2);
        
        doc.setFontSize(10);
        doc.setFont(fontName, 'normal');
        continue;
      }
      
      // Section headings (all caps, short lines, or ending with colon)
      if ((trimmedLine === trimmedLine.toUpperCase() && trimmedLine.length < 60 && trimmedLine.length > 2) ||
          (trimmedLine.endsWith(':') && trimmedLine.length < 50)) {
        yPosition += lineHeight * 0.5; // Extra space before heading
        
        doc.setFontSize(12);
        doc.setFont(fontName, 'bold');
        doc.setTextColor(30, 64, 175); // Blue color
        
        if (yPosition + lineHeight > pageHeight - margin) {
          doc.addPage();
          yPosition = margin;
        }
        
        const textLines = doc.splitTextToSize(trimmedLine, maxLineWidth);
        textLines.forEach((textLine, index) => {
          doc.text(textLine, margin, yPosition + (index * lineHeight));
        });
        yPosition += textLines.length * lineHeight + 2;
        
        doc.setFontSize(10);
        doc.setFont(fontName, 'normal');
        doc.setTextColor(0, 0, 0); // Back to black
        continue;
      }
      
      // Bullet points
      if (trimmedLine.match(/^[•●▪■◆%-]\s/)) {
        doc.setFont(fontName, 'normal');
        
        if (yPosition + lineHeight > pageHeight - margin) {
          doc.addPage();
          yPosition = margin;
        }
        
        // Indent bullets 5mm; wrap within the reduced width.
        const textLines = doc.splitTextToSize(trimmedLine, maxLineWidth - 5);
        textLines.forEach((textLine, index) => {
          doc.text(textLine, margin + 5, yPosition + (index * lineHeight));
        });
        yPosition += textLines.length * lineHeight;
        continue;
      }
      
      // Regular text
      doc.setFont(fontName, 'normal');
      
      if (yPosition + lineHeight > pageHeight - margin) {
        doc.addPage();
        yPosition = margin;
      }
      
      const textLines = doc.splitTextToSize(trimmedLine, maxLineWidth);
      textLines.forEach((textLine, index) => {
        doc.text(textLine, margin, yPosition + (index * lineHeight));
      });
      yPosition += textLines.length * lineHeight;
    }
    
    doc.save('optimized_resume.pdf');
  } catch (error) {
    console.error('PDF export error:', error);
    alert('Failed to create PDF: ' + error.message + '. Downloading as TXT instead.');
    downloadResume('txt');
  }
}

/**
 * Export the optimized resume as a .docx file.
 *
 * Generation happens in Pyodide with python-docx (installed on demand via
 * micropip): ALL-CAPS lines become blue level-2 headings, bullet-prefixed
 * lines become list items, everything else body paragraphs. The resulting
 * bytes are wrapped in a Blob and downloaded via a synthetic click.
 * Falls back to a TXT download on any failure.
 */
async function downloadAsDOCX() {
  try {
    // Wait for Pyodide to be ready
    if (!window.pyodide) {
      alert('Please wait for the system to initialize.');
      return;
    }
    
    // Install python-docx if not already installed
    // (micropip.install is effectively a no-op if the wheel is cached).
    try {
      await window.pyodide.loadPackage(['micropip']);
      await window.pyodide.runPythonAsync(`
import micropip
await micropip.install('python-docx')
      `);
    } catch (e) {
      console.log('python-docx already installed or error:', e);
    }
    
    // Generate DOCX using python-docx
    window.pyodide.globals.set('resume_text', optimizedResume);
    const docxBytes = await window.pyodide.runPythonAsync(`
import io
from docx import Document
from docx.shared import Pt, RGBColor
from docx.enum.text import WD_ALIGN_PARAGRAPH

# Create document
doc = Document()

# Process resume text
lines = resume_text.split('\\n')

for line in lines:
    line = line.strip()
    if not line:
        continue
    
    # Detect headings (all caps)
    if line == line.upper() and len(line) < 50 and len(line) > 3:
        heading = doc.add_heading(line, level=2)
        heading.runs[0].font.color.rgb = RGBColor(30, 64, 175)  # Blue color
    # Detect bullet points
    elif line.startswith(('•', '●', '▪️', '-')):
        p = doc.add_paragraph(line, style='List Bullet')
        p.paragraph_format.left_indent = Pt(20)
    else:
        p = doc.add_paragraph(line)
        p.paragraph_format.space_after = Pt(6)

# Save to bytes
buffer = io.BytesIO()
doc.save(buffer)
buffer.seek(0)
bytes(buffer.read())
    `);
    
    // Convert Python bytes to JavaScript Blob
    const uint8Array = new Uint8Array(docxBytes.toJs());
    const blob = new Blob([uint8Array], { 
      type: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' 
    });
    
    // Download
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = 'optimized_resume.docx';
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
    
  } catch (error) {
    console.error('DOCX export error:', error);
    alert('Failed to create DOCX. Downloading as TXT instead.');
    downloadResume('txt');
  }
}

function copyToClipboard() {
  // Copy the optimized resume text to the system clipboard, alerting the
  // user on either outcome.
  if (!optimizedResume) {
    alert('Please optimize your resume first');
    return;
  }

  navigator.clipboard
    .writeText(optimizedResume)
    .then(() => alert('✅ Copied to clipboard!'))
    .catch((err) => {
      console.error('Copy failed:', err);
      alert('Failed to copy to clipboard');
    });
}

// Sample data
function loadSampleJob() {
  // Fill the job-description box with a canned posting so users can try
  // the optimizer immediately, then fire 'input' so the character counter
  // and state update exactly as if the user had typed it.
  const input = document.getElementById('job-description-input');
  input.value = `Senior Full-Stack Engineer

We are looking for an experienced Full-Stack Engineer to join our growing team. You will be responsible for designing, developing, and maintaining scalable web applications.

Requirements:
• 5+ years of software development experience
• Strong proficiency in JavaScript, Python, and SQL
• Experience with AWS, Docker, and Kubernetes
• Proven track record of building microservices architecture
• Excellent problem-solving and communication skills
• Experience with Agile/Scrum methodologies

Nice to have:
• Experience with React and Node.js
• Knowledge of CI/CD pipelines
• Experience leading technical teams
• Bachelor's degree in Computer Science or related field`;
  input.dispatchEvent(new Event('input'));
}

// Initialize: wire up the file-upload and textarea handlers once the DOM
// is ready.
document.addEventListener('DOMContentLoaded', () => {
  setupFileUpload();
  setupTextInputs();
});
"""
Unified Agent Template - Works with OpenAI, Anthropic, and Local WebLLM
Pure Python tools with conditional LangChain wrapping based on provider.

This template enables ONE codebase for all three providers:
- OpenAI: Uses LangChain with ChatOpenAI
- Anthropic: Uses LangChain with ChatAnthropic  
- Local WebLLM: Routes to JavaScript LangChain.js bridge (NO Python LangChain)

Key features:
- Pure Python tool functions (no decorators at definition time)
- Schema extraction via get_tool_schemas() for WebLLM JavaScript bridge
- Conditional LangChain imports only when needed (cloud providers)
- Runtime tool wrapping with tool() function for cloud providers
"""

import json
import inspect
from typing import get_type_hints

# Provider injected from generator context (openai|anthropic|local)

# ============================================================================
# Schema Extraction Helpers (for all providers)
# ============================================================================

def _python_type_to_json_type(python_type):
    """Convert Python type to JSON schema type."""
    type_mapping = {
        'str': 'string',
        'int': 'integer',
        'float': 'number',
        'bool': 'boolean',
        'list': 'array',
        'dict': 'object',
    }
    type_name = python_type.__name__ if hasattr(python_type, '__name__') else str(python_type)
    return type_mapping.get(type_name, 'string')


def _extract_function_schema(func):
    """
    Extract an OpenAI-style function schema from a Python function.
    Works for all providers - no LangChain dependency.

    The schema is assembled from:
    - the function name,
    - the first paragraph of the docstring (used as the description),
    - the signature's parameters (types from annotations, per-parameter
      descriptions parsed from a Google-style "Args:" docstring section).

    Parameters without a default value are marked required; ``self`` and
    ``cls`` are skipped.

    Args:
        func: Python function with type hints and docstring

    Returns:
        dict: OpenAI function schema ({"name", "description", "parameters"})
    """
    sig = inspect.signature(func)

    # Hint resolution can fail (e.g. forward references to names not
    # importable here); fall back to raw annotations in that case.
    # `except Exception` instead of a bare `except` so KeyboardInterrupt
    # and SystemExit still propagate.
    try:
        hints = get_type_hints(func)
    except Exception:
        hints = {}

    doc = inspect.getdoc(func) or ""
    # First docstring paragraph is the human-readable description.
    description = doc.split('\n\n')[0] if doc else f"Execute {func.__name__}"

    parameters = {
        "type": "object",
        "properties": {},
        "required": []
    }

    # Best-effort parse of "Args:" lines of the form "name: description".
    param_descriptions = {}
    if "Args:" in doc:
        args_section = doc.split("Args:")[1].split("Returns:")[0] if "Returns:" in doc else doc.split("Args:")[1]
        for line in args_section.split('\n'):
            line = line.strip()
            if ':' in line:
                param_name = line.split(':')[0].strip()
                param_desc = line.split(':', 1)[1].strip()
                param_descriptions[param_name] = param_desc

    for param_name, param in sig.parameters.items():
        if param_name in ('self', 'cls'):
            continue

        param_type = hints.get(param_name, param.annotation)
        # Identity (`is`), not equality, for the sentinel: arbitrary
        # annotation objects may define __eq__ and misbehave under `==`.
        if param_type is inspect.Parameter.empty:
            param_type = str

        parameters["properties"][param_name] = {
            "type": _python_type_to_json_type(param_type),
            "description": param_descriptions.get(param_name, f"The {param_name} parameter")
        }

        # No default value means the caller must supply the argument.
        if param.default is inspect.Parameter.empty:
            parameters["required"].append(param_name)

    return {
        "name": func.__name__,
        "description": description,
        "parameters": parameters
    }

# Resume Optimizer AI Agent
# Analyzes resumes against job descriptions and provides optimization recommendations

import json
import re
from typing import Dict, List, Any

# Global state
# API key for cloud providers; left empty here and populated at runtime.
api_key = ""

# Import LangChain
# LangChain is only present in cloud-provider builds (OpenAI/Anthropic);
# in the local WebLLM build these imports fail and `langchain_available`
# lets the rest of the module route around them.
try:
    from langchain_core.messages import HumanMessage, SystemMessage
    from langchain_openai import ChatOpenAI
    langchain_available = True
    print("[OK] LangChain imports successful")
except ImportError as e:
    langchain_available = False
    print(f"[WARNING] LangChain not available: {e}")

def extract_keywords_from_job(job_description: str) -> Dict[str, List[str]]:
    """Extract required skills and keywords from a job description.

    Heuristic parser: capitalized tokens are pulled from the bulleted
    "Requirements:" and "Nice to have:" sections and bucketed by the
    wording of their bullet ("required"/"must"/"essential" -> critical,
    "experience"/"years"/"proven" -> high, otherwise medium).

    The unused `skill_patterns` list from the original implementation has
    been removed (it was defined but never applied).

    Args:
        job_description: Raw job-posting text.

    Returns:
        Dict with 'critical', 'high', 'medium' and 'nice_to_have' lists of
        de-duplicated keyword strings (ordering within a bucket is not
        guaranteed).
    """
    keywords = {
        'critical': [],
        'high': [],
        'medium': [],
        'nice_to_have': []
    }

    # "Requirements" section: everything up to "Nice to have" (or EOF).
    requirements_section = re.search(r'(?i)requirements?:(.*?)(?:nice to have|$)', job_description, re.DOTALL)
    if requirements_section:
        req_text = requirements_section.group(1)
        # One match per bullet line.
        bullets = re.findall(r'[•●▪️-]\s*(.+)', req_text)

        for bullet in bullets:
            # Capitalized tokens, allowing tech spellings like C# / Node.js.
            words = re.findall(r'\b[A-Z][\w+#.-]+\b', bullet)
            if words:
                if any(term in bullet.lower() for term in ['required', 'must', 'essential']):
                    keywords['critical'].extend(words)
                elif any(term in bullet.lower() for term in ['experience', 'years', 'proven']):
                    keywords['high'].extend(words)
                else:
                    keywords['medium'].extend(words)

    # "Nice to have" section: everything to the end of the text.
    nice_to_have = re.search(r'(?i)nice to have:(.*?)$', job_description, re.DOTALL)
    if nice_to_have:
        nice_text = nice_to_have.group(1)
        words = re.findall(r'\b[A-Z][\w+#.-]+\b', nice_text)
        keywords['nice_to_have'].extend(words)

    # De-duplicate each bucket (set round-trip; order is not preserved).
    for category in keywords:
        keywords[category] = list(set(keywords[category]))

    return keywords

def extract_resume_keywords(resume_text: str) -> List[str]:
    """Extract technical keywords from a resume.

    Pulls every capitalized token (allowing tech spellings such as C# or
    Node.js), drops a small stop-list of common English words, and returns
    the unique remainder (ordering is not guaranteed).
    """
    stop_words = {'The', 'And', 'For', 'With', 'This', 'That', 'From', 'Have', 'Been', 'Were', 'Will'}
    candidates = re.findall(r'\b[A-Z][\w+#.-]+\b', resume_text)
    return list({token for token in candidates if token not in stop_words})

def calculate_keyword_density(resume_text: str, keywords: List[str]) -> float:
    """Calculate keyword density as a percentage.

    Density = (total case-insensitive substring occurrences of all
    keywords) / (whitespace-separated word count) * 100, rounded to two
    decimals. Empty text yields 0.0.
    """
    words = resume_text.split()
    if not words:
        return 0.0
    # Lower-case once instead of per keyword.
    lowered = resume_text.lower()
    hits = sum(lowered.count(keyword.lower()) for keyword in keywords)
    return round(hits / len(words) * 100, 2)

def match_keywords(resume_keywords: List[str], job_keywords: Dict[str, List[str]]) -> Dict[str, List[str]]:
    """Match resume keywords against job requirements (case-insensitive).

    Fix: missing keywords are now reported in their original casing and
    de-duplicated case-insensitively; previously they were returned
    lower-cased (because the job list was lowered before the comparison),
    which made the UI display e.g. "aws" instead of "AWS".

    Args:
        resume_keywords: Keywords found in the resume.
        job_keywords: Priority buckets of keywords from the job posting.

    Returns:
        Dict with:
        - 'matched': resume keywords also present in the job posting,
        - 'missing': job keywords absent from the resume (first-seen
          original casing, no duplicates).
    """
    resume_lower = {kw.lower() for kw in resume_keywords}

    # Map lowercase form -> first-seen original casing across all buckets.
    job_by_lower = {}
    for category in job_keywords.values():
        for kw in category:
            job_by_lower.setdefault(kw.lower(), kw)

    matched = [kw for kw in resume_keywords if kw.lower() in job_by_lower]
    missing = [original for lower, original in job_by_lower.items()
               if lower not in resume_lower]

    return {
        'matched': matched,
        'missing': missing
    }

def calculate_ats_score(resume_text: str, job_description: str, matched_keywords: Dict[str, List[str]]) -> Dict[str, Any]:
    """Calculate an ATS compatibility score.

    Weighted heuristic: keywords 40%, formatting 20%, content 20%,
    experience 20%. The 'after' value is a projection that assumes
    optimization adds a flat +25 points, capped at 100.

    Fixes: removed an unused local (`resume_keywords` was computed and
    never read) and hoisted the lowered matched-keyword list out of the
    two counting loops.

    Args:
        resume_text: Plain-text resume.
        job_description: Job posting text (re-parsed into keyword buckets).
        matched_keywords: Output of match_keywords(); only 'matched' is used.

    Returns:
        Dict with 'before', 'after' and a per-category 'breakdown'.
    """
    # Re-extract job keyword buckets so critical vs. high matches can be
    # weighted differently.
    job_keywords = extract_keywords_from_job(job_description)

    total_critical = len(job_keywords['critical'])
    total_high = len(job_keywords['high'])

    # Lower once; both counts below reuse it.
    matched_lower = [k.lower() for k in matched_keywords['matched']]
    matched_critical = sum(1 for kw in job_keywords['critical'] if kw.lower() in matched_lower)
    matched_high = sum(1 for kw in job_keywords['high'] if kw.lower() in matched_lower)

    # Keyword score (0-100): critical keywords weigh double; neutral 50
    # when the posting yielded no keywords at all.
    if total_critical + total_high > 0:
        keyword_score = ((matched_critical * 2 + matched_high) / (total_critical * 2 + total_high)) * 100
    else:
        keyword_score = 50

    # Format score (simple heuristics): assume good formatting, penalize
    # very short resumes.
    format_score = 95
    if len(resume_text.split('\n')) < 10:
        format_score -= 20

    # Content score: reward quantified achievements ("20%", "5+", "$1M", "3x").
    achievement_count = len(re.findall(r'\d+%|\d+\+|\$\d+|\d+x', resume_text))
    content_score = min(100, 60 + (achievement_count * 5))

    # Experience score: reward explicit "N years" mentions.
    years_mentioned = len(re.findall(r'\d+\+?\s*years?', resume_text, re.IGNORECASE))
    experience_score = min(100, 70 + (years_mentioned * 10))

    # Overall score (weighted average).
    overall_before = int(
        keyword_score * 0.4 +
        format_score * 0.2 +
        content_score * 0.2 +
        experience_score * 0.2
    )

    # Projected post-optimization score (flat +25, capped).
    overall_after = min(100, overall_before + 25)

    return {
        'before': overall_before,
        'after': overall_after,
        'breakdown': {
            'Keywords': int(keyword_score),
            'Formatting': format_score,
            'Content': content_score,
            'Experience': experience_score
        }
    }

def optimize_bullet_points(text: str) -> List[Dict[str, str]]:
    """Identify weak bullet points and suggest improvements.

    Scans *text* for bullet lines (•, ●, ▪ or "-" markers) and flags two
    kinds of weaknesses per bullet:
      * a weak action verb ("worked", "helped", ...)
      * missing quantifiable results (no digits at all)

    A single bullet can yield both findings.

    Args:
        text: Resume text to scan.

    Returns:
        List of dicts with keys 'type', 'original', 'issue', 'suggestion'.
    """
    weak_verbs = ['worked', 'did', 'was', 'responsible for', 'helped', 'assisted']
    strong_verbs = ['architected', 'engineered', 'led', 'developed', 'implemented', 'designed', 'optimized']

    improvements: List[Dict[str, str]] = []

    # Match common bullet markers. U+FE0F (the emoji variation selector,
    # which the original pasted in invisibly after "▪") is kept explicitly so
    # "▪️" bullets are still recognized; the hyphen sits last so the character
    # class cannot form an accidental range.
    bullets = re.findall(r'[•●▪\ufe0f-]\s*(.+)', text)

    for bullet in bullets:
        bullet_lower = bullet.lower()

        # BUGFIX: use whole-word matching. The original used plain substring
        # checks, flagging e.g. "washed" (contains "was") or "didactic"
        # (contains "did") as weak verbs.
        for weak in weak_verbs:
            if re.search(r'\b' + re.escape(weak) + r'\b', bullet_lower):
                improvements.append({
                    'type': 'weak_verb',
                    'original': bullet,
                    'issue': f'Weak action verb: "{weak}"',
                    'suggestion': f'Use stronger verbs like: {", ".join(strong_verbs[:3])}'
                })
                break  # Report at most one weak verb per bullet

        # No digit anywhere in the bullet => no quantified outcome.
        if not re.search(r'\d', bullet):
            improvements.append({
                'type': 'no_metrics',
                'original': bullet,
                'issue': 'Missing quantifiable results',
                'suggestion': 'Add numbers, percentages, or specific outcomes'
            })

    return improvements

def optimize_resume(resume_text: str, job_description: str, industry: str, experience_level: str) -> str:
    """Run the full resume analysis pipeline and return a JSON report.

    Pipeline: keyword extraction -> keyword matching -> ATS scoring ->
    keyword density -> bullet-point critique -> recommendation list.
    In Local/WebLLM mode the rewritten resume itself is produced by the
    in-browser LLM, so 'optimized_resume' simply echoes the input text.

    Args:
        resume_text: Raw resume text.
        job_description: Raw job posting text.
        industry: Target industry label (logging only).
        experience_level: Target seniority label (logging only).

    Returns:
        JSON string with keys 'optimized_resume', 'ats_score',
        'keyword_analysis' and 'recommendations'.
    """
    print(f"[OPTIMIZE] Starting optimization for {industry} - {experience_level}")

    # Pull keywords from both documents.
    job_keywords = extract_keywords_from_job(job_description)
    resume_keywords = extract_resume_keywords(resume_text)

    print(f"[OPTIMIZE] Found {len(resume_keywords)} keywords in resume")
    print(f"[OPTIMIZE] Found {sum(len(v) for v in job_keywords.values())} keywords in job description")

    # Cross-reference the two keyword sets, then score the current resume.
    kw_match = match_keywords(resume_keywords, job_keywords)
    score_report = calculate_ats_score(resume_text, job_description, kw_match)

    # Density is measured against every job keyword regardless of tier.
    every_job_keyword = [kw for tier in job_keywords.values() for kw in tier]
    kw_density = calculate_keyword_density(resume_text, every_job_keyword)

    bullet_issues = optimize_bullet_points(resume_text)

    # Assemble recommendations: critical keyword gaps first, then up to
    # three bullet-point warnings, and always a closing pro tip.
    recs = []

    if len(kw_match['missing']) > 0:
        recs.append({
            'type': 'critical',
            'title': 'Missing Required Keywords',
            'text': f"Add these keywords from job description: {', '.join(kw_match['missing'][:5])}"
        })

    for item in bullet_issues[:3]:
        recs.append({
            'type': 'warning',
            'title': item['issue'],
            'text': f"Original: \"{item['original']}\" - {item['suggestion']}"
        })

    recs.append({
        'type': 'info',
        'title': '💡 Pro Tip',
        'text': 'Start each bullet point with a strong action verb and include quantifiable results'
    })

    # IMPORTANT (Local/WebLLM mode): the in-browser LLM writes the actual
    # rewrite; this tool only supplies the analysis it works from.
    print("[OPTIMIZE] Returning analysis for WebLLM to synthesize optimized resume")

    return json.dumps({
        'optimized_resume': resume_text,
        'ats_score': score_report,
        'keyword_analysis': {
            'matched': kw_match['matched'],
            'missing': kw_match['missing'],
            'total': len(kw_match['matched']) + len(kw_match['missing']),
            'density': kw_density
        },
        'recommendations': recs
    })

# Startup banner printed when the Pyodide module finishes loading, so the
# in-page console confirms the tool layer is ready for user input.
print("[INIT] Resume Optimizer ready")
print("[INIT] Upload your resume and paste a job description to begin")

# ============================================================================
# Tool Schema Export (for WebLLM JavaScript bridge)
# ============================================================================

def get_tool_schemas() -> str:
    """
    Export all tool function schemas in OpenAI format.

    Called by the WebLLM JavaScript bridge (PyodideToolBridge) to discover
    the tools available in this module, obtain their schemas for LLM
    binding, and enable function calling in WebLLM.

    Returns:
        str: JSON string with OpenAI-format tool schemas.
    """
    # Register tool functions explicitly here if desired.
    # Example for a Data Analysis Agent:
    #   tool_functions = [load_csv_data, get_data_summary, create_chart, ...]
    tool_functions = []

    # Auto-discovery fallback: collect every public function defined in this
    # module, skipping imported names and the infrastructure entry points.
    if not tool_functions:
        import inspect
        import sys

        skip = {'get_tool_schemas', 'process_user_query', 'process_user_query_webllm'}
        this_module = sys.modules[__name__]
        tool_functions = [
            fn
            for name, fn in inspect.getmembers(this_module, inspect.isfunction)
            if not name.startswith('_')
            and fn.__module__ == __name__
            and name not in skip
        ]

    # Wrap each function's extracted schema in the OpenAI envelope; a failure
    # on one tool is logged and does not abort the others.
    schemas = []
    for func in tool_functions:
        try:
            schemas.append({
                "type": "function",
                "function": _extract_function_schema(func)
            })
        except Exception as e:
            print(f"Warning: Failed to extract schema for {func.__name__}: {e}")

    return json.dumps(schemas, indent=2)


# ============================================================================
# Unified Query Processing
# ============================================================================

async def process_user_query(query: str) -> str:
    """
    Unified query processor - works for ALL providers (OpenAI, Anthropic, Local).

    Routes to appropriate backend based on PROVIDER global variable:
    - 'local': Routes to JavaScript WebLLM agent (NO Python LangChain)
    - 'openai': Uses Python LangChain with ChatOpenAI
    - 'anthropic': Uses Python LangChain with ChatAnthropic

    Args:
        query: User's natural language query

    Returns:
        str: Response from the agent
    """
    provider = globals().get('PROVIDER', 'openai')

    if provider == 'local':
        # ====================================================================
        # WebLLM Local Mode: Use JavaScript LangChain.js bridge
        # ====================================================================
        # All inference happens in JavaScript with WebLLM + LangChain.js;
        # Python only executes the tools via PyodideToolBridge.
        # process_user_query_webllm is injected by the HTML template.
        try:
            result = await process_user_query_webllm(query)
            return result
        except Exception as e:
            print(f"[WebLLM Error] {str(e)}")
            import traceback
            traceback.print_exc()
            return f"❌ Error using WebLLM: {str(e)}"

    else:
        # ====================================================================
        # Cloud Providers (OpenAI/Anthropic): Use Python LangChain
        # ====================================================================
        # LangChain imports stay local to this branch so the bundle remains
        # smaller for local mode.
        try:
            # BUGFIX: `inspect` is used below for tool auto-discovery but was
            # never imported in this scope in the original (NameError at
            # runtime for any cloud provider with tools defined).
            import inspect
            import sys
            from langchain_core.tools import tool
            from langchain_core.messages import HumanMessage, ToolMessage

            # API key is placed in globals by the HTML template.
            api_key = globals().get('current_api_key', '')
            if not api_key:
                return "⚠️ Please enter an API key to use AI-powered features."

            # Provider-specific LLM construction.
            if provider == 'openai':
                from langchain_openai import ChatOpenAI
                llm = ChatOpenAI(
                    model="gpt-3.5-turbo",
                    api_key=api_key,
                    temperature=0.7
                )
            elif provider == 'anthropic':
                from langchain_anthropic import ChatAnthropic
                # BUGFIX: the original passed the OpenAI model name
                # "gpt-3.5-turbo" here, which the Anthropic API rejects.
                llm = ChatAnthropic(
                    model="claude-3-haiku-20240307",
                    api_key=api_key,
                    temperature=0.7
                )
            else:
                return f"❌ Unknown provider: {provider}"

            # Auto-discover tool functions defined in this module (same
            # filter as get_tool_schemas): public, locally defined, not an
            # infrastructure entry point.
            tool_functions = []
            current_module = sys.modules[__name__]
            for name, obj in inspect.getmembers(current_module):
                if inspect.isfunction(obj) and not name.startswith('_'):
                    if obj.__module__ == __name__ and name not in ['get_tool_schemas', 'process_user_query', 'process_user_query_webllm', '_python_type_to_json_type', '_extract_function_schema']:
                        tool_functions.append(obj)

            if not tool_functions:
                # No tools defined - simple conversation mode
                response = llm.invoke([HumanMessage(content=query)])
                return response.content

            # Wrap tools with @tool at runtime: tool() is called as a plain
            # FUNCTION here, not used as a decorator.
            tools = [tool(func) for func in tool_functions]
            llm_with_tools = llm.bind_tools(tools)

            # Name -> function map, built ONCE (the original rebuilt it for
            # every individual tool call inside the loop).
            tool_map = {func.__name__: func for func in tool_functions}

            # Tool-calling loop with accumulated message history.
            messages = [HumanMessage(content=query)]

            max_iterations = 3  # Prevent infinite tool-calling loops
            for iteration in range(max_iterations):
                response = llm_with_tools.invoke(messages)
                messages.append(response)

                # No tool calls => the LLM produced a final answer.
                if not response.tool_calls:
                    break

                # Execute each requested tool and feed results back.
                for tool_call in response.tool_calls:
                    tool_name = tool_call['name']
                    tool_args = tool_call['args']

                    if tool_name in tool_map:
                        try:
                            result = tool_map[tool_name](**tool_args)
                            messages.append(ToolMessage(
                                content=str(result),
                                tool_call_id=tool_call['id']
                            ))
                        except Exception as e:
                            # Tool execution failed; report it to the LLM.
                            messages.append(ToolMessage(
                                content=f"❌ Error executing {tool_name}: {str(e)}",
                                tool_call_id=tool_call['id']
                            ))
                    else:
                        # LLM hallucinated an unknown tool name.
                        messages.append(ToolMessage(
                            content=f"❌ Unknown tool: {tool_name}",
                            tool_call_id=tool_call['id']
                        ))

            # Return the final message's content. NOTE: if the iteration cap
            # is hit mid-tool-exchange this may be a ToolMessage (behavior
            # preserved from the original).
            final_message = messages[-1]
            if hasattr(final_message, 'content'):
                return final_message.content
            else:
                return str(final_message)

        except Exception as e:
            import traceback
            traceback.print_exc()
            return f"❌ Error processing query: {str(e)}"


# ============================================================================
# Initialization Message
# ============================================================================

# Final init banner: reports which LLM backend this page instance is wired to.
# PROVIDER is injected into globals by the HTML template; defaults to 'openai'.
print("✅ Unified agent initialized (provider: {})".format(globals().get('PROVIDER', 'openai')))