init
This commit is contained in:
118
.opencode/scripts/README.md
Normal file
118
.opencode/scripts/README.md
Normal file
@@ -0,0 +1,118 @@
|
||||
# Claude Code Scripts
|
||||
|
||||
Centralized utility scripts for Claude Code skills.
|
||||
|
||||
## Installation
|
||||
|
||||
Install required dependencies:
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## resolve_env.py
|
||||
|
||||
Centralized environment variable resolver that follows Claude Code's hierarchy.
|
||||
|
||||
### Priority Order (Highest to Lowest)
|
||||
|
||||
1. **process.env** - Runtime environment variables (HIGHEST)
|
||||
2. **PROJECT/.opencode/skills/\<skill\>/.env** - Project skill-specific
|
||||
3. **PROJECT/.opencode/skills/.env** - Project shared across skills
|
||||
4. **PROJECT/.opencode/.env** - Project global defaults
|
||||
5. **~/.opencode/skills/\<skill\>/.env** - User skill-specific
|
||||
6. **~/.opencode/skills/.env** - User shared across skills
|
||||
7. **~/.opencode/.env** - User global defaults (LOWEST)
|
||||
|
||||
### CLI Usage
|
||||
|
||||
```bash
|
||||
# Resolve a variable for a specific skill
|
||||
python ~/.opencode/scripts/resolve_env.py GEMINI_API_KEY --skill ai-multimodal
|
||||
|
||||
# With verbose output
|
||||
python ~/.opencode/scripts/resolve_env.py GEMINI_API_KEY --skill ai-multimodal --verbose
|
||||
|
||||
# Find all locations where variable is defined
|
||||
python ~/.opencode/scripts/resolve_env.py GEMINI_API_KEY --find-all
|
||||
|
||||
# Show hierarchy for a skill
|
||||
python ~/.opencode/scripts/resolve_env.py --show-hierarchy --skill ai-multimodal
|
||||
|
||||
# Export format for shell sourcing
|
||||
eval $(python ~/.opencode/scripts/resolve_env.py GEMINI_API_KEY --export)
|
||||
```
|
||||
|
||||
### Python API Usage
|
||||
|
||||
```python
|
||||
# Add to sys.path if needed
|
||||
import sys
|
||||
from pathlib import Path
|
||||
sys.path.insert(0, str(Path.home() / '.opencode' / 'scripts'))
|
||||
|
||||
from resolve_env import resolve_env, find_all, show_hierarchy
|
||||
|
||||
# Simple resolution
|
||||
api_key = resolve_env('GEMINI_API_KEY', skill='ai-multimodal')
|
||||
|
||||
# With default value
|
||||
api_key = resolve_env('GEMINI_API_KEY', skill='ai-multimodal', default='fallback-key')
|
||||
|
||||
# With verbose output
|
||||
api_key = resolve_env('GEMINI_API_KEY', skill='ai-multimodal', verbose=True)
|
||||
|
||||
# Find all locations
|
||||
locations = find_all('GEMINI_API_KEY', skill='ai-multimodal')
|
||||
for description, value, path in locations:
|
||||
print(f"{description}: {value}")
|
||||
|
||||
# Show hierarchy
|
||||
show_hierarchy(skill='ai-multimodal')
|
||||
```
|
||||
|
||||
### Integration Pattern
|
||||
|
||||
Skills should use this script instead of implementing their own resolution logic:
|
||||
|
||||
```python
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Import centralized resolver
|
||||
sys.path.insert(0, str(Path.home() / '.opencode' / 'scripts'))
|
||||
from resolve_env import resolve_env
|
||||
|
||||
# Resolve API key
|
||||
api_key = resolve_env('GEMINI_API_KEY', skill='ai-multimodal')
|
||||
|
||||
if not api_key:
|
||||
print("Error: GEMINI_API_KEY not found")
|
||||
print("Run: python ~/.opencode/scripts/resolve_env.py --show-hierarchy --skill ai-multimodal")
|
||||
sys.exit(1)
|
||||
|
||||
# Use api_key...
|
||||
```
|
||||
|
||||
### Benefits
|
||||
|
||||
- **Consistent**: All skills use the same resolution logic
|
||||
- **Maintainable**: Single source of truth for hierarchy
|
||||
- **Debuggable**: Built-in verbose mode and find-all functionality
|
||||
- **Flexible**: Supports both project-local and user-global configs
|
||||
- **Clear**: Shows exactly where each value comes from
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
# Test without any config files
|
||||
python ~/.opencode/scripts/resolve_env.py TEST_VAR --verbose
|
||||
|
||||
# Test with environment variable
|
||||
export TEST_VAR=from-runtime
|
||||
python ~/.opencode/scripts/resolve_env.py TEST_VAR --verbose
|
||||
|
||||
# Test with skill context
|
||||
python ~/.opencode/scripts/resolve_env.py GEMINI_API_KEY --skill ai-multimodal --find-all
|
||||
```
|
||||
1
.opencode/scripts/requirements.txt
Normal file
1
.opencode/scripts/requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
pyyaml>=6.0
|
||||
341
.opencode/scripts/resolve_env.py
Executable file
341
.opencode/scripts/resolve_env.py
Executable file
@@ -0,0 +1,341 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Centralized environment variable resolver for Claude Code skills.
|
||||
|
||||
Resolves environment variables following the Claude Code hierarchy:
|
||||
1. process.env - Runtime environment (HIGHEST)
|
||||
2. .opencode/skills/<skill>/.env - Project skill-specific
|
||||
3. .opencode/skills/.env - Project shared
|
||||
4. .opencode/.env - Project global
|
||||
5. ~/.opencode/skills/<skill>/.env - User skill-specific
|
||||
6. ~/.opencode/skills/.env - User shared
|
||||
7. ~/.opencode/.env - User global (LOWEST)
|
||||
|
||||
Usage:
|
||||
from resolve_env import resolve_env
|
||||
|
||||
api_key = resolve_env('GEMINI_API_KEY', skill='ai-multimodal')
|
||||
api_key = resolve_env('GEMINI_API_KEY') # Without skill context
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, List, Tuple
|
||||
|
||||
def _parse_env_file_fallback(path) -> Dict[str, str]:
    """
    Pure-Python fallback .env parser when python-dotenv is not installed.

    Handles basic .env format:
    - KEY=value
    - KEY="quoted value"
    - KEY='single quoted'
    - # comments (full line)
    - Empty lines ignored

    Args:
        path: Path to .env file (str or Path)

    Returns:
        Dictionary of environment variables; empty on read/decode failure
        (best-effort by design, mirroring dotenv_values' forgiving behavior)
    """
    env_vars: Dict[str, str] = {}
    try:
        with open(path, 'r') as f:
            for line in f:
                line = line.strip()
                # Skip empty lines and full-line comments
                if not line or line.startswith('#'):
                    continue
                # Parse KEY=value; split on the first '=' only so values may contain '='
                if '=' in line:
                    key, value = line.split('=', 1)
                    key = key.strip()
                    value = value.strip()
                    # Remove one matching pair of surrounding quotes
                    if (value.startswith('"') and value.endswith('"')) or \
                       (value.startswith("'") and value.endswith("'")):
                        value = value[1:-1]
                    env_vars[key] = value
    except (OSError, UnicodeDecodeError):
        # Narrowed from bare `except Exception`: only swallow the expected
        # I/O and decoding failures; programming errors now surface instead
        # of being silently hidden.
        pass
    return env_vars
|
||||
|
||||
|
||||
try:
|
||||
from dotenv import dotenv_values
|
||||
except ImportError:
|
||||
# Use fallback parser when python-dotenv not installed
|
||||
dotenv_values = _parse_env_file_fallback
|
||||
|
||||
|
||||
def find_project_root() -> Optional[Path]:
    """Walk upward from the current working directory and return the first
    directory containing a `.git` or `.claude` entry, or None if no project
    marker is found anywhere on the path to the filesystem root."""
    cwd = Path.cwd()
    for candidate in (cwd, *cwd.parents):
        has_marker = (candidate / '.git').exists() or (candidate / '.claude').exists()
        if has_marker:
            return candidate
    return None
|
||||
|
||||
|
||||
def get_env_file_paths(skill: Optional[str] = None) -> List[Tuple[str, Path]]:
    """
    Get all potential .env file paths in priority order.

    Args:
        skill: Optional skill name for skill-specific configs

    Returns:
        List of (description, path) tuples in priority order (highest to lowest)

    NOTE(review): the module docstring documents an `.opencode/...` hierarchy,
    but the paths constructed here live under `.claude/...` -- confirm which
    directory layout is the intended one before relying on this list.
    """
    candidates: List[Tuple[str, Path]] = []

    def _append_level(label: str, base: Path) -> None:
        # One hierarchy level: skill-specific (optional), shared, then global.
        if skill:
            candidates.append((
                f"{label} skill-specific ({skill})",
                base / '.claude' / 'skills' / skill / '.env'
            ))
        candidates.append((
            f"{label} skills shared",
            base / '.claude' / 'skills' / '.env'
        ))
        candidates.append((
            f"{label} global",
            base / '.claude' / '.env'
        ))

    # Priority 2-4: project-level configs (only when a project root was found)
    project_root = find_project_root()
    if project_root:
        _append_level("Project", project_root)

    # Priority 5-7: user-level configs
    _append_level("User", Path.home())

    return candidates
|
||||
|
||||
|
||||
def resolve_env(
    var_name: str,
    skill: Optional[str] = None,
    default: Optional[str] = None,
    verbose: bool = False
) -> Optional[str]:
    """
    Resolve environment variable following Claude Code hierarchy.

    Args:
        var_name: Name of the environment variable to resolve
        skill: Optional skill name for skill-specific resolution
        default: Default value if variable not found anywhere
        verbose: If True, print resolution details

    Returns:
        Resolved value or default if not found

    NOTE(review): an empty-string value is treated as "not set" (the
    truthiness checks below), and the not-found diagnostics print to stderr
    even when verbose is False -- both look deliberate; confirm before changing.
    """
    # Priority 1: Check process environment (HIGHEST)
    value = os.getenv(var_name)
    if value:
        if verbose:
            print(f"✓ {var_name} found in: Runtime environment (process.env)")
        return value

    if verbose:
        print(f"✗ {var_name} not in: Runtime environment")

    # Note: dotenv_values is always available (uses fallback if python-dotenv not installed)

    # Priority 2-7: Check .env files in order
    env_paths = get_env_file_paths(skill)

    for description, path in env_paths:
        if path.exists():
            try:
                env_vars = dotenv_values(path)
                value = env_vars.get(var_name)

                if value:
                    if verbose:
                        print(f"✓ {var_name} found in: {description}")
                        print(f"  Path: {path}")
                    # First hit wins: env_paths is already ordered highest-to-lowest.
                    return value
                else:
                    if verbose:
                        print(f"✗ {var_name} not in: {description} (file exists)")
            except Exception as e:
                # Best-effort: an unreadable or corrupt .env file is skipped, not fatal.
                if verbose:
                    print(f"⚠ Error reading {description}: {e}")
        else:
            if verbose:
                print(f"✗ {var_name} not in: {description} (file not found)")

    # Not found anywhere — always show checked locations to help users debug
    checked_files = [str(p) for _, p in env_paths if p.exists()]
    missing_files = [str(p) for _, p in env_paths if not p.exists()]
    print(f"[!] {var_name} not found in any location", file=sys.stderr)
    if checked_files:
        print(f"  Checked (file exists, key absent):", file=sys.stderr)
        for f in checked_files:
            print(f"    - {f}", file=sys.stderr)
    if missing_files and verbose:
        print(f"  Not found (file missing):", file=sys.stderr)
        for f in missing_files:
            print(f"    - {f}", file=sys.stderr)
    print(f"  Tip: Add {var_name}=<value> to one of the .env files above", file=sys.stderr)

    # A falsy default (e.g. "") is still returned below, but skips this message.
    if default:
        if verbose:
            print(f"  Using default: {default}", file=sys.stderr)

    return default
|
||||
|
||||
|
||||
def find_all(var_name: str, skill: Optional[str] = None) -> List[Tuple[str, str, Optional[Path]]]:
    """
    Find all locations where a variable is defined.

    Args:
        var_name: Name of the environment variable
        skill: Optional skill name

    Returns:
        List of (description, value, path) tuples for all found locations.
        The path is None for the runtime-environment entry (there is no file),
        hence Optional[Path] -- the previous annotation claimed a bare Path,
        which was wrong for the first entry.
    """
    results: List[Tuple[str, str, Optional[Path]]] = []

    # Check process environment (highest priority, no backing file)
    value = os.getenv(var_name)
    if value:
        results.append(("Runtime environment", value, None))

    # Check all .env files (dotenv_values always available via fallback)
    env_paths = get_env_file_paths(skill)

    for description, path in env_paths:
        if path.exists():
            try:
                env_vars = dotenv_values(path)
                value = env_vars.get(var_name)

                if value:
                    results.append((description, value, path))
            except Exception:
                # Best-effort scan: skip unreadable/corrupt files silently,
                # matching resolve_env's behavior.
                pass

    return results
|
||||
|
||||
|
||||
def show_hierarchy(skill: Optional[str] = None):
    """Print the environment variable resolution hierarchy, marking which
    .env files currently exist on disk (✓) and which are absent (✗)."""
    print("Environment Variable Resolution Hierarchy")
    print("=" * 60)
    print("\nPriority order (highest to lowest):")
    print("1. process.env - Runtime environment")

    # File-based levels start at priority 2 (process.env is priority 1).
    for priority, (description, path) in enumerate(get_env_file_paths(skill), start=2):
        marker = "✓" if path.exists() else "✗"
        print(f"{priority}. {description:30} {marker} {path}")

    print("\n" + "=" * 60)
|
||||
|
||||
|
||||
def main():
    """CLI interface for environment variable resolution.

    Subcommand-style flags: --show-hierarchy (no var_name needed),
    --find-all (list every definition site), or default resolution with
    optional --export for shell sourcing. Exits 0 on success, 1 when the
    variable cannot be resolved.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description='Resolve environment variables following Claude Code hierarchy',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Resolve GEMINI_API_KEY for ai-multimodal skill
  %(prog)s GEMINI_API_KEY --skill ai-multimodal

  # Resolve with verbose output
  %(prog)s GEMINI_API_KEY --skill ai-multimodal --verbose

  # Find all locations where variable is defined
  %(prog)s GEMINI_API_KEY --find-all

  # Show hierarchy
  %(prog)s --show-hierarchy --skill ai-multimodal
"""
    )

    # var_name is optional at parse time so --show-hierarchy can run without it.
    parser.add_argument('var_name', nargs='?', help='Environment variable name to resolve')
    parser.add_argument('--skill', help='Skill name for skill-specific resolution')
    parser.add_argument('--default', help='Default value if not found')
    parser.add_argument('--verbose', '-v', action='store_true', help='Show resolution details')
    parser.add_argument('--find-all', action='store_true', help='Find all locations where variable is defined')
    parser.add_argument('--show-hierarchy', action='store_true', help='Show resolution hierarchy')
    parser.add_argument('--export', action='store_true', help='Output in export format for shell sourcing')

    args = parser.parse_args()

    if args.show_hierarchy:
        show_hierarchy(args.skill)
        sys.exit(0)

    # Every other mode needs a variable name; enforce it manually.
    if not args.var_name:
        parser.error("var_name is required unless --show-hierarchy is used")

    if args.find_all:
        results = find_all(args.var_name, args.skill)

        if results:
            print(f"Variable '{args.var_name}' found in {len(results)} location(s):")
            print("=" * 60)

            for i, (description, value, path) in enumerate(results, start=1):
                # NOTE(review): this numbering only accounts for process.env when
                # it is actually present in results; when it is absent, the first
                # .env hit is still labelled priority 1 even though it is really
                # priority 2+. Display-only, but confirm the intent.
                priority = i if i == 1 else i + 1  # Account for process.env being priority 1
                print(f"\n{priority}. {description}")
                if path:
                    print(f"   Path: {path}")
                # Truncate long values for terminal readability.
                print(f"   Value: {value[:50]}{'...' if len(value) > 50 else ''}")

            print("\n" + "=" * 60)
            print(f"✓ Resolved value (highest priority): {results[0][1][:50]}{'...' if len(results[0][1]) > 50 else ''}")
        else:
            print(f"❌ Variable '{args.var_name}' not found in any location")
            sys.exit(1)
    else:
        value = resolve_env(args.var_name, args.skill, args.default, args.verbose)

        if value:
            if args.export:
                # Shell-sourcing form, e.g.: eval $(resolve_env.py KEY --export)
                print(f"export {args.var_name}='{value}'")
            else:
                print(value)
            sys.exit(0)
        else:
            # resolve_env already printed not-found diagnostics to stderr;
            # add a terse error only in the quiet (non-verbose) case.
            if not args.verbose:
                print(f"Error: {args.var_name} not found", file=sys.stderr)
            sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
25
.opencode/scripts/scan_commands.py
Normal file
25
.opencode/scripts/scan_commands.py
Normal file
@@ -0,0 +1,25 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Legacy command scanner (deprecated).
|
||||
|
||||
Commands were migrated to skills. This script now writes an empty commands dataset
|
||||
for backward compatibility with older tooling.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def main() -> None:
    """Write an empty, commented commands catalog for legacy tooling.

    Commands were migrated to skills; this keeps older consumers of
    commands_data.yaml working by emitting an empty YAML list.
    """
    target = Path(".opencode/scripts/commands_data.yaml")
    placeholder = (
        "# Commands have been migrated to skills.\n"
        "# See .opencode/scripts/skills_data.yaml for the current catalog.\n"
        "[]\n"
    )
    target.write_text(placeholder, encoding="utf-8")
    print("Commands are deprecated; wrote empty commands catalog for compatibility.")
    print(f"✓ Saved metadata to {target}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
263
.opencode/scripts/scan_skills.py
Executable file
263
.opencode/scripts/scan_skills.py
Executable file
@@ -0,0 +1,263 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Scan .opencode/skills directory and extract skill metadata.
|
||||
"""
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
try:
|
||||
import yaml
|
||||
except ModuleNotFoundError:
|
||||
raise SystemExit(
|
||||
"PyYAML is required. Install with: python3 -m pip install -r .opencode/scripts/requirements.txt"
|
||||
)
|
||||
|
||||
# Exact mappings for high-signal CK skills to avoid falling into "other".
# Consulted first by categorize_skill(); keys are lowercase skill names,
# values are category slugs matching category_names in main().
EXACT_CATEGORY_MAP = {
    # Utilities & Helpers
    "ask": "utilities",
    "bootstrap": "utilities",
    "brainstorm": "utilities",
    "ck-autoresearch": "utilities",
    "ck-debug": "utilities",
    "ck-loop": "utilities",
    "ck-predict": "utilities",
    "ck-scenario": "utilities",
    "code-review": "utilities",
    "coding-level": "utilities",
    "context-engineering": "utilities",
    "cook": "utilities",
    "copywriting": "utilities",
    "debug": "utilities",
    "docs": "utilities",
    "fix": "utilities",
    "journal": "utilities",
    "markdown-novel-viewer": "utilities",
    "mermaidjs-v11": "utilities",
    "plan": "utilities",
    "ck-plan": "utilities",
    "preview": "utilities",
    "problem-solving": "utilities",
    "project-management": "utilities",
    "project-organization": "utilities",
    "research": "utilities",
    "retro": "utilities",
    "sequential-thinking": "utilities",
    "test": "utilities",
    "watzup": "utilities",
    # Development Tools
    "find-skills": "dev-tools",
    "git": "dev-tools",
    "gkg": "dev-tools",
    "kanban": "dev-tools",
    "llms": "dev-tools",
    "mintlify": "dev-tools",
    "plans-kanban": "dev-tools",
    "scout": "dev-tools",
    "ship": "dev-tools",
    "team": "dev-tools",
    "use-mcp": "dev-tools",
    "worktree": "dev-tools",
    # Frontend & Design
    "react-best-practices": "frontend",
    "remotion": "frontend",
    "shader": "frontend",
    "stitch": "frontend",
    "web-design-guidelines": "frontend",
    # Frameworks & Platforms
    "tanstack": "frameworks",
    # Infrastructure & DevOps
    "deploy": "infrastructure",
    # Multimedia & Processing
    "agent-browser": "multimedia",
    "web-testing": "multimedia",
    # Security (mapped to utilities)
    "ck-security": "utilities",
    "security-scan": "utilities",
}
|
||||
|
||||
def extract_frontmatter(content: str) -> Dict:
    """
    Extract YAML frontmatter from markdown content.

    Args:
        content: Full markdown text, possibly starting with a `---` block.

    Returns:
        Parsed frontmatter as a dict. Returns {} when frontmatter is absent,
        malformed YAML, or parses to a non-mapping (e.g. a bare string) --
        callers rely on dict methods like .get(), so a non-dict result would
        otherwise crash them.
    """
    match = re.match(r'^---\s*\n(.*?)\n---\s*\n', content, re.DOTALL)
    if not match:
        return {}
    try:
        data = yaml.safe_load(match.group(1))
    except yaml.YAMLError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return {}
    # safe_load may yield None / str / list for degenerate frontmatter.
    return data if isinstance(data, dict) else {}
|
||||
|
||||
def extract_first_paragraph(content: str) -> str:
    """Return the first body paragraph (capped at 200 chars) that follows
    any YAML frontmatter, joining its wrapped lines with single spaces.
    Headings and blank lines are skipped before the paragraph and end it
    once collection has started; collection also stops early at a
    sentence-ending line once more than 50 characters are gathered."""
    # Strip a leading frontmatter block, if present
    body = re.sub(r'^---\s*\n.*?\n---\s*\n', '', content, flags=re.DOTALL)

    collected = []
    for raw_line in body.split('\n'):
        stripped = raw_line.strip()

        # Headings and blank lines either delimit the paragraph (if one has
        # started) or are skipped (if we haven't started collecting yet).
        if not stripped or stripped.startswith('#'):
            if collected:
                break
            continue

        collected.append(stripped)

        # Early stop: a sentence boundary after enough text has accumulated.
        if stripped.endswith('.') and len(' '.join(collected)) > 50:
            break

    return ' '.join(collected)[:200]
|
||||
|
||||
def scan_skills(base_path: Path) -> List[Dict]:
    """Scan all skill files and extract metadata.

    Walks base_path recursively for SKILL.md files, parses each one's YAML
    frontmatter (falling back to the first body paragraph for a description),
    categorizes the skill, and collects per-skill metadata.

    Args:
        base_path: Root directory containing skill subdirectories.

    Returns:
        List of dicts with keys: name, path, description, category,
        has_scripts, has_references, and optionally argument_hint.
    """
    skills = []

    for skill_file in sorted(base_path.rglob('SKILL.md')):
        # Get skill directory name
        skill_dir = skill_file.parent
        skill_name = skill_dir.name

        # Skip template
        if skill_name == 'template-skill':
            continue

        # Handle nested skills (like document-skills/*): prefix with the
        # parent directory so nested names stay unique.
        if skill_dir.parent.name != 'skills':
            parent_name = skill_dir.parent.name
            skill_name = f"{parent_name}/{skill_name}"

        try:
            content = skill_file.read_text()
            frontmatter = extract_frontmatter(content)

            description = frontmatter.get('description', '')
            if not description:
                description = extract_first_paragraph(content)

            # Categorize based on content/name
            category = categorize_skill(skill_name, description, content)

            skill_entry = {
                'name': skill_name,
                # NOTE(review): hard-codes '.opencode/skills' as the
                # relative_to anchor; raises ValueError if base_path is a
                # different root than the caller's default -- confirm.
                'path': str(skill_file.relative_to(Path('.opencode/skills'))),
                'description': description,
                'category': category,
                'has_scripts': (skill_dir / 'scripts').exists(),
                'has_references': (skill_dir / 'references').exists()
            }

            # Include argument-hint if present in frontmatter
            argument_hint = frontmatter.get('argument-hint', '')
            if argument_hint:
                skill_entry['argument_hint'] = str(argument_hint)

            skills.append(skill_entry)
        except Exception as e:
            # Keep scanning on a bad skill file; report and continue.
            print(f"Error processing {skill_file}: {e}")

    return skills
|
||||
|
||||
def categorize_skill(name: str, description: str, content: str) -> str:
    """Categorize a skill by its name.

    The exact-name table is consulted first; otherwise keyword heuristics
    run in priority order and the first match wins. (description and content
    are currently unused but kept for interface compatibility.)
    """
    lower_name = name.lower()
    if lower_name in EXACT_CATEGORY_MAP:
        return EXACT_CATEGORY_MAP[lower_name]

    # (category, substrings) rules, checked top to bottom -- order matters.
    keyword_rules = (
        ('ai-ml', ('ai-', 'gemini', 'multimodal', 'adk')),
        ('frontend', ('frontend', 'ui', 'design', 'aesthetic', 'threejs')),
        ('backend', ('backend', 'auth', 'payment')),
        ('infrastructure', ('devops', 'docker', 'cloudflare', 'gcloud')),
        ('database', ('database', 'mongodb', 'postgresql', 'sql')),
        ('dev-tools', ('mcp', 'skill-creator', 'repomix', 'docs-seeker')),
        ('multimedia', ('media', 'chrome-devtools', 'document-skills')),
        ('frameworks', ('web-frameworks', 'mobile', 'shopify')),
        ('utilities', ('debug', 'problem', 'code-review', 'planning', 'research', 'sequential')),
    )
    for category, keywords in keyword_rules:
        if any(keyword in lower_name for keyword in keywords):
            return category

    return 'other'
|
||||
|
||||
def group_by_category(skills: List[Dict]) -> Dict[str, List[Dict]]:
    """
    Group skills by their 'category' key.

    Args:
        skills: Skill metadata dicts, each containing a 'category' entry.

    Returns:
        Plain dict mapping category name -> list of skill dicts, preserving
        the input order within each category.
    """
    categories: Dict[str, List[Dict]] = {}
    for skill in skills:
        # setdefault replaces the manual "if key not in dict: dict[key] = []"
        # dance with a single idiomatic call.
        categories.setdefault(skill['category'], []).append(skill)
    return categories
|
||||
|
||||
def main():
    """Main execution: scan skills, print a categorized report, and dump
    the collected metadata to .opencode/scripts/skills_data.yaml."""
    base_path = Path('.opencode/skills')

    # Bail out quietly (exit code 0) when there is nothing to scan.
    if not base_path.exists():
        print(f"Error: {base_path} not found")
        return

    print("Scanning skills...")
    skills = scan_skills(base_path)

    print(f"\nFound {len(skills)} skills\n")

    # Group by category
    categories = group_by_category(skills)

    # Human-friendly headings for the category report; keys must match the
    # slugs produced by categorize_skill()/EXACT_CATEGORY_MAP.
    category_names = {
        'ai-ml': 'AI & Machine Learning',
        'frontend': 'Frontend & Design',
        'backend': 'Backend Development',
        'infrastructure': 'Infrastructure & DevOps',
        'database': 'Database & Storage',
        'dev-tools': 'Development Tools',
        'multimedia': 'Multimedia & Processing',
        'frameworks': 'Frameworks & Platforms',
        'utilities': 'Utilities & Helpers',
        'other': 'Other'
    }

    for category, skills_list in sorted(categories.items()):
        print(f"\n{category_names.get(category, category.upper())}:")
        for skill in skills_list:
            # Emoji markers: 📦 = bundled scripts, 📚 = reference docs.
            scripts = '📦' if skill['has_scripts'] else ' '
            refs = '📚' if skill['has_references'] else ' '
            print(f"  {scripts}{refs} {skill['name']:30} {skill['description'][:80]}")

    # Output YAML to scripts directory
    output_path = Path('.opencode/scripts/skills_data.yaml')
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(yaml.dump(skills, allow_unicode=True, default_flow_style=False))
    print(f"\n✓ Saved metadata to {output_path}")
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
49
.opencode/scripts/set-active-plan.cjs
Executable file
49
.opencode/scripts/set-active-plan.cjs
Executable file
@@ -0,0 +1,49 @@
|
||||
#!/usr/bin/env node
/**
 * Update session state with new active plan
 *
 * Usage: node .claude/scripts/set-active-plan.cjs <plan-path>
 *
 * This script updates the session temp file with the new active plan path,
 * allowing subagents to receive the latest plan context via SubagentStart hook.
 *
 * The session temp file (/tmp/ck-session-{id}.json) is the source of truth
 * for plan context within a session. Env vars ($CK_ACTIVE_PLAN) are just
 * the initial snapshot from session start.
 *
 * NOTE(review): the usage strings below reference .claude/scripts/ while this
 * file appears to live under .opencode/scripts/ -- confirm the intended path.
 */

const path = require('path');
// Project-local helper; provides atomic read-modify-write of the session file.
const { updateSessionState } = require('../hooks/lib/ck-config-utils.cjs');

const sessionId = process.env.CK_SESSION_ID;
const newPlan = process.argv[2];

// A plan path argument is mandatory; print usage and fail otherwise.
if (!newPlan) {
  console.error('Error: Plan path required');
  console.log('Usage: node .claude/scripts/set-active-plan.cjs <plan-path>');
  console.log('Example: node .claude/scripts/set-active-plan.cjs plans/251207-1030-feature-name');
  process.exit(1);
}

// Issue #335: Resolve to absolute path to support brownfield/subdirectory workflows
// When agent navigates away from session origin, relative paths become invalid
const absolutePlan = path.resolve(newPlan);

// Without a session id there is nowhere to persist; warn and exit successfully
// (dry-run behavior) rather than failing the caller.
if (!sessionId) {
  console.warn('Warning: CK_SESSION_ID not set - session state will not persist');
  console.log(`Would set active plan to: ${absolutePlan}`);
  process.exit(0);
}

// Merge the new plan into the existing session state, stamping the update time.
const success = updateSessionState(sessionId, (current) => ({
  ...current,
  activePlan: absolutePlan,
  timestamp: Date.now()
}));

if (success) {
  console.log(`Active plan set to: ${absolutePlan}`);
} else {
  console.error('Failed to update session state');
  process.exit(1);
}
|
||||
714
.opencode/scripts/skills_data.yaml
Normal file
714
.opencode/scripts/skills_data.yaml
Normal file
@@ -0,0 +1,714 @@
|
||||
- argument_hint: '[url or task]'
|
||||
category: multimedia
|
||||
description: AI-optimized browser automation CLI with context-efficient snapshots.
|
||||
Use for long autonomous sessions, self-verifying workflows, video recording, and
|
||||
cloud browser testing (Browserbase).
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: agent-browser
|
||||
path: agent-browser/SKILL.md
|
||||
- argument_hint: '[concept] [--mode search|creative|wild|all] [--skip]'
|
||||
category: ai-ml
|
||||
description: 'Generate images via Nano Banana with 129 curated prompts. Mandatory
|
||||
validation interview refines style/mood/colors (use --skip to bypass). 3 modes:
|
||||
search, creative, wild. Styles: Ukiyo-e, Bento grid, cyberpunk, cinematic, vintage
|
||||
patent.'
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: ai-artist
|
||||
path: ai-artist/SKILL.md
|
||||
- argument_hint: '[file-path] [prompt]'
|
||||
category: ai-ml
|
||||
description: Analyze images/audio/video with Gemini API (better vision than Claude).
|
||||
Generate images (Imagen 4, Nano Banana 2, MiniMax), videos (Veo 3, Hailuo), speech
|
||||
(MiniMax TTS), music (MiniMax). Use for vision analysis, transcription, OCR, design
|
||||
extraction, multimodal AI.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: ai-multimodal
|
||||
path: ai-multimodal/SKILL.md
|
||||
- argument_hint: '[technical-question]'
|
||||
category: utilities
|
||||
description: Answer technical and architectural questions with expert consultation.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: ask
|
||||
path: ask/SKILL.md
|
||||
- argument_hint: '[framework] [task]'
|
||||
category: backend
|
||||
description: Build backends with Node.js, Python, Go (NestJS, FastAPI, Django).
|
||||
Use for REST/GraphQL/gRPC APIs, auth (OAuth, JWT), databases, microservices, security
|
||||
(OWASP), Docker/K8s.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: backend-development
|
||||
path: backend-development/SKILL.md
|
||||
- argument_hint: '[auth-method or feature]'
|
||||
category: backend
|
||||
description: Add authentication with Better Auth (TypeScript). Use for email/password,
|
||||
OAuth providers (Google, GitHub), 2FA/MFA, passkeys/WebAuthn, sessions, RBAC,
|
||||
rate limiting.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: better-auth
|
||||
path: better-auth/SKILL.md
|
||||
- argument_hint: '[requirements] [--full|--auto|--fast|--parallel]'
|
||||
category: utilities
|
||||
description: 'Bootstrap new projects with research, tech stack, design, planning,
|
||||
and implementation. Modes: full (interactive), auto (default), fast (skip research),
|
||||
parallel (multi-agent).'
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: bootstrap
|
||||
path: bootstrap/SKILL.md
|
||||
- argument_hint: '[topic or problem]'
|
||||
category: utilities
|
||||
description: Brainstorm solutions with trade-off analysis and brutal honesty. Use
|
||||
for ideation, architecture decisions, technical debates, feature exploration,
|
||||
feasibility assessment, design discussions.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: brainstorm
|
||||
path: brainstorm/SKILL.md
|
||||
- argument_hint: '[url or task]'
|
||||
category: multimedia
|
||||
description: Automate browsers with Puppeteer CLI scripts and persistent sessions.
|
||||
Use for screenshots, performance analysis, network monitoring, web scraping, form
|
||||
automation, JavaScript debugging.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: chrome-devtools
|
||||
path: chrome-devtools/SKILL.md
|
||||
- argument_hint: '[Goal/Metric description] or inline config block'
|
||||
category: utilities
|
||||
description: Autonomous iterative optimization loop — run N iterations against a
|
||||
mechanical metric, learn from git history, auto-keep/discard changes. Use for
|
||||
improving measurable metrics (coverage, performance, bundle size, etc.) through
|
||||
repeated experimentation.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: ck-autoresearch
|
||||
path: ck-autoresearch/SKILL.md
|
||||
- argument_hint: '[error or issue description]'
|
||||
category: utilities
|
||||
description: Debug systematically with root cause analysis before fixes. Use for
|
||||
bugs, test failures, unexpected behavior, performance issues, call stack tracing,
|
||||
multi-layer validation, log analysis, CI/CD failures, database diagnostics, system
|
||||
investigation.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: ck-debug
|
||||
path: ck-debug/SKILL.md
|
||||
- argument_hint: '[Goal/Metric description] or inline config block'
|
||||
category: utilities
|
||||
description: Autonomous iterative optimization loop — run N iterations against a
|
||||
mechanical metric, learn from git history, auto-keep/discard changes. Use for
|
||||
improving measurable metrics (coverage, performance, bundle size, etc.) through
|
||||
repeated experimentation.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: ck-loop
|
||||
path: ck-loop/SKILL.md
|
||||
- argument_hint: '[task] OR [archive|red-team|validate]'
|
||||
category: utilities
|
||||
description: Plan implementations, design architectures, create technical roadmaps
|
||||
with detailed phases. Use for feature planning, system design, solution architecture,
|
||||
implementation strategy, phase documentation.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: ck-plan
|
||||
path: ck-plan/SKILL.md
|
||||
- argument_hint: <feature description or change proposal> [--files <glob>]
|
||||
category: utilities
|
||||
description: 5 expert personas debate proposed changes before implementation. Catches
|
||||
architectural, security, performance, and UX issues early. Use before major features
|
||||
or risky changes.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: ck-predict
|
||||
path: ck-predict/SKILL.md
|
||||
- argument_hint: <file path or feature description>
|
||||
category: utilities
|
||||
description: Generate comprehensive edge cases and test scenarios by decomposing
|
||||
features across 12 dimensions. Use before implementation or testing to catch issues
|
||||
early.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: ck-scenario
|
||||
path: ck-scenario/SKILL.md
|
||||
- argument_hint: <scope glob or 'full'> [--fix] [--iterations N]
|
||||
category: utilities
|
||||
description: STRIDE + OWASP-based security audit with optional auto-fix. Scans code
|
||||
for vulnerabilities, categorizes by severity, and can iteratively fix findings
|
||||
using ck:autoresearch pattern.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: ck-security
|
||||
path: ck-security/SKILL.md
|
||||
- argument_hint: '[#PR | COMMIT | --pending | codebase [parallel]]'
|
||||
category: utilities
|
||||
description: 'Review code quality with adversarial rigor. Supports input modes:
|
||||
pending changes, PR number, commit hash, codebase scan. Always-on red-team analysis
|
||||
finds security holes, false assumptions, and failure modes.'
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: code-review
|
||||
path: code-review/SKILL.md
|
||||
- argument_hint: '[0-5]'
|
||||
category: utilities
|
||||
description: Set coding experience level for tailored explanations and output format.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: coding-level
|
||||
path: coding-level/SKILL.md
|
||||
- argument_hint: '[topic or question]'
|
||||
category: utilities
|
||||
description: Check context usage limits, monitor time remaining, optimize token
|
||||
consumption, debug context failures. Use when asking about context percentage,
|
||||
rate limits, usage warnings, context optimization, agent architectures, memory
|
||||
systems.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: context-engineering
|
||||
path: context-engineering/SKILL.md
|
||||
- argument_hint: '[task|plan-path] [--interactive|--fast|--parallel|--auto|--no-test]'
|
||||
category: utilities
|
||||
description: ALWAYS activate this skill before implementing EVERY feature, plan,
|
||||
or fix.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: cook
|
||||
path: cook/SKILL.md
|
||||
- argument_hint: '[copy-type] [context]'
|
||||
category: utilities
|
||||
description: Conversion copywriting formulas, headline templates, email copy patterns,
|
||||
landing page structures, CTA optimization, and writing style extraction. Activate
|
||||
for writing high-converting copy, crafting headlines, email campaigns, landing
|
||||
pages, or applying custom writing styles from assets/writing-styles/ directory.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: copywriting
|
||||
path: copywriting/SKILL.md
|
||||
- argument_hint: '[query or schema task]'
|
||||
category: database
|
||||
description: Design schemas, write queries for MongoDB and PostgreSQL. Use for database
|
||||
design, SQL/NoSQL queries, aggregation pipelines, indexes, migrations, replication,
|
||||
performance optimization, psql CLI.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: databases
|
||||
path: databases/SKILL.md
|
||||
- argument_hint: '[platform] [environment]'
|
||||
category: infrastructure
|
||||
description: Deploy projects to any platform with auto-detection. Use when user
|
||||
says "deploy", "publish", "ship", "go live", "push to production", "host this
|
||||
app", or mentions any hosting platform (Vercel, Netlify, Cloudflare, Railway,
|
||||
Fly.io, Render, Heroku, TOSE, Github Pages, AWS, GCP, Digital Ocean, Vultr, Coolify,
|
||||
Dokploy). Auto-detects deployment target from config files and docs/deployment.md.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: deploy
|
||||
path: deploy/SKILL.md
|
||||
- argument_hint: '[design-type] [context]'
|
||||
category: frontend
|
||||
description: 'Comprehensive design skill: brand identity, design tokens, UI styling,
|
||||
logo generation (55 styles, Gemini AI), corporate identity program (50 deliverables,
|
||||
CIP mockups), HTML presentations (Chart.js), banner design (22 styles, social/ads/web/print),
|
||||
icon design (15 styles, SVG, Gemini 3.1 Pro), social photos (HTML→screenshot,
|
||||
multi-platform). Actions: design logo, create CIP, generate mockups, build slides,
|
||||
design banner, generate icon, create social photos, social media images, brand
|
||||
identity, design system. Platforms: Facebook, Twitter, LinkedIn, YouTube, Instagram,
|
||||
Pinterest, TikTok, Threads, Google Ads.'
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: design
|
||||
path: design/SKILL.md
|
||||
- argument_hint: '[platform] [task]'
|
||||
category: infrastructure
|
||||
description: Deploy to Cloudflare (Workers, R2, D1), Docker, GCP (Cloud Run, GKE),
|
||||
Kubernetes (kubectl, Helm). Use for serverless, containers, CI/CD, GitOps, security
|
||||
audit.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: devops
|
||||
path: devops/SKILL.md
|
||||
- argument_hint: init|update|summarize
|
||||
category: utilities
|
||||
description: Analyze codebase and manage project documentation — init, update, summarize.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: docs
|
||||
path: docs/SKILL.md
|
||||
- argument_hint: '[library-name] [topic]'
|
||||
category: dev-tools
|
||||
description: Search library/framework documentation via llms.txt (context7.com).
|
||||
Use for API docs, GitHub repository analysis, technical documentation lookup,
|
||||
latest library features.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: docs-seeker
|
||||
path: docs-seeker/SKILL.md
|
||||
- category: multimedia
|
||||
description: Create, edit, analyze .docx Word documents. Use for document creation,
|
||||
tracked changes, comments, formatting preservation, text extraction, template
|
||||
modification.
|
||||
has_references: false
|
||||
has_scripts: true
|
||||
name: document-skills/docx
|
||||
path: document-skills/docx/SKILL.md
|
||||
- category: multimedia
|
||||
description: Extract text/tables, create, merge, split PDFs. Fill PDF forms programmatically.
|
||||
Use for PDF processing, generation, form filling, document analysis, batch operations.
|
||||
has_references: false
|
||||
has_scripts: true
|
||||
name: document-skills/pdf
|
||||
path: document-skills/pdf/SKILL.md
|
||||
- category: multimedia
|
||||
description: Create, edit, analyze .pptx PowerPoint files. Use for presentations,
|
||||
slides, layouts, speaker notes, template modification, content extraction, slide
|
||||
generation.
|
||||
has_references: false
|
||||
has_scripts: true
|
||||
name: document-skills/pptx
|
||||
path: document-skills/pptx/SKILL.md
|
||||
- category: multimedia
|
||||
description: Create, edit, analyze spreadsheets (.xlsx, .csv, .tsv). Use for Excel
|
||||
formulas, data analysis, visualization, formatting, pivot tables, charts, formula
|
||||
recalculation.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: document-skills/xlsx
|
||||
path: document-skills/xlsx/SKILL.md
|
||||
- argument_hint: '[capability or task description]'
|
||||
category: dev-tools
|
||||
description: Helps users discover and install agent skills when they ask questions
|
||||
like "how do I do X", "find a skill for X", "is there a skill that can...", or
|
||||
express interest in extending capabilities. This skill should be used when the
|
||||
user is looking for functionality that might exist as an installable skill.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: find-skills
|
||||
path: find-skills/SKILL.md
|
||||
- argument_hint: '[issue] --auto|--review|--quick|--parallel'
|
||||
category: utilities
|
||||
description: ALWAYS activate this skill before fixing ANY bug, error, test failure,
|
||||
CI/CD issue, type error, lint, log error, UI issue, code problem.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: fix
|
||||
path: fix/SKILL.md
|
||||
- category: frontend
|
||||
description: Create polished frontend interfaces from designs/screenshots/videos.
|
||||
Use for web components, 3D experiences, replicating UI designs, quick prototypes,
|
||||
immersive interfaces, avoiding AI slop.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: frontend-design
|
||||
path: frontend-design/SKILL.md
|
||||
- argument_hint: '[component or feature]'
|
||||
category: frontend
|
||||
description: Build React/TypeScript frontends with modern patterns. Use for components,
|
||||
Suspense, lazy loading, useSuspenseQuery, MUI v7 styling, TanStack Router, performance
|
||||
optimization.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: frontend-development
|
||||
path: frontend-development/SKILL.md
|
||||
- argument_hint: cm|cp|pr|merge [args]
|
||||
category: dev-tools
|
||||
description: Git operations with conventional commits. Use for staging, committing,
|
||||
pushing, PRs, merges. Auto-splits commits by type/scope. Security scans for secrets.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: git
|
||||
path: git/SKILL.md
|
||||
- argument_hint: '[symbol or query]'
|
||||
category: dev-tools
|
||||
description: Semantic code analysis with GitLab Knowledge Graph. Use for go-to-definition,
|
||||
find-usages, impact analysis, architecture visualization. Supports Ruby, Java,
|
||||
Kotlin, Python, TypeScript/JavaScript.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: gkg
|
||||
path: gkg/SKILL.md
|
||||
- argument_hint: '[agent or feature]'
|
||||
category: ai-ml
|
||||
description: Build AI agents with Google ADK Python. Multi-agent systems, A2A protocol,
|
||||
MCP tools, workflow agents, state/memory, callbacks/plugins, Vertex AI deployment,
|
||||
evaluation.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: google-adk-python
|
||||
path: google-adk-python/SKILL.md
|
||||
- argument_hint: '[topic or reflection]'
|
||||
category: utilities
|
||||
description: Write journal entries analyzing recent changes and session reflections.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: journal
|
||||
path: journal/SKILL.md
|
||||
- argument_hint: '[dir]'
|
||||
category: dev-tools
|
||||
description: AI agent orchestration board for task visualization and team coordination.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: kanban
|
||||
path: kanban/SKILL.md
|
||||
- argument_hint: '[path|url] [--full] [--output path]'
|
||||
category: dev-tools
|
||||
description: Generate llms.txt files from docs or codebase scanning. Follows llmstxt.org
|
||||
spec. Use for LLM-friendly site indexes, documentation summaries, AI context optimization.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: llms
|
||||
path: llms/SKILL.md
|
||||
- argument_hint: '[file-or-directory]'
|
||||
category: utilities
|
||||
description: View markdown files with calm, book-like reading experience via HTTP
|
||||
server. Use for long-form content, documentation preview, novel reading, report
|
||||
viewing, distraction-free reading.
|
||||
has_references: false
|
||||
has_scripts: true
|
||||
name: markdown-novel-viewer
|
||||
path: markdown-novel-viewer/SKILL.md
|
||||
- argument_hint: '[service or API to integrate]'
|
||||
category: frontend
|
||||
description: Build MCP servers for LLM-external service integration. Use for FastMCP
|
||||
(Python), MCP SDK (Node/TypeScript), tool design, API integration, resource providers.
|
||||
has_references: false
|
||||
has_scripts: true
|
||||
name: mcp-builder
|
||||
path: mcp-builder/SKILL.md
|
||||
- argument_hint: '[task or server-name]'
|
||||
category: dev-tools
|
||||
description: Manage MCP servers - discover, analyze, execute tools/prompts/resources.
|
||||
Use for MCP integrations, intelligent tool selection, multi-server management,
|
||||
context-efficient capability discovery.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: mcp-management
|
||||
path: mcp-management/SKILL.md
|
||||
- argument_hint: '[input-file] [operation]'
|
||||
category: multimedia
|
||||
description: Process media with FFmpeg (video/audio), ImageMagick (images), RMBG
|
||||
(AI background removal). Use for encoding, format conversion, filters, thumbnails,
|
||||
batch processing, HLS/DASH streaming.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: media-processing
|
||||
path: media-processing/SKILL.md
|
||||
- argument_hint: '[diagram-type or description]'
|
||||
category: utilities
|
||||
description: Create diagrams with Mermaid.js v11 syntax. Use for flowcharts, sequence
|
||||
diagrams, class diagrams, ER diagrams, Gantt charts, state diagrams, architecture
|
||||
diagrams, timelines, user journeys.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: mermaidjs-v11
|
||||
path: mermaidjs-v11/SKILL.md
|
||||
- argument_hint: '[task] [path]'
|
||||
category: dev-tools
|
||||
description: Build and deploy documentation sites with Mintlify. Use when creating
|
||||
API docs, developer portals, or knowledge bases. Covers docs.json configuration,
|
||||
MDX components (Cards, Steps, Tabs, Accordions, CodeGroup, Callouts, Mermaid,
|
||||
View, Tiles, Tree, Badge, Banner, Color, Tooltips, Panel), page frontmatter, navigation
|
||||
structure (tabs, anchors, dropdowns, products, versions, languages), theming (7
|
||||
themes), OpenAPI/AsyncAPI integration, AI features (llms.txt, MCP, skill.md),
|
||||
deployment (GitHub, GitLab, Vercel, Cloudflare, AWS), and CLI commands for local
|
||||
development and validation.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: mintlify
|
||||
path: mintlify/SKILL.md
|
||||
- argument_hint: '[platform] [feature]'
|
||||
category: frameworks
|
||||
description: Build mobile apps with React Native, Flutter, Swift/SwiftUI, Kotlin/Jetpack
|
||||
Compose. Use for iOS/Android, mobile UX, performance optimization, offline-first,
|
||||
app store deployment.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: mobile-development
|
||||
path: mobile-development/SKILL.md
|
||||
- argument_hint: '[provider] [task]'
|
||||
category: backend
|
||||
description: Integrate payments with SePay (VietQR), Polar, Stripe, Paddle (MoR
|
||||
subscriptions), Creem.io (licensing). Checkout, webhooks, subscriptions, QR codes,
|
||||
multi-provider orders.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: payment-integration
|
||||
path: payment-integration/SKILL.md
|
||||
- argument_hint: '[plans-dir]'
|
||||
category: dev-tools
|
||||
description: View plans dashboard with progress tracking and timeline visualization.
|
||||
Use for kanban boards, plan status overview, phase progress, milestone tracking,
|
||||
project visibility.
|
||||
has_references: false
|
||||
has_scripts: true
|
||||
name: plans-kanban
|
||||
path: plans-kanban/SKILL.md
|
||||
- argument_hint: '[path] OR [--html] --explain|--slides|--diagram|--ascii [topic]
|
||||
OR --html --diff|--plan-review|--recap'
|
||||
category: utilities
|
||||
description: View files/directories OR generate visual explanations, slides, diagrams
|
||||
(Markdown or self-contained HTML).
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: preview
|
||||
path: preview/SKILL.md
|
||||
- argument_hint: '[problem description]'
|
||||
category: utilities
|
||||
description: Apply systematic problem-solving techniques when stuck. Use for complexity
|
||||
spirals, innovation blocks, recurring patterns, assumption constraints, simplification
|
||||
cascades, scale uncertainty.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: problem-solving
|
||||
path: problem-solving/SKILL.md
|
||||
- argument_hint: '[task: status, hydrate, sync, report]'
|
||||
category: utilities
|
||||
description: Track progress, update plan statuses, manage Claude Tasks, generate
|
||||
reports, coordinate docs updates. Use for project oversight, status checks, plan
|
||||
completion, task hydration, cross-session continuity.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: project-management
|
||||
path: project-management/SKILL.md
|
||||
- argument_hint: '[directories or files to organize]'
|
||||
category: utilities
|
||||
description: Organize files, directories, and content structure in any project.
|
||||
Use when creating files, determining output paths, organizing existing assets,
|
||||
or standardizing project layout.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: project-organization
|
||||
path: project-organization/SKILL.md
|
||||
- argument_hint: '[component or pattern]'
|
||||
category: frontend
|
||||
description: React and Next.js performance optimization guidelines from Vercel Engineering.
|
||||
This skill should be used when writing, reviewing, or refactoring React/Next.js
|
||||
code to ensure optimal performance patterns. Triggers on tasks involving React
|
||||
components, Next.js pages, data fetching, bundle optimization, or performance
|
||||
improvements.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: react-best-practices
|
||||
path: react-best-practices/SKILL.md
|
||||
- argument_hint: '[video or component]'
|
||||
category: frontend
|
||||
description: Best practices for Remotion - Video creation in React
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: remotion
|
||||
path: remotion/SKILL.md
|
||||
- argument_hint: '[path] [--style xml|markdown|plain|json]'
|
||||
category: dev-tools
|
||||
description: Pack repositories into AI-friendly files with Repomix (XML, Markdown,
|
||||
plain text). Use for codebase snapshots, LLM context preparation, security audits,
|
||||
third-party library analysis.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: repomix
|
||||
path: repomix/SKILL.md
|
||||
- argument_hint: '[topic]'
|
||||
category: utilities
|
||||
description: Research technical solutions, analyze architectures, gather requirements
|
||||
thoroughly. Use for technology evaluation, best practices research, solution design,
|
||||
scalability/security/maintainability analysis.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: research
|
||||
path: research/SKILL.md
|
||||
- argument_hint: '[timeframe] [--compare] [--team] [--format html|md]'
|
||||
category: utilities
|
||||
description: Data-driven sprint retrospective. Gathers git metrics (commits, LOC,
|
||||
hotspots, churn), computes derived health indicators, and generates a structured
|
||||
markdown or HTML report. Use after sprints, weekly check-ins, or any review period.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: retro
|
||||
path: retro/SKILL.md
|
||||
- argument_hint: '[search-target] [ext]'
|
||||
category: dev-tools
|
||||
description: Fast codebase scouting using parallel agents. Use for file discovery,
|
||||
task context gathering, quick searches across directories. Supports internal (Explore)
|
||||
and external (Gemini/OpenCode) agents.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: scout
|
||||
path: scout/SKILL.md
|
||||
- argument_hint: '[scope] [--secrets-only] [--deps-only] [--full]'
|
||||
category: utilities
|
||||
description: Scan codebase for security vulnerabilities, hardcoded secrets, dependency
|
||||
issues, and OWASP patterns. Use when asked to 'security scan', 'check for secrets',
|
||||
'audit security', or before major releases.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: security-scan
|
||||
path: security-scan/SKILL.md
|
||||
- argument_hint: '[problem to analyze step-by-step]'
|
||||
category: utilities
|
||||
description: Apply step-by-step analysis for complex problems with revision capability.
|
||||
Use for multi-step reasoning, hypothesis verification, adaptive planning, problem
|
||||
decomposition, course correction.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: sequential-thinking
|
||||
path: sequential-thinking/SKILL.md
|
||||
- argument_hint: '[effect or pattern]'
|
||||
category: frontend
|
||||
description: 'Write GLSL fragment shaders for procedural graphics. Topics: shapes
|
||||
(SDF), patterns, noise (Perlin/simplex/cellular), fBm, colors (HSB/RGB), matrices,
|
||||
gradients, animations. Use for generative art, textures, visual effects, WebGL,
|
||||
Three.js shaders.'
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: shader
|
||||
path: shader/SKILL.md
|
||||
- argument_hint: '[official|beta] [--skip-tests] [--skip-review] [--skip-journal]
|
||||
[--skip-docs] [--dry-run]'
|
||||
category: dev-tools
|
||||
description: 'Ship pipeline: merge main, test, review, commit, push, PR. Single
|
||||
command from feature branch to PR URL. Use for shipping official releases to main/master
|
||||
or beta releases to dev/beta branches.'
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: ship
|
||||
path: ship/SKILL.md
|
||||
- argument_hint: '[extension-type] [feature]'
|
||||
category: frameworks
|
||||
description: Build Shopify apps, extensions, themes with Shopify CLI. Use for GraphQL/REST
|
||||
APIs, Polaris UI, Liquid templates, checkout customization, webhooks, billing
|
||||
integration.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: shopify
|
||||
path: shopify/SKILL.md
|
||||
- argument_hint: '[skill-name or description]'
|
||||
category: dev-tools
|
||||
description: Create or update Claude skills with eval-driven iteration. Use for
|
||||
new skills, skill scripts, references, benchmark optimization, description optimization,
|
||||
eval testing, extending Claude's capabilities.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: skill-creator
|
||||
path: skill-creator/SKILL.md
|
||||
- argument_hint: '[design prompt or action]'
|
||||
category: frontend
|
||||
description: AI design generation with Google Stitch. Generate UI designs from text
|
||||
prompts, export Tailwind/HTML/DESIGN.md, orchestrate design-to-code pipeline.
|
||||
Use for rapid prototyping, UI generation, design exploration.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: stitch
|
||||
path: stitch/SKILL.md
|
||||
- argument_hint: '[framework] [feature]'
|
||||
category: frameworks
|
||||
description: Build with TanStack Start (full-stack React framework), TanStack Form
|
||||
(headless form management), and TanStack AI (AI streaming/chat). Use when creating
|
||||
TanStack projects, routes, server functions, forms, validation, or AI chat features.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: tanstack
|
||||
path: tanstack/SKILL.md
|
||||
- argument_hint: <template> <context> [--devs|--researchers|--reviewers N] [--delegate]
|
||||
category: dev-tools
|
||||
description: Orchestrate Agent Teams for parallel multi-session collaboration. Use
|
||||
for research, implementation, review, and debug workflows requiring independent
|
||||
teammates.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: team
|
||||
path: team/SKILL.md
|
||||
- argument_hint: '[context] OR ui [url]'
|
||||
category: utilities
|
||||
description: Run unit, integration, e2e, and UI tests. Use for test execution, coverage
|
||||
analysis, build verification, visual regression, and QA reports.
|
||||
has_references: true
|
||||
has_scripts: false
|
||||
name: test
|
||||
path: test/SKILL.md
|
||||
- argument_hint: '[3D scene or feature]'
|
||||
category: frontend
|
||||
description: 'Build 3D web apps with Three.js (WebGL/WebGPU). 556 searchable examples,
|
||||
60 API classes, 20 use cases. Actions: create 3D scene, load model, add animation,
|
||||
implement physics, build VR/XR. Topics: GLTF loader, PBR materials, particle effects,
|
||||
shadows, post-processing, compute shaders, TSL. Integrations: WebGPU, physics
|
||||
engines, spatial audio.'
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: threejs
|
||||
path: threejs/SKILL.md
|
||||
- argument_hint: '[component or layout]'
|
||||
category: frontend
|
||||
description: Style UIs with shadcn/ui components (Radix UI + Tailwind CSS). Use
|
||||
for accessible components, themes, dark mode, responsive layouts, design systems,
|
||||
color customization.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: ui-styling
|
||||
path: ui-styling/SKILL.md
|
||||
- category: frontend
|
||||
description: 'UI/UX design intelligence for web and mobile. Includes 50+ styles,
|
||||
161 color palettes, 57 font pairings, 161 product types, 99 UX guidelines, and
|
||||
25 chart types across 10 stacks (React, Next.js, Vue, Svelte, SwiftUI, React Native,
|
||||
Flutter, Tailwind, shadcn/ui, and HTML/CSS). Actions: plan, build, create, design,
|
||||
implement, review, fix, improve, optimize, enhance, refactor, and check UI/UX
|
||||
code. Projects: website, landing page, dashboard, admin panel, e-commerce, SaaS,
|
||||
portfolio, blog, and mobile app. Elements: button, modal, navbar, sidebar, card,
|
||||
table, form, and chart. Styles: glassmorphism, claymorphism, minimalism, brutalism,
|
||||
neumorphism, bento grid, dark mode, responsive, skeuomorphism, and flat design.
|
||||
Topics: color systems, accessibility, animation, layout, typography, font pairing,
|
||||
spacing, interaction states, shadow, and gradient. Integrations: shadcn/ui MCP
|
||||
for component search and examples.'
|
||||
has_references: false
|
||||
has_scripts: true
|
||||
name: ui-ux-pro-max
|
||||
path: ui-ux-pro-max/SKILL.md
|
||||
- argument_hint: '[task]'
|
||||
category: dev-tools
|
||||
description: Utilize MCP server tools with intelligent discovery and execution.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: use-mcp
|
||||
path: use-mcp/SKILL.md
|
||||
- category: utilities
|
||||
description: Review recent changes and wrap up the current work session.
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: watzup
|
||||
path: watzup/SKILL.md
|
||||
- argument_hint: '[file-or-pattern]'
|
||||
category: frontend
|
||||
description: Review UI code for Web Interface Guidelines compliance. Use when asked
|
||||
to "review my UI", "check accessibility", "audit design", "review UX", or "check
|
||||
my site against best practices".
|
||||
has_references: false
|
||||
has_scripts: false
|
||||
name: web-design-guidelines
|
||||
path: web-design-guidelines/SKILL.md
|
||||
- argument_hint: '[framework] [feature]'
|
||||
category: frameworks
|
||||
description: Build with Next.js (App Router, RSC, SSR, ISR), Turborepo monorepos.
|
||||
Use for React apps, server rendering, build optimization, caching strategies,
|
||||
shared dependencies.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: web-frameworks
|
||||
path: web-frameworks/SKILL.md
|
||||
- argument_hint: '[test-type] [target]'
|
||||
category: multimedia
|
||||
description: Web testing with Playwright, Vitest, k6. E2E/unit/integration/load/security/visual/a11y
|
||||
testing. Use for test automation, flakiness, Core Web Vitals, mobile gestures,
|
||||
cross-browser.
|
||||
has_references: true
|
||||
has_scripts: true
|
||||
name: web-testing
|
||||
path: web-testing/SKILL.md
|
||||
- argument_hint: '[feature-description] OR [project] [feature]'
|
||||
category: dev-tools
|
||||
description: Create isolated git worktree for parallel development in monorepos.
|
||||
has_references: false
|
||||
has_scripts: true
|
||||
name: worktree
|
||||
path: worktree/SKILL.md
|
||||
342
.opencode/scripts/validate-docs.cjs
Normal file
342
.opencode/scripts/validate-docs.cjs
Normal file
@@ -0,0 +1,342 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Validate documentation accuracy against codebase.
|
||||
* Detects potential hallucinations: invented APIs, broken links, missing env vars.
|
||||
*
|
||||
* Usage:
|
||||
* node .claude/scripts/validate-docs.cjs [docs-dir] [--src dir1,dir2]
|
||||
*
|
||||
* Checks:
|
||||
* 1. Code references - verify `functionName()` and `ClassName` exist
|
||||
* 2. Internal links - verify markdown links point to existing files
|
||||
* 3. Config keys - verify ENV_VAR exist in .env.example
|
||||
*
|
||||
* Exit: Always 0 (non-blocking, warn-only mode)
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
// Patterns
// Inline-code spans containing an identifier, optionally a call: `name` / `name()`
const CODE_REF_PATTERN = /`([A-Za-z_][A-Za-z0-9_]*(?:\(\))?)`/g;
// Markdown links: [text](href)
const LINK_PATTERN = /\[([^\]]+)\]\(([^)]+)\)/g;
// Env vars: `SOME_VAR` in backticks or $SOME_VAR (UPPER_SNAKE, 3+ chars)
const ENV_PATTERN = /`([A-Z][A-Z0-9_]{2,})`|\$([A-Z][A-Z0-9_]{2,})/g;

// Common code terms to ignore (not actual code refs).
// NOTE: lookups lowercase the ref before calling .has(), so every entry is
// normalized to lowercase here. Previously the mixed-case entries ('GET',
// 'JSON', 'README', ...) could never match and were silently dead.
const IGNORE_CODE_REFS = new Set([
  'true', 'false', 'null', 'undefined', 'string', 'number', 'boolean',
  'object', 'array', 'function', 'async', 'await', 'const', 'let', 'var',
  'if', 'else', 'for', 'while', 'return', 'import', 'export', 'default',
  'npm', 'npx', 'node', 'yarn', 'pnpm', 'git', 'bash', 'sh', 'zsh',
  'GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS',
  'JSON', 'XML', 'HTML', 'CSS', 'SQL', 'API', 'URL', 'URI', 'HTTP', 'HTTPS',
  'OK', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE',
  'README', 'LICENSE', 'CHANGELOG', 'TODO', 'FIXME', 'NOTE', 'HACK',
  'dev', 'prod', 'test', 'staging', 'production', 'development',
  'src', 'lib', 'dist', 'build', 'docs', 'tests', 'config',
  'index', 'main', 'app', 'server', 'client', 'utils', 'helpers'
].map(term => term.toLowerCase()));

// Common env var prefixes to ignore (not project-specific)
const IGNORE_ENV_PREFIXES = ['NODE_', 'PATH', 'HOME', 'USER', 'SHELL', 'TERM', 'PWD', 'CI'];

// Markdown template variables (not actual env vars)
const IGNORE_ENV_VARS = new Set(['ARGUMENTS']);
|
||||
|
||||
/**
|
||||
* Find all markdown files in directory.
|
||||
*/
|
||||
function findMarkdownFiles(dir) {
|
||||
if (!fs.existsSync(dir)) return [];
|
||||
return fs.readdirSync(dir)
|
||||
.filter(f => f.endsWith('.md'))
|
||||
.map(f => path.join(dir, f));
|
||||
}
|
||||
|
||||
/**
 * Extract inline-code references (`name()` calls and PascalCase identifiers)
 * from markdown content, recording source file and 1-based line number.
 *
 * Lines INSIDE fenced code blocks (``` ... ```) are skipped entirely —
 * the previous version only skipped the fence-marker lines themselves,
 * so identifiers inside code examples were falsely reported.
 */
function extractCodeRefs(content, filepath) {
  const refs = [];
  let match;
  let inFence = false;
  const lines = content.split('\n');

  lines.forEach((line, idx) => {
    // Toggle fence state on ``` markers; skip everything within a fence.
    if (line.trim().startsWith('```')) {
      inFence = !inFence;
      return;
    }
    if (inFence) return;

    while ((match = CODE_REF_PATTERN.exec(line)) !== null) {
      const ref = match[1];
      // Filter out common terms (the ignore set is consulted lowercase)
      if (IGNORE_CODE_REFS.has(ref.replace('()', '').toLowerCase())) continue;
      // Only check function calls and PascalCase classes
      if (ref.endsWith('()') || /^[A-Z][a-z]/.test(ref)) {
        refs.push({ ref, file: filepath, line: idx + 1 });
      }
    }
  });

  return refs;
}
|
||||
|
||||
/**
 * Extract internal markdown links [text](href) from content.
 * External (http*), in-page anchor (#...) and mailto: targets are ignored.
 * Each hit records href, link text, source file and 1-based line number.
 */
function extractLinks(content, filepath) {
  const links = [];
  const lines = content.split('\n');

  for (let idx = 0; idx < lines.length; idx++) {
    let match;
    while ((match = LINK_PATTERN.exec(lines[idx])) !== null) {
      const href = match[2];
      const isExternal =
        href.startsWith('http') || href.startsWith('#') || href.startsWith('mailto:');
      if (isExternal) continue;
      links.push({ href, file: filepath, line: idx + 1, text: match[1] });
    }
  }

  return links;
}
|
||||
|
||||
/**
 * Extract environment-variable references (`SOME_VAR` or $SOME_VAR)
 * from markdown content, recording source file and 1-based line number.
 *
 * Skips lines INSIDE fenced code blocks (the previous version only
 * skipped the fence-marker lines, so example snippets were scanned),
 * well-known system variables (IGNORE_ENV_PREFIXES), and markdown
 * template variables (IGNORE_ENV_VARS).
 */
function extractEnvVars(content, filepath) {
  const vars = [];
  let match;
  let inFence = false;
  const lines = content.split('\n');

  lines.forEach((line, idx) => {
    // Toggle fence state on ``` markers; skip everything within a fence.
    if (line.trim().startsWith('```')) {
      inFence = !inFence;
      return;
    }
    if (inFence) return;

    while ((match = ENV_PATTERN.exec(line)) !== null) {
      // Group 1 = backtick form, group 2 = $VAR form
      const envVar = match[1] || match[2];
      // Filter common system vars and template variables
      if (IGNORE_ENV_PREFIXES.some(p => envVar.startsWith(p))) continue;
      if (IGNORE_ENV_VARS.has(envVar)) continue;
      vars.push({ envVar, file: filepath, line: idx + 1 });
    }
  });

  return vars;
}
|
||||
|
||||
/**
|
||||
* Check if code reference exists in source directories.
|
||||
*/
|
||||
function checkCodeRefExists(ref, srcDirs) {
|
||||
const name = ref.replace('()', '');
|
||||
const patterns = [
|
||||
`function ${name}`,
|
||||
`const ${name}`,
|
||||
`class ${name}`,
|
||||
`def ${name}`,
|
||||
`export.*${name}`,
|
||||
`${name}:` // object methods
|
||||
];
|
||||
|
||||
for (const srcDir of srcDirs) {
|
||||
if (!fs.existsSync(srcDir)) continue;
|
||||
for (const pattern of patterns) {
|
||||
// Use spawnSync with args array to prevent command injection
|
||||
const result = spawnSync('grep', ['-rl', pattern, srcDir], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 5000
|
||||
});
|
||||
if (result.status === 0 && result.stdout.trim()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if internal link target exists.
|
||||
*/
|
||||
function checkLinkExists(href, sourceFile) {
|
||||
const sourceDir = path.dirname(sourceFile);
|
||||
const targetPath = path.resolve(sourceDir, href.split('#')[0]);
|
||||
return fs.existsSync(targetPath);
|
||||
}
|
||||
|
||||
/**
|
||||
* Load env vars from .env.example.
|
||||
*/
|
||||
function loadEnvExample(projectRoot) {
|
||||
const envPath = path.join(projectRoot, '.env.example');
|
||||
if (!fs.existsSync(envPath)) return new Set();
|
||||
|
||||
const content = fs.readFileSync(envPath, 'utf8');
|
||||
const vars = new Set();
|
||||
|
||||
content.split('\n').forEach(line => {
|
||||
const match = line.match(/^([A-Z][A-Z0-9_]+)=/);
|
||||
if (match) vars.add(match[1]);
|
||||
});
|
||||
|
||||
return vars;
|
||||
}
|
||||
|
||||
/**
 * Run all validations over the markdown files in `docsDir` and print a
 * markdown-formatted report to stdout.
 *
 * Three checks per file: code references against `srcDirs`, internal
 * links against the filesystem, and env-var mentions against the
 * project's .env.example. Never throws on per-file read errors; the
 * report lists at most 10 issues per category.
 *
 * @param {string} docsDir - Absolute path of the docs directory to scan.
 * @param {string[]} srcDirs - Absolute paths of source dirs to grep.
 * @param {string} projectRoot - Project root, used for relative paths.
 */
function validate(docsDir, srcDirs, projectRoot) {
  // Issues found, grouped by category.
  const issues = {
    codeRefs: [],
    links: [],
    envVars: []
  };
  // Counters for the "Verified OK" summary section.
  const stats = {
    filesChecked: 0,
    codeRefsChecked: 0,
    linksChecked: 0,
    envVarsChecked: 0,
    codeRefsValid: 0,
    linksValid: 0,
    envVarsValid: 0
  };

  const mdFiles = findMarkdownFiles(docsDir);
  stats.filesChecked = mdFiles.length;

  // Nothing to do — report and bail early.
  if (mdFiles.length === 0) {
    console.log(`No markdown files found in ${docsDir}`);
    return;
  }

  // Known variable names from .env.example (may be an empty Set).
  const envExample = loadEnvExample(projectRoot);

  for (const filepath of mdFiles) {
    let content;
    try {
      content = fs.readFileSync(filepath, 'utf8');
    } catch (err) {
      // File deleted during validation - skip
      continue;
    }
    // Relative path used in report output for readability.
    const relPath = path.relative(projectRoot, filepath);

    // Check code references
    const codeRefs = extractCodeRefs(content, relPath);
    stats.codeRefsChecked += codeRefs.length;
    for (const { ref, file, line } of codeRefs) {
      if (checkCodeRefExists(ref, srcDirs)) {
        stats.codeRefsValid++;
      } else {
        issues.codeRefs.push({ ref, file, line });
      }
    }

    // Check internal links. Note: extractLinks gets the ABSOLUTE filepath
    // (not relPath) because checkLinkExists resolves targets relative to
    // the linking file's real directory; issues are reported with relPath.
    const links = extractLinks(content, filepath);
    stats.linksChecked += links.length;
    for (const { href, file, line, text } of links) {
      if (checkLinkExists(href, file)) {
        stats.linksValid++;
      } else {
        issues.links.push({ href, file: relPath, line, text });
      }
    }

    // Check env vars
    const envVars = extractEnvVars(content, relPath);
    stats.envVarsChecked += envVars.length;
    for (const { envVar, file, line } of envVars) {
      if (envExample.has(envVar)) {
        stats.envVarsValid++;
      } else {
        issues.envVars.push({ envVar, file, line });
      }
    }
  }

  // Generate report (markdown on stdout).
  console.log('\n## Docs Validation Report\n');
  console.log(`**Files Checked:** ${stats.filesChecked}`);
  console.log(`**Scan Date:** ${new Date().toISOString().split('T')[0]}\n`);

  const hasIssues = issues.codeRefs.length || issues.links.length || issues.envVars.length;

  if (hasIssues) {
    console.log('### Potential Issues\n');

    // Each category prints at most 10 entries, then a "... and N more" line.
    if (issues.codeRefs.length) {
      console.log(`⚠️ **Code References** (${issues.codeRefs.length} issues)`);
      for (const { ref, file, line } of issues.codeRefs.slice(0, 10)) {
        console.log(`- \`${ref}\` in ${file}:${line} - not found in codebase`);
      }
      if (issues.codeRefs.length > 10) {
        console.log(`- ... and ${issues.codeRefs.length - 10} more`);
      }
      console.log('');
    }

    if (issues.links.length) {
      console.log(`⚠️ **Internal Links** (${issues.links.length} issues)`);
      for (const { href, file, line } of issues.links.slice(0, 10)) {
        console.log(`- \`${href}\` in ${file}:${line} - file not found`);
      }
      if (issues.links.length > 10) {
        console.log(`- ... and ${issues.links.length - 10} more`);
      }
      console.log('');
    }

    if (issues.envVars.length) {
      console.log(`⚠️ **Config Keys** (${issues.envVars.length} issues)`);
      for (const { envVar, file, line } of issues.envVars.slice(0, 10)) {
        console.log(`- \`${envVar}\` in ${file}:${line} - not in .env.example`);
      }
      if (issues.envVars.length > 10) {
        console.log(`- ... and ${issues.envVars.length - 10} more`);
      }
      console.log('');
    }
  }

  // Summary of everything that validated cleanly.
  console.log('### Verified OK\n');
  if (stats.codeRefsValid > 0) console.log(`✅ ${stats.codeRefsValid} code references validated`);
  if (stats.linksValid > 0) console.log(`✅ ${stats.linksValid} internal links working`);
  if (stats.envVarsValid > 0) console.log(`✅ ${stats.envVarsValid} config keys confirmed`);
  if (stats.codeRefsValid === 0 && stats.linksValid === 0 && stats.envVarsValid === 0) {
    console.log('ℹ️ No validatable references found');
  }
  console.log('');
}
|
||||
|
||||
/**
 * Parse CLI arguments.
 *
 * Supported forms: `--src dir1,dir2` overrides the source directories;
 * the last bare (non-flag) argument becomes the docs directory.
 * Unknown flags are silently ignored.
 */
function parseArgs(args) {
  const parsed = {
    docsDir: 'docs',
    srcDirs: ['src', 'lib', 'app', 'scripts', '.claude']
  };

  let i = 0;
  while (i < args.length) {
    const token = args[i];
    // `--src` consumes the next token only when it is non-empty (truthy),
    // mirroring the original `args[i + 1]` check.
    if (token === '--src' && args[i + 1]) {
      i += 1;
      parsed.srcDirs = args[i].split(',');
    } else if (!token.startsWith('-')) {
      parsed.docsDir = token;
    }
    i += 1;
  }

  return parsed;
}
|
||||
|
||||
// Main entry point: parse CLI args and resolve all paths against the
// invoking project's working directory.
const args = parseArgs(process.argv.slice(2));
const projectRoot = process.cwd();
const docsDir = path.resolve(projectRoot, args.docsDir);
const srcDirs = args.srcDirs.map(d => path.resolve(projectRoot, d));

validate(docsDir, srcDirs, projectRoot);

// Always exit 0 (non-blocking): this validator only reports; it must
// never fail the pipeline that invokes it.
process.exit(0);
|
||||
57
.opencode/scripts/win_compat.py
Executable file
57
.opencode/scripts/win_compat.py
Executable file
@@ -0,0 +1,57 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Windows compatibility utilities for ClaudeKit scripts.
|
||||
|
||||
Provides UTF-8 encoding support for Windows console (cp1252).
|
||||
Import this module early in scripts that output Unicode content.
|
||||
|
||||
Usage:
|
||||
# At top of script, after imports:
|
||||
from win_compat import safe_print, ensure_utf8_stdout
|
||||
|
||||
# Option 1: Wrap stdout globally (recommended for scripts with many prints)
|
||||
ensure_utf8_stdout()
|
||||
print("Unicode content: emojis, symbols, etc.")
|
||||
|
||||
# Option 2: Use safe_print for individual calls
|
||||
safe_print("Unicode content: emojis, symbols, etc.")
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
# Module-level guard so the stream is reconfigured/wrapped at most once.
_stdout_wrapped = False


def ensure_utf8_stdout():
    """Force ``sys.stdout`` to emit UTF-8 on Windows consoles.

    Windows consoles often default to a legacy code page (e.g. cp1252),
    which makes ``print()`` raise ``UnicodeEncodeError`` on emojis and
    other non-Latin text. On other platforms this is a no-op.

    Safe to call multiple times - only applies once.
    Call this early in script execution, before any print() calls.
    """
    global _stdout_wrapped
    if _stdout_wrapped:
        return

    if sys.platform == 'win32':
        if hasattr(sys.stdout, 'reconfigure'):
            # Python 3.7+: switch encoding in place. Unlike re-wrapping,
            # this preserves the original stream object and its
            # buffering/line discipline.
            sys.stdout.reconfigure(encoding='utf-8')
        elif hasattr(sys.stdout, 'buffer'):
            import io
            # Fallback for exotic streams without reconfigure().
            # line_buffering=True keeps per-line flushing so output
            # interleaves sanely with stderr (a bare TextIOWrapper would
            # otherwise block-buffer and delay output).
            sys.stdout = io.TextIOWrapper(
                sys.stdout.buffer, encoding='utf-8', line_buffering=True)

    _stdout_wrapped = True
|
||||
|
||||
|
||||
def safe_print(text):
    """Print *text*, degrading gracefully on narrow consoles.

    When the console encoding cannot represent some characters (e.g.
    Windows cp1252), ``print`` raises ``UnicodeEncodeError``; in that
    case the offending characters are replaced and the line is printed
    anyway. Use this when you cannot wrap stdout globally.

    Args:
        text: String to print (can contain any Unicode characters)
    """
    try:
        print(text)
    except UnicodeEncodeError:
        # Round-trip through the console's own encoding, substituting
        # anything it cannot represent.
        target = getattr(sys.stdout, 'encoding', 'utf-8') or 'utf-8'
        print(text.encode(target, errors='replace').decode(target))
|
||||
9
.opencode/scripts/worktree.cjs
Normal file
9
.opencode/scripts/worktree.cjs
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/usr/bin/env node
/**
 * Backward-compatible wrapper for worktree CLI.
 *
 * Kept so tooling that invokes scripts/worktree.cjs keeps working; all
 * logic lives in the canonical implementation at the relative path below
 * (i.e. the sibling skills/ tree of this scripts/ directory).
 * NOTE(review): an earlier comment named ".claude/skills/..." as the
 * canonical location — confirm which tree (.claude vs .opencode) is
 * intended; the require below resolves within this file's own tree.
 */

require('../skills/worktree/scripts/worktree.cjs');
|
||||
9
.opencode/scripts/worktree.test.cjs
Normal file
9
.opencode/scripts/worktree.test.cjs
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/usr/bin/env node
/**
 * Backward-compatible wrapper for worktree tests.
 *
 * Kept so tooling that invokes scripts/worktree.test.cjs keeps working;
 * the canonical test suite lives at the relative path below (the sibling
 * skills/ tree of this scripts/ directory).
 * NOTE(review): an earlier comment named ".claude/skills/..." as the
 * canonical location — confirm which tree (.claude vs .opencode) is
 * intended; the require below resolves within this file's own tree.
 */

require('../skills/worktree/scripts/worktree.test.cjs');
|
||||
Reference in New Issue
Block a user