init
This commit is contained in:
211
.opencode/skills/docs-seeker/scripts/analyze-llms-txt.js
Executable file
211
.opencode/skills/docs-seeker/scripts/analyze-llms-txt.js
Executable file
@@ -0,0 +1,211 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* llms.txt Analyzer Script
|
||||
* Parses llms.txt content and categorizes URLs for optimal agent distribution
|
||||
*/
|
||||
|
||||
const { loadEnv } = require('./utils/env-loader');
|
||||
|
||||
// Load environment
|
||||
const env = loadEnv();
|
||||
const DEBUG = env.DEBUG === 'true';
|
||||
|
||||
/**
 * URL priority categories.
 * Keyword lists consumed by categorizeUrl(); each keyword is matched by
 * substring against the lowercased URL. Deep-frozen so this shared
 * module-level config cannot be mutated accidentally by callers.
 */
const PRIORITY_KEYWORDS = Object.freeze({
  critical: Object.freeze([
    'getting-started', 'quick-start', 'quickstart', 'introduction', 'intro', 'overview',
    'installation', 'install', 'setup', 'basics', 'core-concepts', 'fundamentals',
  ]),
  supplementary: Object.freeze([
    'advanced', 'internals', 'migration', 'migrate', 'troubleshooting', 'troubleshoot',
    'faq', 'frequently-asked', 'changelog', 'contributing', 'contribute',
  ]),
  important: Object.freeze([
    'guide', 'tutorial', 'example', 'api-reference', 'api', 'reference',
    'configuration', 'config', 'routing', 'route', 'data-fetching', 'authentication', 'auth',
  ]),
});
|
||||
|
||||
/**
 * Categorize a documentation URL into a priority bucket.
 * @param {string} url - Documentation URL
 * @returns {string} Priority level (critical/important/supplementary)
 */
function categorizeUrl(url) {
  const haystack = url.toLowerCase();

  // Order matters: the specific buckets (critical, supplementary) are
  // checked before the broad "important" bucket so that keywords like
  // 'advanced' or 'internals' win over generic ones like 'guide'.
  for (const priority of ['critical', 'supplementary', 'important']) {
    if (PRIORITY_KEYWORDS[priority].some((keyword) => haystack.includes(keyword))) {
      return priority;
    }
  }

  // No keyword matched: default bucket.
  return 'important';
}
|
||||
|
||||
/**
 * Extract http(s) URLs from llms.txt content.
 * Lines that are empty or start with '#' are skipped; at most one URL is
 * taken per line (the first match).
 * @param {string} content - llms.txt content
 * @returns {Array<string>} Array of URLs
 */
function parseUrls(content) {
  if (typeof content !== 'string' || content === '') {
    return [];
  }

  const found = [];

  for (const rawLine of content.split('\n')) {
    const line = rawLine.trim();

    // Skip comments and empty lines
    if (line === '' || line.startsWith('#')) {
      continue;
    }

    // First http/https URL on the line, if any
    const match = line.match(/https?:\/\/[^\s<>"]+/i);
    if (match) {
      found.push(match[0]);
    }
  }

  return found;
}
|
||||
|
||||
/**
 * Group URLs into priority buckets using categorizeUrl().
 * @param {Array<string>} urls - Array of URLs
 * @returns {Object} URLs grouped by priority (critical/important/supplementary)
 */
function groupByPriority(urls) {
  const buckets = {
    critical: [],
    important: [],
    supplementary: [],
  };

  urls.forEach((url) => buckets[categorizeUrl(url)].push(url));

  return buckets;
}
|
||||
|
||||
/**
 * Suggest how many agents to deploy for a given number of URLs.
 * Thresholds: <=3 single agent, <=10 parallel (ceil(n/2), capped at 5),
 * <=20 parallel with 7 agents, otherwise a two-phase plan with 7 agents.
 * @param {number} urlCount - Total number of URLs
 * @returns {Object} Agent distribution suggestion
 */
function suggestAgentDistribution(urlCount) {
  if (urlCount <= 3) {
    return {
      agentCount: 1,
      strategy: 'single',
      urlsPerAgent: urlCount,
      description: 'Single agent can handle all URLs',
    };
  }

  if (urlCount <= 10) {
    const agentCount = Math.min(Math.ceil(urlCount / 2), 5);
    return {
      agentCount,
      strategy: 'parallel',
      urlsPerAgent: Math.ceil(urlCount / agentCount),
      description: `Deploy ${agentCount} agents in parallel`,
    };
  }

  if (urlCount <= 20) {
    return {
      agentCount: 7,
      strategy: 'parallel',
      urlsPerAgent: Math.ceil(urlCount / 7),
      description: 'Deploy 7 agents with balanced workload',
    };
  }

  // Large sets: split into two phases rather than more agents.
  return {
    agentCount: 7,
    strategy: 'phased',
    urlsPerAgent: Math.ceil(urlCount / 7),
    phases: 2,
    description: 'Use two-phase approach: critical first, then important',
  };
}
|
||||
|
||||
/**
 * Analyze llms.txt content: extract URLs, bucket them by priority, and
 * suggest an agent distribution for fetching them.
 * @param {string} content - llms.txt content
 * @returns {Object} Analysis result (totalUrls, urls, grouped, distribution, summary)
 */
function analyzeLlmsTxt(content) {
  const urls = parseUrls(content);
  const grouped = groupByPriority(urls);

  return {
    totalUrls: urls.length,
    urls,
    grouped,
    distribution: suggestAgentDistribution(urls.length),
    summary: {
      critical: grouped.critical.length,
      important: grouped.important.length,
      supplementary: grouped.supplementary.length,
    },
  };
}
|
||||
|
||||
/**
|
||||
* CLI entry point
|
||||
*/
|
||||
function main() {
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0) {
|
||||
console.error('Usage: node analyze-llms-txt.js <content-file-or-stdin>');
|
||||
console.error('Or pipe content: cat llms.txt | node analyze-llms-txt.js');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const fs = require('fs');
|
||||
let content;
|
||||
|
||||
if (args[0] === '-') {
|
||||
// Read from stdin
|
||||
content = fs.readFileSync(0, 'utf8');
|
||||
} else {
|
||||
// Read from file
|
||||
const filePath = args[0];
|
||||
if (!fs.existsSync(filePath)) {
|
||||
console.error(`Error: File not found: ${filePath}`);
|
||||
process.exit(1);
|
||||
}
|
||||
content = fs.readFileSync(filePath, 'utf8');
|
||||
}
|
||||
|
||||
const result = analyzeLlmsTxt(content);
|
||||
console.log(JSON.stringify(result, null, 2));
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// Run if called directly
|
||||
if (require.main === module) {
|
||||
main();
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
analyzeLlmsTxt,
|
||||
parseUrls,
|
||||
groupByPriority,
|
||||
categorizeUrl,
|
||||
suggestAgentDistribution,
|
||||
};
|
||||
172
.opencode/skills/docs-seeker/scripts/detect-topic.js
Executable file
172
.opencode/skills/docs-seeker/scripts/detect-topic.js
Executable file
@@ -0,0 +1,172 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* Topic Detection Script
|
||||
* Analyzes user queries to extract library name and topic keywords
|
||||
* Returns null for general queries, topic info for specific queries
|
||||
*/
|
||||
|
||||
const { loadEnv } = require('./utils/env-loader');
|
||||
|
||||
// Load environment
|
||||
const env = loadEnv();
|
||||
const DEBUG = env.DEBUG === 'true';
|
||||
|
||||
/**
 * Regexes that mark a query as topic-specific.
 * Each pattern has two capture groups; one is the topic and the other the
 * library — which is which depends on the pattern index (see detectTopic()).
 */
const TOPIC_PATTERNS = [
  // "How do I use X in Y?"
  /how (?:do i|to|can i) (?:use|implement|add|setup|configure) (?:the )?(.+?) (?:in|with|for) (.+)/i,

  // "Y X strategies/patterns" - e.g., "Next.js caching strategies"
  /(.+?) (.+?) (?:strategies|patterns|techniques|methods|approaches)/i,

  // "X Y documentation" or "Y X docs"
  /(.+?) (.+?) (?:documentation|docs|guide|tutorial)/i,

  // "Using X with Y"
  /using (.+?) (?:with|in|for) (.+)/i,

  // "Y X guide/implementation/setup"
  /(.+?) (.+?) (?:guide|implementation|setup|configuration)/i,

  // "Implement X in Y"
  /implement(?:ing)? (.+?) (?:in|with|for|using) (.+)/i,
];

/**
 * Regexes for general (non-topic) library queries.
 * A match here short-circuits topic detection: the query is treated as a
 * plain "fetch this library's docs" request.
 */
const GENERAL_PATTERNS = [
  /(?:documentation|docs) for (.+)/i,
  /(.+?) (?:getting started|quick ?start|introduction)/i,
  /(?:how to use|learn) (.+)/i,
  /(.+?) (?:api reference|overview|basics)/i,
];
|
||||
|
||||
/**
 * Normalize a raw topic string to a single lowercase keyword.
 * Strips characters other than letters, digits, spaces and hyphens, then
 * keeps only the first word of a multi-word topic, capped at 20 characters.
 * @param {string} topic - Raw topic string
 * @returns {string} Normalized topic keyword
 */
function normalizeTopic(topic) {
  const hyphenated = topic
    .toLowerCase()
    .trim()
    .replace(/[^a-z0-9\s-]/g, '')
    .replace(/\s+/g, '-');
  // First hyphen-separated word, length-limited.
  return hyphenated.split('-')[0].slice(0, 20);
}
|
||||
|
||||
/**
 * Normalize a raw library name: lowercase, trimmed, special characters
 * removed (keeping '-', '/', '.'), spaces collapsed to hyphens.
 * @param {string} library - Raw library string
 * @returns {string} Normalized library name
 */
function normalizeLibrary(library) {
  const lowered = library.toLowerCase().trim();
  return lowered.replace(/[^a-z0-9\s\-\/\.]/g, '').replace(/\s+/g, '-');
}
|
||||
|
||||
/**
 * Detect whether a query is topic-specific or general.
 * General patterns are tried first and win (returning null); otherwise the
 * topic patterns are tried in order and the first match produces a result.
 * @param {string} query - User query
 * @returns {Object|null} Topic info ({query, topic, library, isTopicSpecific})
 *   or null for a general / unrecognized query
 */
function detectTopic(query) {
  if (!query || typeof query !== 'string') {
    return null;
  }

  const trimmedQuery = query.trim();

  // General patterns take precedence: a match means "no specific topic".
  for (const pattern of GENERAL_PATTERNS) {
    if (pattern.test(trimmedQuery)) {
      if (DEBUG) console.error('[DEBUG] Matched general pattern, no topic');
      return null;
    }
  }

  for (const [index, pattern] of TOPIC_PATTERNS.entries()) {
    const match = trimmedQuery.match(pattern);
    if (!match) continue;

    const [, term1, term2] = match;

    // Pattern 1 ("Y X strategies/patterns") captures library first and
    // topic second; every other pattern captures topic first, library second.
    const swapped = index === 1;
    const topic = normalizeTopic(swapped ? term2 : term1);
    const library = normalizeLibrary(swapped ? term1 : term2);

    if (DEBUG) {
      console.error('[DEBUG] Matched topic pattern');
      console.error('[DEBUG] Topic:', topic);
      console.error('[DEBUG] Library:', library);
    }

    return {
      query: trimmedQuery,
      topic,
      library,
      isTopicSpecific: true,
    };
  }

  if (DEBUG) console.error('[DEBUG] No pattern matched, treating as general');
  return null;
}
|
||||
|
||||
/**
|
||||
* CLI entry point
|
||||
*/
|
||||
function main() {
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0) {
|
||||
console.error('Usage: node detect-topic.js "<user query>"');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const query = args.join(' ');
|
||||
const result = detectTopic(query);
|
||||
|
||||
if (result) {
|
||||
console.log(JSON.stringify(result, null, 2));
|
||||
process.exit(0);
|
||||
} else {
|
||||
console.log(JSON.stringify({ isTopicSpecific: false }, null, 2));
|
||||
process.exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
// Run if called directly
|
||||
if (require.main === module) {
|
||||
main();
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
detectTopic,
|
||||
normalizeTopic,
|
||||
normalizeLibrary,
|
||||
};
|
||||
213
.opencode/skills/docs-seeker/scripts/fetch-docs.js
Executable file
213
.opencode/skills/docs-seeker/scripts/fetch-docs.js
Executable file
@@ -0,0 +1,213 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* Documentation Fetcher Script
|
||||
* Fetches documentation from context7.com with topic support and fallback chain
|
||||
*/
|
||||
|
||||
const https = require('https');
|
||||
const { loadEnv } = require('./utils/env-loader');
|
||||
const { detectTopic } = require('./detect-topic');
|
||||
|
||||
// Load environment
|
||||
const env = loadEnv();
|
||||
const DEBUG = env.DEBUG === 'true';
|
||||
const API_KEY = env.CONTEXT7_API_KEY;
|
||||
|
||||
/**
 * Make an HTTPS GET request and collect the full response body.
 * Sends a Bearer Authorization header when API_KEY is set.
 * @param {string} url - URL to fetch
 * @returns {Promise<string|null>} Body on 200, null on 404; rejects otherwise
 */
function httpsGet(url) {
  const headers = API_KEY ? { 'Authorization': `Bearer ${API_KEY}` } : {};

  return new Promise((resolve, reject) => {
    const request = https.get(url, { headers }, (res) => {
      let body = '';

      res.on('data', (chunk) => {
        body += chunk;
      });

      res.on('end', () => {
        if (res.statusCode === 200) {
          resolve(body);
        } else if (res.statusCode === 404) {
          // 404 is an expected "not found" outcome, not an error.
          resolve(null);
        } else {
          reject(new Error(`HTTP ${res.statusCode}: ${body}`));
        }
      });
    });

    request.on('error', reject);
  });
}
|
||||
|
||||
/**
 * Construct a context7.com llms.txt URL for a library.
 * A name containing '/' is treated as a GitHub org/repo; anything else is
 * normalized and looked up under the websites/ namespace.
 * @param {string} library - Library name (e.g. "next.js", "shadcn/ui")
 * @param {string} topic - Optional topic keyword appended as ?topic=
 * @returns {string} context7.com URL
 */
function buildContext7Url(library, topic = null) {
  let basePath;

  if (library.includes('/')) {
    // GitHub repo format: org/repo
    const [org, repo] = library.split('/');
    basePath = `${org}/${repo}`;
  } else {
    // Website namespace with a conservative slug
    basePath = `websites/${library.toLowerCase().replace(/[^a-z0-9-]/g, '')}`;
  }

  const baseUrl = `https://context7.com/${basePath}/llms.txt`;
  return topic ? `${baseUrl}?topic=${encodeURIComponent(topic)}` : baseUrl;
}
|
||||
|
||||
/**
 * Build the ordered list of context7.com URLs to try for a library.
 * Known aliases are mapped to their canonical GitHub repos first; a
 * topic-scoped URL (when a topic is given) precedes the un-scoped fallback.
 * @param {string} library - Library name
 * @param {string} topic - Optional topic
 * @returns {Promise<Array>} Array of URLs to try, in order
 */
async function getUrlVariations(library, topic = null) {
  // Common aliases -> canonical GitHub repos
  const knownRepos = {
    'next.js': 'vercel/next.js',
    'nextjs': 'vercel/next.js',
    'remix': 'remix-run/remix',
    'astro': 'withastro/astro',
    'shadcn': 'shadcn-ui/ui',
    'shadcn/ui': 'shadcn-ui/ui',
    'better-auth': 'better-auth/better-auth',
  };

  const repo = knownRepos[library.toLowerCase()] || library;
  const candidates = [];

  if (topic) {
    candidates.push(buildContext7Url(repo, topic)); // topic-scoped first
  }
  candidates.push(buildContext7Url(repo)); // un-scoped fallback

  return candidates;
}
|
||||
|
||||
/**
 * Fetch documentation for a user query from context7.com.
 * Topic-specific queries get a topic-scoped URL plus a fallback; general
 * queries get URLs for the extracted library name. The first URL that
 * returns content wins; fetch failures are skipped (logged when DEBUG).
 * @param {string} query - User query
 * @returns {Promise<Object>} Result with success flag, source, and content
 *   or an error message plus the URLs that were tried
 */
async function fetchDocs(query) {
  const topicInfo = detectTopic(query);
  if (DEBUG) {
    console.error('[DEBUG] Topic detection result:', topicInfo);
  }

  let urls = [];

  if (topicInfo && topicInfo.isTopicSpecific) {
    // Topic-specific search
    urls = await getUrlVariations(topicInfo.library, topicInfo.topic);
    if (DEBUG) {
      console.error('[DEBUG] Topic-specific URLs:', urls);
    }
  } else {
    // General query: pull the library name out of the query text.
    // Note: urls stays empty when this pattern does not match.
    const libraryMatch = query.match(/(?:documentation|docs|guide) (?:for )?(.+)/i);
    if (libraryMatch) {
      urls = await getUrlVariations(libraryMatch[1].trim());
      if (DEBUG) {
        console.error('[DEBUG] General library URLs:', urls);
      }
    }
  }

  // Try candidates in order; first non-empty response wins.
  for (const url of urls) {
    if (DEBUG) {
      console.error(`[DEBUG] Trying URL: ${url}`);
    }

    try {
      const content = await httpsGet(url);
      if (content) {
        return {
          success: true,
          source: 'context7.com',
          url,
          content,
          topicSpecific: url.includes('?topic='),
        };
      }
    } catch (error) {
      // Best-effort: a failed candidate just falls through to the next URL.
      if (DEBUG) {
        console.error(`[DEBUG] Failed to fetch ${url}:`, error.message);
      }
    }
  }

  // Nothing worked (or no URLs could be derived from the query).
  return {
    success: false,
    source: 'context7.com',
    error: 'Documentation not found on context7.com',
    urls,
    suggestion: 'Try repository analysis or web search',
  };
}
|
||||
|
||||
/**
|
||||
* CLI entry point
|
||||
*/
|
||||
async function main() {
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0) {
|
||||
console.error('Usage: node fetch-docs.js "<user query>"');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const query = args.join(' ');
|
||||
|
||||
try {
|
||||
const result = await fetchDocs(query);
|
||||
console.log(JSON.stringify(result, null, 2));
|
||||
process.exit(result.success ? 0 : 1);
|
||||
} catch (error) {
|
||||
console.error('Error:', error.message);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Run if called directly
|
||||
if (require.main === module) {
|
||||
main();
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
fetchDocs,
|
||||
buildContext7Url,
|
||||
getUrlVariations,
|
||||
httpsGet,
|
||||
};
|
||||
72
.opencode/skills/docs-seeker/scripts/tests/run-tests.js
Executable file
72
.opencode/skills/docs-seeker/scripts/tests/run-tests.js
Executable file
@@ -0,0 +1,72 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* Test runner - runs all tests
|
||||
*/
|
||||
|
||||
const { spawn } = require('child_process');
|
||||
const path = require('path');
|
||||
|
||||
/**
 * Test files to execute, in order.
 */
const tests = [
  'test-detect-topic.js',
  'test-fetch-docs.js',
  'test-analyze-llms.js',
];

/**
 * Run a single test file as a child `node` process, inheriting stdio.
 * @param {string} testFile - File name relative to this directory
 * @returns {Promise<void>} Resolves on exit code 0, rejects otherwise
 */
function runTest(testFile) {
  return new Promise((resolve, reject) => {
    console.log(`\n${'='.repeat(60)}`);
    console.log(`Running: ${testFile}`);
    console.log('='.repeat(60));

    const testPath = path.join(__dirname, testFile);
    const proc = spawn('node', [testPath], {
      stdio: 'inherit',
    });

    proc.on('close', (code) => {
      if (code === 0) {
        resolve();
      } else {
        reject(new Error(`Test failed: ${testFile}`));
      }
    });

    proc.on('error', reject);
  });
}

/**
 * Run every test file sequentially, print a summary, and exit 1 when any
 * test file failed, 0 otherwise.
 */
async function runAllTests() {
  console.log('Running all docs-seeker tests...');

  const failedTests = [];

  for (const test of tests) {
    try {
      await runTest(test);
    } catch (error) {
      failedTests.push(test);
    }
  }

  console.log(`\n${'='.repeat(60)}`);
  console.log('All Tests Summary');
  console.log('='.repeat(60));
  console.log(`Total test files: ${tests.length}`);
  console.log(`Passed: ${tests.length - failedTests.length}`);
  console.log(`Failed: ${failedTests.length}`);

  if (failedTests.length > 0) {
    console.log('\nFailed tests:');
    failedTests.forEach((test) => console.log(`  - ${test}`));
    process.exit(1);
  } else {
    console.log('\n✓ All tests passed!');
    process.exit(0);
  }
}

// Fixes: removed the unused `totalPassed`/`totalFailed` counters, and catch
// unexpected rejections instead of leaving the top-level promise floating.
runAllTests().catch((error) => {
  console.error('Test runner error:', error.message);
  process.exit(1);
});
|
||||
119
.opencode/skills/docs-seeker/scripts/tests/test-analyze-llms.js
Executable file
119
.opencode/skills/docs-seeker/scripts/tests/test-analyze-llms.js
Executable file
@@ -0,0 +1,119 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* Tests for analyze-llms-txt.js
|
||||
*/
|
||||
|
||||
const {
|
||||
analyzeLlmsTxt,
|
||||
parseUrls,
|
||||
groupByPriority,
|
||||
categorizeUrl,
|
||||
suggestAgentDistribution,
|
||||
} = require('../analyze-llms-txt');
|
||||
|
||||
// Test counter
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
// Record a boolean check: logs pass/fail and bumps the matching counter.
function assert(condition, message) {
  if (!condition) {
    console.error(`✗ ${message}`);
    failed++;
    return;
  }
  console.log(`✓ ${message}`);
  passed++;
}
|
||||
|
||||
// Record a strict-equality check, printing both values on mismatch.
function assertEqual(actual, expected, message) {
  if (actual !== expected) {
    console.error(`✗ ${message}`);
    console.error(`  Expected: ${expected}`);
    console.error(`  Actual: ${actual}`);
    failed++;
    return;
  }
  console.log(`✓ ${message}`);
  passed++;
}
|
||||
|
||||
console.log('Running analyze-llms-txt.js tests...\n');
|
||||
|
||||
// Test categorizeUrl
|
||||
console.log('## Testing categorizeUrl()');
|
||||
assertEqual(categorizeUrl('https://docs.example.com/getting-started'), 'critical', 'Categorize getting-started as critical');
|
||||
assertEqual(categorizeUrl('https://docs.example.com/guide/routing'), 'important', 'Categorize routing guide as important');
|
||||
assertEqual(categorizeUrl('https://docs.example.com/advanced/internals'), 'supplementary', 'Categorize internals as supplementary');
|
||||
assertEqual(categorizeUrl('https://docs.example.com/api-reference'), 'important', 'Categorize API reference as important');
|
||||
|
||||
// Test parseUrls
|
||||
console.log('\n## Testing parseUrls()');
|
||||
|
||||
const sampleContent = `# Documentation
|
||||
https://docs.example.com/getting-started
|
||||
https://docs.example.com/guide
|
||||
# Comment line
|
||||
https://docs.example.com/api-reference
|
||||
|
||||
https://docs.example.com/advanced
|
||||
`;
|
||||
|
||||
const urls = parseUrls(sampleContent);
|
||||
assertEqual(urls.length, 4, 'Parse 4 URLs from content');
|
||||
assert(urls[0].includes('getting-started'), 'First URL is getting-started');
|
||||
|
||||
const emptyContent = '';
|
||||
const emptyUrls = parseUrls(emptyContent);
|
||||
assertEqual(emptyUrls.length, 0, 'Empty content returns 0 URLs');
|
||||
|
||||
// Test groupByPriority
|
||||
console.log('\n## Testing groupByPriority()');
|
||||
|
||||
const testUrls = [
|
||||
'https://docs.example.com/getting-started',
|
||||
'https://docs.example.com/guide/routing',
|
||||
'https://docs.example.com/advanced/internals',
|
||||
'https://docs.example.com/installation',
|
||||
];
|
||||
|
||||
const grouped = groupByPriority(testUrls);
|
||||
assert(grouped.critical.length >= 2, 'Has critical URLs');
|
||||
assert(grouped.important.length >= 1, 'Has important URLs');
|
||||
assert(grouped.supplementary.length >= 1, 'Has supplementary URLs');
|
||||
|
||||
// Test suggestAgentDistribution
|
||||
console.log('\n## Testing suggestAgentDistribution()');
|
||||
|
||||
const dist1 = suggestAgentDistribution(2);
|
||||
assertEqual(dist1.agentCount, 1, 'Suggest 1 agent for 2 URLs');
|
||||
assertEqual(dist1.strategy, 'single', 'Strategy is single for few URLs');
|
||||
|
||||
const dist2 = suggestAgentDistribution(8);
|
||||
assert(dist2.agentCount >= 3 && dist2.agentCount <= 5, 'Suggest 3-5 agents for 8 URLs');
|
||||
assertEqual(dist2.strategy, 'parallel', 'Strategy is parallel for medium URLs');
|
||||
|
||||
const dist3 = suggestAgentDistribution(15);
|
||||
assertEqual(dist3.agentCount, 7, 'Suggest 7 agents for 15 URLs');
|
||||
|
||||
const dist4 = suggestAgentDistribution(25);
|
||||
assertEqual(dist4.agentCount, 7, 'Suggest 7 agents for 25 URLs');
|
||||
assertEqual(dist4.strategy, 'phased', 'Strategy is phased for many URLs');
|
||||
assertEqual(dist4.phases, 2, 'Use 2 phases for large sets');
|
||||
|
||||
// Test analyzeLlmsTxt
|
||||
console.log('\n## Testing analyzeLlmsTxt()');
|
||||
|
||||
const analysis = analyzeLlmsTxt(sampleContent);
|
||||
assertEqual(analysis.totalUrls, 4, 'Analysis counts 4 URLs');
|
||||
assert(analysis.grouped, 'Analysis includes grouped URLs');
|
||||
assert(analysis.distribution, 'Analysis includes distribution suggestion');
|
||||
assert(analysis.summary, 'Analysis includes summary');
|
||||
|
||||
// Summary
|
||||
console.log('\n## Test Summary');
|
||||
console.log(`Passed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
console.log(`Total: ${passed + failed}`);
|
||||
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
112
.opencode/skills/docs-seeker/scripts/tests/test-detect-topic.js
Executable file
112
.opencode/skills/docs-seeker/scripts/tests/test-detect-topic.js
Executable file
@@ -0,0 +1,112 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* Tests for detect-topic.js
|
||||
*/
|
||||
|
||||
const { detectTopic, normalizeTopic, normalizeLibrary } = require('../detect-topic');
|
||||
|
||||
// Test counter
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
// Record a boolean check: logs pass/fail and bumps the matching counter.
function assert(condition, message) {
  if (!condition) {
    console.error(`✗ ${message}`);
    failed++;
    return;
  }
  console.log(`✓ ${message}`);
  passed++;
}
|
||||
|
||||
// Record a strict-equality check, printing both values on mismatch.
function assertEqual(actual, expected, message) {
  if (actual !== expected) {
    console.error(`✗ ${message}`);
    console.error(`  Expected: ${expected}`);
    console.error(`  Actual: ${actual}`);
    failed++;
    return;
  }
  console.log(`✓ ${message}`);
  passed++;
}
|
||||
|
||||
console.log('Running detect-topic.js tests...\n');
|
||||
|
||||
// Test normalizeTopic
|
||||
console.log('## Testing normalizeTopic()');
|
||||
assertEqual(normalizeTopic('date picker'), 'date', 'Normalize multi-word topic');
|
||||
assertEqual(normalizeTopic('OAuth'), 'oauth', 'Normalize OAuth');
|
||||
assertEqual(normalizeTopic('Server-Side'), 'server', 'Normalize Server-Side');
|
||||
assertEqual(normalizeTopic('caching'), 'caching', 'Normalize caching');
|
||||
|
||||
// Test normalizeLibrary
|
||||
console.log('\n## Testing normalizeLibrary()');
|
||||
assertEqual(normalizeLibrary('Next.js'), 'next.js', 'Normalize Next.js');
|
||||
assertEqual(normalizeLibrary('shadcn/ui'), 'shadcn/ui', 'Normalize shadcn/ui');
|
||||
assertEqual(normalizeLibrary('Better Auth'), 'better-auth', 'Normalize Better Auth');
|
||||
|
||||
// Test topic-specific queries
|
||||
console.log('\n## Testing topic-specific queries');
|
||||
|
||||
const topicQuery1 = detectTopic('How do I use date picker in shadcn/ui?');
|
||||
assert(topicQuery1 !== null, 'Detect topic-specific query 1');
|
||||
assert(topicQuery1.isTopicSpecific === true, 'Query 1 is topic-specific');
|
||||
assertEqual(topicQuery1.topic, 'date', 'Query 1 topic is "date"');
|
||||
assertEqual(topicQuery1.library, 'shadcn/ui', 'Query 1 library is "shadcn/ui"');
|
||||
|
||||
const topicQuery2 = detectTopic('Next.js caching strategies');
|
||||
assert(topicQuery2 !== null, 'Detect topic-specific query 2');
|
||||
assert(topicQuery2 && topicQuery2.isTopicSpecific === true, 'Query 2 is topic-specific');
|
||||
if (topicQuery2) {
|
||||
assertEqual(topicQuery2.topic, 'caching', 'Query 2 topic is "caching"');
|
||||
assertEqual(topicQuery2.library, 'next.js', 'Query 2 library is "next.js"');
|
||||
}
|
||||
|
||||
const topicQuery3 = detectTopic('Better Auth OAuth setup');
|
||||
assert(topicQuery3 !== null, 'Detect topic-specific query 3');
|
||||
assert(topicQuery3.isTopicSpecific === true, 'Query 3 is topic-specific');
|
||||
|
||||
const topicQuery4 = detectTopic('Using authentication with Better Auth');
|
||||
assert(topicQuery4 !== null, 'Detect topic-specific query 4');
|
||||
assert(topicQuery4.isTopicSpecific === true, 'Query 4 is topic-specific');
|
||||
|
||||
const topicQuery5 = detectTopic('Implement routing in Next.js');
|
||||
assert(topicQuery5 !== null, 'Detect topic-specific query 5');
|
||||
assert(topicQuery5.isTopicSpecific === true, 'Query 5 is topic-specific');
|
||||
|
||||
// Test general queries
|
||||
console.log('\n## Testing general queries');
|
||||
|
||||
const generalQuery1 = detectTopic('Documentation for Next.js');
|
||||
assert(generalQuery1 === null, 'Detect general query 1 (returns null)');
|
||||
|
||||
const generalQuery2 = detectTopic('Astro getting started');
|
||||
assert(generalQuery2 === null, 'Detect general query 2 (returns null)');
|
||||
|
||||
const generalQuery3 = detectTopic('How to use Better Auth');
|
||||
assert(generalQuery3 === null, 'Detect general query 3 (returns null)');
|
||||
|
||||
const generalQuery4 = detectTopic('Next.js API reference');
|
||||
assert(generalQuery4 === null, 'Detect general query 4 (returns null)');
|
||||
|
||||
// Test edge cases
|
||||
console.log('\n## Testing edge cases');
|
||||
|
||||
const edgeCase1 = detectTopic('');
|
||||
assert(edgeCase1 === null, 'Empty string returns null');
|
||||
|
||||
const edgeCase2 = detectTopic(null);
|
||||
assert(edgeCase2 === null, 'Null returns null');
|
||||
|
||||
const edgeCase3 = detectTopic('Random text without pattern');
|
||||
assert(edgeCase3 === null, 'Non-matching query returns null');
|
||||
|
||||
// Summary
|
||||
console.log('\n## Test Summary');
|
||||
console.log(`Passed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
console.log(`Total: ${passed + failed}`);
|
||||
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
84
.opencode/skills/docs-seeker/scripts/tests/test-fetch-docs.js
Executable file
84
.opencode/skills/docs-seeker/scripts/tests/test-fetch-docs.js
Executable file
@@ -0,0 +1,84 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* Tests for fetch-docs.js
|
||||
*/
|
||||
|
||||
const { buildContext7Url, getUrlVariations } = require('../fetch-docs');
|
||||
|
||||
// Test counter
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
// Record a boolean check: logs pass/fail and bumps the matching counter.
function assert(condition, message) {
  if (!condition) {
    console.error(`✗ ${message}`);
    failed++;
    return;
  }
  console.log(`✓ ${message}`);
  passed++;
}
|
||||
|
||||
// Record a strict-equality check, printing both values on mismatch.
function assertEqual(actual, expected, message) {
  if (actual !== expected) {
    console.error(`✗ ${message}`);
    console.error(`  Expected: ${expected}`);
    console.error(`  Actual: ${actual}`);
    failed++;
    return;
  }
  console.log(`✓ ${message}`);
  passed++;
}
|
||||
|
||||
console.log('Running fetch-docs.js tests...\n');
|
||||
|
||||
// Test buildContext7Url
|
||||
console.log('## Testing buildContext7Url()');
|
||||
|
||||
assertEqual(
|
||||
buildContext7Url('vercel/next.js'),
|
||||
'https://context7.com/vercel/next.js/llms.txt',
|
||||
'Build URL for GitHub repo'
|
||||
);
|
||||
|
||||
assertEqual(
|
||||
buildContext7Url('vercel/next.js', 'cache'),
|
||||
'https://context7.com/vercel/next.js/llms.txt?topic=cache',
|
||||
'Build URL with topic parameter'
|
||||
);
|
||||
|
||||
assertEqual(
|
||||
buildContext7Url('shadcn-ui/ui', 'date'),
|
||||
'https://context7.com/shadcn-ui/ui/llms.txt?topic=date',
|
||||
'Build URL for shadcn with topic'
|
||||
);
|
||||
|
||||
// Test getUrlVariations
|
||||
console.log('\n## Testing getUrlVariations()');
|
||||
|
||||
// Exercise getUrlVariations(): topic ordering, no-topic case, and alias mapping.
async function testUrlVariations() {
  const withTopic = await getUrlVariations('next.js', 'cache');
  assert(withTopic.length >= 2, 'Returns multiple URL variations with topic');
  assert(withTopic[0].includes('?topic=cache'), 'First URL has topic parameter');
  assert(!withTopic[1].includes('?topic='), 'Second URL has no topic parameter');

  const noTopic = await getUrlVariations('shadcn/ui');
  assert(noTopic.length >= 1, 'Returns URL variations without topic');
  assert(!noTopic[0].includes('?topic='), 'URL has no topic parameter');

  const astroUrls = await getUrlVariations('astro', 'routing');
  assert(astroUrls.length >= 2, 'Returns variations for known library');
  assertEqual(astroUrls[0], 'https://context7.com/withastro/astro/llms.txt?topic=routing', 'Maps Astro correctly');
}
|
||||
|
||||
testUrlVariations().then(() => {
|
||||
// Summary
|
||||
console.log('\n## Test Summary');
|
||||
console.log(`Passed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
console.log(`Total: ${passed + failed}`);
|
||||
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
});
|
||||
94
.opencode/skills/docs-seeker/scripts/utils/env-loader.js
Executable file
94
.opencode/skills/docs-seeker/scripts/utils/env-loader.js
Executable file
@@ -0,0 +1,94 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* Environment variable loader for docs-seeker skill
|
||||
* Respects order: process.env > skill/.env > skills/.env > .claude/.env
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
/**
 * Parse .env file content into key-value pairs.
 * Supports KEY=value lines with optional surrounding single or double
 * quotes; comment lines (#) and empty lines are ignored.
 * @param {string} content - .env file content
 * @returns {Object} Parsed environment variables
 */
function parseEnvFile(content) {
  const env = {};

  for (const line of content.split('\n')) {
    // Skip comments and empty lines
    if (!line || line.trim().startsWith('#')) continue;

    const match = line.match(/^\s*([A-Za-z_][A-Za-z0-9_]*)\s*=\s*(.*)$/);
    if (!match) continue;

    const [, key, rawValue] = match;
    let value = rawValue.trim();

    // Strip matching surrounding quotes (single or double)
    const doubleQuoted = value.startsWith('"') && value.endsWith('"');
    const singleQuoted = value.startsWith("'") && value.endsWith("'");
    if (doubleQuoted || singleQuoted) {
      value = value.slice(1, -1);
    }

    env[key] = value;
  }

  return env;
}
|
||||
|
||||
/**
 * Load environment variables from .env files in priority order.
 * Priority (highest wins): process.env > skill/.env > skills/.env > .claude/.env
 * @returns {Object} Merged environment variables
 */
function loadEnv() {
  const skillDir = path.resolve(__dirname, '../..');
  const skillsDir = path.resolve(skillDir, '..');
  const claudeDir = path.resolve(skillsDir, '..');

  // Lowest priority first: later spreads overwrite earlier keys.
  const envPaths = [
    path.join(claudeDir, '.env'),
    path.join(skillsDir, '.env'),
    path.join(skillDir, '.env'),
  ];

  let merged = {};

  for (const envPath of envPaths) {
    if (!fs.existsSync(envPath)) continue;
    try {
      const parsed = parseEnvFile(fs.readFileSync(envPath, 'utf8'));
      merged = { ...merged, ...parsed };
    } catch (error) {
      // Unreadable files are skipped silently (best-effort by design).
    }
  }

  // The real process environment always wins.
  return { ...merged, ...process.env };
}
|
||||
|
||||
/**
|
||||
* Get environment variable with fallback
|
||||
* @param {string} key - Environment variable key
|
||||
* @param {string} defaultValue - Default value if not found
|
||||
* @returns {string} Environment variable value
|
||||
*/
|
||||
function getEnv(key, defaultValue = '') {
|
||||
const env = loadEnv();
|
||||
return env[key] || defaultValue;
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
loadEnv,
|
||||
getEnv,
|
||||
parseEnvFile,
|
||||
};
|
||||
Reference in New Issue
Block a user