This commit is contained in:
2026-04-12 01:06:31 +07:00
commit 10d660cbcb
1066 changed files with 228596 additions and 0 deletions

View File

@@ -0,0 +1,197 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AI Artist Core - BM25 search engine for prompt engineering resources
"""
import csv
import re
from pathlib import Path
from math import log
from collections import defaultdict
# ============ CONFIGURATION ============
# Root of the CSV knowledge bases: the "data" directory that sits next to
# this script's parent directory.
DATA_DIR = Path(__file__).parent.parent / "data"
# Default number of top-ranked rows returned by a search.
MAX_RESULTS = 3
# Per-domain CSV configuration:
#   file        -> CSV filename inside DATA_DIR
#   search_cols -> columns concatenated into the BM25 document text
#   output_cols -> columns included in each returned result row
CSV_CONFIG = {
    "use-case": {
        "file": "use-cases.csv",
        "search_cols": ["Use Case", "Category", "Keywords", "Best Platforms"],
        "output_cols": ["Use Case", "Category", "Keywords", "Prompt Template", "Key Elements", "Best Platforms", "Aspect Ratios", "Tips", "Example"]
    },
    "style": {
        "file": "styles.csv",
        "search_cols": ["Style Name", "Category", "Keywords", "Description", "Best For"],
        "output_cols": ["Style Name", "Category", "Description", "Key Characteristics", "Color Palette", "Best For", "Platforms", "Prompt Keywords"]
    },
    "platform": {
        "file": "platforms.csv",
        "search_cols": ["Platform", "Type", "Keywords", "Strengths"],
        "output_cols": ["Platform", "Type", "Prompt Style", "Key Parameters", "Strengths", "Limitations", "Aspect Ratios", "Best Practices"]
    },
    "technique": {
        "file": "techniques.csv",
        "search_cols": ["Technique", "Category", "Keywords", "Description", "When to Use"],
        "output_cols": ["Technique", "Category", "Description", "When to Use", "Syntax Example", "Platforms", "Tips"]
    },
    "lighting": {
        "file": "lighting.csv",
        "search_cols": ["Lighting Type", "Category", "Keywords", "Description", "Mood", "Best For"],
        "output_cols": ["Lighting Type", "Category", "Description", "Mood", "Best For", "Prompt Keywords", "Technical Notes"]
    },
    "template": {
        "file": "nano-banana-templates.csv",
        "search_cols": ["Category", "Template Name", "Keywords"],
        "output_cols": ["Category", "Template Name", "Keywords", "Prompt Template", "Aspect Ratio", "Tips"]
    },
    "awesome": {
        "file": "awesome-prompts.csv",
        "search_cols": ["title", "description", "prompt"],
        "output_cols": ["id", "title", "category", "description", "prompt", "author", "source"]
    }
}
# ============ BM25 IMPLEMENTATION ============
class BM25:
    """BM25 (Okapi) ranking for small in-memory corpora.

    Usage: bm25 = BM25(); bm25.fit(documents); bm25.score(query).

    Improvement over the original: per-document term-frequency tables are
    precomputed once in fit() instead of being rebuilt for every document on
    every score() call.
    """

    def __init__(self, k1=1.5, b=0.75):
        """k1 tunes term-frequency saturation; b tunes length normalization."""
        self.k1 = k1
        self.b = b
        self.corpus = []                    # tokenized documents
        self.doc_lengths = []               # token count per document
        self.avgdl = 0                      # average document length
        self.idf = {}                       # inverse document frequency per term
        self.doc_freqs = defaultdict(int)   # number of documents containing each term
        self.N = 0                          # number of documents
        self._term_freqs = []               # per-document {term: count} tables

    def tokenize(self, text):
        """Lowercase, strip punctuation, and drop words of 2 chars or fewer."""
        text = re.sub(r'[^\w\s]', ' ', str(text).lower())
        return [w for w in text.split() if len(w) > 2]

    def fit(self, documents):
        """Build the BM25 index from an iterable of document strings."""
        self.corpus = [self.tokenize(doc) for doc in documents]
        self.N = len(self.corpus)
        if self.N == 0:
            return
        self.doc_lengths = [len(doc) for doc in self.corpus]
        self.avgdl = sum(self.doc_lengths) / self.N
        # Precompute each document's term frequencies once; score() reuses
        # them instead of recounting tokens per query.
        self._term_freqs = []
        for doc in self.corpus:
            tf = defaultdict(int)
            for word in doc:
                tf[word] += 1
            self._term_freqs.append(tf)
            # Document frequency: one increment per document containing the term.
            for word in tf:
                self.doc_freqs[word] += 1
        for word, freq in self.doc_freqs.items():
            # Smoothed idf; the +1 keeps values strictly positive.
            self.idf[word] = log((self.N - freq + 0.5) / (freq + 0.5) + 1)

    def score(self, query):
        """Return [(doc_index, score), ...] sorted by descending score."""
        query_tokens = self.tokenize(query)
        scores = []
        for idx in range(self.N):
            score = 0
            doc_len = self.doc_lengths[idx]
            term_freqs = self._term_freqs[idx]
            for token in query_tokens:
                # Tokens absent from the corpus have no idf and contribute nothing.
                if token in self.idf:
                    tf = term_freqs[token]
                    idf = self.idf[token]
                    numerator = tf * (self.k1 + 1)
                    denominator = tf + self.k1 * (1 - self.b + self.b * doc_len / self.avgdl)
                    score += idf * numerator / denominator
            scores.append((idx, score))
        return sorted(scores, key=lambda x: x[1], reverse=True)
# ============ SEARCH FUNCTIONS ============
def _load_csv(filepath):
"""Load CSV and return list of dicts"""
with open(filepath, 'r', encoding='utf-8') as f:
return list(csv.DictReader(f))
def _search_csv(filepath, search_cols, output_cols, query, max_results):
    """Rank the rows of a CSV against *query* with BM25 and return top hits.

    Each hit is a dict restricted to *output_cols*; rows scoring 0 are
    dropped. Returns [] when the file does not exist.
    """
    if not filepath.exists():
        return []
    rows = _load_csv(filepath)
    # One searchable "document" per row: the search columns joined by spaces.
    docs = [" ".join(str(r.get(c, "")) for c in search_cols) for r in rows]
    engine = BM25()
    engine.fit(docs)
    ranked = engine.score(query)
    hits = []
    for doc_idx, doc_score in ranked[:max_results]:
        if doc_score > 0:
            source_row = rows[doc_idx]
            hits.append({c: source_row.get(c, "") for c in output_cols if c in source_row})
    return hits
def detect_domain(query):
    """Guess the most relevant search domain for *query*.

    Counts case-insensitive substring keyword hits per domain and returns
    the first best-scoring domain, defaulting to "style" when nothing hits.
    """
    q = query.lower()
    domain_keywords = {
        "use-case": ["avatar", "profile", "thumbnail", "poster", "social", "youtube", "instagram", "marketing", "product", "e-commerce", "infographic", "comic", "game", "app", "web", "header", "banner"],
        "style": ["style", "aesthetic", "photorealistic", "anime", "manga", "3d", "render", "illustration", "pixel", "watercolor", "oil", "cyberpunk", "vaporwave", "minimalist", "vintage", "retro"],
        "platform": ["midjourney", "dalle", "dall-e", "stable diffusion", "flux", "nano banana", "gemini", "imagen", "ideogram", "leonardo", "firefly", "platform", "tool"],
        "technique": ["prompt", "technique", "weight", "emphasis", "negative", "json", "structured", "iteration", "reference", "identity", "multi-panel", "search grounding"],
        "lighting": ["lighting", "light", "shadow", "golden hour", "blue hour", "rembrandt", "butterfly", "neon", "volumetric", "softbox", "rim light", "studio"]
    }
    # Track the best domain seen so far; a strict > keeps the first domain
    # on ties, matching dict insertion order, and 0 hits falls through to
    # the "style" default.
    best_domain, best_hits = "style", 0
    for domain, keywords in domain_keywords.items():
        hits = sum(1 for kw in keywords if kw in q)
        if hits > best_hits:
            best_domain, best_hits = domain, hits
    return best_domain
def search(query, domain=None, max_results=MAX_RESULTS):
    """Search one domain's CSV; auto-detects the domain when none is given.

    Returns a dict with domain/query/file/count/results keys, or an error
    dict when the backing CSV file is missing. Unknown domains fall back to
    the "style" configuration.
    """
    if domain is None:
        domain = detect_domain(query)
    cfg = CSV_CONFIG.get(domain, CSV_CONFIG["style"])
    path = DATA_DIR / cfg["file"]
    if not path.exists():
        return {"error": f"File not found: {path}", "domain": domain}
    hits = _search_csv(path, cfg["search_cols"], cfg["output_cols"], query, max_results)
    return {
        "domain": domain,
        "query": query,
        "file": cfg["file"],
        "count": len(hits),
        "results": hits
    }
def search_all_domains(query, max_per_domain=2):
    """Run *query* against every configured domain.

    Returns {domain: search_result}, keeping only domains that produced at
    least one hit.
    """
    combined = {}
    for domain in CSV_CONFIG:
        outcome = search(query, domain, max_per_domain)
        if outcome.get("count", 0) > 0:
            combined[domain] = outcome
    return combined

View File

@@ -0,0 +1,102 @@
#!/usr/bin/env python3
"""Extract all prompts from awesome-nano-banana-pro-prompts.md into CSV."""
import re
import csv
from pathlib import Path
def extract_prompts(md_content: str) -> list[dict]:
    """Parse the markdown prompt collection into a list of metadata dicts.

    Entries are split on '### No. <n>:' headings and assigned sequential
    ids starting at 1. An entry is kept only when a fenced prompt block was
    found inside it.
    """
    # Badges that mark metadata rather than a content category.
    non_category_badges = {"Featured", "Raycast", "Language-ZH", "Language-EN", "Language-JA"}
    extracted = []
    entries = re.split(r'### No\. \d+:', md_content)
    for entry_id, body in enumerate(entries[1:], 1):  # entries[0] precedes the first heading
        record = {
            "id": entry_id,
            "title": "",
            "category": "",
            "description": "",
            "prompt": "",
            "author": "",
            "source": "",
        }
        # Title: the first line right after the heading marker.
        first_line = re.search(r'^([^\n]+)', body.strip())
        if first_line:
            record["title"] = first_line.group(1).strip()
        # Category: badge labels minus the metadata badges, at most three.
        badges = re.findall(r'!\[([^\]]+)\]\([^)]+badge[^)]*\)', body)
        if badges:
            cats = [b for b in badges if b not in non_category_badges]
            record["category"] = ", ".join(cats[:3]) if cats else ""
        # Description: the paragraph under the 📖 heading.
        desc = re.search(r'#### 📖 Description\s*\n\n([^\n#]+)', body)
        if desc:
            record["description"] = desc.group(1).strip()
        # Prompt: the fenced code block under the 📝 heading.
        fenced = re.search(r'#### 📝 Prompt\s*\n\n```[^\n]*\n(.*?)```', body, re.DOTALL)
        if fenced:
            record["prompt"] = fenced.group(1).strip()
        # Author link text and source link URL.
        author = re.search(r'\*\*Author:\*\*\s*\[([^\]]+)\]', body)
        if author:
            record["author"] = author.group(1).strip()
        source = re.search(r'\*\*Source:\*\*\s*\[([^\]]+)\]\(([^)]+)\)', body)
        if source:
            record["source"] = source.group(2).strip()
        if record["prompt"]:  # Only keep entries that actually have a prompt
            extracted.append(record)
    return extracted
def save_to_csv(prompts: list[dict], output_path: Path):
    """Write the extracted prompts to *output_path* as a fully quoted CSV."""
    columns = ["id", "title", "category", "description", "prompt", "author", "source"]
    with open(output_path, 'w', newline='', encoding='utf-8') as handle:
        writer = csv.DictWriter(handle, fieldnames=columns, quoting=csv.QUOTE_ALL)
        writer.writeheader()
        writer.writerows(prompts)
    print(f"Saved {len(prompts)} prompts to {output_path}")
def main():
    """Read the markdown collection, extract its prompts, and write the CSV."""
    script_dir = Path(__file__).parent
    md_path = script_dir.parent / "references" / "awesome-nano-banana-pro-prompts.md"
    csv_path = script_dir.parent / "data" / "awesome-prompts.csv"
    print(f"Reading from: {md_path}")
    with open(md_path, 'r', encoding='utf-8') as source:
        content = source.read()
    prompts = extract_prompts(content)
    print(f"Extracted {len(prompts)} prompts")
    # Preview the first few extracted prompts.
    if prompts:
        print("\nSample prompts:")
        for p in prompts[:3]:
            print(f"\n[{p['id']}] {p['title'][:50]}...")
            print(f" Category: {p['category']}")
            print(f" Prompt: {p['prompt'][:100]}...")
    save_to_csv(prompts, csv_path)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,371 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AI Artist Generate - Nano Banana image generation with 3 creative modes
Uses 129 actual prompts from awesome-nano-banana-pro-prompts collection.
Usage:
python generate.py "<concept>" --output <path.png> [options]
Modes:
--mode search : Find best matching prompt (default)
--mode creative : Remix elements from multiple prompts
--mode wild : AI-enhanced out-of-the-box interpretation
--mode all : Generate all 3 variations
"""
import argparse
import sys
import os
import re
import random
from pathlib import Path
# Add parent for core imports
sys.path.insert(0, str(Path(__file__).parent))
from core import search
# Gemini API setup: make the shared helper scripts importable from both the
# user-level ~/.claude install and the project-local scripts directory.
CLAUDE_ROOT = Path.home() / '.claude'
sys.path.insert(0, str(CLAUDE_ROOT / 'scripts'))
PROJECT_CLAUDE = Path(__file__).parent.parent.parent.parent
sys.path.insert(0, str(PROJECT_CLAUDE / 'scripts'))
# Prefer the centralized env resolver when available; get_api_key() falls
# back to a plain os.getenv lookup otherwise.
try:
    from resolve_env import resolve_env
    CENTRALIZED_RESOLVER = True
except ImportError:
    CENTRALIZED_RESOLVER = False
# Best-effort .env loading; silently skipped when python-dotenv is absent.
try:
    from dotenv import load_dotenv
    load_dotenv(Path.home() / '.claude' / '.env')
    load_dotenv(Path.home() / '.claude' / 'skills' / '.env')
except ImportError:
    pass
# google-genai is optional at import time; generate_image() reports a clear
# error when it is missing.
try:
    from google import genai
    from google.genai import types
    GENAI_AVAILABLE = True
except ImportError:
    GENAI_AVAILABLE = False
# ============ CONFIGURATION ============
# Friendly aliases -> Gemini image model ids (the "Nano Banana" family).
NANO_BANANA_MODELS = {
    "flash2": "gemini-3.1-flash-image-preview",  # Nano Banana 2 (new default)
    "flash": "gemini-2.5-flash-image",
    "pro": "gemini-3-pro-image-preview",
}
DEFAULT_MODEL = "flash2"
# Aspect ratios exposed on the CLI and forwarded to the image models.
ASPECT_RATIOS = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]
def get_api_key() -> "str | None":
    """Return the Gemini API key from the environment.

    Uses the centralized resolve_env helper when it was importable at module
    load, otherwise falls back to a plain GEMINI_API_KEY lookup. May return
    None when the key is not configured (callers check for falsy).
    """
    if CENTRALIZED_RESOLVER:
        return resolve_env('GEMINI_API_KEY', skill='ai-multimodal')
    return os.getenv('GEMINI_API_KEY')
def adapt_prompt(template_prompt: str, concept: str, **kwargs) -> str:
    """Adapt a template prompt to the user's concept.

    Replaces common placeholder patterns (Raycast-style {argument ...} tags,
    [bracket] variables, and generic {name} placeholders) with *concept*, or
    with kwargs[name] when a matching keyword argument is supplied, keeping
    the template's structure and Nano Banana narrative style. Appends
    negative constraints when the template has none.

    Bug fix vs. the original: every substitution now goes through a callable
    replacement, so backslashes or group references (e.g. "\\1", "\\g<x>")
    inside *concept* are inserted literally instead of being interpreted by
    re.sub as replacement-template escapes (which raised "bad escape" or
    corrupted the output).
    """
    prompt = template_prompt
    # Ordered placeholder patterns; the catch-all {name} pattern must run last.
    replacements = {
        # Raycast-style arguments
        r'\{argument name="[^"]*" default="[^"]*"\}': lambda m: concept,
        r'\{argument name=[^}]+\}': lambda m: concept,
        # Bracket variables
        r'\[insert [^\]]+\]': lambda m: concept,
        r'\[subject\]': lambda m: concept,
        r'\[concept\]': lambda m: concept,
        r'\[topic\]': lambda m: concept,
        r'\[product\]': lambda m: concept,
        r'\[scene\]': lambda m: concept,
        r'\[description\]': lambda m: concept,
        # Generic placeholders: prefer an explicit kwarg, else the concept.
        r'\{[^}]+\}': lambda m: kwargs.get(m.group(0)[1:-1], concept),
    }
    for pattern, replacement in replacements.items():
        prompt = re.sub(pattern, replacement, prompt, flags=re.IGNORECASE)
    # Ensure negative constraints exist (Nano Banana style)
    if "NEVER" not in prompt and "DO NOT" not in prompt:
        prompt += " NEVER add watermarks or unwanted text. DO NOT include labels."
    return prompt
def mode_search(concept: str, verbose: bool = False) -> tuple[str, dict]:
    """Mode 1: adapt the single best-matching prompt from the collection.

    Falls back to a generic professional-photo prompt when nothing matches.
    Returns (prompt, metadata) where metadata records the matched row.
    """
    found = search(concept, "awesome", 1)
    if found.get("count", 0) > 0:
        best = found["results"][0]
        adapted = adapt_prompt(best["prompt"], concept)
        if verbose:
            print(f" [SEARCH] Matched: {best['title'][:60]}...")
            print(f" Author: {best.get('author', 'Unknown')}")
        return adapted, {"mode": "search", "match": best}
    # Fallback to basic prompt
    fallback = f"A professional image of {concept}. High quality, detailed. Professional photography. NEVER add watermarks."
    return fallback, {"mode": "search", "match": None}
def mode_creative(concept: str, verbose: bool = False) -> tuple[str, dict]:
    """Mode 2: creative remix - combine elements from multiple prompts.

    Uses the best match as the base prompt, then appends a style sentence
    extracted from the second match and a lighting sentence from the third
    when present. Falls back to mode_search with fewer than two matches.

    Fix vs. the original: removed the dead `elements` list that was built
    from the matches but never used.
    """
    # Get top 3 matches
    result = search(concept, "awesome", 3)
    matches = result.get("results", [])
    if len(matches) < 2:
        return mode_search(concept, verbose)
    if verbose:
        print(f" [CREATIVE] Remixing {len(matches)} prompts:")
        for m in matches:
            print(f" - {m['title'][:50]}...")
    # The base structure comes from the strongest match.
    base = matches[0]["prompt"]
    style_hints = []
    # Extract a style sentence from the second match.
    if len(matches) > 1:
        m2 = matches[1]["prompt"]
        style_match = re.search(r'(style[^.]+\.)', m2, re.IGNORECASE)
        if style_match:
            style_hints.append(style_match.group(1))
    # Extract a lighting/mood sentence from the third match.
    if len(matches) > 2:
        m3 = matches[2]["prompt"]
        light_match = re.search(r'(lighting[^.]+\.)', m3, re.IGNORECASE)
        if light_match:
            style_hints.append(light_match.group(1))
    # Adapt the base to the concept and append the extracted hints.
    prompt = adapt_prompt(base, concept)
    if style_hints:
        prompt += " " + " ".join(style_hints)
    return prompt, {"mode": "creative", "matches": [m["title"] for m in matches]}
def mode_wild(concept: str, verbose: bool = False) -> tuple[str, dict]:
    """Mode 3: out-of-the-box interpretation via a random transformation.

    Applies one of a fixed set of creative transformations to the concept,
    optionally borrowing technical camera settings from a randomly chosen
    matched prompt, and always appends anti-watermark constraints.
    """
    matches = search(concept, "awesome", 5).get("results", [])
    # Creative transformations
    transformations = [
        "reimagined as a Japanese Ukiyo-e woodblock print with Prussian blue and vermilion",
        "transformed into a premium liquid glass Bento grid infographic",
        "captured as a vintage 1800s patent document with technical drawings",
        "rendered as a surreal dreamscape with volumetric god rays",
        "depicted in cyberpunk neon aesthetic with holographic elements",
        "illustrated as a hand-drawn chalkboard explanation",
        "visualized as an isometric 3D diorama with miniature figures",
        "presented as a cinematic movie poster with dramatic lighting",
        "created as a vaporwave aesthetic with glitch effects and Roman statues",
        "designed as a premium Apple-style product showcase",
    ]
    transform = random.choice(transformations)
    if matches:
        # Keep the transformation but borrow structure from a random match.
        base = random.choice(matches)
        prompt = f"{concept}, {transform}. "
        # Carry over any technical camera/quality settings it mentions.
        tech_match = re.search(r'(\d+mm lens|f/[\d.]+|Canon|Nikon|professional photography)', base["prompt"])
        if tech_match:
            prompt += f"Shot with {tech_match.group(1)}. "
        if verbose:
            print(f" [WILD] Transform: {transform}")
            print(f" Based on: {base['title'][:50]}...")
    else:
        prompt = f"{concept}, {transform}. Professional quality."
    prompt += " NEVER add watermarks. DO NOT include unwanted text."
    return prompt, {"mode": "wild", "transformation": transform}
def generate_image(
    prompt: str,
    output_path: str,
    model: str = DEFAULT_MODEL,
    aspect_ratio: str = "1:1",
    size: str = "2K",
    verbose: bool = False
) -> dict:
    """Generate an image with Nano Banana (Gemini image models).

    Args:
        prompt: Full text prompt to render.
        output_path: Destination file path; parent directories are created.
        model: Alias from NANO_BANANA_MODELS, or a raw Gemini model id
            (unknown aliases are passed through unchanged).
        aspect_ratio: Aspect ratio string forwarded to the API.
        size: Image size; only sent when the resolved model id contains "pro".
        verbose: Print progress details when True.

    Returns:
        {"status": "success", "output": <path>, "model": <id>} on success, or
        {"status": "error", "error": <message>} on any failure (missing SDK,
        missing API key, API exception, or a response without an image).
    """
    if not GENAI_AVAILABLE:
        return {"status": "error", "error": "google-genai not installed. Run: pip install google-genai"}
    api_key = get_api_key()
    if not api_key:
        return {"status": "error", "error": "GEMINI_API_KEY not found"}
    # Resolve a friendly alias; fall back to treating `model` as a raw id.
    model_id = NANO_BANANA_MODELS.get(model, model)
    if verbose:
        print(f"\n[Nano Banana Generation]")
        print(f" Model: {model_id}")
        print(f" Aspect: {aspect_ratio}")
        print(f" Prompt: {prompt[:100]}...")
    try:
        client = genai.Client(api_key=api_key)
        # Build config
        image_config_args = {'aspect_ratio': aspect_ratio}
        # image_size is only accepted by the "pro" image models.
        if 'pro' in model_id.lower() and size:
            image_config_args['image_size'] = size
        config = types.GenerateContentConfig(
            response_modalities=['IMAGE'],
            image_config=types.ImageConfig(**image_config_args)
        )
        response = client.models.generate_content(
            model=model_id,
            contents=[prompt],
            config=config
        )
        output_file = Path(output_path)
        output_file.parent.mkdir(parents=True, exist_ok=True)
        # Write the first inline image part found in the first candidate.
        if hasattr(response, 'candidates') and response.candidates:
            for part in response.candidates[0].content.parts:
                if part.inline_data:
                    with open(output_file, 'wb') as f:
                        f.write(part.inline_data.data)
                    if verbose:
                        print(f" Generated: {output_file}")
                    return {"status": "success", "output": str(output_file), "model": model_id}
        return {"status": "error", "error": "No image in response"}
    except Exception as e:
        # Broad catch keeps the CLI robust: API errors surface as a message.
        return {"status": "error", "error": str(e)}
def main():
    """CLI entry point: build a prompt per requested mode and generate images."""
    parser = argparse.ArgumentParser(
        description="AI Artist Generate - Nano Banana with 3 creative modes",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Modes:
search : Find best matching prompt from 129 curated prompts (default)
creative : Remix elements from multiple matching prompts
wild : AI-enhanced out-of-the-box creative interpretation
all : Generate all 3 variations
Examples:
# Search mode (default)
python generate.py "tech conference banner" -o banner.png
# Creative remix
python generate.py "AI workshop" -o workshop.png --mode creative
# Wild/experimental
python generate.py "product showcase" -o product.png --mode wild
# Generate all 3 variations
python generate.py "futuristic city" -o city.png --mode all
"""
    )
    parser.add_argument("concept", help="Core concept/subject to generate")
    parser.add_argument("--output", "-o", required=True, help="Output image path")
    parser.add_argument("--mode", "-m", choices=["search", "creative", "wild", "all"],
                        default="search", help="Generation mode")
    parser.add_argument("--model", choices=list(NANO_BANANA_MODELS.keys()),
                        default=DEFAULT_MODEL, help="Model: flash2 (default, Nano Banana 2), flash, or pro")
    parser.add_argument("--aspect-ratio", "-ar", choices=ASPECT_RATIOS, default="1:1")
    parser.add_argument("--size", choices=["1K", "2K", "4K"], default="2K")
    parser.add_argument("--verbose", "-v", action="store_true")
    parser.add_argument("--show-prompt", action="store_true", help="Print generated prompt")
    parser.add_argument("--dry-run", action="store_true", help="Build prompt without generating")
    args = parser.parse_args()
    if args.verbose:
        print(f"[Concept: {args.concept}]")
    # "all" expands to the three concrete modes, run sequentially.
    modes = ["search", "creative", "wild"] if args.mode == "all" else [args.mode]
    for mode in modes:
        if args.verbose or len(modes) > 1:
            print(f"\n{'='*50}")
            print(f"[Mode: {mode.upper()}]")
        # Build prompt based on mode
        if mode == "search":
            prompt, meta = mode_search(args.concept, args.verbose)
        elif mode == "creative":
            prompt, meta = mode_creative(args.concept, args.verbose)
        elif mode == "wild":
            prompt, meta = mode_wild(args.concept, args.verbose)
        if args.show_prompt or args.verbose:
            print(f"\n[Prompt]\n{prompt}\n")
        if args.dry_run:
            print("[Dry run - no generation]")
            continue
        # When generating multiple variations, suffix the mode onto the
        # requested output filename so results do not overwrite each other.
        output_path = args.output
        if len(modes) > 1:
            base = Path(args.output)
            output_path = str(base.parent / f"{base.stem}-{mode}{base.suffix}")
        result = generate_image(
            prompt=prompt,
            output_path=output_path,
            model=args.model,
            aspect_ratio=args.aspect_ratio,
            size=args.size,
            verbose=args.verbose
        )
        if result["status"] == "success":
            print(f"✓ Generated: {result['output']}")
        else:
            print(f"✗ Error: {result['error']}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,147 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AI Artist Search - BM25 search engine for prompt engineering resources
Usage: python search.py "<query>" [--domain <domain>] [--max-results 3]
python search.py "<query>" --prompt-system [--platform <platform>]
Domains: use-case, style, platform, technique, lighting
Platforms: midjourney, dalle, sd, flux, nano-banana
"""
import argparse
import sys
from core import CSV_CONFIG, MAX_RESULTS, search, search_all_domains
# Fix Windows cp1252 encoding: the hardcoded emoji in the output below
# cannot be encoded by the default console codec on Windows.
# Reconfigure stdout to UTF-8 with replacement characters (Python 3.7+;
# reconfigure() is guarded since not every stream object provides it).
if sys.stdout.encoding and sys.stdout.encoding.lower() != "utf-8":
    if hasattr(sys.stdout, 'reconfigure'):
        sys.stdout.reconfigure(encoding="utf-8", errors="replace")
def format_output(result):
    """Render a search() result dict as compact markdown for Claude.

    Error dicts become a one-line "Error: ..." string; long field values are
    truncated at 400 characters to bound token usage.
    """
    if "error" in result:
        return f"Error: {result['error']}"
    lines = [
        f"## AI Artist Search Results",
        f"**Domain:** {result['domain']} | **Query:** {result['query']}",
        f"**Source:** {result['file']} | **Found:** {result['count']} results\n",
    ]
    for i, row in enumerate(result['results'], 1):
        lines.append(f"### Result {i}")
        for key, value in row.items():
            text = str(value)
            if len(text) > 400:
                text = text[:400] + "..."
            lines.append(f"- **{key}:** {text}")
        lines.append("")
    return "\n".join(lines)
def generate_prompt_system(query, platform=None):
    """Build a multi-section prompt-engineering brief for *query*.

    Searches the use-case, style, lighting and technique domains and formats
    the hits into a markdown document; sections with no hits are omitted.
    When *platform* is given, a platform-specific tips section is appended.

    Args:
        query: Concept to build the prompt system around.
        platform: Optional target platform name used for an extra lookup.

    Returns:
        The assembled markdown string.
    """
    output = []
    output.append(f"## 🎨 AI Artist Prompt System")
    output.append(f"**Concept:** {query}")
    if platform:
        output.append(f"**Target Platform:** {platform}")
    output.append("")
    # Search relevant domains up front (one or two hits each).
    use_case = search(query, "use-case", 1)
    style = search(query, "style", 2)
    lighting = search(query, "lighting", 1)
    technique = search(query, "technique", 2)
    # Use case / Template
    if use_case.get("count", 0) > 0:
        uc = use_case["results"][0]
        output.append("### 📋 Use Case Match")
        output.append(f"**{uc.get('Use Case', 'N/A')}** ({uc.get('Category', '')})")
        if uc.get("Prompt Template"):
            output.append(f"**Template:** `{uc.get('Prompt Template')}`")
        if uc.get("Key Elements"):
            output.append(f"**Key Elements:** {uc.get('Key Elements')}")
        if uc.get("Tips"):
            output.append(f"**Tips:** {uc.get('Tips')}")
        output.append("")
    # Styles
    if style.get("count", 0) > 0:
        output.append("### 🎭 Recommended Styles")
        for s in style["results"]:
            output.append(f"**{s.get('Style Name', 'N/A')}** - {s.get('Description', '')}")
            if s.get("Prompt Keywords"):
                output.append(f" Keywords: `{s.get('Prompt Keywords')}`")
        output.append("")
    # Lighting
    if lighting.get("count", 0) > 0:
        lt = lighting["results"][0]
        output.append("### 💡 Lighting Suggestion")
        output.append(f"**{lt.get('Lighting Type', 'N/A')}** - {lt.get('Description', '')}")
        output.append(f" Mood: {lt.get('Mood', '')} | Keywords: `{lt.get('Prompt Keywords', '')}`")
        output.append("")
    # Techniques
    if technique.get("count", 0) > 0:
        output.append("### 🔧 Relevant Techniques")
        for t in technique["results"]:
            output.append(f"**{t.get('Technique', 'N/A')}**: {t.get('Description', '')}")
            if t.get("Syntax Example"):
                output.append(f" Example: `{t.get('Syntax Example')}`")
        output.append("")
    # Platform-specific tips
    if platform:
        plat = search(platform, "platform", 1)
        if plat.get("count", 0) > 0:
            p = plat["results"][0]
            output.append(f"### 🖥️ {p.get('Platform', '')} Tips")
            output.append(f"**Prompt Style:** {p.get('Prompt Style', '')}")
            output.append(f"**Key Parameters:** {p.get('Key Parameters', '')}")
            output.append(f"**Best Practices:** {p.get('Best Practices', '')}")
            output.append("")
    return "\n".join(output)
if __name__ == "__main__":
    # CLI entry: plain domain search by default, with flags for the prompt
    # system generator and the all-domains sweep.
    parser = argparse.ArgumentParser(description="AI Artist Search")
    parser.add_argument("query", help="Search query")
    parser.add_argument("--domain", "-d", choices=list(CSV_CONFIG.keys()), help="Search domain")
    parser.add_argument("--max-results", "-n", type=int, default=MAX_RESULTS, help="Max results (default: 3)")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    # Prompt system generation
    parser.add_argument("--prompt-system", "-ps", action="store_true", help="Generate comprehensive prompt system")
    parser.add_argument("--platform", "-p", type=str, default=None, help="Target platform for prompt system")
    parser.add_argument("--all", "-a", action="store_true", help="Search all domains")
    args = parser.parse_args()
    # Prompt system generation (takes precedence over the other modes).
    if args.prompt_system:
        result = generate_prompt_system(args.query, args.platform)
        print(result)
    # Search all domains
    elif args.all:
        results = search_all_domains(args.query, args.max_results)
        if args.json:
            import json
            print(json.dumps(results, indent=2, ensure_ascii=False))
        else:
            for domain, result in results.items():
                print(format_output(result))
                print("---\n")
    # Domain search (auto-detects the domain when --domain is omitted).
    else:
        result = search(args.query, args.domain, args.max_results)
        if args.json:
            import json
            print(json.dumps(result, indent=2, ensure_ascii=False))
        else:
            print(format_output(result))