/// Database performance optimization utilities for Hive CE.
///
/// Features:
/// - Lazy box loading for large datasets
/// - Database compaction strategies
/// - Query optimization helpers
/// - Cache management
/// - Batch operations

import 'package:hive_ce/hive.dart';
import '../constants/performance_constants.dart';
import 'performance_monitor.dart';

/// Database optimization helpers for Hive CE.
class DatabaseOptimizer {
  /// Batch write operations for better performance.
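  ///
  /// A minimal usage sketch (the box name, value type, and items here are
  /// hypothetical):
  ///
  /// ```dart
  /// final box = Hive.box<String>('notes'); // hypothetical box
  /// await DatabaseOptimizer.batchWrite(
  ///   box: box,
  ///   items: {'id-1': 'first note', 'id-2': 'second note'},
  /// );
  /// ```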
  static Future<void> batchWrite<T>({
    required Box<T> box,
    required Map<String, T> items,
  }) async {
    final startTime = DateTime.now();

    // Hive supports bulk writes via putAll; writing in fixed-size chunks
    // keeps any single write from growing unboundedly with large datasets.
    final entries = items.entries.toList();
    final batchSize = PerformanceConstants.databaseBatchSize;

    for (var i = 0; i < entries.length; i += batchSize) {
      final end =
          (i + batchSize < entries.length) ? i + batchSize : entries.length;
      final batch = entries.sublist(i, end);
      await box.putAll(Map.fromEntries(batch));
    }

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'batchWrite',
      duration: duration,
      affectedRows: items.length,
    );
  }

  /// Batch delete operations.
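  ///
  /// A minimal usage sketch (the box and keys are hypothetical):
  ///
  /// ```dart
  /// await DatabaseOptimizer.batchDelete(
  ///   box: Hive.box<String>('notes'), // hypothetical box
  ///   keys: ['id-1', 'id-2'],
  /// );
  /// ```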
  static Future<void> batchDelete<T>({
    required Box<T> box,
    // Hive keys may be Strings or auto-increment ints, so accept both.
    required List<dynamic> keys,
  }) async {
    final startTime = DateTime.now();

    final batchSize = PerformanceConstants.databaseBatchSize;

    for (var i = 0; i < keys.length; i += batchSize) {
      final end = (i + batchSize < keys.length) ? i + batchSize : keys.length;
      final batch = keys.sublist(i, end);
      // deleteAll removes a whole chunk in one call instead of issuing
      // one delete per key.
      await box.deleteAll(batch);
    }

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'batchDelete',
      duration: duration,
      affectedRows: keys.length,
    );
  }

  /// Compact the database to reduce file size.
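  ///
  /// A minimal usage sketch (the box is hypothetical):
  ///
  /// ```dart
  /// await DatabaseOptimizer.compactBox(Hive.box<String>('notes'));
  /// ```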
  static Future<void> compactBox<T>(Box<T> box) async {
    final startTime = DateTime.now();

    await box.compact();

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'compact',
      duration: duration,
    );
  }

  /// Efficient filtered query with an optional result limit (see
  /// [QueryCache] for caching query results).
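  ///
  /// A minimal usage sketch (the box and predicate are hypothetical):
  ///
  /// ```dart
  /// final drafts = DatabaseOptimizer.queryWithFilter(
  ///   box: Hive.box<String>('notes'), // hypothetical box
  ///   filter: (note) => note.startsWith('draft:'),
  ///   limit: 10,
  /// );
  /// ```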
  static List<T> queryWithFilter<T>({
    required Box<T> box,
    required bool Function(T item) filter,
    int? limit,
  }) {
    final startTime = DateTime.now();

    final results = <T>[];
    final values = box.values;

    for (final item in values) {
      if (filter(item)) {
        results.add(item);
        if (limit != null && results.length >= limit) {
          break;
        }
      }
    }

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'queryWithFilter',
      duration: duration,
      affectedRows: results.length,
    );

    return results;
  }

  /// Efficient pagination with an optional filter.
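  ///
  /// A minimal usage sketch (the box is hypothetical):
  ///
  /// ```dart
  /// final secondPage = DatabaseOptimizer.queryWithPagination(
  ///   box: Hive.box<String>('notes'), // hypothetical box
  ///   page: 1, // zero-based, so this is the second page
  ///   pageSize: 20,
  /// );
  /// ```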
  static List<T> queryWithPagination<T>({
    required Box<T> box,
    required int page,
    int pageSize = 20,
    bool Function(T item)? filter,
  }) {
    final startTime = DateTime.now();

    final skip = page * pageSize;
    final results = <T>[];
    var skipped = 0;
    var taken = 0;

    final values = box.values;

    for (final item in values) {
      if (filter != null && !filter(item)) {
        continue;
      }

      if (skipped < skip) {
        skipped++;
        continue;
      }

      if (taken < pageSize) {
        results.add(item);
        taken++;
      } else {
        break;
      }
    }

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'queryWithPagination',
      duration: duration,
      affectedRows: results.length,
    );

    return results;
  }

  /// Check if a box needs compaction.
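  ///
  /// For automatic compaction, Hive accepts a custom strategy when the box
  /// is opened; a sketch, assuming hive_ce keeps Hive's `compactionStrategy`
  /// parameter (box name and threshold are hypothetical):
  ///
  /// ```dart
  /// await Hive.openBox<String>(
  ///   'notes', // hypothetical box name
  ///   compactionStrategy: (entries, deletedEntries) => deletedEntries > 50,
  /// );
  /// ```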
  static bool needsCompaction<T>(Box<T> box) {
    // Hive compacts automatically based on its compaction strategy;
    // this is a placeholder for app-specific heuristics.
    return false;
  }

  /// Get box statistics.
  static Map<String, dynamic> getBoxStats<T>(Box<T> box) {
    return {
      'name': box.name,
      'length': box.length,
      'isEmpty': box.isEmpty,
      'isOpen': box.isOpen,
    };
  }

  /// Clear old cache entries based on timestamp.
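  ///
  /// A minimal usage sketch (the box, `CachedItem` type, and `fetchedAt`
  /// field are hypothetical):
  ///
  /// ```dart
  /// await DatabaseOptimizer.clearOldEntries(
  ///   box: Hive.box<CachedItem>('api_cache'), // hypothetical box
  ///   getTimestamp: (item) => item.fetchedAt, // hypothetical field
  ///   maxAge: const Duration(days: 7),
  /// );
  /// ```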
  static Future<void> clearOldEntries<T>({
    required Box<T> box,
    required DateTime Function(T item) getTimestamp,
    required Duration maxAge,
  }) async {
    final startTime = DateTime.now();
    final now = DateTime.now();
    // Collect keys as-is: converting int keys to strings would break
    // deletion, since Hive treats 'key' and the int key as distinct.
    final keysToDelete = <dynamic>[];

    for (final key in box.keys) {
      final item = box.get(key);
      if (item != null) {
        final timestamp = getTimestamp(item);
        if (now.difference(timestamp) > maxAge) {
          keysToDelete.add(key);
        }
      }
    }

    await batchDelete(box: box, keys: keysToDelete);

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'clearOldEntries',
      duration: duration,
      affectedRows: keysToDelete.length,
    );
  }

  /// Optimize a box by removing duplicates (if applicable).
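  ///
  /// A minimal usage sketch (the box, `Note` type, and `id` field are
  /// hypothetical):
  ///
  /// ```dart
  /// await DatabaseOptimizer.removeDuplicates(
  ///   box: Hive.box<Note>('notes'), // hypothetical box
  ///   getUniqueId: (note) => note.id, // hypothetical field
  /// );
  /// ```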
  static Future<void> removeDuplicates<T>({
    required Box<T> box,
    required String Function(T item) getUniqueId,
  }) async {
    final startTime = DateTime.now();
    final seen = <String>{};
    final keysToDelete = <dynamic>[];

    for (final key in box.keys) {
      final item = box.get(key);
      if (item != null) {
        final uniqueId = getUniqueId(item);
        if (seen.contains(uniqueId)) {
          // The first occurrence wins; later duplicates are removed.
          keysToDelete.add(key);
        } else {
          seen.add(uniqueId);
        }
      }
    }

    await batchDelete(box: box, keys: keysToDelete);

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'removeDuplicates',
      duration: duration,
      affectedRows: keysToDelete.length,
    );
  }
}

/// Lazy box helper for large datasets.
class LazyBoxHelper {
  /// Load items in chunks to avoid memory issues.
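  ///
  /// A minimal usage sketch (the lazy box is hypothetical):
  ///
  /// ```dart
  /// final lazyBox = await Hive.openLazyBox<String>('archive');
  /// final items = await LazyBoxHelper.loadInChunks(
  ///   lazyBox: lazyBox,
  ///   chunkSize: 100,
  /// );
  /// ```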
  static Future<List<T>> loadInChunks<T>({
    required LazyBox<T> lazyBox,
    int chunkSize = 50,
    bool Function(T item)? filter,
  }) async {
    final startTime = DateTime.now();
    final results = <T>[];
    final keys = lazyBox.keys.toList();

    for (var i = 0; i < keys.length; i += chunkSize) {
      final end = (i + chunkSize < keys.length) ? i + chunkSize : keys.length;
      final chunkKeys = keys.sublist(i, end);

      for (final key in chunkKeys) {
        final item = await lazyBox.get(key);
        if (item != null && (filter == null || filter(item))) {
          results.add(item);
        }
      }
    }

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'loadInChunks',
      duration: duration,
      affectedRows: results.length,
    );

    return results;
  }

  /// Get paginated items from a lazy box.
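  ///
  /// A minimal usage sketch (the lazy box is hypothetical):
  ///
  /// ```dart
  /// final firstPage = await LazyBoxHelper.getPaginated(
  ///   lazyBox: await Hive.openLazyBox<String>('archive'),
  ///   page: 0,
  /// );
  /// ```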
  static Future<List<T>> getPaginated<T>({
    required LazyBox<T> lazyBox,
    required int page,
    int pageSize = 20,
  }) async {
    final startTime = DateTime.now();
    final skip = page * pageSize;
    // Paginate over keys first so only the requested page is read from disk.
    final keys = lazyBox.keys.skip(skip).take(pageSize).toList();
    final results = <T>[];

    for (final key in keys) {
      final item = await lazyBox.get(key);
      if (item != null) {
        results.add(item);
      }
    }

    final duration = DateTime.now().difference(startTime);
    DatabaseTracker.logQuery(
      operation: 'getPaginated',
      duration: duration,
      affectedRows: results.length,
    );

    return results;
  }
}

/// Cache manager for database queries.
class QueryCache<T> {
  final Map<String, _CachedQuery<T>> _cache = {};
  final Duration cacheDuration;

  QueryCache({this.cacheDuration = const Duration(minutes: 5)});

  /// Get a cached result, or compute and cache it on a miss.
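  ///
  /// A minimal usage sketch (the cache key and query are hypothetical):
  ///
  /// ```dart
  /// final cache = QueryCache<List<String>>();
  /// final notes = await cache.getOrCompute(
  ///   'recent-notes', // hypothetical cache key
  ///   () async => DatabaseOptimizer.queryWithFilter(
  ///     box: Hive.box<String>('notes'), // hypothetical box
  ///     filter: (note) => note.isNotEmpty,
  ///   ),
  /// );
  /// ```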
  Future<T> getOrCompute(
    String key,
    Future<T> Function() compute,
  ) async {
    final cached = _cache[key];
    final now = DateTime.now();

    if (cached != null && now.difference(cached.timestamp) < cacheDuration) {
      return cached.value;
    }

    final value = await compute();
    _cache[key] = _CachedQuery(value: value, timestamp: now);

    // Evict expired entries so the cache doesn't grow without bound.
    _cleanCache();

    return value;
  }

  /// Invalidate a specific cache entry.
  void invalidate(String key) {
    _cache.remove(key);
  }

  /// Clear the entire cache.
  void clear() {
    _cache.clear();
  }

  void _cleanCache() {
    final now = DateTime.now();
    _cache.removeWhere((key, value) {
      return now.difference(value.timestamp) > cacheDuration;
    });
  }
}

class _CachedQuery<T> {
  final T value;
  final DateTime timestamp;

  _CachedQuery({
    required this.value,
    required this.timestamp,
  });
}