增加威胁域名审计

This commit is contained in:
Alex Yang
2026-04-03 10:04:07 +08:00
parent 170cdb3537
commit f8e222aaf6
41 changed files with 81016 additions and 4672993 deletions
+540
View File
@@ -0,0 +1,540 @@
package log
import (
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	"dns-server/logger"
)
// QueryLogConfig holds query-log settings. It is declared locally (rather
// than imported from the main config package) to avoid an import cycle.
type QueryLogConfig struct {
	Enabled           bool   `json:"enabled"`
	RingBufferSize    int    `json:"ringBufferSize"`
	DatabasePath      string `json:"databasePath"`
	MaxDatabaseSizeMB int    `json:"maxDatabaseSizeMB"` // size threshold that triggers size-based archiving
	EnableWAL         bool   `json:"enableWAL"`
	// Archiving
	ArchiveEnabled   bool   `json:"archiveEnabled"`
	ArchiveDir       string `json:"archiveDir"`
	ArchivePrefix    string `json:"archivePrefix"`
	CompressionLevel int    `json:"compressionLevel"` // passed to gzip.NewWriterLevel
	// Retention / cleanup
	RetentionDays   int `json:"retentionDays"`
	RetentionMonths int `json:"retentionMonths"`
	// Query behavior
	QueryTimeout int  `json:"queryTimeout"`
	EnableCache  bool `json:"enableCache"`
	CacheTTL     int  `json:"cacheTTL"`
}
// ArchiveManager rotates the query-log SQLite database into compressed,
// per-month archive files and tracks archive metadata.
type ArchiveManager struct {
	config        *QueryLogConfig
	dbPath        string // path of the live SQLite database file
	archiveDir    string // root archive directory (one subdirectory per month)
	currentSize   int64  // last observed size of the live database, in bytes
	sizeThreshold int64  // MaxDatabaseSizeMB converted to bytes
	mu            sync.Mutex
	isArchiving   bool // guards against overlapping archive runs (written under mu)
	// Background tickers and their shared stop signal.
	monthTicker *time.Ticker
	sizeTicker  *time.Ticker
	stopChan    chan struct{}
	// Metadata cache keyed by "<month>_<archive file base name>".
	metadataCache map[string]*ArchiveMetadata
	cacheMutex    sync.RWMutex
}
// ArchiveMetadata describes one compressed archive file. It is persisted
// as a JSON document next to the archive (see saveMetadata).
type ArchiveMetadata struct {
	ArchiveDate      time.Time `json:"archiveDate"`
	Month            string    `json:"month"` // e.g. "2026-04"
	FilePath         string    `json:"filePath"`
	OriginalSize     int64     `json:"originalSize"`
	CompressedSize   int64     `json:"compressedSize"`
	RecordCount      int64     `json:"recordCount"`
	StartTime        time.Time `json:"startTime"`
	EndTime          time.Time `json:"endTime"`
	CompressionLevel int       `json:"compressionLevel"`
	Checksum         string    `json:"checksum"`
}
// NewArchiveManager constructs an ArchiveManager for the database at
// dbPath: it ensures the archive directory exists, pre-loads any existing
// archive metadata, and snapshots the current database size.
func NewArchiveManager(config *QueryLogConfig, dbPath string) (*ArchiveManager, error) {
	manager := &ArchiveManager{
		config:        config,
		dbPath:        dbPath,
		archiveDir:    config.ArchiveDir,
		sizeThreshold: int64(config.MaxDatabaseSizeMB) * 1024 * 1024,
		metadataCache: make(map[string]*ArchiveMetadata),
		stopChan:      make(chan struct{}),
	}

	// The archive directory must exist before anything can be written to it.
	if err := os.MkdirAll(manager.archiveDir, 0755); err != nil {
		return nil, fmt.Errorf("创建归档目录失败:%w", err)
	}

	// A metadata load failure is non-fatal: the manager can still archive.
	if err := manager.loadAllMetadata(); err != nil {
		logger.Warn("加载元数据失败", "error", err)
	}

	// Best-effort size snapshot; a stat failure leaves currentSize at zero.
	if size, err := manager.GetDatabaseSize(); err == nil {
		manager.currentSize = size
	}
	return manager, nil
}
// StartWatching launches the two background monitors: a per-minute
// database-size check and a daily check that runs the monthly archive on
// the 1st of each month. Both goroutines exit when stopChan is closed.
func (am *ArchiveManager) StartWatching() {
	// Size monitor: once per minute, see whether the DB crossed the threshold.
	am.sizeTicker = time.NewTicker(1 * time.Minute)
	go func() {
		for {
			select {
			case <-am.sizeTicker.C:
				am.CheckAndArchive()
			case <-am.stopChan:
				am.sizeTicker.Stop()
				return
			}
		}
	}()

	// Monthly monitor: a 24h ticker fires at most once per calendar day, so
	// gating on Day()==1 runs the archive once on the 1st. The previous
	// condition additionally required Hour()==0 && Minute()==0, which a
	// ticker started at an arbitrary time essentially never satisfies — the
	// monthly archive never ran.
	am.monthTicker = time.NewTicker(24 * time.Hour)
	go func() {
		for {
			select {
			case <-am.monthTicker.C:
				if time.Now().Day() == 1 {
					am.ArchiveByMonth()
				}
			case <-am.stopChan:
				am.monthTicker.Stop()
				return
			}
		}
	}()
	logger.Info("归档管理器监控启动")
}
// StopWatching stops both background monitors. It is safe to call more
// than once: the stop channel is closed only if not already closed (the
// previous version panicked on a second close).
func (am *ArchiveManager) StopWatching() {
	am.mu.Lock()
	select {
	case <-am.stopChan:
		// Already stopped.
	default:
		close(am.stopChan)
	}
	am.mu.Unlock()
	// Stop the tickers directly as well; Ticker.Stop is idempotent, and the
	// nil checks cover the case where StartWatching was never called.
	if am.sizeTicker != nil {
		am.sizeTicker.Stop()
	}
	if am.monthTicker != nil {
		am.monthTicker.Stop()
	}
	logger.Info("归档管理器监控停止")
}
// CheckAndArchive refreshes the database size from disk and triggers a
// size-based archive when it is at or above the configured threshold.
//
// The size is re-read on every check: previously currentSize was set only
// once in NewArchiveManager, so the comparison used a stale value and
// size-based archiving could never trigger after startup.
func (am *ArchiveManager) CheckAndArchive() error {
	if size, err := am.GetDatabaseSize(); err == nil {
		am.currentSize = size
	}
	if am.currentSize >= am.sizeThreshold {
		logger.Info("数据库大小达到阈值,触发归档", "size", am.currentSize, "threshold", am.sizeThreshold)
		return am.ArchiveBySize()
	}
	return nil
}
// ArchiveByMonth performs the monthly archive. It is intended to run on
// the 1st of each month; calls on any other day are a no-op.
func (am *ArchiveManager) ArchiveByMonth() error {
	now := time.Now()
	// Gate on the day only. The old check also required Hour()==0 &&
	// Minute()==0, which made this method a no-op unless it happened to be
	// invoked within the first minute of the month.
	if now.Day() != 1 {
		return nil
	}
	am.mu.Lock()
	defer am.mu.Unlock()
	if am.isArchiving {
		return fmt.Errorf("正在归档中")
	}
	am.isArchiving = true
	defer func() { am.isArchiving = false }()

	// Archive file name, e.g. "querylog-20260401.db".
	archiveName := fmt.Sprintf("%s-%s.db", am.config.ArchivePrefix, now.Format("20060102"))
	// Per-month directory, e.g. "2026-04".
	monthDir := filepath.Join(am.archiveDir, now.Format("2006-01"))
	if err := os.MkdirAll(monthDir, 0755); err != nil {
		return fmt.Errorf("创建月份目录失败:%w", err)
	}

	archivePath := filepath.Join(monthDir, archiveName)
	if err := am.archiveDatabase(archivePath); err != nil {
		return fmt.Errorf("归档数据库失败:%w", err)
	}

	// archiveDatabase already wrote a metadata record to the same path; this
	// save overwrites it, so include the compressed size here instead of
	// dropping it as the previous version did.
	compressedSize, _ := getFileSize(archivePath + ".gz")
	metadata := &ArchiveMetadata{
		ArchiveDate:      now,
		Month:            now.Format("2006-01"),
		FilePath:         archivePath + ".gz",
		OriginalSize:     am.currentSize,
		CompressedSize:   compressedSize,
		CompressionLevel: am.config.CompressionLevel,
	}
	if err := am.saveMetadata(metadata); err != nil {
		logger.Error("保存元数据失败", "error", err)
	}
	logger.Info("月度归档完成",
		"month", now.Format("2006-01"),
		"file", archivePath+".gz",
		"size", am.currentSize)
	return nil
}
// ArchiveBySize archives the database because it exceeded the size
// threshold. The archive lands in the current month's directory, named
// after today's date.
func (am *ArchiveManager) ArchiveBySize() error {
	am.mu.Lock()
	defer am.mu.Unlock()
	if am.isArchiving {
		return fmt.Errorf("正在归档中")
	}
	am.isArchiving = true
	defer func() { am.isArchiving = false }()

	now := time.Now()
	archiveName := fmt.Sprintf("%s-%s.db", am.config.ArchivePrefix, now.Format("20060102"))

	monthDir := filepath.Join(am.archiveDir, now.Format("2006-01"))
	if err := os.MkdirAll(monthDir, 0755); err != nil {
		return fmt.Errorf("创建月份目录失败:%w", err)
	}

	archivePath := filepath.Join(monthDir, archiveName)
	if err := am.archiveDatabase(archivePath); err != nil {
		return fmt.Errorf("归档数据库失败:%w", err)
	}

	// This save overwrites the record archiveDatabase just wrote to the same
	// path, so carry the compressed size forward instead of dropping it as
	// the previous version did.
	compressedSize, _ := getFileSize(archivePath + ".gz")
	metadata := &ArchiveMetadata{
		ArchiveDate:      now,
		Month:            now.Format("2006-01"),
		FilePath:         archivePath + ".gz",
		OriginalSize:     am.currentSize,
		CompressedSize:   compressedSize,
		CompressionLevel: am.config.CompressionLevel,
	}
	if err := am.saveMetadata(metadata); err != nil {
		logger.Error("保存元数据失败", "error", err)
	}
	logger.Info("按大小归档完成",
		"file", archivePath+".gz",
		"size", am.currentSize)
	return nil
}
// archiveDatabase compresses the live database file to archivePath+".gz"
// and records archive metadata.
//
// NOTE(review): the source database is only copied, never truncated or
// rotated, so archiving does not shrink the live file — confirm whether
// rotation is handled elsewhere, otherwise the size trigger will refire.
func (am *ArchiveManager) archiveDatabase(archivePath string) error {
	// 1. Compress the database into the archive file.
	if err := am.compressArchive(am.dbPath, archivePath+".gz"); err != nil {
		return fmt.Errorf("压缩归档失败:%w", err)
	}
	// 2. Size of the compressed result (best effort).
	compressedSize, err := getFileSize(archivePath + ".gz")
	if err != nil {
		logger.Warn("获取压缩文件大小失败", "error", err)
	}
	// 3. Record count and covered time range (both currently stubs).
	recordCount := am.getRecordCount()
	startTime, endTime := am.getTimeRange()
	// 4. Persist metadata. ArchiveDate and Month were previously left at
	// their zero values here, which made retention-by-age treat this record
	// as infinitely old and delete it on the first cleanup pass.
	now := time.Now()
	metadata := &ArchiveMetadata{
		ArchiveDate:    now,
		Month:          now.Format("2006-01"),
		FilePath:       archivePath + ".gz",
		CompressedSize: compressedSize,
		RecordCount:    recordCount,
		StartTime:      startTime,
		EndTime:        endTime,
	}
	return am.saveMetadata(metadata)
}
// compressArchive gzip-compresses srcPath into dstPath at the configured
// compression level.
func (am *ArchiveManager) compressArchive(srcPath, dstPath string) error {
	srcFile, err := os.Open(srcPath)
	if err != nil {
		return fmt.Errorf("打开源文件失败:%w", err)
	}
	defer srcFile.Close()

	dstFile, err := os.Create(dstPath)
	if err != nil {
		return fmt.Errorf("创建目标文件失败:%w", err)
	}
	defer dstFile.Close()

	gzWriter, err := gzip.NewWriterLevel(dstFile, am.config.CompressionLevel)
	if err != nil {
		return fmt.Errorf("创建 gzip 写入器失败:%w", err)
	}

	if _, err := io.Copy(gzWriter, srcFile); err != nil {
		gzWriter.Close()
		return fmt.Errorf("压缩数据失败:%w", err)
	}
	// Close flushes the gzip trailer; the previous deferred Close dropped
	// this error, which could leave a silently truncated archive.
	if err := gzWriter.Close(); err != nil {
		return fmt.Errorf("关闭 gzip 写入器失败:%w", err)
	}
	return nil
}
// saveMetadata persists an archive's metadata as JSON next to the archive
// file (<archiveDir>/<month>/<archive base>.meta.json) and refreshes the
// in-memory cache entry.
func (am *ArchiveManager) saveMetadata(metadata *ArchiveMetadata) error {
	if metadata.Month == "" {
		metadata.Month = time.Now().Format("2006-01")
	}
	// Ensure the month directory exists: metadata for a month that has no
	// directory yet would previously fail inside os.WriteFile.
	monthDir := filepath.Join(am.archiveDir, metadata.Month)
	if err := os.MkdirAll(monthDir, 0755); err != nil {
		return fmt.Errorf("创建月份目录失败:%w", err)
	}
	metaPath := filepath.Join(monthDir, filepath.Base(metadata.FilePath)+".meta.json")

	data, err := json.MarshalIndent(metadata, "", " ")
	if err != nil {
		return fmt.Errorf("序列化元数据失败:%w", err)
	}
	if err := os.WriteFile(metaPath, data, 0644); err != nil {
		return fmt.Errorf("写入元数据文件失败:%w", err)
	}

	// Refresh the cache entry under the write lock.
	am.cacheMutex.Lock()
	key := metadata.Month + "_" + filepath.Base(metadata.FilePath)
	am.metadataCache[key] = metadata
	am.cacheMutex.Unlock()
	return nil
}
// loadMetadata returns the metadata for one archive file, serving from the
// in-memory cache when possible and otherwise reading the on-disk
// "<fileName>.meta.json" document (which then populates the cache).
func (am *ArchiveManager) loadMetadata(month, fileName string) (*ArchiveMetadata, error) {
	cacheKey := month + "_" + fileName

	// Fast path: cache hit under the read lock.
	am.cacheMutex.RLock()
	cached, hit := am.metadataCache[cacheKey]
	am.cacheMutex.RUnlock()
	if hit {
		return cached, nil
	}

	// Slow path: read and decode the JSON document from disk.
	metaPath := filepath.Join(am.archiveDir, month, fileName+".meta.json")
	raw, err := os.ReadFile(metaPath)
	if err != nil {
		return nil, fmt.Errorf("读取元数据文件失败:%w", err)
	}
	meta := &ArchiveMetadata{}
	if err := json.Unmarshal(raw, meta); err != nil {
		return nil, fmt.Errorf("解析元数据失败:%w", err)
	}

	// Remember the result for subsequent lookups.
	am.cacheMutex.Lock()
	am.metadataCache[cacheKey] = meta
	am.cacheMutex.Unlock()
	return meta, nil
}
// loadAllMetadata walks the archive tree (archiveDir/<month>/) and loads
// every "*.meta.json" document into the metadata cache.
func (am *ArchiveManager) loadAllMetadata() error {
	months, err := os.ReadDir(am.archiveDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil // no archives yet — not an error
		}
		return fmt.Errorf("读取归档目录失败:%w", err)
	}
	const metaSuffix = ".meta.json"
	for _, monthDir := range months {
		if !monthDir.IsDir() {
			continue
		}
		month := monthDir.Name()
		files, err := os.ReadDir(filepath.Join(am.archiveDir, month))
		if err != nil {
			continue
		}
		for _, file := range files {
			// filepath.Ext returns only the final ".json" segment, so the old
			// comparison against ".meta.json" never matched and no metadata
			// was ever loaded; match the full suffix instead.
			if strings.HasSuffix(file.Name(), metaSuffix) {
				fileName := strings.TrimSuffix(file.Name(), metaSuffix)
				if _, err := am.loadMetadata(month, fileName); err != nil {
					logger.Warn("加载元数据失败", "file", file.Name(), "error", err)
				}
			}
		}
	}
	logger.Info("元数据加载完成", "count", len(am.metadataCache))
	return nil
}
// GetArchiveList returns a snapshot of all known archives, newest first.
func (am *ArchiveManager) GetArchiveList() ([]ArchiveMetadata, error) {
	am.cacheMutex.RLock()
	defer am.cacheMutex.RUnlock()
	archives := make([]ArchiveMetadata, 0, len(am.metadataCache))
	for _, meta := range am.metadataCache {
		archives = append(archives, *meta)
	}
	// Newest first. sort.Slice replaces the hand-rolled O(n²) swap sort.
	sort.Slice(archives, func(i, j int) bool {
		return archives[i].ArchiveDate.After(archives[j].ArchiveDate)
	})
	return archives, nil
}
// GetDatabaseSize returns the current on-disk size of the live database
// file in bytes.
func (am *ArchiveManager) GetDatabaseSize() (int64, error) {
	return getFileSize(am.dbPath)
}
// getRecordCount returns the number of records in the live database.
//
// Stub: always returns 0. A real implementation would run a COUNT(*)
// against the query_logs table.
func (am *ArchiveManager) getRecordCount() int64 {
	return 0
}
// getTimeRange returns the (start, end) time range covered by the live
// database.
//
// Stub: returns (now, now). A real implementation would query the min and
// max timestamps from the database.
func (am *ArchiveManager) getTimeRange() (time.Time, time.Time) {
	now := time.Now()
	return now, now
}
// getFileSize 获取文件大小
func getFileSize(path string) (int64, error) {
info, err := os.Stat(path)
if err != nil {
return 0, err
}
return info.Size(), nil
}
// CleanupOldArchives deletes archives older than the configured retention
// window (by days and/or by months) and returns how many were removed.
func (am *ArchiveManager) CleanupOldArchives() (int64, error) {
	am.mu.Lock()
	defer am.mu.Unlock()

	archives, err := am.GetArchiveList()
	if err != nil {
		return 0, err
	}

	now := time.Now()
	var removed int64
	for _, archive := range archives {
		age := now.Sub(archive.ArchiveDate)
		expired := false
		// Day-based retention.
		if am.config.RetentionDays > 0 &&
			age > time.Duration(am.config.RetentionDays)*24*time.Hour {
			expired = true
		}
		// Month-based retention (a month is approximated as 30 days).
		if am.config.RetentionMonths > 0 &&
			int(age.Hours()/24/30) > am.config.RetentionMonths {
			expired = true
		}
		if !expired {
			continue
		}
		if err := am.deleteArchive(archive); err != nil {
			logger.Error("删除归档失败", "file", archive.FilePath, "error", err)
			continue
		}
		removed++
		logger.Info("删除旧归档", "file", archive.FilePath, "month", archive.Month)
	}
	return removed, nil
}
// deleteArchive removes one archive's compressed file and its metadata
// document from disk, then evicts the entry from the metadata cache.
func (am *ArchiveManager) deleteArchive(archive ArchiveMetadata) error {
	// Compressed archive file; a missing file is treated as already deleted.
	if err := os.Remove(archive.FilePath); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("删除归档文件失败:%w", err)
	}
	// Metadata document; failure here is logged but non-fatal.
	metaPath := archive.FilePath + ".meta.json"
	if err := os.Remove(metaPath); err != nil && !os.IsNotExist(err) {
		logger.Warn("删除元数据文件失败", "file", metaPath, "error", err)
	}
	// Evict from the cache.
	cacheKey := archive.Month + "_" + filepath.Base(archive.FilePath)
	am.cacheMutex.Lock()
	delete(am.metadataCache, cacheKey)
	am.cacheMutex.Unlock()
	return nil
}
+382
View File
@@ -0,0 +1,382 @@
package log
import (
"compress/gzip"
"database/sql"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"sync"
"time"
"dns-server/logger"
_ "github.com/mattn/go-sqlite3"
)
// ArchiveQueryEngine answers log queries transparently across the live
// SQLite store and every compressed archive.
type ArchiveQueryEngine struct {
	mainStore  *SQLiteStore
	archiveMgr *ArchiveManager
	config     *QueryLogConfig
	// Extraction directories currently on disk, keyed by temp dir path with
	// the source archive path as the value; guarded by tempDirsMu.
	tempDirs   map[string]string
	tempDirsMu sync.Mutex
}
// NewArchiveQueryEngine wires a query engine over the live store and the
// archive manager and starts the background temp-directory sweeper.
//
// NOTE(review): the sweeper goroutine has no stop channel and is not shut
// down by Close — it runs until process exit. Confirm this is acceptable
// or add a cancellation signal.
func NewArchiveQueryEngine(mainStore *SQLiteStore, archiveMgr *ArchiveManager, config *QueryLogConfig) (*ArchiveQueryEngine, error) {
	aqe := &ArchiveQueryEngine{
		mainStore:  mainStore,
		archiveMgr: archiveMgr,
		config:     config,
		tempDirs:   make(map[string]string),
	}
	// Periodically remove extraction directories left behind by queries.
	go aqe.cleanupTempDirsLoop()
	return aqe, nil
}
// QueryLogs transparently queries the live store plus every archive and
// returns one merged, sorted, paginated page of results along with the
// combined total count.
func (aqe *ArchiveQueryEngine) QueryLogs(filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
	// Each backing store applies LIMIT/OFFSET itself. To paginate a merged
	// result correctly, fetch the first offset+limit rows from every source
	// (offset 0) and apply the caller's offset exactly once, after merging.
	// Previously the offset was passed down to each source AND re-applied to
	// the merged slice, silently dropping rows on every page after the first.
	fetch := page
	fetch.Offset = 0
	fetch.Limit = page.Offset + page.Limit

	// 1. Live store (failures degrade to an empty contribution).
	mainLogs, mainTotal, err := aqe.queryMainStore(filter, fetch)
	if err != nil {
		logger.Error("查询主库失败", "error", err)
		mainLogs = []QueryLog{}
		mainTotal = 0
	}

	// 2. Fast path: the first page is fully satisfied by the live store.
	if page.Offset == 0 && len(mainLogs) >= page.Limit {
		return mainLogs[:page.Limit], mainTotal, nil
	}

	// 3. Archives (failures degrade to an empty contribution).
	archiveLogs, archiveTotal, err := aqe.queryArchives(filter, fetch)
	if err != nil {
		logger.Error("查询归档失败", "error", err)
		archiveLogs = []QueryLog{}
		archiveTotal = 0
	}

	// 4. Merge.
	allLogs := append(mainLogs, archiveLogs...)
	total := mainTotal + archiveTotal

	// 5. Sort (default: newest first by timestamp).
	if page.SortField == "" || page.SortField == "timestamp" {
		desc := page.SortDirection == "" || page.SortDirection == "desc"
		sort.Slice(allLogs, func(i, j int) bool {
			if desc {
				return allLogs[i].Timestamp.After(allLogs[j].Timestamp)
			}
			return allLogs[i].Timestamp.Before(allLogs[j].Timestamp)
		})
	}

	// 6. Apply the caller's page window once.
	start := page.Offset
	if start >= len(allLogs) {
		return []QueryLog{}, total, nil
	}
	end := start + page.Limit
	if end > len(allLogs) {
		end = len(allLogs)
	}
	return allLogs[start:end], total, nil
}
// queryMainStore runs the filtered query against the live SQLite store;
// with no store configured it reports zero rows.
func (aqe *ArchiveQueryEngine) queryMainStore(filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
	store := aqe.mainStore
	if store == nil {
		return []QueryLog{}, 0, nil
	}
	// Delegate filtering, sorting, and paging to the store itself.
	return store.QueryLogs(filter, page)
}
// queryArchives fans the query out over every known archive (newest first,
// per GetArchiveList ordering) and concatenates the results.
func (aqe *ArchiveQueryEngine) queryArchives(filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
	archives, err := aqe.archiveMgr.GetArchiveList()
	if err != nil {
		logger.Error("获取归档列表失败", "error", err)
		return []QueryLog{}, 0, err
	}
	if len(archives) == 0 {
		return []QueryLog{}, 0, nil
	}

	var (
		collected []QueryLog
		total     int64
	)
	for i := range archives {
		// First-page optimization: stop early once enough rows are gathered.
		if page.Offset == 0 && int64(len(collected)) >= int64(page.Limit) {
			break
		}
		// Individual archive failures are logged and skipped.
		logs, count, err := aqe.querySingleArchive(&archives[i], filter, page)
		if err != nil {
			logger.Warn("查询归档失败", "file", archives[i].FilePath, "error", err)
			continue
		}
		collected = append(collected, logs...)
		total += count
	}
	return collected, total, nil
}
// querySingleArchive decompresses one archive into a temporary directory,
// opens the extracted SQLite database, and runs the filtered query on it.
func (aqe *ArchiveQueryEngine) querySingleArchive(archive *ArchiveMetadata, filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
	// 1. Extract the archive to a scratch directory.
	tempDir, err := aqe.extractArchive(archive.FilePath)
	if err != nil {
		return nil, 0, fmt.Errorf("解压归档失败:%w", err)
	}
	// Remove the scratch directory as soon as this query finishes instead of
	// leaving it to the 5-minute sweeper (and previously the success path
	// never cleaned up at all). Deferred LIFO order closes the DB handle
	// below before the directory is deleted.
	defer aqe.cleanupTempDir(tempDir)

	// 2. Open the extracted database. Strip the compression extension with
	// filepath.Ext rather than blindly slicing 3 bytes off the path.
	base := filepath.Base(archive.FilePath)
	dbPath := filepath.Join(tempDir, base[:len(base)-len(filepath.Ext(base))])
	tempDB, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		return nil, 0, fmt.Errorf("连接临时数据库失败:%w", err)
	}
	defer tempDB.Close()

	// 3. Run the query against the archive copy.
	return aqe.queryDatabase(tempDB, filter, page)
}
// queryDatabase runs the filtered, sorted, paginated query against db and
// returns the matching rows plus the total matching count.
func (aqe *ArchiveQueryEngine) queryDatabase(db *sql.DB, filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
	// WHERE clause: all values are bound parameters, never interpolated.
	whereClause := "1=1"
	args := []interface{}{}
	if filter.Result != "" {
		whereClause += " AND result = ?"
		args = append(args, filter.Result)
	}
	if filter.QueryType != "" {
		whereClause += " AND query_type = ?"
		args = append(args, filter.QueryType)
	}
	if !filter.StartTime.IsZero() {
		whereClause += " AND timestamp >= ?"
		args = append(args, filter.StartTime)
	}
	if !filter.EndTime.IsZero() {
		whereClause += " AND timestamp <= ?"
		args = append(args, filter.EndTime)
	}
	if filter.SearchTerm != "" {
		whereClause += " AND (domain LIKE ? OR client_ip LIKE ?)"
		searchTerm := "%" + filter.SearchTerm + "%"
		args = append(args, searchTerm, searchTerm)
	}

	// ORDER BY pieces cannot be bound as parameters and are interpolated
	// into the SQL text, so they must be whitelisted. The previous version
	// spliced page.SortField/SortDirection in verbatim — an SQL-injection
	// vector when PageParams originates from an API request.
	sortField := page.SortField
	switch sortField {
	case "timestamp", "client_ip", "domain", "query_type", "response_time", "result":
		// allowed as-is
	default:
		sortField = "timestamp"
	}
	sortDirection := "DESC"
	if page.SortDirection == "asc" || page.SortDirection == "ASC" {
		sortDirection = "ASC"
	}

	query := fmt.Sprintf(`
		SELECT
			id, timestamp, client_ip, domain, query_type, response_time,
			result, block_rule, block_type, from_cache, dnssec, edns,
			dns_server, dnssec_server, answers, response_code
		FROM query_logs
		WHERE %s
		ORDER BY %s %s
		LIMIT ? OFFSET ?
	`, whereClause, sortField, sortDirection)

	// Append LIMIT/OFFSET without mutating args (args is reused for COUNT).
	queryArgs := make([]interface{}, len(args)+2)
	copy(queryArgs, args)
	queryArgs[len(args)] = page.Limit
	queryArgs[len(args)+1] = page.Offset

	rows, err := db.Query(query, queryArgs...)
	if err != nil {
		return nil, 0, fmt.Errorf("查询数据库失败:%w", err)
	}
	defer rows.Close()

	var logs []QueryLog
	for rows.Next() {
		var log QueryLog
		if err := rows.Scan(
			&log.ID,
			&log.Timestamp,
			&log.ClientIP,
			&log.Domain,
			&log.QueryType,
			&log.ResponseTime,
			&log.Result,
			&log.BlockRule,
			&log.BlockType,
			&log.FromCache,
			&log.DNSSEC,
			&log.EDNS,
			&log.DNSServer,
			&log.DNSSECServer,
			&log.Answers,
			&log.ResponseCode,
		); err != nil {
			// Skip rows that fail to scan (e.g. schema drift in old archives).
			continue
		}
		logs = append(logs, log)
	}

	// Total count for the same filter via a separate COUNT query; fall back
	// to the page size on failure.
	countQuery := fmt.Sprintf(`SELECT COUNT(*) FROM query_logs WHERE %s`, whereClause)
	var totalCount int64
	if err := db.QueryRow(countQuery, args...).Scan(&totalCount); err != nil {
		logger.Warn("获取总数失败", "error", err)
		totalCount = int64(len(logs))
	}
	return logs, totalCount, nil
}
// extractArchive decompresses archivePath (a .gz file) into a freshly
// created temporary directory and returns that directory. The directory is
// registered in tempDirs so the periodic sweeper can remove it if the
// caller does not.
func (aqe *ArchiveQueryEngine) extractArchive(archivePath string) (string, error) {
	// No lock is needed for the filesystem work — the previous version held
	// tempDirsMu for the entire decompression, serializing every archive
	// query behind disk I/O.
	tempDir, err := os.MkdirTemp("", "dns_archive_*")
	if err != nil {
		return "", fmt.Errorf("创建临时目录失败:%w", err)
	}
	discard := func() { os.RemoveAll(tempDir) }

	gzFile, err := os.Open(archivePath)
	if err != nil {
		discard()
		return "", fmt.Errorf("打开压缩文件失败:%w", err)
	}
	defer gzFile.Close()

	gzReader, err := gzip.NewReader(gzFile)
	if err != nil {
		discard()
		return "", fmt.Errorf("创建 gzip 读取器失败:%w", err)
	}
	defer gzReader.Close()

	// Destination file: archive name minus its ".gz" extension (via
	// filepath.Ext rather than a blind 3-byte slice).
	base := filepath.Base(archivePath)
	dstPath := filepath.Join(tempDir, base[:len(base)-len(filepath.Ext(base))])
	dstFile, err := os.Create(dstPath)
	if err != nil {
		discard()
		return "", fmt.Errorf("创建目标文件失败:%w", err)
	}
	defer dstFile.Close()

	// NOTE(review): io.Copy places no bound on the decompressed size, so a
	// hostile archive could exhaust disk space — confirm archives are
	// trusted or add a limit (io.LimitReader).
	if _, err := io.Copy(dstFile, gzReader); err != nil {
		discard()
		return "", fmt.Errorf("解压文件失败:%w", err)
	}

	// Only the shared-map update needs the lock.
	aqe.tempDirsMu.Lock()
	aqe.tempDirs[tempDir] = archivePath
	aqe.tempDirsMu.Unlock()
	return tempDir, nil
}
// cleanupTempDir deletes one extraction directory from disk and drops it
// from the tracking map.
func (aqe *ArchiveQueryEngine) cleanupTempDir(tempDir string) {
	aqe.tempDirsMu.Lock()
	defer aqe.tempDirsMu.Unlock()
	// Removal failure is logged only; the map entry is dropped regardless.
	if removeErr := os.RemoveAll(tempDir); removeErr != nil {
		logger.Warn("清理临时目录失败", "dir", tempDir, "error", removeErr)
	}
	delete(aqe.tempDirs, tempDir)
}
// cleanupTempDirsLoop deletes every tracked extraction directory once
// every 5 minutes.
//
// NOTE(review): the sweep removes ALL tracked directories, including any
// whose archive query may still be in flight — confirm queries always
// finish well inside the 5-minute window. The loop also has no stop
// signal, so this goroutine lives for the life of the process.
func (aqe *ArchiveQueryEngine) cleanupTempDirsLoop() {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		aqe.tempDirsMu.Lock()
		for tempDir := range aqe.tempDirs {
			if err := os.RemoveAll(tempDir); err != nil {
				logger.Warn("清理临时目录失败", "dir", tempDir, "error", err)
			}
			delete(aqe.tempDirs, tempDir)
		}
		aqe.tempDirsMu.Unlock()
	}
}
// GetStats returns aggregate statistics from the live store plus the
// recorded per-archive record counts.
func (aqe *ArchiveQueryEngine) GetStats(timeRange TimeRange) (*LogStats, error) {
	stats := &LogStats{QueryTypes: make(map[string]int64)}

	// 1. Live-store statistics. Guard against a nil store — every other path
	// in this engine tolerates one, but the old code dereferenced it here
	// unconditionally.
	if aqe.mainStore != nil {
		if s, err := aqe.mainStore.GetStats(timeRange); err != nil {
			logger.Error("获取主库统计失败", "error", err)
		} else {
			stats = s
		}
	}

	// 2. Archive contribution (record counts only; list errors are ignored
	// so stats degrade gracefully).
	if archives, err := aqe.archiveMgr.GetArchiveList(); err == nil {
		for _, archive := range archives {
			stats.TotalQueries += archive.RecordCount
		}
	}
	return stats, nil
}
// Close tears down the query engine by deleting any extraction
// directories still present on disk.
func (aqe *ArchiveQueryEngine) Close() error {
	aqe.tempDirsMu.Lock()
	defer aqe.tempDirsMu.Unlock()
	for dir := range aqe.tempDirs {
		// Best effort: removal errors are ignored during shutdown.
		os.RemoveAll(dir)
	}
	return nil
}
+5
View File
@@ -133,6 +133,11 @@ func (m *LogManager) IsSQLiteEnabled() bool {
return m.sqliteStore != nil
}
// GetSQLiteStore returns the underlying SQLiteStore instance, or nil when
// SQLite-backed logging is not enabled.
func (m *LogManager) GetSQLiteStore() *SQLiteStore {
	return m.sqliteStore
}
// MigrateFromJSON 从 JSON 文件迁移历史数据
func (m *LogManager) MigrateFromJSON(jsonPath string) error {
if m.sqliteStore == nil {
+27 -20
View File
@@ -89,6 +89,11 @@ func createTables(db *sql.DB) error {
CREATE INDEX IF NOT EXISTS idx_client_ip ON query_logs(client_ip);
CREATE INDEX IF NOT EXISTS idx_result ON query_logs(result);
CREATE INDEX IF NOT EXISTS idx_query_type ON query_logs(query_type);
CREATE INDEX IF NOT EXISTS idx_result_timestamp ON query_logs(result, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_query_type_timestamp ON query_logs(query_type, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_domain_timestamp ON query_logs(domain, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_client_ip_timestamp ON query_logs(client_ip, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_result_query_type ON query_logs(result, query_type);
`
_, err := db.Exec(schema)
@@ -204,8 +209,6 @@ func (s *SQLiteStore) Log(log QueryLog) error {
// QueryLogs 查询日志
func (s *SQLiteStore) QueryLogs(filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
fmt.Printf("SQLiteStore.QueryLogs called: filter=%+v, page=%+v\n", filter, page)
// 构建查询条件
whereClause := "1=1"
args := []interface{}{}
@@ -235,18 +238,6 @@ func (s *SQLiteStore) QueryLogs(filter LogFilter, page PageParams) ([]QueryLog,
searchTerm := "%" + filter.SearchTerm + "%"
args = append(args, searchTerm, searchTerm)
}
fmt.Printf("SQLite WHERE clause: %s, args: %v\n", whereClause, args)
// 获取总数
countQuery := fmt.Sprintf("SELECT COUNT(*) FROM query_logs WHERE %s", whereClause)
var total int64
err := s.db.QueryRow(countQuery, args...).Scan(&total)
if err != nil {
return nil, 0, fmt.Errorf("查询总数失败:%w", err)
}
fmt.Printf("SQLite total count: %d\n", total)
// 构建排序
sortField := page.SortField
@@ -258,28 +249,37 @@ func (s *SQLiteStore) QueryLogs(filter LogFilter, page PageParams) ([]QueryLog,
sortDirection = "DESC"
}
// 查询日志
// 使用子查询一次性获取总数和数据,提高查询效率
query := fmt.Sprintf(`
SELECT id, timestamp, client_ip, domain, query_type, response_time,
result, block_rule, block_type, from_cache, dnssec, edns,
dns_server, dnssec_server, answers, response_code
SELECT
id, timestamp, client_ip, domain, query_type, response_time,
result, block_rule, block_type, from_cache, dnssec, edns,
dns_server, dnssec_server, answers, response_code,
COUNT(*) OVER() as total_count
FROM query_logs
WHERE %s
ORDER BY %s %s
LIMIT ? OFFSET ?
`, whereClause, sortField, sortDirection)
args = append(args, page.Limit, page.Offset)
queryArgs := make([]interface{}, len(args)+2)
copy(queryArgs, args)
queryArgs[len(args)] = page.Limit
queryArgs[len(args)+1] = page.Offset
rows, err := s.db.Query(query, args...)
rows, err := s.db.Query(query, queryArgs...)
if err != nil {
return nil, 0, fmt.Errorf("查询日志失败:%w", err)
}
defer rows.Close()
var logs []QueryLog
var total int64 = 0
hasRows := false
for rows.Next() {
hasRows = true
var log QueryLog
var totalCount int64
err := rows.Scan(
&log.ID,
&log.Timestamp,
@@ -297,11 +297,18 @@ func (s *SQLiteStore) QueryLogs(filter LogFilter, page PageParams) ([]QueryLog,
&log.DNSSECServer,
&log.Answers,
&log.ResponseCode,
&totalCount,
)
if err != nil {
return nil, 0, fmt.Errorf("扫描日志失败:%w", err)
}
logs = append(logs, log)
total = totalCount
}
// 如果没有数据,总数为 0
if !hasRows {
total = 0
}
return logs, total, nil