This commit is contained in:
Alex Yang
2026-04-01 12:22:55 +08:00
parent 61789061ce
commit efebce3c39
46 changed files with 4797716 additions and 462145 deletions
+196
View File
@@ -0,0 +1,196 @@
package log
import (
	"encoding/json"
	"fmt"
	"os"
	"sync"
	"time"
)
// LogManager fans query-log writes out to an in-memory ring buffer and
// an optional SQLite store, and serves reads SQLite-first with a
// ring-buffer fallback.
type LogManager struct {
	ringBuffer  *RingBuffer  // always present; bounded in-memory history
	sqliteStore *SQLiteStore // nil when SQLite persistence is disabled
	mu          sync.RWMutex // guards closed; serializes Close against use
	closed      bool         // set once by Close; rejects further writes
}
// LogManagerConfig configures a LogManager.
type LogManagerConfig struct {
	RingBufferSize int    // capacity of the in-memory ring buffer
	DatabasePath   string // path of the SQLite database file
	EnableSQLite   bool   // whether to persist logs to SQLite
}
// DefaultConfig returns the stock configuration: a 10k-entry ring
// buffer plus SQLite persistence under data/query_logs.db.
func DefaultConfig() LogManagerConfig {
	var cfg LogManagerConfig
	cfg.RingBufferSize = 10000
	cfg.DatabasePath = "data/query_logs.db"
	cfg.EnableSQLite = true
	return cfg
}
// NewLogManager builds a LogManager from the given configuration. The
// ring buffer is always created; the SQLite store only when enabled.
func NewLogManager(config LogManagerConfig) (*LogManager, error) {
	m := &LogManager{
		ringBuffer: NewRingBuffer(config.RingBufferSize),
	}
	if !config.EnableSQLite {
		return m, nil
	}
	store, err := NewSQLiteStore(config.DatabasePath)
	if err != nil {
		return nil, fmt.Errorf("创建 SQLite 存储失败:%w", err)
	}
	m.sqliteStore = store
	return m, nil
}
// Log records one entry in every configured store. The ring-buffer
// write always succeeds; a SQLite failure is reported and swallowed so
// it never affects the in-memory copy.
func (m *LogManager) Log(log QueryLog) error {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if m.closed {
		return fmt.Errorf("日志管理器已关闭")
	}
	m.ringBuffer.Push(log)
	if store := m.sqliteStore; store != nil {
		if err := store.Log(log); err != nil {
			// Best effort: the ring buffer already holds the entry.
			fmt.Printf("SQLite 写入失败:%v\n", err)
		}
	}
	return nil
}
// QueryLogs fetches a page of logs, preferring SQLite and degrading to
// the in-memory ring buffer when the SQLite query fails.
func (m *LogManager) QueryLogs(filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	store := m.sqliteStore
	if store == nil {
		return m.ringBuffer.Query(filter, page)
	}
	logs, total, err := store.QueryLogs(filter, page)
	if err != nil {
		// Degrade gracefully: serve from memory when SQLite fails.
		fmt.Printf("SQLite 查询失败,降级到环形缓冲区:%v\n", err)
		return m.ringBuffer.Query(filter, page)
	}
	return logs, total, nil
}
// GetStats returns aggregate statistics, taken from SQLite when it is
// enabled and from the ring buffer otherwise.
func (m *LogManager) GetStats(timeRange TimeRange) (*LogStats, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if store := m.sqliteStore; store != nil {
		return store.GetStats(timeRange)
	}
	return m.ringBuffer.GetStats(timeRange)
}
// Close shuts the manager down and releases the SQLite store if one was
// created. Subsequent calls are no-ops.
func (m *LogManager) Close() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.closed {
		return nil
	}
	m.closed = true
	if m.sqliteStore == nil {
		return nil
	}
	return m.sqliteStore.Close()
}
// GetRingBufferCount reports how many log entries the in-memory ring
// buffer currently holds.
func (m *LogManager) GetRingBufferCount() int {
	return m.ringBuffer.Count()
}
// IsSQLiteEnabled reports whether a SQLite store was configured.
func (m *LogManager) IsSQLiteEnabled() bool {
	return m.sqliteStore != nil
}
// MigrateFromJSON imports historical log entries from a JSON file into
// the SQLite store. It fails when SQLite is disabled.
func (m *LogManager) MigrateFromJSON(jsonPath string) error {
	// Hold the read lock so a concurrent Close cannot tear the store
	// down mid-migration — same locking discipline as Log/QueryLogs
	// (the original accessed sqliteStore without any lock).
	m.mu.RLock()
	defer m.mu.RUnlock()
	if m.closed {
		return fmt.Errorf("日志管理器已关闭")
	}
	if m.sqliteStore == nil {
		return fmt.Errorf("SQLite 未启用,无法迁移数据")
	}
	return m.sqliteStore.MigrateFromJSON(jsonPath)
}
// CleanupOldLogs deletes all SQLite log rows with a timestamp earlier
// than olderThan and returns the number of rows removed.
func (m *LogManager) CleanupOldLogs(olderThan time.Time) (int64, error) {
	// Hold the read lock so a concurrent Close cannot close the database
	// mid-cleanup — the original accessed sqliteStore.db with no lock.
	m.mu.RLock()
	defer m.mu.RUnlock()
	if m.closed {
		return 0, fmt.Errorf("日志管理器已关闭")
	}
	if m.sqliteStore == nil {
		return 0, fmt.Errorf("SQLite 未启用,无法清理日志")
	}
	tx, err := m.sqliteStore.db.Begin()
	if err != nil {
		return 0, fmt.Errorf("开启事务失败:%w", err)
	}
	// Roll back on any early return; after a successful Commit this is a
	// harmless no-op (sql.ErrTxDone).
	defer tx.Rollback()
	result, err := tx.Exec("DELETE FROM query_logs WHERE timestamp < ?", olderThan)
	if err != nil {
		return 0, fmt.Errorf("删除旧日志失败:%w", err)
	}
	deleted, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("获取删除行数失败:%w", err)
	}
	if err := tx.Commit(); err != nil {
		return 0, fmt.Errorf("提交事务失败:%w", err)
	}
	return deleted, nil
}
// ExportLogs writes all logs matching filter to outputPath as
// pretty-printed JSON.
//
// The previous version queried the logs, discarded them, and always
// returned "导出功能未实现"; this implements the export (requires the
// encoding/json and os imports added at the top of the file).
func (m *LogManager) ExportLogs(filter LogFilter, outputPath string) error {
	// Fetch all matching logs in one large page.
	page := PageParams{
		Limit:  100000, // bulk-export upper bound
		Offset: 0,
	}
	logs, _, err := m.QueryLogs(filter, page)
	if err != nil {
		return fmt.Errorf("查询日志失败:%w", err)
	}
	data, err := json.MarshalIndent(logs, "", "  ")
	if err != nil {
		return fmt.Errorf("序列化日志失败:%w", err)
	}
	if err := os.WriteFile(outputPath, data, 0o644); err != nil {
		return fmt.Errorf("写入文件失败:%w", err)
	}
	return nil
}
+87
View File
@@ -0,0 +1,87 @@
package log
import (
"time"
)
// QueryLog is a single DNS query log record.
type QueryLog struct {
	ID           int64     `json:"id"`
	Timestamp    time.Time `json:"timestamp"`    // when the query was handled
	ClientIP     string    `json:"clientIP"`     // client address
	Domain       string    `json:"domain"`       // queried domain name
	QueryType    string    `json:"queryType"`    // DNS query type
	ResponseTime int64     `json:"responseTime"` // handling latency in ms
	Result       string    `json:"result"`       // outcome (allowed, blocked, error)
	BlockRule    string    `json:"blockRule"`    // matching block rule (when blocked)
	BlockType    string    `json:"blockType"`    // block category (when blocked)
	FromCache    bool      `json:"fromCache"`    // answered from cache
	DNSSEC       bool      `json:"dnssec"`       // DNSSEC was used
	EDNS         bool      `json:"edns"`         // EDNS was used
	DNSServer    string    `json:"dnsServer"`    // upstream DNS server used
	DNSSECServer string    `json:"dnssecServer"` // dedicated DNSSEC upstream used
	Answers      string    `json:"answers"`      // answer records, JSON-encoded
	ResponseCode int       `json:"responseCode"` // DNS response code
}
// LogFilter describes the filtering conditions for log queries.
// Zero-valued fields disable filtering on that dimension.
type LogFilter struct {
	Result     string    // filter by outcome (allowed, blocked, error)
	SearchTerm string    // substring match on domain or client IP
	QueryType  string    // filter by DNS query type
	StartTime  time.Time // inclusive lower bound on timestamp
	EndTime    time.Time // inclusive upper bound on timestamp
}
// PageParams carries pagination and sorting options.
type PageParams struct {
	Limit         int    // page size
	Offset        int    // number of records to skip
	SortField     string // sort field (empty defaults to timestamp)
	SortDirection string // sort direction (asc, desc); empty defaults to desc
}
// LogStats is an aggregate summary over a set of query logs.
type LogStats struct {
	TotalQueries    int64            `json:"totalQueries"`    // total number of queries
	BlockedQueries  int64            `json:"blockedQueries"`  // queries with result "blocked"
	AllowedQueries  int64            `json:"allowedQueries"`  // queries with result "allowed"
	ErrorQueries    int64            `json:"errorQueries"`    // queries with result "error"
	AvgResponseTime float64          `json:"avgResponseTime"` // mean response time
	QueryTypes      map[string]int64 `json:"queryTypes"`      // count per DNS query type
	TopDomains      []DomainCount    `json:"topDomains"`      // most-queried domains
	TopClients      []ClientCount    `json:"topClients"`      // most active clients
}
// DomainCount is a per-domain query counter.
type DomainCount struct {
	Domain string `json:"domain"`
	Count  int64  `json:"count"`
}
// ClientCount is a per-client query counter.
type ClientCount struct {
	IP    string `json:"ip"`
	Count int64  `json:"count"`
}
// QueryLogger is the storage-agnostic interface implemented by log
// backends.
type QueryLogger interface {
	// Log records one entry (asynchronous, non-blocking).
	Log(log QueryLog) error
	// QueryLogs returns a page of matching logs plus the total match count.
	QueryLogs(filter LogFilter, page PageParams) ([]QueryLog, int64, error)
	// GetStats aggregates statistics over the given time range.
	GetStats(timeRange TimeRange) (*LogStats, error)
	// Close releases the backend's resources.
	Close() error
}
// TimeRange bounds a statistics query; zero values mean unbounded.
type TimeRange struct {
	StartTime time.Time
	EndTime   time.Time
}
+239
View File
@@ -0,0 +1,239 @@
package log
import (
	"sort"
	"strings"
	"sync"
)
// RingBuffer is a fixed-capacity, thread-safe in-memory log store.
// Once full, each new entry overwrites the oldest one.
type RingBuffer struct {
	data     []QueryLog   // backing storage, length == capacity
	capacity int          // maximum number of retained entries
	head     int          // index of the oldest entry
	tail     int          // index where the next entry is written
	count    int          // current number of valid entries
	mu       sync.RWMutex // guards every field above
}
// NewRingBuffer allocates an empty ring buffer that can hold up to
// capacity entries.
func NewRingBuffer(capacity int) *RingBuffer {
	// head/tail/count start at their zero values.
	rb := &RingBuffer{capacity: capacity}
	rb.data = make([]QueryLog, capacity)
	return rb
}
// Push appends one log entry, overwriting the oldest entry once the
// buffer is full.
func (rb *RingBuffer) Push(log QueryLog) {
	rb.mu.Lock()
	defer rb.mu.Unlock()
	if rb.count == rb.capacity {
		// Full: drop the oldest entry by advancing head.
		rb.head = (rb.head + 1) % rb.capacity
	} else {
		rb.count++
	}
	rb.data[rb.tail] = log
	rb.tail = (rb.tail + 1) % rb.capacity
}
// Query returns the page of buffered logs matching filter plus the
// total number of matches before pagination.
//
// Fix: the original sliced filtered[start:start+Limit] without guarding
// against a negative Offset or Limit, which panics; both are now
// clamped (a negative Limit returns everything from start onward).
func (rb *RingBuffer) Query(filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
	rb.mu.RLock()
	defer rb.mu.RUnlock()
	// Collect matches in insertion order (oldest first).
	var filtered []QueryLog
	for i := 0; i < rb.count; i++ {
		idx := (rb.head + i) % rb.capacity
		log := rb.data[idx]
		if rb.matchesFilter(log, filter) {
			filtered = append(filtered, log)
		}
	}
	total := int64(len(filtered))
	rb.sortLogs(filtered, page.SortField, page.SortDirection)
	// Clamp pagination bounds so malformed input cannot panic the slice
	// expression below.
	start := page.Offset
	if start < 0 {
		start = 0
	}
	if start >= len(filtered) {
		return []QueryLog{}, total, nil
	}
	end := len(filtered)
	if page.Limit >= 0 && start+page.Limit < end {
		end = start + page.Limit
	}
	return filtered[start:end], total, nil
}
// matchesFilter reports whether log satisfies every condition in
// filter; zero-valued conditions match anything.
func (rb *RingBuffer) matchesFilter(log QueryLog, filter LogFilter) bool {
	switch {
	case filter.Result != "" && log.Result != filter.Result:
		return false
	case filter.QueryType != "" && log.QueryType != filter.QueryType:
		return false
	case !filter.StartTime.IsZero() && log.Timestamp.Before(filter.StartTime):
		return false
	case !filter.EndTime.IsZero() && log.Timestamp.After(filter.EndTime):
		return false
	}
	if filter.SearchTerm == "" {
		return true
	}
	// The search term may match either the domain or the client IP.
	return contains(log.Domain, filter.SearchTerm) || contains(log.ClientIP, filter.SearchTerm)
}
// contains reports whether s contains substr, ignoring case.
//
// Replaces a redundant hand-rolled length/equality check plus an
// O(n·m) ASCII-only scan with the standard library (which also handles
// non-ASCII case folding correctly). Requires the "strings" import
// added at the top of the file.
func contains(s, substr string) bool {
	return strings.Contains(strings.ToLower(s), strings.ToLower(substr))
}
// containsIgnoreCase reports whether s contains substr without regard
// to case.
//
// Replaces a hand-rolled O(n·m) window scan built on an ASCII-only
// lowercase helper with the standard library, which is faster and also
// folds non-ASCII letters. Requires the "strings" import added at the
// top of the file.
func containsIgnoreCase(s, substr string) bool {
	return strings.Contains(strings.ToLower(s), strings.ToLower(substr))
}
// toLower returns s with ASCII upper-case letters lowered; all other
// bytes pass through unchanged.
func toLower(s string) string {
	b := []byte(s)
	for i, c := range b {
		if c >= 'A' && c <= 'Z' {
			b[i] = c + ('a' - 'A')
		}
	}
	return string(b)
}
// sortLogs orders logs in place by sortField/sortDirection, defaulting
// to timestamp descending.
//
// Fixes two defects in the original bubble sort: (1) the swap predicate
// was inverted — swapping when a < b produces DESCENDING order, so
// "asc" yielded descending output and vice versa (the SQLite path
// ordered correctly, making results inconsistent between backends);
// (2) it was an accidental O(n²). Both are addressed with
// sort.SliceStable (requires the "sort" import added at the top of the
// file).
func (rb *RingBuffer) sortLogs(logs []QueryLog, sortField, sortDirection string) {
	if sortField == "" {
		sortField = "timestamp"
	}
	if sortDirection == "" {
		sortDirection = "desc"
	}
	desc := sortDirection == "desc"
	sort.SliceStable(logs, func(i, j int) bool {
		if desc {
			return rb.compareLogs(logs[j], logs[i], sortField)
		}
		return rb.compareLogs(logs[i], logs[j], sortField)
	})
}
// compareLogs reports whether a orders before b under sortField.
// "timestamp"/"time" and any unrecognized field fall back to ordering
// by timestamp.
func (rb *RingBuffer) compareLogs(a, b QueryLog, sortField string) bool {
	switch sortField {
	case "domain":
		return a.Domain < b.Domain
	case "clientIp", "client_ip":
		return a.ClientIP < b.ClientIP
	case "responseTime", "response_time":
		return a.ResponseTime < b.ResponseTime
	}
	return a.Timestamp.Before(b.Timestamp)
}
// GetStats aggregates statistics over the buffered entries that fall
// inside timeRange (zero bounds are ignored).
func (rb *RingBuffer) GetStats(timeRange TimeRange) (*LogStats, error) {
	rb.mu.RLock()
	defer rb.mu.RUnlock()
	stats := &LogStats{QueryTypes: make(map[string]int64)}
	var responseSum float64
	for i := 0; i < rb.count; i++ {
		entry := rb.data[(rb.head+i)%rb.capacity]
		// Skip entries outside the requested window.
		if !timeRange.StartTime.IsZero() && entry.Timestamp.Before(timeRange.StartTime) {
			continue
		}
		if !timeRange.EndTime.IsZero() && entry.Timestamp.After(timeRange.EndTime) {
			continue
		}
		stats.TotalQueries++
		responseSum += float64(entry.ResponseTime)
		switch entry.Result {
		case "blocked":
			stats.BlockedQueries++
		case "allowed":
			stats.AllowedQueries++
		case "error":
			stats.ErrorQueries++
		}
		stats.QueryTypes[entry.QueryType]++
	}
	stats.AvgResponseTime = responseSum
	if stats.TotalQueries > 0 {
		stats.AvgResponseTime = responseSum / float64(stats.TotalQueries)
	}
	return stats, nil
}
// Count returns the number of log entries currently buffered.
func (rb *RingBuffer) Count() int {
	rb.mu.RLock()
	defer rb.mu.RUnlock()
	return rb.count
}
// Clear empties the buffer.
//
// Fix: the original only reset the indices, so the backing array kept
// referencing every old QueryLog (and the strings inside them),
// preventing garbage collection until the slots were overwritten. The
// slots are now zeroed as well.
func (rb *RingBuffer) Clear() {
	rb.mu.Lock()
	defer rb.mu.Unlock()
	for i := range rb.data {
		rb.data[i] = QueryLog{}
	}
	rb.head = 0
	rb.tail = 0
	rb.count = 0
}
+522
View File
@@ -0,0 +1,522 @@
package log
import (
	"database/sql"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"time"

	_ "github.com/mattn/go-sqlite3"
)
// SQLiteStore persists query logs to a SQLite database. Writes are
// buffered through batchChan and flushed in batches by a background
// goroutine (see batchWriter).
type SQLiteStore struct {
	db         *sql.DB       // SQLite handle, capped at one connection
	batchChan  chan QueryLog // buffered queue feeding the batch writer
	batchSize  int           // flush threshold for a pending batch
	batchDelay time.Duration // max time a pending batch waits before flushing
	mu         sync.Mutex    // guards closed and serializes batch writes
	closed     bool          // set by Close; rejects further Log calls
}
// NewSQLiteStore opens (creating if necessary) the SQLite database at
// dbPath, prepares the schema, and starts the background batch writer.
//
// Fix: the parent directory is now created first. sql.Open is lazy, so
// with the default path "data/query_logs.db" and no data/ directory the
// original only failed later, on the first real statement, with
// "unable to open database file". Requires the "path/filepath" import
// added at the top of the file.
func NewSQLiteStore(dbPath string) (*SQLiteStore, error) {
	if dir := filepath.Dir(dbPath); dir != "" && dir != "." {
		if err := os.MkdirAll(dir, 0o755); err != nil {
			return nil, fmt.Errorf("创建数据库目录失败:%w", err)
		}
	}
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		return nil, fmt.Errorf("打开数据库失败:%w", err)
	}
	// SQLite tolerates only one writer; keep a single connection.
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(time.Hour)
	// WAL mode lets readers proceed while a write is in flight.
	_, err = db.Exec("PRAGMA journal_mode=WAL")
	if err != nil {
		db.Close()
		return nil, fmt.Errorf("启用 WAL 模式失败:%w", err)
	}
	if err := createTables(db); err != nil {
		db.Close()
		return nil, fmt.Errorf("创建表结构失败:%w", err)
	}
	store := &SQLiteStore{
		db:         db,
		batchChan:  make(chan QueryLog, 10000),
		batchSize:  100,
		batchDelay: 100 * time.Millisecond,
	}
	// Background drain of batchChan; exits when Close closes the channel.
	go store.batchWriter()
	return store, nil
}
// createTables creates the query_logs table and its query-path indexes
// when they do not already exist.
func createTables(db *sql.DB) error {
	const schema = `
	CREATE TABLE IF NOT EXISTS query_logs (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		timestamp DATETIME NOT NULL,
		client_ip TEXT NOT NULL,
		domain TEXT NOT NULL,
		query_type TEXT NOT NULL,
		response_time INTEGER NOT NULL,
		result TEXT NOT NULL,
		block_rule TEXT,
		block_type TEXT,
		from_cache BOOLEAN DEFAULT FALSE,
		dnssec BOOLEAN DEFAULT FALSE,
		edns BOOLEAN DEFAULT FALSE,
		dns_server TEXT,
		dnssec_server TEXT,
		answers TEXT,
		response_code INTEGER,
		created_at DATETIME DEFAULT CURRENT_TIMESTAMP
	);
	CREATE INDEX IF NOT EXISTS idx_timestamp ON query_logs(timestamp DESC);
	CREATE INDEX IF NOT EXISTS idx_domain ON query_logs(domain);
	CREATE INDEX IF NOT EXISTS idx_client_ip ON query_logs(client_ip);
	CREATE INDEX IF NOT EXISTS idx_result ON query_logs(result);
	CREATE INDEX IF NOT EXISTS idx_query_type ON query_logs(query_type);
	`
	_, err := db.Exec(schema)
	return err
}
// batchWriter drains batchChan, flushing accumulated entries either
// when the batch reaches batchSize or when batchDelay elapses. It exits
// after the channel is closed, flushing whatever remains.
func (s *SQLiteStore) batchWriter() {
	pending := make([]QueryLog, 0, s.batchSize)
	ticker := time.NewTicker(s.batchDelay)
	defer ticker.Stop()
	flush := func() {
		if len(pending) > 0 {
			s.writeBatch(pending)
			pending = pending[:0]
		}
	}
	for {
		select {
		case entry, ok := <-s.batchChan:
			if !ok {
				// Channel closed by Close: persist the tail and stop.
				flush()
				return
			}
			pending = append(pending, entry)
			if len(pending) >= s.batchSize {
				flush()
			}
		case <-ticker.C:
			// Periodic flush so small batches don't linger.
			flush()
		}
	}
}
// writeBatch inserts one batch of entries inside a single transaction.
// Failures are reported to stdout and the batch (or the failing row) is
// dropped — logging must never take the caller down.
func (s *SQLiteStore) writeBatch(batch []QueryLog) {
	if len(batch) == 0 {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	tx, err := s.db.Begin()
	if err != nil {
		fmt.Printf("开启事务失败:%v\n", err)
		return
	}
	stmt, err := tx.Prepare(`
	INSERT INTO query_logs (
		timestamp, client_ip, domain, query_type, response_time,
		result, block_rule, block_type, from_cache, dnssec, edns,
		dns_server, dnssec_server, answers, response_code
	) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`)
	if err != nil {
		tx.Rollback()
		fmt.Printf("准备语句失败:%v\n", err)
		return
	}
	defer stmt.Close()
	for _, entry := range batch {
		if _, err := stmt.Exec(
			entry.Timestamp, entry.ClientIP, entry.Domain, entry.QueryType,
			entry.ResponseTime, entry.Result, entry.BlockRule, entry.BlockType,
			entry.FromCache, entry.DNSSEC, entry.EDNS, entry.DNSServer,
			entry.DNSSECServer, entry.Answers, entry.ResponseCode,
		); err != nil {
			// Skip the bad row but keep the rest of the batch.
			fmt.Printf("插入日志失败:%v\n", err)
		}
	}
	if err := tx.Commit(); err != nil {
		fmt.Printf("提交事务失败:%v\n", err)
	}
}
// Log enqueues one entry for asynchronous batch insertion.
//
// Fix: the original read s.closed without the mutex and then did a
// blocking send, so a concurrent Close could close batchChan between
// the check and the send and panic the sender ("send on closed
// channel"). The check-and-send now happens under s.mu, and the send is
// non-blocking: blocking here while holding s.mu could never drain,
// because writeBatch also takes s.mu. When the 10k-entry queue is full
// the entry is dropped with an error instead.
func (s *SQLiteStore) Log(log QueryLog) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return fmt.Errorf("存储已关闭")
	}
	select {
	case s.batchChan <- log:
		return nil
	default:
		return fmt.Errorf("日志队列已满,日志被丢弃")
	}
}
// QueryLogs returns one page of logs matching filter plus the total
// number of matching rows.
//
// Fixes over the previous version: the debug fmt.Printf tracing is
// removed; the ORDER BY field and direction — which cannot be bound as
// SQL parameters and were interpolated verbatim from caller input — are
// now mapped through a whitelist, closing a SQL-injection hole; and
// rows.Err() is checked after iteration.
func (s *SQLiteStore) QueryLogs(filter LogFilter, page PageParams) ([]QueryLog, int64, error) {
	// Build the WHERE clause from the optional filter fields.
	whereClause := "1=1"
	args := []interface{}{}
	if filter.Result != "" {
		whereClause += " AND result = ?"
		args = append(args, filter.Result)
	}
	if filter.QueryType != "" {
		whereClause += " AND query_type = ?"
		args = append(args, filter.QueryType)
	}
	if !filter.StartTime.IsZero() {
		whereClause += " AND timestamp >= ?"
		args = append(args, filter.StartTime)
	}
	if !filter.EndTime.IsZero() {
		whereClause += " AND timestamp <= ?"
		args = append(args, filter.EndTime)
	}
	if filter.SearchTerm != "" {
		whereClause += " AND (domain LIKE ? OR client_ip LIKE ?)"
		searchTerm := "%" + filter.SearchTerm + "%"
		args = append(args, searchTerm, searchTerm)
	}
	// Total match count before pagination.
	countQuery := fmt.Sprintf("SELECT COUNT(*) FROM query_logs WHERE %s", whereClause)
	var total int64
	if err := s.db.QueryRow(countQuery, args...).Scan(&total); err != nil {
		return nil, 0, fmt.Errorf("查询总数失败:%w", err)
	}
	// ORDER BY cannot use placeholders; map caller-supplied names onto
	// known columns so arbitrary SQL can never reach the query. The
	// aliases match the ring buffer's compareLogs for consistency.
	sortColumns := map[string]string{
		"timestamp":     "timestamp",
		"time":          "timestamp",
		"domain":        "domain",
		"clientIp":      "client_ip",
		"client_ip":     "client_ip",
		"responseTime":  "response_time",
		"response_time": "response_time",
	}
	sortField, ok := sortColumns[page.SortField]
	if !ok {
		sortField = "timestamp"
	}
	sortDirection := "DESC"
	if page.SortDirection == "asc" || page.SortDirection == "ASC" {
		sortDirection = "ASC"
	}
	query := fmt.Sprintf(`
	SELECT id, timestamp, client_ip, domain, query_type, response_time,
	       result, block_rule, block_type, from_cache, dnssec, edns,
	       dns_server, dnssec_server, answers, response_code
	FROM query_logs
	WHERE %s
	ORDER BY %s %s
	LIMIT ? OFFSET ?
	`, whereClause, sortField, sortDirection)
	args = append(args, page.Limit, page.Offset)
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, 0, fmt.Errorf("查询日志失败:%w", err)
	}
	defer rows.Close()
	var logs []QueryLog
	for rows.Next() {
		var log QueryLog
		err := rows.Scan(
			&log.ID, &log.Timestamp, &log.ClientIP, &log.Domain,
			&log.QueryType, &log.ResponseTime, &log.Result,
			&log.BlockRule, &log.BlockType, &log.FromCache,
			&log.DNSSEC, &log.EDNS, &log.DNSServer,
			&log.DNSSECServer, &log.Answers, &log.ResponseCode,
		)
		if err != nil {
			return nil, 0, fmt.Errorf("扫描日志失败:%w", err)
		}
		logs = append(logs, log)
	}
	// Surface iteration errors (e.g. connection loss mid-scan).
	if err := rows.Err(); err != nil {
		return nil, 0, fmt.Errorf("遍历日志失败:%w", err)
	}
	return logs, total, nil
}
// GetStats aggregates query statistics over timeRange (zero bounds are
// ignored).
//
// Fixes over the previous version: on an empty result set SQLite
// returns NULL for SUM/AVG, which made Scan fail on a fresh database —
// the aggregates are now wrapped in COALESCE. The three result sets are
// also closed as soon as each loop finishes instead of stacking three
// defers (only the last of which referenced a live handle) until
// return.
func (s *SQLiteStore) GetStats(timeRange TimeRange) (*LogStats, error) {
	// Build the time-range condition.
	whereClause := "1=1"
	args := []interface{}{}
	if !timeRange.StartTime.IsZero() {
		whereClause += " AND timestamp >= ?"
		args = append(args, timeRange.StartTime)
	}
	if !timeRange.EndTime.IsZero() {
		whereClause += " AND timestamp <= ?"
		args = append(args, timeRange.EndTime)
	}
	stats := &LogStats{
		QueryTypes: make(map[string]int64),
	}
	// Overall counters; COALESCE guards the empty-table case.
	query := fmt.Sprintf(`
	SELECT
		COUNT(*) as total,
		COALESCE(SUM(CASE WHEN result = 'blocked' THEN 1 ELSE 0 END), 0) as blocked,
		COALESCE(SUM(CASE WHEN result = 'allowed' THEN 1 ELSE 0 END), 0) as allowed,
		COALESCE(SUM(CASE WHEN result = 'error' THEN 1 ELSE 0 END), 0) as error,
		COALESCE(AVG(response_time), 0) as avg_response_time
	FROM query_logs
	WHERE %s
	`, whereClause)
	err := s.db.QueryRow(query, args...).Scan(
		&stats.TotalQueries,
		&stats.BlockedQueries,
		&stats.AllowedQueries,
		&stats.ErrorQueries,
		&stats.AvgResponseTime,
	)
	if err != nil {
		return nil, fmt.Errorf("查询基础统计失败:%w", err)
	}
	// Distribution of query types.
	query = fmt.Sprintf(`
	SELECT query_type, COUNT(*) as count
	FROM query_logs
	WHERE %s
	GROUP BY query_type
	`, whereClause)
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("查询类型分布失败:%w", err)
	}
	for rows.Next() {
		var queryType string
		var count int64
		if err := rows.Scan(&queryType, &count); err != nil {
			continue
		}
		stats.QueryTypes[queryType] = count
	}
	rows.Close()
	// Top 10 most-queried domains.
	query = fmt.Sprintf(`
	SELECT domain, COUNT(*) as count
	FROM query_logs
	WHERE %s
	GROUP BY domain
	ORDER BY count DESC
	LIMIT 10
	`, whereClause)
	rows, err = s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("查询 TOP 域名失败:%w", err)
	}
	for rows.Next() {
		var domain string
		var count int64
		if err := rows.Scan(&domain, &count); err != nil {
			continue
		}
		stats.TopDomains = append(stats.TopDomains, DomainCount{Domain: domain, Count: count})
	}
	rows.Close()
	// Top 10 most active clients.
	query = fmt.Sprintf(`
	SELECT client_ip, COUNT(*) as count
	FROM query_logs
	WHERE %s
	GROUP BY client_ip
	ORDER BY count DESC
	LIMIT 10
	`, whereClause)
	rows, err = s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("查询 TOP 客户端失败:%w", err)
	}
	for rows.Next() {
		var ip string
		var count int64
		if err := rows.Scan(&ip, &count); err != nil {
			continue
		}
		stats.TopClients = append(stats.TopClients, ClientCount{IP: ip, Count: count})
	}
	rows.Close()
	return stats, nil
}
// Close shuts the store down: marks it closed, closes batchChan so
// batchWriter flushes its tail and exits, then closes the database.
//
// Fix: the original held s.mu across the 200ms drain sleep. The final
// flush goes through writeBatch, which also takes s.mu — so it could
// only run AFTER Close returned, i.e. after db.Close, and the tail
// batch was always lost. The mutex is now released before waiting.
//
// NOTE(review): the fixed sleep is still only a best-effort drain; a
// WaitGroup/done-channel handshake with batchWriter would be
// deterministic if lost tail batches are ever observed on shutdown.
func (s *SQLiteStore) Close() error {
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		return nil
	}
	s.closed = true
	close(s.batchChan)
	s.mu.Unlock()
	// Give batchWriter (which needs s.mu) time to flush the remainder.
	time.Sleep(200 * time.Millisecond)
	return s.db.Close()
}
// MigrateFromJSON loads historical entries from a JSON file and inserts
// them into SQLite inside one transaction. Rows that fail to insert are
// reported and skipped (best effort).
func (s *SQLiteStore) MigrateFromJSON(jsonPath string) error {
	data, err := readFile(jsonPath)
	if err != nil {
		return fmt.Errorf("读取 JSON 文件失败:%w", err)
	}
	var entries []QueryLog
	if err := json.Unmarshal(data, &entries); err != nil {
		return fmt.Errorf("解析 JSON 失败:%w", err)
	}
	if len(entries) == 0 {
		return nil
	}
	tx, err := s.db.Begin()
	if err != nil {
		return fmt.Errorf("开启事务失败:%w", err)
	}
	stmt, err := tx.Prepare(`
	INSERT INTO query_logs (
		timestamp, client_ip, domain, query_type, response_time,
		result, block_rule, block_type, from_cache, dnssec, edns,
		dns_server, dnssec_server, answers, response_code
	) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`)
	if err != nil {
		tx.Rollback()
		return fmt.Errorf("准备语句失败:%w", err)
	}
	defer stmt.Close()
	for _, entry := range entries {
		if _, err := stmt.Exec(
			entry.Timestamp, entry.ClientIP, entry.Domain, entry.QueryType,
			entry.ResponseTime, entry.Result, entry.BlockRule, entry.BlockType,
			entry.FromCache, entry.DNSSEC, entry.EDNS, entry.DNSServer,
			entry.DNSSECServer, entry.Answers, entry.ResponseCode,
		); err != nil {
			// Best effort: report and continue with the remaining rows.
			fmt.Printf("迁移日志失败:%v\n", err)
		}
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("提交事务失败:%w", err)
	}
	return nil
}
// readFile reads the whole file at path into memory (thin helper kept
// as a seam around os.ReadFile).
func readFile(path string) ([]byte, error) {
	return os.ReadFile(path)
}