Add Zabbix-like trends table for large date ranges
- Create server_metrics_trends table for pre-aggregated hourly data
- Add aggregate_metrics.php cron job (runs hourly)
- Add backfill_trends.php to populate historical data
- ServerDetailController: use raw data for ranges <= 24h, the trends table for > 24h
- This dramatically improves query speed for the 7d/30d periods
This commit is contained in:
parent
c4f3a1388d
commit
6d301e7273
|
|
@ -0,0 +1,74 @@
|
||||||
|
#!/usr/bin/env php
<?php

declare(strict_types=1);

// cron/aggregate_metrics.php
// Run every hour: 0 * * * * /usr/bin/php /var/www/mon/cron/aggregate_metrics.php
//
// Aggregates the previous hour of raw server_metrics rows into one
// avg/min/max/count row per (server, metric) in server_metrics_trends,
// so large date ranges can be charted from the small trends table.

require __DIR__ . '/../vendor/autoload.php';

$config = require __DIR__ . '/../config/DatabaseConfig.php';

// Open the connection; exceptions on error so failures surface in cron mail/logs.
$pdo = new PDO(
    "mysql:host={$config['host']};dbname={$config['db_name']};charset=utf8mb4",
    $config['username'],
    $config['password'],
    [PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION]
);

// Compute the previous full hour: HH:00:00 .. HH:59:59.
// NOTE: DateTime has no setMinute()/setSecond() methods — the original
// `$hourStart->setMinute(0)->setSecond(0)` was a fatal "undefined method"
// error; setTime() is the correct API for snapping to the top of the hour.
$hourStart = new DateTimeImmutable('-1 hour');
$hourStart = $hourStart->setTime((int) $hourStart->format('H'), 0, 0);
$hourEnd   = $hourStart->modify('+59 minutes +59 seconds');

$periodStartStr = $hourStart->format('Y-m-d H:i:s');
$periodEndStr   = $hourEnd->format('Y-m-d H:i:s');

// All servers that are not soft-deleted.
$servers = $pdo->query("SELECT id FROM servers WHERE deleted_at IS NULL")
    ->fetchAll(PDO::FETCH_COLUMN);

// INSERT .. SELECT aggregation. ON DUPLICATE KEY UPDATE makes the job
// idempotent: re-running for the same hour overwrites instead of duplicating.
$sql = "
    INSERT INTO server_metrics_trends (server_id, metric_name_id, period_start, avg_value, min_value, max_value, count_samples)
    SELECT
        sm.server_id,
        sm.metric_name_id,
        :period_start,
        AVG(sm.value),
        MIN(sm.value),
        MAX(sm.value),
        COUNT(*)
    FROM server_metrics sm
    WHERE sm.server_id = :server_id
      AND sm.created_at >= :start_date
      AND sm.created_at <= :end_date
    GROUP BY sm.server_id, sm.metric_name_id
    ON DUPLICATE KEY UPDATE
        avg_value = VALUES(avg_value),
        min_value = VALUES(min_value),
        max_value = VALUES(max_value),
        count_samples = VALUES(count_samples),
        created_at = NOW()
";

// Prepare once and reuse per server (the original re-prepared inside the loop).
$stmt = $pdo->prepare($sql);

$processed = 0;
$errors = 0;

foreach ($servers as $serverId) {
    try {
        $stmt->execute([
            ':server_id'    => $serverId,
            ':period_start' => $periodStartStr,
            ':start_date'   => $periodStartStr,
            ':end_date'     => $periodEndStr,
        ]);
        $processed++;
    } catch (Exception $e) {
        // One failing server must not abort the whole run; log and continue.
        $errors++;
        error_log("Server $serverId: " . $e->getMessage());
    }
}

echo "Aggregated: $processed servers, $errors errors\n";
|
||||||
|
|
@ -0,0 +1,71 @@
|
||||||
|
#!/usr/bin/env php
<?php

declare(strict_types=1);

// cron/backfill_trends.php — run ONCE to back-fill server_metrics_trends
// with hourly aggregates for recent history, so charts for large ranges
// have data before the hourly aggregate_metrics.php cron accumulates it.

require __DIR__ . '/../vendor/autoload.php';

// Load the shared DB config like aggregate_metrics.php does. The original
// hard-coded host/user/password here, which both leaks credentials into
// the repository and drifts from the real configuration.
$config = require __DIR__ . '/../config/DatabaseConfig.php';

$pdo = new PDO(
    "mysql:host={$config['host']};dbname={$config['db_name']};charset=utf8mb4",
    $config['username'],
    $config['password'],
    [PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION]
);

// Back-fill the last 48 hours (increase if more history is needed).
$hours = 48;
$processed = 0;
$errors = 0;

// The server list does not change per hour — fetch it once, not inside
// the hour loop as the original did.
$servers = $pdo->query("SELECT id FROM servers WHERE deleted_at IS NULL")
    ->fetchAll(PDO::FETCH_COLUMN);

// Same idempotent INSERT .. SELECT as aggregate_metrics.php:
// ON DUPLICATE KEY UPDATE lets the backfill be re-run safely.
$sql = "
    INSERT INTO server_metrics_trends (server_id, metric_name_id, period_start, avg_value, min_value, max_value, count_samples)
    SELECT
        sm.server_id,
        sm.metric_name_id,
        :period_start,
        AVG(sm.value),
        MIN(sm.value),
        MAX(sm.value),
        COUNT(*)
    FROM server_metrics sm
    WHERE sm.server_id = :server_id
      AND sm.created_at >= :start_date
      AND sm.created_at <= :end_date
    GROUP BY sm.server_id, sm.metric_name_id
    ON DUPLICATE KEY UPDATE
        avg_value = VALUES(avg_value),
        min_value = VALUES(min_value),
        max_value = VALUES(max_value),
        count_samples = VALUES(count_samples),
        created_at = NOW()
";

// Prepare once, execute per (hour, server).
$stmt = $pdo->prepare($sql);

for ($i = 1; $i <= $hours; $i++) {
    // Start of the i-th hour back, snapped to HH:00:00.
    // NOTE: DateTime has no setMinute()/setSecond() methods — the original
    // call was a fatal "undefined method" error; setTime() is correct.
    $hourStart = new DateTimeImmutable("-{$i} hour");
    $hourStart = $hourStart->setTime((int) $hourStart->format('H'), 0, 0);
    $hourEnd   = $hourStart->modify('+59 minutes +59 seconds');

    $periodStartStr = $hourStart->format('Y-m-d H:i:s');
    $periodEndStr   = $hourEnd->format('Y-m-d H:i:s');

    $hourProcessed = 0;

    foreach ($servers as $serverId) {
        try {
            $stmt->execute([
                ':server_id'    => $serverId,
                ':period_start' => $periodStartStr,
                ':start_date'   => $periodStartStr,
                ':end_date'     => $periodEndStr,
            ]);
            $processed++;
            $hourProcessed++;
        } catch (Exception $e) {
            // The original swallowed errors silently; log so a broken
            // hour/server is diagnosable, but keep the backfill going.
            $errors++;
            error_log("Hour $periodStartStr, server $serverId: " . $e->getMessage());
        }
    }

    // Report the per-hour count. The original printed the cumulative
    // $processed total here, making every line misreport the hour.
    echo "Processed hour: $periodStartStr ($hourProcessed servers)\n";
}

echo "Done! Processed: $processed, Errors: $errors\n";
|
||||||
|
|
@ -144,43 +144,80 @@ class ServerDetailController extends Model
|
||||||
$metricsFilter = 'AND mn.name IN (' . implode(', ', $placeholders) . ')';
|
$metricsFilter = 'AND mn.name IN (' . implode(', ', $placeholders) . ')';
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Определяем источник данных на основе периода
|
||||||
|
// <= 24 часа: raw данные (высокая точность)
|
||||||
|
// > 24 часов: trends таблица (агрегированные данные)
|
||||||
|
|
||||||
|
$isRawData = $totalMinutes <= 1440; // 24 часа = 1440 минут
|
||||||
|
|
||||||
// Запрос с агрегацией если нужно
|
// Запрос с агрегацией если нужно
|
||||||
if ($groupBy) {
|
if ($isRawData) {
|
||||||
// GROUP BY автоматически агрегирует до нужного количества точек (500 для любого периода)
|
// Читаем из raw данных
|
||||||
|
$timeExpr = "DATE_FORMAT(sm.created_at, '{$bucketFormat}')";
|
||||||
$sql = "
|
$sql = "
|
||||||
SELECT
|
SELECT
|
||||||
AVG(sm.value) as value,
|
AVG(t.value) as value,
|
||||||
|
t.name,
|
||||||
|
t.unit,
|
||||||
|
t.time_bucket
|
||||||
|
FROM (
|
||||||
|
SELECT
|
||||||
|
sm.value,
|
||||||
mn.name,
|
mn.name,
|
||||||
mn.unit,
|
mn.unit,
|
||||||
DATE_FORMAT(sm.created_at, '{$bucketFormat}') as time_bucket
|
{$timeExpr} as time_bucket,
|
||||||
|
ROW_NUMBER() OVER (
|
||||||
|
PARTITION BY mn.name, {$timeExpr}
|
||||||
|
ORDER BY sm.created_at DESC
|
||||||
|
) as rn
|
||||||
FROM server_metrics sm
|
FROM server_metrics sm
|
||||||
FORCE INDEX (idx_server_metric_time)
|
|
||||||
INNER JOIN metric_names mn ON mn.id = sm.metric_name_id
|
INNER JOIN metric_names mn ON mn.id = sm.metric_name_id
|
||||||
WHERE sm.server_id = :id
|
WHERE sm.server_id = :id
|
||||||
AND sm.created_at >= :start_date
|
AND sm.created_at >= :start_date
|
||||||
AND sm.created_at <= :end_date
|
AND sm.created_at <= :end_date
|
||||||
AND 1=1 {$metricsFilter}
|
AND 1=1 {$metricsFilter}
|
||||||
{$groupBy}
|
) t
|
||||||
ORDER BY time_bucket ASC
|
WHERE t.rn = 1
|
||||||
|
GROUP BY t.name, t.unit, t.time_bucket
|
||||||
|
ORDER BY t.time_bucket ASC
|
||||||
";
|
";
|
||||||
$stmt = $this->pdo->prepare($sql);
|
$stmt = $this->pdo->prepare($sql);
|
||||||
$executeParams = array_merge([':id' => $id, ':start_date' => $startStr, ':end_date' => $endStr], $metricParams);
|
$executeParams = array_merge([':id' => $id, ':start_date' => $startStr, ':end_date' => $endStr], $metricParams);
|
||||||
$stmt->execute($executeParams);
|
$stmt->execute($executeParams);
|
||||||
} else {
|
} else {
|
||||||
|
// Читаем из trends таблицы (быстрый запрос для больших периодов)
|
||||||
|
$periodStartStr = $startDate->format('Y-m-d H:00:00');
|
||||||
|
$periodEndStr = $endDate->format('Y-m-d H:59:59');
|
||||||
|
|
||||||
$sql = "
|
$sql = "
|
||||||
SELECT sm.value, mn.name, mn.unit, sm.created_at
|
SELECT
|
||||||
FROM server_metrics sm
|
t.avg_value as value,
|
||||||
FORCE INDEX (idx_server_metric_time)
|
mn.name,
|
||||||
JOIN metric_names mn ON mn.id = sm.metric_name_id
|
mn.unit,
|
||||||
WHERE sm.server_id = :id
|
t.period_start as time_bucket
|
||||||
AND sm.created_at >= :start_date
|
FROM server_metrics_trends t
|
||||||
AND sm.created_at <= :end_date
|
INNER JOIN metric_names mn ON mn.id = t.metric_name_id
|
||||||
{$metricsFilter}
|
WHERE t.server_id = :id
|
||||||
ORDER BY sm.created_at ASC
|
AND t.period_start >= :period_start
|
||||||
LIMIT 5000
|
AND t.period_start <= :period_end
|
||||||
|
AND 1=1 {$metricsFilter}
|
||||||
|
ORDER BY t.period_start ASC
|
||||||
";
|
";
|
||||||
|
|
||||||
|
// Конвертируем имена метрик в ID для filters
|
||||||
|
$metricIds = [];
|
||||||
|
if ($displayMetrics) {
|
||||||
|
$placeholders = [];
|
||||||
|
foreach ($displayMetrics as $i => $m) {
|
||||||
|
$key = ':metric_' . $i;
|
||||||
|
$placeholders[] = $key;
|
||||||
|
$metricParams[$key] = $m;
|
||||||
|
}
|
||||||
|
$metricsFilter = 'AND mn.name IN (' . implode(', ', $placeholders) . ')';
|
||||||
|
}
|
||||||
|
|
||||||
$stmt = $this->pdo->prepare($sql);
|
$stmt = $this->pdo->prepare($sql);
|
||||||
$executeParams = array_merge([':id' => $id, ':start_date' => $startStr, ':end_date' => $endStr], $metricParams);
|
$executeParams = array_merge([':id' => $id, ':period_start' => $periodStartStr, ':period_end' => $periodEndStr], $metricParams);
|
||||||
$stmt->execute($executeParams);
|
$stmt->execute($executeParams);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue