#include "loadbalancer.h" #include "loadbalancer_events.h" #include "esp_event.h" #include "esp_log.h" #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "freertos/semphr.h" #include "input_filter.h" #include "nvs_flash.h" #include "nvs.h" #include #include "meter_events.h" #include "evse_events.h" #include "math.h" #include // Necessário para PRIu64 #ifndef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif static const char *TAG = "loadbalancer"; // Limites configuráveis #define MIN_CHARGING_CURRENT_LIMIT 6 // A #define MAX_CHARGING_CURRENT_LIMIT 32 // A #define MIN_GRID_CURRENT_LIMIT 6 // A #define MAX_GRID_CURRENT_LIMIT 100 // A // Pequena tolerância para considerar "sem margem" #define AVAILABLE_EPS 1.0f // Histerese de suspensão / retoma (em torno dos 6A) #define LB_SUSPEND_THRESHOLD 5.0f // abaixo disto -> suspende #define LB_RESUME_THRESHOLD 7.0f // acima disto -> pode retomar // Timeout para perda de medição de GRID (fail-safe) #define GRID_METER_TIMEOUT_US (10LL * 1000000LL) // 30 segundos // Parâmetros static uint8_t max_grid_current = MAX_GRID_CURRENT_LIMIT; static bool loadbalancer_enabled = false; static float grid_current = 0.0f; static float evse_current = 0.0f; static input_filter_t grid_filter; static input_filter_t evse_filter; static int64_t last_grid_timestamp_us = 0; // última atualização de medição GRID #define MAX_SLAVES 255 #define CONNECTOR_COUNT (MAX_SLAVES + 1) // Proteção simples de concorrência static SemaphoreHandle_t lb_mutex = NULL; // Estrutura unificada para master e slaves typedef struct { uint8_t id; // 0xFF = master, 0..MAX_SLAVES-1 = slave bool is_master; bool charging; float hw_max_current; float runtime_current; int64_t timestamp; // microssegundos (última métrica EVSE/slave) bool online; float assigned; // limite calculado pelo LB int64_t started_us; // início da sessão de carregamento (para prioridade) uint16_t last_limit; // último max_current enviado bool suspended_by_lb; // flag de suspensão por LB (para histerese) } evse_connector_t; static evse_connector_t connectors[CONNECTOR_COUNT]; const int64_t METRICS_TIMEOUT_US = 60 * 1000000; // 60 segundos // Helper: inicializa array de conectores static void init_connectors(void) { // master em índice 0 connectors[0] = (evse_connector_t){ .id = 0xFF, .is_master = true, .charging = false, .hw_max_current = MAX_CHARGING_CURRENT_LIMIT, .runtime_current = 0, .timestamp = 0, .online = false, .assigned = 0.0f, .started_us = 0, .last_limit = 0, .suspended_by_lb = false}; // slaves em 1..CONNECTOR_COUNT-1 for (int i = 1; i < CONNECTOR_COUNT; i++) { connectors[i] = (evse_connector_t){ .id = (uint8_t)(i - 1), .is_master = false, .charging = false, .hw_max_current = 0.0f, .runtime_current = 0.0f, .timestamp = 0, .online = false, .assigned = 0.0f, .started_us = 0, .last_limit = 0, .suspended_by_lb = false}; } } // --- Helpers --- static void input_filter_reset(input_filter_t *filter) { filter->value = 0.0f; } // Callback de status de slave static void on_slave_status(void *handler_arg, esp_event_base_t base, int32_t id, void *data) { const loadbalancer_slave_status_event_t *status = (const loadbalancer_slave_status_event_t *)data; if (status->slave_id >= MAX_SLAVES) { ESP_LOGW(TAG, "Invalid slave_id %d", status->slave_id); return; } int idx = status->slave_id + 1; // slaves começam no índice 1 bool was_charging; if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); was_charging = connectors[idx].charging; connectors[idx].charging = status->charging; connectors[idx].hw_max_current = status->hw_max_current; connectors[idx].runtime_current = status->runtime_current; connectors[idx].timestamp = esp_timer_get_time(); connectors[idx].online = true; // Se começou agora a carregar, marca início da sessão if (status->charging && !was_charging) { connectors[idx].started_us = connectors[idx].timestamp; connectors[idx].suspended_by_lb = false; // reset } if (lb_mutex) xSemaphoreGive(lb_mutex); ESP_LOGI(TAG, "Slave %d status: charging=%d hw_max_current=%.1fA runtime_current=%.2fA", status->slave_id, status->charging, status->hw_max_current, status->runtime_current); } static void on_evse_config_event(void *handler_arg, esp_event_base_t base, int32_t id, void *event_data) { const evse_config_event_data_t *evt = (const evse_config_event_data_t *)event_data; int idx = 0; // MASTER INDICE 0 if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); connectors[idx].charging = evt->charging; connectors[idx].hw_max_current = evt->hw_max_current; connectors[idx].runtime_current = evt->runtime_current; connectors[idx].timestamp = esp_timer_get_time(); connectors[idx].online = true; if (!evt->charging) { connectors[idx].suspended_by_lb = false; } if (lb_mutex) xSemaphoreGive(lb_mutex); ESP_LOGI(TAG, "EVSE config updated: charging=%d hw_max_current=%.1f runtime_current=%.1f", evt->charging, evt->hw_max_current, evt->runtime_current); } // --- Handlers de eventos externos --- static void loadbalancer_meter_event_handler(void *handler_arg, esp_event_base_t base, int32_t id, void *event_data) { if (id != METER_EVENT_DATA_READY || event_data == NULL) return; const meter_event_data_t *evt = (const meter_event_data_t *)event_data; float max_irms = evt->irms[0]; for (int i = 1; i < 3; ++i) { if (evt->irms[i] > max_irms) { max_irms = evt->irms[i]; } } float max_vrms = evt->vrms[0]; for (int i = 1; i < 3; ++i) { if (evt->vrms[i] > max_vrms) { max_vrms = evt->vrms[i]; } } if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); if (evt->source && strcmp(evt->source, "GRID") == 0) { grid_current = input_filter_update(&grid_filter, max_irms); last_grid_timestamp_us = esp_timer_get_time(); ESP_LOGI(TAG, "GRID IRMS (filtered): %.2f A", grid_current); ESP_LOGI(TAG, "GRID VRMS: %.2f V", max_vrms); } else if (evt->source && strcmp(evt->source, "EVSE") == 0) { evse_current = input_filter_update(&evse_filter, max_irms); ESP_LOGI(TAG, "EVSE IRMS (filtered): %.2f A", evse_current); } else { ESP_LOGW(TAG, "Unknown meter event source: %s", evt->source); } if (lb_mutex) xSemaphoreGive(lb_mutex); } static void loadbalancer_evse_event_handler(void *handler_arg, esp_event_base_t base, int32_t id, void *event_data) { const evse_state_event_data_t *evt = (const evse_state_event_data_t *)event_data; if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); switch (evt->state) { case EVSE_STATE_EVENT_IDLE: case EVSE_STATE_EVENT_WAITING: ESP_LOGI(TAG, "Local EVSE is %s - vehicle %sconnected / not charging", evt->state == EVSE_STATE_EVENT_IDLE ? "IDLE" : "WAITING", evt->state == EVSE_STATE_EVENT_IDLE ? "dis" : ""); connectors[0].charging = false; connectors[0].online = true; // master está sempre online connectors[0].suspended_by_lb = false; break; case EVSE_STATE_EVENT_CHARGING: { ESP_LOGI(TAG, "Local EVSE is CHARGING - resetting filters"); grid_current = 0.0f; evse_current = 0.0f; input_filter_reset(&grid_filter); input_filter_reset(&evse_filter); bool was_charging = connectors[0].charging; connectors[0].charging = true; connectors[0].online = true; connectors[0].timestamp = esp_timer_get_time(); if (!was_charging) { connectors[0].started_us = connectors[0].timestamp; connectors[0].suspended_by_lb = false; } break; } case EVSE_STATE_EVENT_FAULT: ESP_LOGW(TAG, "Local EVSE is in FAULT state - disabling load balancing temporarily"); connectors[0].charging = false; connectors[0].online = true; // EVSE está online mas com falha connectors[0].suspended_by_lb = false; break; default: ESP_LOGW(TAG, "Unknown EVSE state: %d", evt->state); break; } if (lb_mutex) xSemaphoreGive(lb_mutex); } // --- Config persistência --- static esp_err_t loadbalancer_load_config() { nvs_handle_t handle; esp_err_t err = nvs_open("loadbalancing", NVS_READWRITE, &handle); if (err != ESP_OK) return err; bool needs_commit = false; uint8_t temp_u8; err = nvs_get_u8(handle, "max_grid_curr", &temp_u8); if (err == ESP_OK && temp_u8 >= MIN_GRID_CURRENT_LIMIT && temp_u8 <= MAX_GRID_CURRENT_LIMIT) max_grid_current = temp_u8; else { max_grid_current = MAX_GRID_CURRENT_LIMIT; nvs_set_u8(handle, "max_grid_curr", max_grid_current); needs_commit = true; } err = nvs_get_u8(handle, "enabled", &temp_u8); if (err == ESP_OK && temp_u8 <= 1) loadbalancer_enabled = (temp_u8 != 0); else { loadbalancer_enabled = false; nvs_set_u8(handle, "enabled", 0); needs_commit = true; } if (needs_commit) nvs_commit(handle); nvs_close(handle); return ESP_OK; } // --- API --- void loadbalancer_set_enabled(bool enabled) { nvs_handle_t handle; if (nvs_open("loadbalancing", NVS_READWRITE, &handle) == ESP_OK) { nvs_set_u8(handle, "enabled", enabled ? 1 : 0); nvs_commit(handle); nvs_close(handle); } loadbalancer_enabled = enabled; loadbalancer_state_event_t evt = {.enabled = enabled, .timestamp_us = esp_timer_get_time()}; esp_event_post(LOADBALANCER_EVENTS, LOADBALANCER_EVENT_STATE_CHANGED, &evt, sizeof(evt), portMAX_DELAY); } esp_err_t load_balancing_set_max_grid_current(uint8_t value) { if (value < MIN_GRID_CURRENT_LIMIT || value > MAX_GRID_CURRENT_LIMIT) return ESP_ERR_INVALID_ARG; nvs_handle_t handle; if (nvs_open("loadbalancing", NVS_READWRITE, &handle) != ESP_OK) return ESP_FAIL; nvs_set_u8(handle, "max_grid_curr", value); nvs_commit(handle); nvs_close(handle); max_grid_current = value; return ESP_OK; } uint8_t load_balancing_get_max_grid_current(void) { return max_grid_current; } bool loadbalancer_is_enabled(void) { return loadbalancer_enabled; } // --- Task principal --- void loadbalancer_task(void *param) { while (true) { if (!loadbalancer_is_enabled()) { vTaskDelay(pdMS_TO_TICKS(30000)); continue; } int idxs[CONNECTOR_COUNT]; int active_cnt = 0; int64_t now = esp_timer_get_time(); // --- Atualiza estado online e conta ativos --- if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); for (int i = 0; i < CONNECTOR_COUNT; i++) { // --- Master nunca pode ficar offline --- if (connectors[i].is_master) { connectors[i].online = true; ESP_LOGI(TAG, "Connector[%d] ONLINE (MASTER, charging=%d, hw_max_current=%.1f)", i, connectors[i].charging, connectors[i].hw_max_current); if (connectors[i].charging) { idxs[active_cnt++] = i; ESP_LOGI(TAG, "Connector[%d] is ACTIVE (charging)", i); } continue; } // --- Ignora conectores já marcados como offline --- if (!connectors[i].online) { continue; } // --- Timeout de heartbeat para escravos --- if ((now - connectors[i].timestamp) >= METRICS_TIMEOUT_US) { connectors[i].online = false; ESP_LOGW(TAG, "Connector[%d] marked OFFLINE (charging=%d, timestamp_diff=%lld us)", i, connectors[i].charging, (long long)(now - connectors[i].timestamp)); continue; } ESP_LOGI(TAG, "Connector[%d] ONLINE (charging=%d, hw_max_current=%.1f, timestamp_diff=%lld us)", i, connectors[i].charging, connectors[i].hw_max_current, (long long)(now - connectors[i].timestamp)); if (connectors[i].charging) { idxs[active_cnt++] = i; ESP_LOGI(TAG, "Connector[%d] is ACTIVE (charging)", i); } } // snapshot de grid_current e last_grid_timestamp sob mutex float grid_snapshot = grid_current; int64_t last_grid_ts_snapshot = last_grid_timestamp_us; if (lb_mutex) xSemaphoreGive(lb_mutex); ESP_LOGI(TAG, "Active connectors: %d", active_cnt); if (active_cnt == 0) { vTaskDelay(pdMS_TO_TICKS(5000)); continue; } // --- Verifica timeout de medição de GRID (fail-safe) --- bool meter_timeout = (last_grid_ts_snapshot == 0 || (now - last_grid_ts_snapshot) > GRID_METER_TIMEOUT_US); if (meter_timeout) { ESP_LOGW(TAG, "GRID meter timeout (last update=%lld us ago). Applying fail-safe limits (<=%dA).", (long long)(now - last_grid_ts_snapshot), MIN_CHARGING_CURRENT_LIMIT); if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); // Fail-safe: limitar cada EV ao mínimo permitido (6A) ou menos, nunca aumentar for (int k = 0; k < active_cnt; k++) { int i = idxs[k]; float cur = connectors[i].runtime_current; if (cur > MIN_CHARGING_CURRENT_LIMIT) connectors[i].assigned = (float)MIN_CHARGING_CURRENT_LIMIT; else connectors[i].assigned = cur; } if (lb_mutex) xSemaphoreGive(lb_mutex); } // --- Calcula corrente disponível (headroom global) --- float available = (float)max_grid_current - grid_snapshot; ESP_LOGI(TAG, "LB raw headroom: max_grid=%uA, grid_current=%.1fA, available=%.2fA", max_grid_current, grid_snapshot, available); // ========================== // ZONA A: overload significativo -> reduzir correntes (throttling) // ========================== if (available < -AVAILABLE_EPS) { ESP_LOGW(TAG, "Overload: grid=%.1fA, max=%.1fA (available=%.2fA) -> throttling", grid_snapshot, (float)max_grid_current, available); float factor = ((float)max_grid_current) / grid_snapshot; if (factor < 0.0f) factor = 0.0f; if (factor > 1.0f) factor = 1.0f; if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); for (int k = 0; k < active_cnt; k++) { int i = idxs[k]; connectors[i].assigned = connectors[i].runtime_current * factor; ESP_LOGI(TAG, "Connector[%d] overload throttling: runtime=%.1fA -> assigned=%.1fA", i, connectors[i].runtime_current, connectors[i].assigned); } if (lb_mutex) xSemaphoreGive(lb_mutex); } // ========================== // ZONA B: sem margem prática -> manter correntes atuais como limites // ========================== else if (fabsf(available) <= AVAILABLE_EPS) { ESP_LOGI(TAG, "No effective headroom: grid=%.1fA, max=%.1fA (available=%.2fA). Keeping current as limit.", grid_snapshot, (float)max_grid_current, available); if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); for (int k = 0; k < active_cnt; k++) { int i = idxs[k]; connectors[i].assigned = connectors[i].runtime_current; ESP_LOGI(TAG, "Connector[%d] keep runtime as limit: assigned=%.1fA", i, connectors[i].assigned); } if (lb_mutex) xSemaphoreGive(lb_mutex); } // ========================== // ZONA C: há margem positiva -> garantir mínimo e depois water-filling SOBRE assigned // ========================== else // available > AVAILABLE_EPS { if (available > max_grid_current) { available = (float)max_grid_current; } ESP_LOGI(TAG, "LB Calc (zone C): available=%.1fA, active_connectors=%d", available, active_cnt); if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); // 1) Ordenar conectores ativos por started_us (mais antigos primeiro) for (int a = 0; a < active_cnt - 1; a++) { for (int b = 0; b < active_cnt - 1 - a; b++) { int i1 = idxs[b]; int i2 = idxs[b + 1]; if (connectors[i1].started_us > connectors[i2].started_us) { int tmp = idxs[b]; idxs[b] = idxs[b + 1]; idxs[b + 1] = tmp; } } } // Inicialmente: assigned = runtime_current for (int k = 0; k < active_cnt; k++) { int i = idxs[k]; connectors[i].assigned = connectors[i].runtime_current; } float remaining = available; // margem extra total // 2) FASE 1: tentar garantir pelo menos 6A (ou hw_max, se menor) aos mais antigos for (int k = 0; k < active_cnt && remaining > 0.0f; k++) { int i = idxs[k]; float current = connectors[i].runtime_current; float hw_max = connectors[i].hw_max_current; float target_min = (float)MIN_CHARGING_CURRENT_LIMIT; if (hw_max < target_min) target_min = hw_max; if (current >= target_min) { connectors[i].assigned = current; continue; } float delta = target_min - current; if (delta <= remaining) { connectors[i].assigned = current + delta; remaining -= delta; } else { connectors[i].assigned = current; } } // 3) FASE 2: "last in, first cut" -> cortar quem ficou abaixo do mínimo começando pelos mais recentes for (int k = active_cnt - 1; k >= 0; k--) { int i = idxs[k]; if (connectors[i].assigned >= MIN_CHARGING_CURRENT_LIMIT) { continue; } ESP_LOGI(TAG, "Connector[%d] below min after phase1 (assigned=%.1fA) -> suspending (0A)", i, connectors[i].assigned); connectors[i].assigned = 0.0f; connectors[i].suspended_by_lb = true; } // 4) FASE 3: se ainda sobrar margem, distribuir extra por cima dos que ficaram ON (assigned > 0) if (remaining > AVAILABLE_EPS) { int on_cnt = 0; for (int k = 0; k < active_cnt; k++) { int i = idxs[k]; if (connectors[i].assigned > 0.0f) on_cnt++; } float extra_remaining = remaining; int extra_cnt = on_cnt; for (int k = 0; k < active_cnt; k++) { int i = idxs[k]; if (connectors[i].assigned <= 0.0f) continue; if (extra_cnt <= 0 || extra_remaining <= 0.0f) break; float headroom = connectors[i].hw_max_current - connectors[i].assigned; if (headroom <= 0.0f) { extra_cnt--; continue; } float share = extra_remaining / (float)extra_cnt; if (share >= headroom) { connectors[i].assigned += headroom; extra_remaining -= headroom; extra_cnt--; } else { for (int m = k; m < active_cnt; m++) { int j = idxs[m]; if (connectors[j].assigned <= 0.0f) continue; float headroom_j = connectors[j].hw_max_current - connectors[j].assigned; if (headroom_j <= 0.0f) continue; float inc = MIN(share, headroom_j); connectors[j].assigned += inc; } break; } } } if (lb_mutex) xSemaphoreGive(lb_mutex); } // --- Publicação de limites / suspensão com histerese --- if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); for (int k = 0; k < active_cnt; k++) { int i = idxs[k]; float assigned = connectors[i].assigned; float effective = assigned; // Histerese de suspensão / retoma if (connectors[i].suspended_by_lb) { // Está suspenso: só retoma se limite calculado for >= limiar de retoma if (assigned >= LB_RESUME_THRESHOLD) { effective = assigned; connectors[i].suspended_by_lb = false; } else { effective = 0.0f; } } else { // Ainda não está suspenso: só suspende se ficar claramente abaixo do limiar if (assigned > 0.0f && assigned < LB_SUSPEND_THRESHOLD) { effective = 0.0f; connectors[i].suspended_by_lb = true; } } uint16_t max_cur; if (effective <= 0.0f) { max_cur = 0; } else { max_cur = (uint16_t)MIN(effective, (float)MAX_CHARGING_CURRENT_LIMIT); } // Evita flapping de comandos: só envia se o limite mudou if (connectors[i].last_limit == max_cur) { continue; // sem alteração } connectors[i].last_limit = max_cur; if (lb_mutex) xSemaphoreGive(lb_mutex); if (connectors[i].is_master) { loadbalancer_master_limit_event_t master_evt = { .slave_id = connectors[i].id, .max_current = max_cur, .timestamp_us = now}; esp_event_post(LOADBALANCER_EVENTS, LOADBALANCER_EVENT_MASTER_CURRENT_LIMIT, &master_evt, sizeof(master_evt), portMAX_DELAY); ESP_LOGI(TAG, "Master limit changed -> %.1f A (assigned=%.2f A)", (float)max_cur, assigned); } else { loadbalancer_slave_limit_event_t slave_evt = { .slave_id = connectors[i].id, .max_current = max_cur, .timestamp_us = now}; esp_event_post(LOADBALANCER_EVENTS, LOADBALANCER_EVENT_SLAVE_CURRENT_LIMIT, &slave_evt, sizeof(slave_evt), portMAX_DELAY); ESP_LOGI(TAG, "Slave %d limit changed -> %.1f A (assigned=%.2f A)", connectors[i].id, (float)max_cur, assigned); } if (lb_mutex) xSemaphoreTake(lb_mutex, portMAX_DELAY); } if (lb_mutex) xSemaphoreGive(lb_mutex); vTaskDelay(pdMS_TO_TICKS(5000)); } } // --- Init --- void loadbalancer_init(void) { if (loadbalancer_load_config() != ESP_OK) ESP_LOGW(TAG, "Failed to load/init config. Using defaults."); lb_mutex = xSemaphoreCreateMutex(); if (lb_mutex == NULL) { ESP_LOGE(TAG, "Failed to create loadbalancer mutex"); } init_connectors(); input_filter_init(&grid_filter, 0.3f); input_filter_init(&evse_filter, 0.3f); if (xTaskCreate(loadbalancer_task, "loadbalancer", 4096, NULL, 4, NULL) != pdPASS) ESP_LOGE(TAG, "Failed to create loadbalancer task"); loadbalancer_state_event_t evt = {.enabled = loadbalancer_enabled, .timestamp_us = esp_timer_get_time()}; esp_event_post(LOADBALANCER_EVENTS, LOADBALANCER_EVENT_INIT, &evt, sizeof(evt), portMAX_DELAY); ESP_ERROR_CHECK(esp_event_handler_register(METER_EVENT, METER_EVENT_DATA_READY, &loadbalancer_meter_event_handler, NULL)); ESP_ERROR_CHECK(esp_event_handler_register(EVSE_EVENTS, EVSE_EVENT_STATE_CHANGED, &loadbalancer_evse_event_handler, NULL)); ESP_ERROR_CHECK(esp_event_handler_register(EVSE_EVENTS, EVSE_EVENT_CONFIG_UPDATED, &on_evse_config_event, NULL)); ESP_ERROR_CHECK(esp_event_handler_register(LOADBALANCER_EVENTS, LOADBALANCER_EVENT_SLAVE_STATUS, &on_slave_status, NULL)); }