diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp
index e7204344d5..2eb424bc70 100644
--- a/orchagent/bufferorch.cpp
+++ b/orchagent/bufferorch.cpp
@@ -1,5 +1,6 @@
 #include "tokenize.h"
 #include "bufferorch.h"
+#include "directory.h"
 #include "logger.h"
 #include "sai_serialize.h"
 #include "warm_restart.h"
@@ -16,6 +17,7 @@ extern sai_switch_api_t *sai_switch_api;
 extern sai_buffer_api_t *sai_buffer_api;
 
 extern PortsOrch *gPortsOrch;
+extern Directory gDirectory;
 extern sai_object_id_t gSwitchId;
 
 #define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000"
@@ -803,6 +805,20 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
                         return handle_status;
                     }
                 }
+                // create/remove a port queue counter for the queue buffer
+                else
+                {
+                    auto flexCounterOrch = gDirectory.get<FlexCounterOrch*>();
+                    auto queues = tokens[1];
+                    if (op == SET_COMMAND && flexCounterOrch->getQueueCountersState())
+                    {
+                        gPortsOrch->createPortBufferQueueCounters(port, queues);
+                    }
+                    else if (op == DEL_COMMAND && flexCounterOrch->getQueueCountersState())
+                    {
+                        gPortsOrch->removePortBufferQueueCounters(port, queues);
+                    }
+                }
             }
         }
     }
@@ -862,7 +878,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tuple)
     if (op == SET_COMMAND)
     {
         ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name,
-                                                                 buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, 
+                                                                 buffer_to_ref_table_map.at(buffer_profile_field_name), tuple,
                                                                  sai_buffer_profile, buffer_profile_name);
         if (ref_resolve_status::success != resolve_result)
         {
@@ -942,6 +958,20 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tuple)
                         return handle_status;
                     }
                 }
+                // create or remove a port PG counter for the PG buffer
+                else
+                {
+                    auto flexCounterOrch = gDirectory.get<FlexCounterOrch*>();
+                    auto pgs = tokens[1];
+                    if (op == SET_COMMAND && flexCounterOrch->getPgWatermarkCountersState())
+                    {
+                        gPortsOrch->createPortBufferPgCounters(port, pgs);
+                    }
+                    else if (op == DEL_COMMAND && flexCounterOrch->getPgWatermarkCountersState())
+                    {
+                        gPortsOrch->removePortBufferPgCounters(port, pgs);
+                    }
+                }
             }
         }
     }
diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp
index dc14998774..2722abde03 100644
--- a/orchagent/flexcounterorch.cpp
+++ b/orchagent/flexcounterorch.cpp
@@ -10,6 +10,7 @@
 #include "debugcounterorch.h"
 #include "directory.h"
 #include "copporch.h"
+#include <swss/tokenize.h>
 
 extern sai_port_api_t *sai_port_api;
 
@@ -53,6 +54,8 @@ unordered_map<string, string> flexCounterGroupMap =
 FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector<string> &tableNames):
     Orch(db, tableNames),
     m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME),
+    m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME),
+    m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME),
     m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)),
     m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE))
 {
@@ -139,11 +142,13 @@ void FlexCounterOrch::doTask(Consumer &consumer)
                 }
                 else if(key == QUEUE_KEY)
                 {
-                    gPortsOrch->generateQueueMap();
+                    gPortsOrch->generateQueueMap(getQueueConfigurations());
+                    m_queue_enabled = true;
                 }
                 else if(key == PG_WATERMARK_KEY)
                 {
-                    gPortsOrch->generatePriorityGroupMap();
+                    gPortsOrch->generatePriorityGroupMap(getPgConfigurations());
+                    m_pg_watermark_enabled = true;
                 }
             }
             if(gIntfsOrch && (key == RIF_KEY) && (value == "enable"))
@@ -206,6 +211,16 @@ bool FlexCounterOrch::getPortBufferDropCountersState() const
     return m_port_buffer_drop_counter_enabled;
 }
 
+bool FlexCounterOrch::getPgWatermarkCountersState() const
+{
+    return m_pg_watermark_enabled;
+}
+
+bool FlexCounterOrch::getQueueCountersState() const
+{
+    return m_queue_enabled;
+}
+
 bool FlexCounterOrch::bake()
 {
     /*
@@ -247,3 +262,155 @@ bool FlexCounterOrch::bake()
     Consumer* consumer = dynamic_cast<Consumer *>(getExecutor(CFG_FLEX_COUNTER_TABLE_NAME));
     return consumer->addToSync(entries);
 }
+
+map<string, FlexCounterQueueStates> FlexCounterOrch::getQueueConfigurations()
+{
+    map<string, FlexCounterQueueStates> queuesStateVector;
+    std::vector<std::string> portQueueKeys;
+    m_bufferQueueConfigTable.getKeys(portQueueKeys);
+
+    for (const auto& portQueueKey : portQueueKeys)
+    {
+        auto toks = tokenize(portQueueKey, '|');
+        if (toks.size() != 2)
+        {
+            SWSS_LOG_ERROR("Invalid BUFFER_QUEUE key: [%s]", portQueueKey.c_str());
+            continue;
+        }
+
+        auto configPortNames = tokenize(toks[0], ',');
+        auto configPortQueues = toks[1];
+        toks = tokenize(configPortQueues, '-');
+
+        for (const auto& configPortName : configPortNames)
+        {
+            uint32_t maxQueueNumber = gPortsOrch->getNumberOfPortSupportedQueueCounters(configPortName);
+            uint32_t maxQueueIndex = maxQueueNumber - 1;
+            uint32_t minQueueIndex = 0;
+
+            if (!queuesStateVector.count(configPortName))
+            {
+                FlexCounterQueueStates flexCounterQueueState(maxQueueNumber);
+                queuesStateVector.insert(make_pair(configPortName, flexCounterQueueState));
+            }
+
+            try {
+                auto startIndex = to_uint<uint32_t>(toks[0], minQueueIndex, maxQueueIndex);
+                if (toks.size() > 1)
+                {
+                    auto endIndex = to_uint<uint32_t>(toks[1], minQueueIndex, maxQueueIndex);
+                    queuesStateVector.at(configPortName).enableQueueCounters(startIndex, endIndex);
+                }
+                else
+                {
+                    queuesStateVector.at(configPortName).enableQueueCounter(startIndex);
+                }
+            } catch (std::invalid_argument const& e) {
+                SWSS_LOG_ERROR("Invalid queue index [%s] for port [%s]", configPortQueues.c_str(), configPortName.c_str());
+                continue;
+            }
+        }
+    }
+
+    return queuesStateVector;
+}
+
+map<string, FlexCounterPgStates> FlexCounterOrch::getPgConfigurations()
+{
+    map<string, FlexCounterPgStates> pgsStateVector;
+    std::vector<std::string> portPgKeys;
+    m_bufferPgConfigTable.getKeys(portPgKeys);
+
+    for (const auto& portPgKey : portPgKeys)
+    {
+        auto toks = tokenize(portPgKey, '|');
+        if (toks.size() != 2)
+        {
+            SWSS_LOG_ERROR("Invalid BUFFER_PG key: [%s]", portPgKey.c_str());
+            continue;
+        }
+
+        auto configPortNames = tokenize(toks[0], ',');
+        auto configPortPgs = toks[1];
+        toks = tokenize(configPortPgs, '-');
+
+        for (const auto& configPortName : configPortNames)
+        {
+            uint32_t maxPgNumber = gPortsOrch->getNumberOfPortSupportedPgCounters(configPortName);
+            uint32_t maxPgIndex = maxPgNumber - 1;
+            uint32_t minPgIndex = 0;
+
+            if (!pgsStateVector.count(configPortName))
+            {
+                FlexCounterPgStates flexCounterPgState(maxPgNumber);
+                pgsStateVector.insert(make_pair(configPortName, flexCounterPgState));
+            }
+
+            try {
+                auto startIndex = to_uint<uint32_t>(toks[0], minPgIndex, maxPgIndex);
+                if (toks.size() > 1)
+                {
+                    auto endIndex = to_uint<uint32_t>(toks[1], minPgIndex, maxPgIndex);
+                    pgsStateVector.at(configPortName).enablePgCounters(startIndex, endIndex);
+                }
+                else
+                {
+                    pgsStateVector.at(configPortName).enablePgCounter(startIndex);
+                }
+            } catch (std::invalid_argument const& e) {
+                SWSS_LOG_ERROR("Invalid pg index [%s] for port [%s]", configPortPgs.c_str(), configPortName.c_str());
+                continue;
+            }
+        }
+    }
+
+    return pgsStateVector;
+}
+
+FlexCounterQueueStates::FlexCounterQueueStates(uint32_t maxQueueNumber)
+{
+    SWSS_LOG_ENTER();
+    m_queueStates.resize(maxQueueNumber, false);
+}
+
+bool FlexCounterQueueStates::isQueueCounterEnabled(uint32_t index) const
+{
+    return m_queueStates[index];
+}
+
+void FlexCounterQueueStates::enableQueueCounters(uint32_t startIndex, uint32_t endIndex)
+{
+    for (uint32_t queueIndex = startIndex; queueIndex <= endIndex; queueIndex++)
+    {
+        enableQueueCounter(queueIndex);
+    }
+}
+
+void FlexCounterQueueStates::enableQueueCounter(uint32_t queueIndex)
+{
+    m_queueStates[queueIndex] = true;
+}
+
+FlexCounterPgStates::FlexCounterPgStates(uint32_t maxPgNumber)
+{
+    SWSS_LOG_ENTER();
+    m_pgStates.resize(maxPgNumber, false);
+}
+
+bool FlexCounterPgStates::isPgCounterEnabled(uint32_t index) const
+{
+    return m_pgStates[index];
+}
+
+void FlexCounterPgStates::enablePgCounters(uint32_t startIndex, uint32_t endIndex)
+{
+    for (uint32_t pgIndex = startIndex; pgIndex <= endIndex; pgIndex++)
+    {
+        enablePgCounter(pgIndex);
+    }
+}
+
+void FlexCounterPgStates::enablePgCounter(uint32_t pgIndex)
+{
+    m_pgStates[pgIndex] = true;
+}
diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h
index ceb8187506..c04519f02d 100644
--- a/orchagent/flexcounterorch.h
+++ b/orchagent/flexcounterorch.h
@@ -10,6 +10,30 @@ extern "C" {
 #include "sai.h"
 }
 
+class FlexCounterQueueStates
+{
+public:
+    FlexCounterQueueStates(uint32_t maxQueueNumber);
+    bool isQueueCounterEnabled(uint32_t index) const;
+    void enableQueueCounters(uint32_t startIndex, uint32_t endIndex);
+    void enableQueueCounter(uint32_t queueIndex);
+
+private:
+    std::vector<bool> m_queueStates{};
+};
+
+class FlexCounterPgStates
+{
+public:
+    FlexCounterPgStates(uint32_t maxPgNumber);
+    bool isPgCounterEnabled(uint32_t index) const;
+    void enablePgCounters(uint32_t startIndex, uint32_t endIndex);
+    void enablePgCounter(uint32_t pgIndex);
+
+private:
+    std::vector<bool> m_pgStates{};
+};
+
 class FlexCounterOrch: public Orch
 {
 public:
@@ -18,17 +42,25 @@ class FlexCounterOrch: public Orch
     virtual ~FlexCounterOrch(void);
     bool getPortCountersState() const;
    bool getPortBufferDropCountersState() const;
+    bool getPgWatermarkCountersState() const;
+    bool getQueueCountersState() const;
+    map<string, FlexCounterQueueStates> getQueueConfigurations();
+    map<string, FlexCounterPgStates> getPgConfigurations();
     bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;}
     bool bake() override;
-    
+
 private:
     std::shared_ptr<DBConnector> m_flexCounterDb = nullptr;
     std::shared_ptr<ProducerTable> m_flexCounterGroupTable = nullptr;
     bool m_port_counter_enabled = false;
     bool m_port_buffer_drop_counter_enabled = false;
+    bool m_pg_watermark_enabled = false;
+    bool m_queue_enabled = false;
     bool m_hostif_trap_counter_enabled = false;
     Table m_flexCounterConfigTable;
+    Table m_bufferQueueConfigTable;
+    Table m_bufferPgConfigTable;
 };
 
 #endif
diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp
index ada1f4bb92..6699904255 100755
--- a/orchagent/portsorch.cpp
+++ b/orchagent/portsorch.cpp
@@ -4917,7 +4917,7 @@ bool PortsOrch::addLag(string lag_alias, uint32_t spa_id, int32_t switch_id)
     auto lagport = m_portList.find(lag_alias);
     if (lagport != m_portList.end())
     {
-        /* The deletion of bridgeport attached to the lag may still be 
+        /* The deletion of bridgeport attached to the lag may still be
          * pending due to fdb entries still present on the lag. Wait
          * until the cleanup is done.
          */
@@ -5363,7 +5363,7 @@ bool PortsOrch::removeTunnel(Port tunnel)
     return true;
 }
 
-void PortsOrch::generateQueueMap()
+void PortsOrch::generateQueueMap(map<string, FlexCounterQueueStates> queuesStateVector)
 {
     if (m_isQueueMapGenerated)
     {
@@ -5374,14 +5374,20 @@
     {
         if (it.second.m_type == Port::PHY)
         {
-            generateQueueMapPerPort(it.second);
+            if (!queuesStateVector.count(it.second.m_alias))
+            {
+                auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias);
+                FlexCounterQueueStates flexCounterQueueState(maxQueueNumber);
+                queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState));
+            }
+            generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias));
         }
     }
 
     m_isQueueMapGenerated = true;
 }
 
-void PortsOrch::generateQueueMapPerPort(const Port& port)
+void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState)
 {
     /* Create the Queue map in the Counter DB */
     /* Add stat counters to flex_counter */
@@ -5397,9 +5403,78 @@ void PortsOrch::generateQueueMapPerPort(const Port& port)
 
         const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]);
 
+        string queueType;
+        uint8_t queueRealIndex = 0;
+        if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex))
+        {
+            if (!queuesState.isQueueCounterEnabled(queueRealIndex))
+            {
+                continue;
+            }
+            queueTypeVector.emplace_back(id, queueType);
+            queueIndexVector.emplace_back(id, to_string(queueRealIndex));
+        }
+
         queueVector.emplace_back(name.str(), id);
         queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id));
 
+        // Install a flex counter for this queue to track stats
+        std::unordered_set<string> counter_stats;
+        for (const auto& it: queue_stat_ids)
+        {
+            counter_stats.emplace(sai_serialize_queue_stat(it));
+        }
+        queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats);
+
+        /* add watermark queue counters */
+        string key = getQueueWatermarkFlexCounterTableKey(id);
+
+        string delimiter("");
+        std::ostringstream counters_stream;
+        for (const auto& it: queueWatermarkStatIds)
+        {
+            counters_stream << delimiter << sai_serialize_queue_stat(it);
+            delimiter = comma;
+        }
+
+        vector<FieldValueTuple> fieldValues;
+        fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str());
+
+        m_flexCounterTable->set(key, fieldValues);
+    }
+
+    m_queueTable->set("", queueVector);
+    m_queuePortTable->set("", queuePortVector);
+    m_queueIndexTable->set("", queueIndexVector);
+    m_queueTypeTable->set("", queueTypeVector);
+
+    CounterCheckOrch::getInstance().addPort(port);
+}
+
+void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues)
+{
+    /* Create the Queue map in the Counter DB */
+    /* Add stat counters to flex_counter */
+    vector<FieldValueTuple> queueVector;
+    vector<FieldValueTuple> queuePortVector;
+    vector<FieldValueTuple> queueIndexVector;
+    vector<FieldValueTuple> queueTypeVector;
+
+    auto toks = tokenize(queues, '-');
+    auto startIndex = to_uint<uint32_t>(toks[0]);
+    auto endIndex = startIndex;
+    if (toks.size() > 1)
+    {
+        endIndex = to_uint<uint32_t>(toks[1]);
+    }
+
+    for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++)
+    {
+        std::ostringstream name;
+        name << port.m_alias << ":" << queueIndex;
+
+        const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]);
+
         string queueType;
         uint8_t queueRealIndex = 0;
         if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex))
@@ -5408,6 +5483,9 @@ void PortsOrch::generateQueueMapPerPort(const Port& port)
             queueIndexVector.emplace_back(id, to_string(queueRealIndex));
         }
 
+        queueVector.emplace_back(name.str(), id);
+        queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id));
+
         // Install a flex counter for this queue to track stats
         std::unordered_set<string> counter_stats;
         for (const auto& it: queue_stat_ids)
@@ -5441,7 +5519,40 @@ void PortsOrch::generateQueueMapPerPort(const Port& port)
     CounterCheckOrch::getInstance().addPort(port);
 }
 
-void PortsOrch::generatePriorityGroupMap()
+void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues)
+{
+    /* Remove the Queues maps in the Counter DB */
+    /* Remove stat counters from flex_counter DB */
+    auto toks = tokenize(queues, '-');
+    auto startIndex = to_uint<uint32_t>(toks[0]);
+    auto endIndex = startIndex;
+    if (toks.size() > 1)
+    {
+        endIndex = to_uint<uint32_t>(toks[1]);
+    }
+
+    for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++)
+    {
+        std::ostringstream name;
+        name << port.m_alias << ":" << queueIndex;
+        const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]);
+
+        /* Remove watermark queue counters */
+        string key = getQueueWatermarkFlexCounterTableKey(id);
+        m_flexCounterTable->del(key);
+
+        // Remove the flex counter for this queue
+        queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]);
+
+        // Remove the queue counter from counters DB maps
+        m_queueTable->hdel("", name.str());
+        m_queuePortTable->hdel("", id);
+        m_queueIndexTable->hdel("", id);
+        m_queueTypeTable->hdel("", id);
+    }
+}
+
+void PortsOrch::generatePriorityGroupMap(map<string, FlexCounterPgStates> pgsStateVector)
 {
     if (m_isPriorityGroupMapGenerated)
     {
@@ -5452,14 +5563,20 @@
     {
         if (it.second.m_type == Port::PHY)
         {
-            generatePriorityGroupMapPerPort(it.second);
+            if (!pgsStateVector.count(it.second.m_alias))
+            {
+                auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias);
+                FlexCounterPgStates flexCounterPgState(maxPgNumber);
+                pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState));
+            }
+            generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias));
         }
     }
 
     m_isPriorityGroupMapGenerated = true;
 }
 
-void PortsOrch::generatePriorityGroupMapPerPort(const Port& port)
+void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState)
 {
     /* Create the PG map in the Counter DB */
     /* Add stat counters to flex_counter */
@@ -5468,6 +5585,76 @@ void PortsOrch::generatePriorityGroupMapPerPort(const Port& port)
     vector<FieldValueTuple> pgIndexVector;
 
     for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex)
+    {
+        if (!pgsState.isPgCounterEnabled(static_cast<uint32_t>(pgIndex)))
+        {
+            continue;
+        }
+        std::ostringstream name;
+        name << port.m_alias << ":" << pgIndex;
+
+        const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]);
+
+        pgVector.emplace_back(name.str(), id);
+        pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id));
+        pgIndexVector.emplace_back(id, to_string(pgIndex));
+
+        string key = getPriorityGroupWatermarkFlexCounterTableKey(id);
+
+        std::string delimiter = "";
+        std::ostringstream counters_stream;
+        /* Add watermark counters to flex_counter */
+        for (const auto& it: ingressPriorityGroupWatermarkStatIds)
+        {
+            counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it);
+            delimiter = comma;
+        }
+
+        vector<FieldValueTuple> fieldValues;
+        fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str());
+        m_flexCounterTable->set(key, fieldValues);
+
+        delimiter = "";
+        std::ostringstream ingress_pg_drop_packets_counters_stream;
+        key = getPriorityGroupDropPacketsFlexCounterTableKey(id);
+        /* Add dropped packets counters to flex_counter */
+        for (const auto& it: ingressPriorityGroupDropStatIds)
+        {
+            ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it);
+            if (delimiter.empty())
+            {
+                delimiter = comma;
+            }
+        }
+        fieldValues.clear();
+        fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str());
+        m_flexCounterTable->set(key, fieldValues);
+    }
+
+    m_pgTable->set("", pgVector);
+    m_pgPortTable->set("", pgPortVector);
+    m_pgIndexTable->set("", pgIndexVector);
+
+    CounterCheckOrch::getInstance().addPort(port);
+}
+
+void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs)
+{
+    /* Create the PG map in the Counter DB */
+    /* Add stat counters to flex_counter */
+    vector<FieldValueTuple> pgVector;
+    vector<FieldValueTuple> pgPortVector;
+    vector<FieldValueTuple> pgIndexVector;
+
+    auto toks = tokenize(pgs, '-');
+    auto startIndex = to_uint<uint32_t>(toks[0]);
+    auto endIndex = startIndex;
+    if (toks.size() > 1)
+    {
+        endIndex = to_uint<uint32_t>(toks[1]);
+    }
+
+    for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++)
     {
         std::ostringstream name;
         name << port.m_alias << ":" << pgIndex;
@@ -5517,6 +5704,39 @@ void PortsOrch::generatePriorityGroupMapPerPort(const Port& port)
     CounterCheckOrch::getInstance().addPort(port);
 }
 
+void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs)
+{
+    /* Remove the Pgs maps in the Counter DB */
+    /* Remove stat counters from flex_counter DB */
+    auto toks = tokenize(pgs, '-');
+    auto startIndex = to_uint<uint32_t>(toks[0]);
+    auto endIndex = startIndex;
+    if (toks.size() > 1)
+    {
+        endIndex = to_uint<uint32_t>(toks[1]);
+    }
+
+    for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++)
+    {
+        std::ostringstream name;
+        name << port.m_alias << ":" << pgIndex;
+        const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]);
+
+        /* Remove dropped packets counters from flex_counter */
+        string key = getPriorityGroupDropPacketsFlexCounterTableKey(id);
+        m_flexCounterTable->del(key);
+
+        /* Remove watermark counters from flex_counter */
+        key = getPriorityGroupWatermarkFlexCounterTableKey(id);
+        m_flexCounterTable->del(key);
+
+        // Remove the pg counter from counters DB maps
+        m_pgTable->hdel("", name.str());
+        m_pgPortTable->hdel("", id);
+        m_pgIndexTable->hdel("", id);
+    }
+}
+
 void PortsOrch::generatePortCounterMap()
 {
     if (m_isPortCounterMapGenerated)
@@ -5559,6 +5779,16 @@ void PortsOrch::generatePortBufferDropCounterMap()
     m_isPortBufferDropCounterMapGenerated = true;
 }
 
+uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port)
+{
+    return static_cast<uint32_t>(m_portList[port].m_priority_group_ids.size());
+}
+
+uint32_t PortsOrch::getNumberOfPortSupportedQueueCounters(string port)
+{
+    return static_cast<uint32_t>(m_portList[port].m_queue_ids.size());
+}
+
 void PortsOrch::doTask(NotificationConsumer &consumer)
 {
     SWSS_LOG_ENTER();
diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h
index 843ffbd7f1..f13bd0811c 100755
--- a/orchagent/portsorch.h
+++ b/orchagent/portsorch.h
@@ -124,8 +124,16 @@ class PortsOrch : public Orch, public Subject
     bool getPortPfc(sai_object_id_t portId, uint8_t *pfc_bitmask);
     bool setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask);
 
-    void generateQueueMap();
-    void generatePriorityGroupMap();
+    void generateQueueMap(map<string, FlexCounterQueueStates> queuesStateVector);
+    uint32_t getNumberOfPortSupportedQueueCounters(string port);
+    void createPortBufferQueueCounters(const Port &port, string queues);
+    void removePortBufferQueueCounters(const Port &port, string queues);
+
+    void generatePriorityGroupMap(map<string, FlexCounterPgStates> pgsStateVector);
+    uint32_t getNumberOfPortSupportedPgCounters(string port);
+    void createPortBufferPgCounters(const Port &port, string pgs);
+    void removePortBufferPgCounters(const Port& port, string pgs);
+
     void generatePortCounterMap();
     void generatePortBufferDropCounterMap();
 
@@ -311,11 +319,9 @@ class PortsOrch : public Orch, public Subject
     bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index);
 
     bool m_isQueueMapGenerated = false;
-    void generateQueueMapPerPort(const Port& port);
-
+    void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState);
     bool m_isPriorityGroupMapGenerated = false;
-    void generatePriorityGroupMapPerPort(const Port& port);
-
+    void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState);
     bool m_isPortCounterMapGenerated = false;
     bool m_isPortBufferDropCounterMapGenerated = false;
 
diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp
index 853fdbfb69..908e108547 100644
--- a/tests/mock_tests/portsorch_ut.cpp
+++ b/tests/mock_tests/portsorch_ut.cpp
@@ -62,13 +62,14 @@ namespace portsorch_test
 
         ASSERT_EQ(gPortsOrch, nullptr);
 
+        gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get());
+
         vector<string> flex_counter_tables = {
             CFG_FLEX_COUNTER_TABLE_NAME
         };
         auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables);
         gDirectory.set(flexCounterOrch);
 
-        gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get());
         vector<string> buffer_tables = { APP_BUFFER_POOL_TABLE_NAME,
                                          APP_BUFFER_PROFILE_TABLE_NAME,
                                          APP_BUFFER_QUEUE_TABLE_NAME,
@@ -567,7 +568,7 @@
      * updated to DB.
      */
     TEST_F(PortsOrchTest, PortOperStatusIsUpAndOperSpeedIsZero)
-    {    
+    {
         Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
 
         // Get SAI default ports to populate DB
@@ -592,7 +593,7 @@ namespace portsorch_test
         Port port;
         gPortsOrch->getPort("Ethernet0", port);
         ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP);
-        
+
         // save original api since we will spy
         auto orig_port_api = sai_port_api;
         sai_port_api = new sai_port_api_t();
@@ -610,14 +611,14 @@ namespace portsorch_test
                 // Return 0 for port operational speed
                 attrs[0].value.u32 = 0;
             }
-            
+
             return (sai_status_t)SAI_STATUS_SUCCESS;
             }
         );
 
         auto exec = static_cast<Notifier *>(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS"));
         auto consumer = exec->getNotificationConsumer();
-        
+
         // mock a redis reply for notification, it notifies that Ehernet0 is going to up
         mockReply = (redisReply *)calloc(sizeof(redisReply), 1);
         mockReply->type = REDIS_REPLY_ARRAY;
@@ -639,7 +640,7 @@ namespace portsorch_test
         // trigger the notification
         consumer->readData();
         gPortsOrch->doTask(*consumer);
-        mockReply = nullptr;    
+        mockReply = nullptr;
 
         gPortsOrch->getPort("Ethernet0", port);
         ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP);
diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp
index 84f92a088c..07c21396d1 100644
--- a/tests/mock_tests/routeorch_ut.cpp
+++ b/tests/mock_tests/routeorch_ut.cpp
@@ -176,15 +176,15 @@ namespace routeorch_test
             { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri }
         };
 
+        ASSERT_EQ(gPortsOrch, nullptr);
+        gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get());
+
         vector<string> flex_counter_tables = {
            CFG_FLEX_COUNTER_TABLE_NAME
         };
         auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables);
         gDirectory.set(flexCounterOrch);
 
-        ASSERT_EQ(gPortsOrch, nullptr);
-        gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get());
-
         ASSERT_EQ(gVrfOrch, nullptr);
         gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME);
diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py
index ea950af7c1..3268866046 100644
--- a/tests/test_flex_counters.py
+++ b/tests/test_flex_counters.py
@@ -123,6 +123,18 @@ def wait_for_interval_set(self, group, interval):
         assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value)
 
+    def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet):
+        for retry in range(NUMBER_OF_RETRIES):
+            counter_oid = self.counters_db.db_connection.hget(map, port + ':' + index)
+            if (isSet and counter_oid):
+                return counter_oid
+            elif (not isSet and not counter_oid):
+                return None
+            else:
+                time.sleep(1)
+
+        assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index)
+
     def verify_no_flex_counters_tables(self, counter_stat):
         counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat)
         assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group"
@@ -372,3 +384,53 @@ def test_remove_trap_group(self, dvs):
         assert trap_id not in counters_keys
 
         self.set_flex_counter_group_status(meta_data['key'], meta_data['group_name'], 'disable')
+
+    def test_create_remove_buffer_pg_counter(self, dvs):
+        """
+        Test steps:
+            1. Enable PG flex counters.
+            2. Configure new buffer priority group for a port
+            3. Verify counter is automatically created
+            4. Remove the new buffer priority group for the port
+            5. Verify counter is automatically removed
+
+        Args:
+            dvs (object): virtual switch object
+        """
+        self.setup_dbs(dvs)
+        meta_data = counter_group_meta['pg_watermark_counter']
+
+        self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'])
+
+        self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'})
+        counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', True)
+        self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid)
+
+        self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1')
+        self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False)
+        self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid)
+
+    def test_create_remove_buffer_queue_counter(self, dvs):
+        """
+        Test steps:
+            1. Enable Queue flex counters.
+            2. Configure new buffer queue for a port
+            3. Verify counter is automatically created
+            4. Remove the new buffer queue for the port
+            5. Verify counter is automatically removed
+
+        Args:
+            dvs (object): virtual switch object
+        """
+        self.setup_dbs(dvs)
+        meta_data = counter_group_meta['queue_counter']
+
+        self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'])
+
+        self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|1', {'profile': 'egress_lossless_profile'})
+        counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', True)
+        self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid)
+
+        self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|1')
+        self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False)
+        self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid)