From 7a0ec8f625ee6d6566cc25a8a8511c2fb24d6a0a Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Thu, 9 Sep 2021 21:22:58 +0300 Subject: [PATCH 01/19] [Flex-counters] Fix the delay of flex counters flow to prevent infinite loop (#1899) * Fix delay flex counters flow to prevent infinite loop Signed-off-by: Shlomi Bitton --- orchagent/flexcounterorch.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index 51c9c9325a..78368508a5 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -86,10 +86,11 @@ void FlexCounterOrch::doTask(Consumer &consumer) if (op == SET_COMMAND) { - auto it = std::find(std::begin(data), std::end(data), FieldValueTuple(FLEX_COUNTER_DELAY_STATUS_FIELD, "true")); + auto itDelay = std::find(std::begin(data), std::end(data), FieldValueTuple(FLEX_COUNTER_DELAY_STATUS_FIELD, "true")); - if (it != data.end()) + if (itDelay != data.end()) { + consumer.m_toSync.erase(it++); continue; } for (auto valuePair:data) From 8cf355db09fc2ffdadcea936c349301193a760da Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Mon, 13 Sep 2021 12:33:48 -0700 Subject: [PATCH 02/19] Mux state order change (#1902) *Orchagent sometimes took >50ms to finish the state change operations. This change is to update notification to xcvrd (via APPDB) after execution is complete. This enables xcvrd to simulate the direction change after orchagent has completed the state transition. --- orchagent/muxorch.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 6cfc6bc4d5..eeae6b4cb2 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -394,15 +394,14 @@ void MuxCable::setState(string new_state) SWSS_LOG_NOTICE("[%s] Set MUX state from %s to %s", mux_name_.c_str(), muxStateValToString.at(state_).c_str(), new_state.c_str()); - // Update HW Mux cable state anyways - mux_cb_orch_->updateMuxState(mux_name_, new_state); - MuxState ns = muxStateStringToVal.at(new_state); auto it = muxStateTransition.find(make_pair(state_, ns)); if (it == muxStateTransition.end()) { + // Update HW Mux cable state anyways + mux_cb_orch_->updateMuxState(mux_name_, new_state); SWSS_LOG_ERROR("State transition from %s to %s is not-handled ", muxStateValToString.at(state_).c_str(), new_state.c_str()); return; @@ -430,6 +429,7 @@ void MuxCable::setState(string new_state) st_chg_failed_ = false; SWSS_LOG_INFO("Changed state to %s", new_state.c_str()); + mux_cb_orch_->updateMuxState(mux_name_, new_state); return; } From d01524d8fa3fb57968adee8ba921402c905991b8 Mon Sep 17 00:00:00 2001 From: anish-n <44376847+anish-n@users.noreply.github.com> Date: Wed, 15 Sep 2021 09:33:57 -0700 Subject: [PATCH 03/19] [fgnhgorch] Enable packet flow when no FG ECMP neighbors are resolved (#1900) *Added ability to set the FG route's next-hop as the RIF, the packets will trap to the kernel and be neighbor resolved via the kernel. Further, added the ability to set the next-hop to a fine grained ECMP next-hop after the 1st neighbor resolution occurs. 
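For illustration, the heart of the fallback is a single SAI set operation that repoints the already-programmed route entry's SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, either at the RIF object (so traffic traps to the kernel for ARP/ND resolution) or back at the fine grained ECMP group once the first neighbor resolves. A minimal standalone sketch of that step, condensing the patch's modifyRoutesNextHopId helper into a hypothetical repointRoute function:

```cpp
#include <sai.h>

extern sai_route_api_t *sai_route_api; // bound during SAI initialization

// Sketch only: repoint an existing route entry at a new next hop OID.
// Passing the RIF OID makes packets trap to the kernel for neighbor
// resolution; passing an FG ECMP group OID restores fine grained hashing.
bool repointRoute(sai_route_entry_t route_entry, sai_object_id_t next_hop_id)
{
    sai_attribute_t attr;
    attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID;
    attr.value.oid = next_hop_id;

    sai_status_t status =
        sai_route_api->set_route_entry_attribute(&route_entry, &attr);
    return status == SAI_STATUS_SUCCESS;
}
```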
--- orchagent/fgnhgorch.cpp | 243 ++++++++++++++++++++++++++++++---------- orchagent/fgnhgorch.h | 5 +- orchagent/routeorch.cpp | 8 +- tests/test_fgnhg.py | 36 +++++- 4 files changed, 226 insertions(+), 66 deletions(-) diff --git a/orchagent/fgnhgorch.cpp b/orchagent/fgnhgorch.cpp index 4111665e09..0deabdb24d 100644 --- a/orchagent/fgnhgorch.cpp +++ b/orchagent/fgnhgorch.cpp @@ -317,6 +317,7 @@ bool FgNhgOrch::createFineGrainedNextHopGroup(FGNextHopGroupEntry &syncd_fg_rout bool FgNhgOrch::removeFineGrainedNextHopGroup(FGNextHopGroupEntry *syncd_fg_route_entry) { SWSS_LOG_ENTER(); + sai_status_t status; for (auto nhgm : syncd_fg_route_entry->nhopgroup_members) @@ -337,6 +338,34 @@ bool FgNhgOrch::removeFineGrainedNextHopGroup(FGNextHopGroupEntry *syncd_fg_rout if (!gRouteOrch->removeFineGrainedNextHopGroup(syncd_fg_route_entry->next_hop_group_id)) { + SWSS_LOG_ERROR("Failed to remove nhgid %" PRIx64 " return failure", + syncd_fg_route_entry->next_hop_group_id); + return false; + } + + return true; +} + + +bool FgNhgOrch::modifyRoutesNextHopId(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, sai_object_id_t next_hop_id) +{ + SWSS_LOG_ENTER(); + + sai_route_entry_t route_entry; + sai_attribute_t route_attr; + + route_entry.vr_id = vrf_id; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, ipPrefix); + + route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + route_attr.value.oid = next_hop_id; + + sai_status_t status = sai_route_api->set_route_entry_attribute(&route_entry, &route_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set route %s with packet action forward, %d", + ipPrefix.to_string().c_str(), status); return false; } @@ -385,22 +414,56 @@ bool FgNhgOrch::validNextHopInNextHopGroup(const NextHopKey& nexthop) return true; } - for (auto active_nh : syncd_fg_route_entry->active_nexthops) + if (fgNhgEntry->hash_bucket_indices.size() == 0 && syncd_fg_route_entry->points_to_rif) { - bank_member_changes[fgNhgEntry->next_hops[active_nh.ip_address].bank]. - active_nhs.push_back(active_nh); + /* Only happens the 1st time when hash_bucket_indices are not inited + */ + for (auto it : fgNhgEntry->next_hops) + { + while (bank_member_changes.size() <= it.second.bank) + { + bank_member_changes.push_back(BankMemberChanges()); + } + } } bank_member_changes[fgNhgEntry->next_hops[nexthop.ip_address].bank]. nhs_to_add.push_back(nexthop); nhopgroup_members_set[nexthop] = m_neighOrch->getNextHopId(nexthop); - if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, - bank_member_changes, nhopgroup_members_set, route_table.first)) + if (syncd_fg_route_entry->points_to_rif) { - SWSS_LOG_ERROR("Failed to set fine grained next hop %s", - nexthop.to_string().c_str()); - return false; + // RIF route is now neigh resolved: create Fine Grained ECMP + if (!createFineGrainedNextHopGroup(*syncd_fg_route_entry, fgNhgEntry, syncd_fg_route_entry->nhg_key)) + { + return false; + } + + if (!setNewNhgMembers(*syncd_fg_route_entry, fgNhgEntry, bank_member_changes, nhopgroup_members_set, route_table.first)) + { + return false; + } + + if (!modifyRoutesNextHopId(route_tables.first, route_table.first, syncd_fg_route_entry->next_hop_group_id)) + { + return false; + } + } + else + { + for (auto active_nh : syncd_fg_route_entry->active_nexthops) + { + bank_member_changes[fgNhgEntry->next_hops[active_nh.ip_address].bank]. 
+ active_nhs.push_back(active_nh); + } + + if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, + bank_member_changes, nhopgroup_members_set, route_table.first)) + { + SWSS_LOG_ERROR("Failed to set fine grained next hop %s", + nexthop.to_string().c_str()); + return false; + } } m_neighOrch->increaseNextHopRefCount(nexthop); @@ -760,14 +823,46 @@ bool FgNhgOrch::setInactiveBankToNextAvailableActiveBank(FGNextHopGroupEntry *sy if (new_bank_idx == bank_member_changes.size()) { - SWSS_LOG_NOTICE("All banks of FG next-hops are down for prefix %s", - ipPrefix.to_string().c_str()); /* Case where there are no active banks */ - /* Note: There is no way to set a NULL OID to the now inactive next-hops - * so we leave the next-hops as is in SAI, and future route/neighbor changes - * will take care of setting the next-hops to the correctly active nhs + SWSS_LOG_NOTICE("All banks of FG next-hops are down for prefix %s", + ipPrefix.to_string().c_str()); + + /* This may occur when there are no neigh entries available any more + * set route pointing to rif to allow for neigh resolution in kernel. + * If route already points to rif then we are done. */ - syncd_fg_route_entry->syncd_fgnhg_map[bank].clear(); + if (!syncd_fg_route_entry->points_to_rif) + { + std::string interface_alias = syncd_fg_route_entry->nhg_key.getNextHops().begin()->alias; + sai_object_id_t rif_next_hop_id = m_intfsOrch->getRouterIntfsId(interface_alias); + if (rif_next_hop_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get rif next hop for %s", interface_alias.c_str()); + return false; + } + if (!modifyRoutesNextHopId(gVirtualRouterId, ipPrefix, rif_next_hop_id)) + { + SWSS_LOG_ERROR("Failed to modify route nexthopid to rif"); + return false; + } + + if (!removeFineGrainedNextHopGroup(syncd_fg_route_entry)) + { + SWSS_LOG_ERROR("Failed to delete Fine Grained next hop group"); + return false; + } + + syncd_fg_route_entry->points_to_rif = true; + syncd_fg_route_entry->next_hop_group_id = rif_next_hop_id; + + // remove state_db entry + m_stateWarmRestartRouteTable.del(ipPrefix.to_string()); + // Clear data structures + syncd_fg_route_entry->syncd_fgnhg_map.clear(); + syncd_fg_route_entry->active_nexthops.clear(); + syncd_fg_route_entry->inactive_to_active_map.clear(); + syncd_fg_route_entry->nhopgroup_members.clear(); + } } return true; @@ -1017,6 +1112,7 @@ bool FgNhgOrch::setNewNhgMembers(FGNextHopGroupEntry &syncd_fg_route_entry, FgNh { m_recoveryMap.erase(nexthopsMap); } + syncd_fg_route_entry.points_to_rif = false; return true; } @@ -1094,13 +1190,13 @@ bool FgNhgOrch::syncdContainsFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPre bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const NextHopGroupKey &nextHops, - sai_object_id_t &next_hop_id, bool &prevNhgWasFineGrained) + sai_object_id_t &next_hop_id, bool &isNextHopIdChanged) { SWSS_LOG_ENTER(); - /* default prevNhgWasFineGrained to true so that sai route is unaffected + /* default isNextHopIdChanged to false so that sai route is unaffected * when we return early with success */ - prevNhgWasFineGrained = true; + isNextHopIdChanged = false; FgNhgEntry *fgNhgEntry = 0; set next_hop_set = nextHops.getNextHops(); auto prefix_entry = m_fgNhgPrefixes.find(ipPrefix); @@ -1123,7 +1219,7 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const break; } } - + if (m_syncdFGRouteTables.find(vrf_id) != m_syncdFGRouteTables.end() && m_syncdFGRouteTables.at(vrf_id).find(ipPrefix) != 
m_syncdFGRouteTables.at(vrf_id).end() && m_syncdFGRouteTables.at(vrf_id).at(ipPrefix).nhg_key == nextHops) @@ -1150,7 +1246,7 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const */ for (auto it : fgNhgEntry->next_hops) { - while(bank_member_changes.size() <= it.second.bank) + while (bank_member_changes.size() <= it.second.bank) { bank_member_changes.push_back(BankMemberChanges()); } @@ -1202,6 +1298,7 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const { bank_member_changes[fgNhgEntry->next_hops[nhk.ip_address].bank]. nhs_to_add.push_back(nhk); + next_hop_to_add = true; } } @@ -1211,54 +1308,81 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const if (syncd_fg_route_entry_it != m_syncdFGRouteTables.at(vrf_id).end()) { + /* Route exists and nh was associated in the past */ FGNextHopGroupEntry *syncd_fg_route_entry = &(syncd_fg_route_entry_it->second); - /* Route exists, update FG ECMP group in SAI */ - for (auto nhk : syncd_fg_route_entry->active_nexthops) + + if (syncd_fg_route_entry->points_to_rif) { - if (nhopgroup_members_set.find(nhk) == nhopgroup_members_set.end()) + if (next_hop_to_add) { - bank_member_changes[fgNhgEntry->next_hops[nhk.ip_address].bank]. - nhs_to_del.push_back(nhk); + isNextHopIdChanged = true; + if (!createFineGrainedNextHopGroup(*syncd_fg_route_entry, fgNhgEntry, nextHops)) + { + return false; + } + + if (!setNewNhgMembers(*syncd_fg_route_entry, fgNhgEntry, bank_member_changes, nhopgroup_members_set, ipPrefix)) + { + return false; + } } - else + } + else + { + /* Update FG ECMP group in SAI */ + for (auto nhk : syncd_fg_route_entry->active_nexthops) { - bank_member_changes[fgNhgEntry->next_hops[nhk.ip_address].bank]. - active_nhs.push_back(nhk); + if (nhopgroup_members_set.find(nhk) == nhopgroup_members_set.end()) + { + bank_member_changes[fgNhgEntry->next_hops[nhk.ip_address].bank]. + nhs_to_del.push_back(nhk); + } + else + { + bank_member_changes[fgNhgEntry->next_hops[nhk.ip_address].bank]. 
+ active_nhs.push_back(nhk); + } } - } - if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, bank_member_changes, - nhopgroup_members_set, ipPrefix)) - { - return false; + if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, bank_member_changes, + nhopgroup_members_set, ipPrefix)) + { + return false; + } } } else { /* New route + nhg addition */ - prevNhgWasFineGrained = false; - if (next_hop_to_add == false) - { - SWSS_LOG_INFO("There were no valid next-hops to add %s:%s", ipPrefix.to_string().c_str(), - nextHops.to_string().c_str()); - /* Let the route retry logic(upon false rc) take care of this case */ - return false; - } - + isNextHopIdChanged = true; FGNextHopGroupEntry syncd_fg_route_entry; - if (!createFineGrainedNextHopGroup(syncd_fg_route_entry, fgNhgEntry, nextHops)) + if (next_hop_to_add) { - return false; - } + if (!createFineGrainedNextHopGroup(syncd_fg_route_entry, fgNhgEntry, nextHops)) + { + return false; + } - if (!setNewNhgMembers(syncd_fg_route_entry, fgNhgEntry, bank_member_changes, nhopgroup_members_set, ipPrefix)) + if (!setNewNhgMembers(syncd_fg_route_entry, fgNhgEntry, bank_member_changes, nhopgroup_members_set, ipPrefix)) + { + return false; + } + } + else { - return false; + sai_object_id_t rif_next_hop_id = m_intfsOrch->getRouterIntfsId(next_hop_set.begin()->alias); + if (rif_next_hop_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get rif next hop %s for %s", + nextHops.to_string().c_str(), ipPrefix.to_string().c_str()); + return false; + } + + syncd_fg_route_entry.next_hop_group_id = rif_next_hop_id; + syncd_fg_route_entry.points_to_rif = true; } m_syncdFGRouteTables[vrf_id][ipPrefix] = syncd_fg_route_entry; - - SWSS_LOG_NOTICE("Created route %s:%s", ipPrefix.to_string().c_str(), nextHops.to_string().c_str()); } m_syncdFGRouteTables[vrf_id][ipPrefix].nhg_key = nextHops; @@ -1310,19 +1434,22 @@ bool FgNhgOrch::removeFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix) } FGNextHopGroupEntry *syncd_fg_route_entry = &(it_route->second); - if (!removeFineGrainedNextHopGroup(syncd_fg_route_entry)) + if (!syncd_fg_route_entry->points_to_rif) { - SWSS_LOG_ERROR("Failed to clean-up fine grained ECMP SAI group"); - return false; - } + if (!removeFineGrainedNextHopGroup(syncd_fg_route_entry)) + { + SWSS_LOG_ERROR("Failed to clean-up fine grained ECMP SAI group"); + return false; + } - for (auto nh : syncd_fg_route_entry->active_nexthops) - { - m_neighOrch->decreaseNextHopRefCount(nh); - } + for (auto nh : syncd_fg_route_entry->active_nexthops) + { + m_neighOrch->decreaseNextHopRefCount(nh); + } - // remove state_db entry - m_stateWarmRestartRouteTable.del(ipPrefix.to_string()); + // remove state_db entry + m_stateWarmRestartRouteTable.del(ipPrefix.to_string()); + } it_route_table->second.erase(it_route); if (it_route_table->second.size() == 0) diff --git a/orchagent/fgnhgorch.h b/orchagent/fgnhgorch.h index 42b2b7f615..b7ec844a59 100644 --- a/orchagent/fgnhgorch.h +++ b/orchagent/fgnhgorch.h @@ -30,6 +30,7 @@ struct FGNextHopGroupEntry BankFGNextHopGroupMap syncd_fgnhg_map; // Map of (bank) -> (nexthops) -> (index in nhopgroup_members) NextHopGroupKey nhg_key; // Full next hop group key InactiveBankMapsToBank inactive_to_active_map; // Maps an inactive bank to an active one in terms of hash bkts + bool points_to_rif; // Flag to identify that route is currently pointing to a rif }; struct FGNextHopInfo @@ -104,7 +105,7 @@ class FgNhgOrch : public Orch, public Observer bool syncdContainsFgNhg(sai_object_id_t vrf_id, const 
IpPrefix &ipPrefix); bool validNextHopInNextHopGroup(const NextHopKey&); bool invalidNextHopInNextHopGroup(const NextHopKey&); - bool setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const NextHopGroupKey &nextHops, sai_object_id_t &next_hop_id, bool &prevNhgWasFineGrained); + bool setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const NextHopGroupKey &nextHops, sai_object_id_t &next_hop_id, bool &isNextHopIdChanged); bool removeFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix); // warm reboot support @@ -150,10 +151,10 @@ class FgNhgOrch : public Orch, public Observer void setStateDbRouteEntry(const IpPrefix&, uint32_t index, NextHopKey nextHop); bool writeHashBucketChange(FGNextHopGroupEntry *syncd_fg_route_entry, uint32_t index, sai_object_id_t nh_oid, const IpPrefix &ipPrefix, NextHopKey nextHop); + bool modifyRoutesNextHopId(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, sai_object_id_t next_hop_id); bool createFineGrainedNextHopGroup(FGNextHopGroupEntry &syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, const NextHopGroupKey &nextHops); bool removeFineGrainedNextHopGroup(FGNextHopGroupEntry *syncd_fg_route_entry); - vector generateRouteTableFromNhgKey(NextHopGroupKey nhg); void cleanupIpInLinkToIpMap(const string &link, const IpAddress &ip, FgNhgEntry &fgNhg_entry); diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index 04907b3de9..d54e205ded 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -1411,7 +1411,7 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) bool overlay_nh = false; bool status = false; bool curNhgIsFineGrained = false; - bool prevNhgWasFineGrained = false; + bool isFineGrainedNextHopIdChanged = false; bool blackhole = false; if (m_syncdRoutes.find(vrf_id) == m_syncdRoutes.end()) @@ -1434,9 +1434,9 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) /* We get 3 return values from setFgNhg: * 1. success/failure: on addition/modification of nexthop group/members * 2. next_hop_id: passed as a param to fn, used for sai route creation - * 3. prevNhgWasFineGrained: passed as a param to fn, used to determine transitions + * 3. isFineGrainedNextHopIdChanged: passed as a param to fn, used to determine transitions * between regular and FG ECMP, this is an optimization to prevent multiple lookups */ - if (!m_fgNhgOrch->setFgNhg(vrf_id, ipPrefix, nextHops, next_hop_id, prevNhgWasFineGrained)) + if (!m_fgNhgOrch->setFgNhg(vrf_id, ipPrefix, nextHops, next_hop_id, isFineGrainedNextHopIdChanged)) { return false; } @@ -1627,7 +1627,7 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry, &route_attr); } - if (curNhgIsFineGrained && prevNhgWasFineGrained) + if (curNhgIsFineGrained && !isFineGrainedNextHopIdChanged) { /* Don't change route entry if the route is previously fine grained and new nhg is also fine grained. * We already modifed sai nhg objs as part of setFgNhg to account for nhg change. 
*/ diff --git a/tests/test_fgnhg.py b/tests/test_fgnhg.py index 9e465935ad..2fa8a9d890 100644 --- a/tests/test_fgnhg.py +++ b/tests/test_fgnhg.py @@ -20,6 +20,7 @@ ASIC_NHG = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" ASIC_NHG_MEMB = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_NH_TB = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" +ASIC_RIF = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" def create_entry(db, table, key, pairs): db.create_entry(table, key, pairs) @@ -79,6 +80,33 @@ def _access_function(): failure_message="Fine Grained ECMP route not found") return result +def validate_asic_nhg_router_interface(asic_db, ipprefix): + def _access_function(): + false_ret = (False, '') + keys = asic_db.get_keys(ASIC_ROUTE_TB) + key = '' + route_exists = False + for k in keys: + rt_key = json.loads(k) + if rt_key['dest'] == ipprefix: + route_exists = True + key = k + if not route_exists: + return false_ret + + fvs = asic_db.get_entry(ASIC_ROUTE_TB, key) + if not fvs: + return false_ret + + rifid = fvs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID") + fvs = asic_db.get_entry(ASIC_RIF, rifid) + if not fvs: + return false_ret + + return (True, rifid) + _, result = wait_for_result(_access_function, failure_message="Route pointing to RIF not found") + return result + def validate_asic_nhg_regular_ecmp(asic_db, ipprefix): def _access_function(): false_ret = (False, '') @@ -338,8 +366,9 @@ def fine_grained_ecmp_base_test(dvs, match_mode): found_route = True break - # Since we didn't populate ARP yet, the route shouldn't be programmed - assert (found_route == False) + # Since we didn't populate ARP yet, route should point to RIF for kernel arp resolution to occur + assert (found_route == True) + validate_asic_nhg_router_interface(asic_db, fg_nhg_prefix) asic_nh_count = len(asic_db.get_keys(ASIC_NH_TB)) dvs.runcmd("arp -s 10.0.0.1 00:00:00:00:00:01") @@ -484,6 +513,9 @@ def fine_grained_ecmp_base_test(dvs, match_mode): # bring all links up one by one startup_link(dvs, app_db, 3) startup_link(dvs, app_db, 4) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) + nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) + nh_oid_map = get_nh_oid_map(asic_db) nh_memb_exp_count = {"10.0.0.7":30,"10.0.0.9":30} validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) From 57d21e74c6e894a09a3a516250fd0270b53e033f Mon Sep 17 00:00:00 2001 From: Volodymyr Samotiy Date: Thu, 16 Sep 2021 12:09:33 +0300 Subject: [PATCH 04/19] [pfcwd] Convert polling interval from ms to us in LUA scripts (#1908) **What I did** Converted polling interval from milliseconds to microseconds in PFCWD LUA scripts. **Why I did it** PFCWD storm detection and restoration LUA scripts require values in microseconds. Due to recent changes polling interval is now passed in milliseconds by "FlexCounter". * Azure/sonic-sairedis#878 So need to convert values to microseconds (as it was before) in order to make PFCWD working, **How I verified it** Ran PFCWD tests from sonic-mgmt. 
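The unit fix itself is one multiplication at the top of each script. The same conversion as a self-contained C++ sketch (hypothetical helper, not orchagent code):

```cpp
#include <cstdint>

// The storm detection math needs the polling window in microseconds, but
// FlexCounter now hands the interval over in milliseconds, so scale by
// 1000 -- exactly what tonumber(ARGV[3]) * 1000 does in the scripts below.
constexpr std::uint64_t poll_interval_us(std::uint64_t interval_ms)
{
    return interval_ms * 1000ULL;
}

static_assert(poll_interval_us(200) == 200000,
              "a 200 ms polling interval is a 200000 us detection window");
```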
--- orchagent/pfc_detect_mellanox.lua | 4 ++-- orchagent/pfc_restore.lua | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/orchagent/pfc_detect_mellanox.lua b/orchagent/pfc_detect_mellanox.lua index 0f4d6d4f5d..6df16241e9 100644 --- a/orchagent/pfc_detect_mellanox.lua +++ b/orchagent/pfc_detect_mellanox.lua @@ -1,12 +1,12 @@ -- KEYS - queue IDs -- ARGV[1] - counters db index -- ARGV[2] - counters table name --- ARGV[3] - poll time interval +-- ARGV[3] - poll time interval (milliseconds) -- return queue Ids that satisfy criteria local counters_db = ARGV[1] local counters_table_name = ARGV[2] -local poll_time = tonumber(ARGV[3]) +local poll_time = tonumber(ARGV[3]) * 1000 local rets = {} diff --git a/orchagent/pfc_restore.lua b/orchagent/pfc_restore.lua index 4cf0ab49e5..7b137a40d3 100644 --- a/orchagent/pfc_restore.lua +++ b/orchagent/pfc_restore.lua @@ -1,12 +1,12 @@ -- KEYS - queue IDs -- ARGV[1] - counters db index -- ARGV[2] - counters table name --- ARGV[3] - poll time interval +-- ARGV[3] - poll time interval (milliseconds) -- return queue Ids that satisfy criteria local counters_db = ARGV[1] local counters_table_name = ARGV[2] -local poll_time = tonumber(ARGV[3]) +local poll_time = tonumber(ARGV[3]) * 1000 local rets = {} From 002bb1d92fdc04435ac2d269cebf82996ab036da Mon Sep 17 00:00:00 2001 From: judyjoseph <53951155+judyjoseph@users.noreply.github.com> Date: Thu, 16 Sep 2021 19:20:29 -0700 Subject: [PATCH 05/19] [tlm teamd] Add retry mechanism before logging the ERR in get_dumps. (#1629) Fix https://github.com/Azure/sonic-buildimage/issues/6632 There has been cases when the get_dumps API in tlm_teamd process is not able to get the right data and logs an error message. The issue occurs very rarely and it is due to the race condition between teammgrd/teamsyncd/tlm_teamd when a Portchannel is removed. In the teamd telemetry module there are two places where the get_dumps() is called. 1. When the portchannel object is add/removed. [https://github.com/Azure/sonic-swss/blob/master/tlm_teamd/main.cpp#L101] 2. On timeout of 1 sec. [https://github.com/Azure/sonic-swss/blob/master/tlm_teamd/main.cpp#L108] In case of timeout call for get_dumps(), there could be an inconsistent state where the portchannel/teamd process is getting removed by teammgrd but the STATE table update to remove the lag interface is still not received by the tlm_teamd module. Seen below on a bad case where the get_dumps() call from TIMEOUT handler throws an ERR message - as the remove_lag message is not yet received. On a good case ``` Feb 7 02:03:27.576078 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' has been removed. Feb 7 02:03:28.453829 vlab-01 INFO teamd#supervisord 2021-02-07 02:03:28,451 INFO reaped unknown pid 4747 (exit status 0) Feb 7 02:03:28.458616 vlab-01 NOTICE teamd#teammgrd: :- removeLag: Stop port channel PortChannel999 ``` On a bad case ``` Feb 7 02:03:33.037401 vlab-01 ERR teamd#tlm_teamd: :- get_dump: Can't get dump for LAG 'PortChannel999'. Skipping Feb 7 02:03:33.046179 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' has been removed. Feb 7 02:03:33.997639 vlab-01 INFO teamd#supervisord 2021-02-07 02:03:33,996 INFO reaped unknown pid 4775 (exit status 0) Feb 7 02:03:34.040126 vlab-01 NOTICE teamd#teammgrd: :- removeLag: Stop port channel PortChannel999 ``` **How I did it** Add retry mechanism before logging the ERR in get_dumps API(). The number of retries is set as 3. 
So that if the same error repeats 3 times - it is logged, other wise it is considered a transient condition - not an error. Additionally added a **to_retry** flag to get_dumps() API so that the caller can decide whether to use the retry mechanism or not. **How I verified it** Verified that the error message is no more seen in the syslog. Confirmed by running ~ 200 times portchannel creation (which had reproduced the issue earlier on VS testbed). The new NOTICE message added in remove_lag shows that we had indeed hit the original issue earlier and clearing flags here. ``` admin@vlab-01:/var/log$ sudo zgrep -i "get dump for LAG" syslog*; sudo zgrep -i "clearing it" syslog* syslog.1:Feb 8 06:41:54.995716 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it syslog.2.gz:Feb 8 06:31:32.360135 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it syslog.2.gz:Feb 8 06:36:16.617283 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it syslog.2.gz:Feb 8 06:37:56.906306 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it syslog.3.gz:Feb 8 06:25:44.442474 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it syslog.3.gz:Feb 8 06:27:02.539413 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it syslog.3.gz:Feb 8 06:27:42.785533 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it syslog.3.gz:Feb 8 06:29:33.510933 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it syslog.5.gz:Feb 8 06:08:03.643106 vlab-01 NOTICE teamd#tlm_teamd: :- remove_lag: The LAG 'PortChannel999' had errored while getting dump, clearing it ``` --- tlm_teamd/main.cpp | 7 ++++-- tlm_teamd/teamdctl_mgr.cpp | 51 +++++++++++++++++++++++++++++++++++--- tlm_teamd/teamdctl_mgr.h | 6 +++-- 3 files changed, 56 insertions(+), 8 deletions(-) diff --git a/tlm_teamd/main.cpp b/tlm_teamd/main.cpp index 3f00c56940..3a464e8a67 100644 --- a/tlm_teamd/main.cpp +++ b/tlm_teamd/main.cpp @@ -98,7 +98,7 @@ int main() if (res == swss::Select::OBJECT) { update_interfaces(sst_lag, teamdctl_mgr); - values_store.update(teamdctl_mgr.get_dumps()); + values_store.update(teamdctl_mgr.get_dumps(false)); } else if (res == swss::Select::ERROR) { @@ -108,7 +108,10 @@ int main() else if (res == swss::Select::TIMEOUT) { teamdctl_mgr.process_add_queue(); - values_store.update(teamdctl_mgr.get_dumps()); + // In the case of lag removal, there is a scenario where the select::TIMEOUT + // occurs, it triggers get_dumps incorrectly for resource which was in process of + // getting deleted. The fix here is to retry and check if this is a real failure. + values_store.update(teamdctl_mgr.get_dumps(true)); } else { diff --git a/tlm_teamd/teamdctl_mgr.cpp b/tlm_teamd/teamdctl_mgr.cpp index 3ea374c797..6a7a36f740 100644 --- a/tlm_teamd/teamdctl_mgr.cpp +++ b/tlm_teamd/teamdctl_mgr.cpp @@ -5,6 +5,8 @@ #include "teamdctl_mgr.h" +#define MAX_RETRY 3 + /// /// Custom function for libteamdctl logger. 
IT is empty to prevent libteamdctl to spam us with the error messages /// @param tdc teamdctl descriptor @@ -136,6 +138,15 @@ bool TeamdCtlMgr::remove_lag(const std::string & lag_name) { SWSS_LOG_WARN("The LAG '%s' hasn't been added. Can't remove it", lag_name.c_str()); } + + // If this lag interface errored last time, remove the entry. + // This is needed as here in this remove API, we do m_handlers.erase(lag_name). + if (m_lags_err_retry.find(lag_name) != m_lags_err_retry.end()) + { + SWSS_LOG_NOTICE("The LAG '%s' had errored while getting dump, removing it", lag_name.c_str()); + m_lags_err_retry.erase(lag_name); + } + return true; } @@ -163,11 +174,12 @@ void TeamdCtlMgr::process_add_queue() /// /// Get json dump from teamd for LAG interface with name lag_name /// @param lag_name a name for LAG interface +/// @param to_retry is the flag used to do retry or not. /// @return a pair. First element of the pair is true, if the method is successful /// false otherwise. If the first element is true, the second element has a dump /// otherwise the second element is an empty string /// -TeamdCtlDump TeamdCtlMgr::get_dump(const std::string & lag_name) +TeamdCtlDump TeamdCtlMgr::get_dump(const std::string & lag_name, bool to_retry) { TeamdCtlDump res = { false, "" }; if (has_key(lag_name)) @@ -178,10 +190,41 @@ TeamdCtlDump TeamdCtlMgr::get_dump(const std::string & lag_name) if (r == 0) { res = { true, std::string(dump) }; + + // If this lag interface errored last time, remove the entry + if (m_lags_err_retry.find(lag_name) != m_lags_err_retry.end()) + { + SWSS_LOG_NOTICE("The LAG '%s' had errored in get_dump earlier, removing it", lag_name.c_str()); + m_lags_err_retry.erase(lag_name); + } } else { - SWSS_LOG_ERROR("Can't get dump for LAG '%s'. Skipping", lag_name.c_str()); + // In case of failure and retry flag is set, check if it fails for MAX_RETRY times. + if (to_retry) + { + if (m_lags_err_retry.find(lag_name) != m_lags_err_retry.end()) + { + if (m_lags_err_retry[lag_name] == MAX_RETRY) + { + SWSS_LOG_ERROR("Can't get dump for LAG '%s'. Skipping", lag_name.c_str()); + m_lags_err_retry.erase(lag_name); + } + else + m_lags_err_retry[lag_name]++; + } + else + { + + // This time a different lag interface errored out. + m_lags_err_retry[lag_name] = 1; + } + } + else + { + // No need to retry if the flag is not set. + SWSS_LOG_ERROR("Can't get dump for LAG '%s'. Skipping", lag_name.c_str()); + } } } else @@ -196,14 +239,14 @@ TeamdCtlDump TeamdCtlMgr::get_dump(const std::string & lag_name) /// Get dumps for all registered LAG interfaces /// @return vector of pairs. 
Each pair first value is a name of LAG, second value is a dump /// -TeamdCtlDumps TeamdCtlMgr::get_dumps() +TeamdCtlDumps TeamdCtlMgr::get_dumps(bool to_retry) { TeamdCtlDumps res; for (const auto & p: m_handlers) { const auto & lag_name = p.first; - const auto & result = get_dump(lag_name); + const auto & result = get_dump(lag_name, to_retry); const auto & status = result.first; const auto & dump = result.second; if (status) diff --git a/tlm_teamd/teamdctl_mgr.h b/tlm_teamd/teamdctl_mgr.h index 73d6bb3b7e..f6a5dd204f 100644 --- a/tlm_teamd/teamdctl_mgr.h +++ b/tlm_teamd/teamdctl_mgr.h @@ -18,8 +18,9 @@ class TeamdCtlMgr bool add_lag(const std::string & lag_name); bool remove_lag(const std::string & lag_name); void process_add_queue(); - TeamdCtlDump get_dump(const std::string & lag_name); - TeamdCtlDumps get_dumps(); + // Retry logic added to prevent incorrect error reporting in dump API's + TeamdCtlDump get_dump(const std::string & lag_name, bool to_retry); + TeamdCtlDumps get_dumps(bool to_retry); private: bool has_key(const std::string & lag_name) const; @@ -27,6 +28,7 @@ class TeamdCtlMgr std::unordered_map m_handlers; std::unordered_map m_lags_to_add; + std::unordered_map m_lags_err_retry; const int max_attempts_to_add = 10; }; From fbdcaae7869e9623fa53c55ccf87f47132fdf6ce Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Fri, 17 Sep 2021 11:19:20 +0300 Subject: [PATCH 06/19] [teammgrd]: Improve LAGs cleanup on shutdown: send SIGTERM directly to PID. (#1841) This PR is intended to fix LAGs cleanup degradation caused by python2.7 -> python3 migration. The approach is to replace `teamd -k -t` call with the raw `SIGTERM` and add PID alive check. This will make sure the `teammgrd` is stopped only after all managed processes are being killed. resolves: https://github.com/Azure/sonic-buildimage/issues/8071 **What I did** * Replaced `teamd -k -t` call with raw `SIGTERM` * Added PID alive check **Why I did it** * To fix LAGs cleanup timeout issue caused by python2.7 -> python3 upgrade **How I verified it** 1. Configure 64 LAG RIFs 2. 
Reload config --- cfgmgr/teammgr.cpp | 45 ++++++++++++++++++++++++++++++++++++++++----- cfgmgr/teammgr.h | 2 -- cfgmgr/teammgrd.cpp | 5 +++-- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/cfgmgr/teammgr.cpp b/cfgmgr/teammgr.cpp index a0f7c41698..89377e7e2d 100644 --- a/cfgmgr/teammgr.cpp +++ b/cfgmgr/teammgr.cpp @@ -112,18 +112,53 @@ void TeamMgr::doTask(Consumer &consumer) } } - void TeamMgr::cleanTeamProcesses() { SWSS_LOG_ENTER(); SWSS_LOG_NOTICE("Cleaning up LAGs during shutdown..."); - for (const auto& it: m_lagList) + + std::unordered_map aliasPidMap; + + for (const auto& alias: m_lagList) + { + std::string res; + pid_t pid; + + { + std::stringstream cmd; + cmd << "cat " << shellquote("/var/run/teamd/" + alias + ".pid"); + EXEC_WITH_ERROR_THROW(cmd.str(), res); + + pid = static_cast(std::stoul(res, nullptr, 10)); + aliasPidMap[alias] = pid; + + SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); + } + + { + std::stringstream cmd; + cmd << "kill -TERM " << pid; + EXEC_WITH_ERROR_THROW(cmd.str(), res); + + SWSS_LOG_INFO("Sent SIGTERM to port channel %s pid %d", alias.c_str(), pid); + } + } + + for (const auto& cit: aliasPidMap) { - //This will call team -k kill -t which internally send SIGTERM - removeLag(it); + const auto &alias = cit.first; + const auto &pid = cit.second; + + std::stringstream cmd; + std::string res; + + SWSS_LOG_NOTICE("Waiting for port channel %s pid %d to stop...", alias.c_str(), pid); + + cmd << "tail -f --pid=" << pid << " /dev/null"; + EXEC_WITH_ERROR_THROW(cmd.str(), res); } - return; + SWSS_LOG_NOTICE("LAGs cleanup is done"); } void TeamMgr::doLagTask(Consumer &consumer) diff --git a/cfgmgr/teammgr.h b/cfgmgr/teammgr.h index 4d2f20bc9f..c1b5d525c0 100644 --- a/cfgmgr/teammgr.h +++ b/cfgmgr/teammgr.h @@ -32,7 +32,6 @@ class TeamMgr : public Orch ProducerStateTable m_appLagTable; std::set m_lagList; - std::map m_lagPIDList; MacAddress m_mac; @@ -50,7 +49,6 @@ class TeamMgr : public Orch bool setLagMtu(const std::string &alias, const std::string &mtu); bool setLagLearnMode(const std::string &alias, const std::string &learn_mode); bool setLagTpid(const std::string &alias, const std::string &tpid); - bool isPortEnslaved(const std::string &); bool findPortMaster(std::string &, const std::string &); diff --git a/cfgmgr/teammgrd.cpp b/cfgmgr/teammgrd.cpp index 1ff2ed760e..e38456eebe 100644 --- a/cfgmgr/teammgrd.cpp +++ b/cfgmgr/teammgrd.cpp @@ -66,7 +66,7 @@ int main(int argc, char **argv) } while (!received_sigterm) - { + { Selectable *sel; int ret; @@ -91,7 +91,8 @@ int main(int argc, char **argv) catch (const exception &e) { SWSS_LOG_ERROR("Runtime error: %s", e.what()); + return EXIT_FAILURE; } - return -1; + return EXIT_SUCCESS; } From 14c937ef0899a1ad620b28021ba370c6c05151ce Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Fri, 17 Sep 2021 13:04:48 -0700 Subject: [PATCH 07/19] Enabling copp tests (#1914) * Enabling previously skipped copp tests --- tests/test_copp.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tests/test_copp.py b/tests/test_copp.py index 59cf8b4c3f..567120a636 100644 --- a/tests/test_copp.py +++ b/tests/test_copp.py @@ -1,6 +1,5 @@ import time import os -import pytest from swsscommon import swsscommon @@ -299,7 +298,6 @@ def validate_trap_group(self, trap_oid, trap_group): if fv[0] == "SAI_HOSTIF_ATTR_NAME": assert fv[1] == trap_group[keys] - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_defaults(self, dvs, testlog): 
self.setup_copp(dvs) trap_keys = self.trap_atbl.getKeys() @@ -323,7 +321,6 @@ def test_defaults(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_restricted_trap_sflow(self, dvs, testlog): self.setup_copp(dvs) fvs = swsscommon.FieldValuePairs([("state", "enabled")]) @@ -352,7 +349,6 @@ def test_restricted_trap_sflow(self, dvs, testlog): assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_policer_set(self, dvs, testlog): self.setup_copp(dvs) fvs = swsscommon.FieldValuePairs([("cbs", "900")]) @@ -384,7 +380,6 @@ def test_policer_set(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_trap_group_set(self, dvs, testlog): self.setup_copp(dvs) global copp_trap @@ -414,7 +409,6 @@ def test_trap_group_set(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_trap_ids_set(self, dvs, testlog): self.setup_copp(dvs) global copp_trap @@ -470,7 +464,6 @@ def test_trap_ids_set(self, dvs, testlog): break assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_trap_action_set(self, dvs, testlog): self.setup_copp(dvs) fvs = swsscommon.FieldValuePairs([("trap_action", "copy")]) @@ -502,7 +495,6 @@ def test_trap_action_set(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_new_trap_add(self, dvs, testlog): self.setup_copp(dvs) global copp_trap @@ -532,7 +524,6 @@ def test_new_trap_add(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_new_trap_del(self, dvs, testlog): self.setup_copp(dvs) global copp_trap @@ -564,7 +555,6 @@ def test_new_trap_del(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == False - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_new_trap_group_add(self, dvs, testlog): self.setup_copp(dvs) global copp_trap @@ -599,7 +589,6 @@ def test_new_trap_group_add(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_new_trap_group_del(self, dvs, testlog): self.setup_copp(dvs) global copp_trap @@ -636,7 +625,6 @@ def test_new_trap_group_del(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found != True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_override_trap_grp_cfg_del (self, dvs, testlog): self.setup_copp(dvs) global copp_trap @@ -672,7 +660,6 @@ def test_override_trap_grp_cfg_del (self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True - @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_override_trap_cfg_del(self, dvs, testlog): self.setup_copp(dvs) global copp_trap @@ -706,7 +693,6 @@ def test_override_trap_cfg_del(self, dvs, testlog): elif trap_id == "ssh": assert trap_found == False - @pytest.mark.skip("Skip to be removed after sonic-buildimage 
changes get merged") def test_empty_trap_cfg(self, dvs, testlog): self.setup_copp(dvs) global copp_trap From d338bd00abf29d33fbe839c9b431ee4de5e40df7 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Fri, 17 Sep 2021 19:58:42 -0700 Subject: [PATCH 08/19] [pfcwd] Fix the polling interval time granularity (#1912) Fixes Azure/sonic-buildimage#8773 Pfcwd regressions were observed due to the changes in the PR Azure/sonic-sairedis#878. Hence aligning the polling interval in lua script to use microseconds **How I verified it** Ran pfcwd tests that were failing prior to this change and they passed --- orchagent/pfc_detect_barefoot.lua | 4 ++-- orchagent/pfc_detect_broadcom.lua | 4 ++-- orchagent/pfc_detect_innovium.lua | 4 ++-- orchagent/pfc_detect_nephos.lua | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/orchagent/pfc_detect_barefoot.lua b/orchagent/pfc_detect_barefoot.lua index e77137b287..b270549a29 100644 --- a/orchagent/pfc_detect_barefoot.lua +++ b/orchagent/pfc_detect_barefoot.lua @@ -1,12 +1,12 @@ -- KEYS - queue IDs -- ARGV[1] - counters db index -- ARGV[2] - counters table name --- ARGV[3] - poll time interval +-- ARGV[3] - poll time interval (milliseconds) -- return queue Ids that satisfy criteria local counters_db = ARGV[1] local counters_table_name = ARGV[2] -local poll_time = tonumber(ARGV[3]) +local poll_time = tonumber(ARGV[3]) * 1000 local rets = {} diff --git a/orchagent/pfc_detect_broadcom.lua b/orchagent/pfc_detect_broadcom.lua index e7106cbd24..4f82b93317 100644 --- a/orchagent/pfc_detect_broadcom.lua +++ b/orchagent/pfc_detect_broadcom.lua @@ -1,12 +1,12 @@ -- KEYS - queue IDs -- ARGV[1] - counters db index -- ARGV[2] - counters table name --- ARGV[3] - poll time interval +-- ARGV[3] - poll time interval (milliseconds) -- return queue Ids that satisfy criteria local counters_db = ARGV[1] local counters_table_name = ARGV[2] -local poll_time = tonumber(ARGV[3]) +local poll_time = tonumber(ARGV[3]) * 1000 local rets = {} diff --git a/orchagent/pfc_detect_innovium.lua b/orchagent/pfc_detect_innovium.lua index d90f4d5a15..cedd51baa3 100644 --- a/orchagent/pfc_detect_innovium.lua +++ b/orchagent/pfc_detect_innovium.lua @@ -1,12 +1,12 @@ -- KEYS - queue IDs -- ARGV[1] - counters db index -- ARGV[2] - counters table name --- ARGV[3] - poll time interval +-- ARGV[3] - poll time interval (milliseconds) -- return queue Ids that satisfy criteria local counters_db = ARGV[1] local counters_table_name = ARGV[2] -local poll_time = tonumber(ARGV[3]) +local poll_time = tonumber(ARGV[3]) * 1000 local rets = {} diff --git a/orchagent/pfc_detect_nephos.lua b/orchagent/pfc_detect_nephos.lua index bd0cb3e769..d152fc5f8c 100644 --- a/orchagent/pfc_detect_nephos.lua +++ b/orchagent/pfc_detect_nephos.lua @@ -1,12 +1,12 @@ -- KEYS - queue IDs -- ARGV[1] - counters db index -- ARGV[2] - counters table name --- ARGV[3] - poll time interval +-- ARGV[3] - poll time interval (milliseconds) -- return queue Ids that satisfy criteria local counters_db = ARGV[1] local counters_table_name = ARGV[2] -local poll_time = tonumber(ARGV[3]) +local poll_time = tonumber(ARGV[3]) * 1000 local rets = {} From 025032f54b21c9738c74b38500430d1ba5b497df Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Sat, 18 Sep 2021 05:28:54 -0700 Subject: [PATCH 09/19] [VS] Skip failing test - test_recirc_port (#1918) **What I did** Test is failing for swss-common and sairedis repos. 
**Why I did it** Temporarily skip to unblock PRs --- tests/test_port_config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_port_config.py b/tests/test_port_config.py index b296d893be..d584899e97 100644 --- a/tests/test_port_config.py +++ b/tests/test_port_config.py @@ -155,6 +155,7 @@ def test_port_breakout(self, dvs, port_config): assert hw_lane_value, "Can't get hw_lane list" assert hw_lane_value == "1:%s" % (new_lanes[i]) + @pytest.mark.skip(reason="Failing. Under investigation") def test_recirc_port(self, dvs): # Get port config from configDB From 86b4ede8818b3917958318de6f0116399a47d6e7 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Tue, 21 Sep 2021 02:24:13 +0800 Subject: [PATCH 10/19] [portsorch] Avoid orchagent crash when set invalid interface types to port (#1906) What I did When setting invalid interface types to port, do not call handleSaiSetStatus to avoid orchagent exit. Why I did it Currently, when setting invalid interface types to port, orchagent would crash, this is not necessary since user still has chance to correct it --- orchagent/orch.cpp | 28 +++++++-- orchagent/portsorch.cpp | 124 ++++++++++++++++++++++++---------------- orchagent/portsorch.h | 10 ++-- 3 files changed, 104 insertions(+), 58 deletions(-) diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index b4ef246662..5cc9ac0335 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -748,16 +748,36 @@ task_process_status Orch::handleSaiSetStatus(sai_api_t api, sai_status_t status, * in each orch. * 3. Take the type of sai api into consideration. */ - switch (status) + if (status == SAI_STATUS_SUCCESS) { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus"); - return task_success; + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus"); + return task_success; + } + + switch (api) + { + case SAI_API_PORT: + switch (status) + { + case SAI_STATUS_INVALID_ATTR_VALUE_0: + /* + * If user gives an invalid attribute value, no need to retry or exit orchagent, just fail the current task + * and let user correct the configuration. 
+ */ + SWSS_LOG_ERROR("Encountered SAI_STATUS_INVALID_ATTR_VALUE_0 in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + exit(EXIT_FAILURE); + } default: SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); exit(EXIT_FAILURE); } + return task_need_retry; } diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 76babdf24d..68028e2c21 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -1919,7 +1919,7 @@ bool PortsOrch::setGearboxPortAttr(Port &port, dest_port_type_t port_type, sai_p return true; } -bool PortsOrch::setPortSpeed(Port &port, sai_uint32_t speed) +task_process_status PortsOrch::setPortSpeed(Port &port, sai_uint32_t speed) { sai_attribute_t attr; sai_status_t status; @@ -1932,15 +1932,11 @@ bool PortsOrch::setPortSpeed(Port &port, sai_uint32_t speed) status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } + return handleSaiSetStatus(SAI_API_PORT, status); } setGearboxPortsAttr(port, SAI_PORT_ATTR_SPEED, &speed); - return true; + return task_success; } bool PortsOrch::getPortSpeed(sai_object_id_t id, sai_uint32_t &speed) @@ -1973,7 +1969,7 @@ bool PortsOrch::getPortSpeed(sai_object_id_t id, sai_uint32_t &speed) return true; } -bool PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list) +task_process_status PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list) { SWSS_LOG_ENTER(); sai_attribute_t attr; @@ -1984,11 +1980,15 @@ bool PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::vector(speed_list.size()); status = sai_port_api->set_port_attribute(port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + return handleSaiSetStatus(SAI_API_PORT, status); + } - return status == SAI_STATUS_SUCCESS; + return task_success; } -bool PortsOrch::setPortInterfaceType(sai_object_id_t port_id, sai_port_interface_type_t interface_type) +task_process_status PortsOrch::setPortInterfaceType(sai_object_id_t port_id, sai_port_interface_type_t interface_type) { SWSS_LOG_ENTER(); sai_attribute_t attr; @@ -1998,11 +1998,15 @@ bool PortsOrch::setPortInterfaceType(sai_object_id_t port_id, sai_port_interface attr.value.u32 = static_cast(interface_type); status = sai_port_api->set_port_attribute(port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + return handleSaiSetStatus(SAI_API_PORT, status); + } - return status == SAI_STATUS_SUCCESS; + return task_success; } -bool PortsOrch::setPortAdvInterfaceTypes(sai_object_id_t port_id, std::vector &interface_types) +task_process_status PortsOrch::setPortAdvInterfaceTypes(sai_object_id_t port_id, std::vector &interface_types) { SWSS_LOG_ENTER(); sai_attribute_t attr; @@ -2015,14 +2019,10 @@ bool PortsOrch::setPortAdvInterfaceTypes(sai_object_id_t port_id, std::vectorset_port_attribute(port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); - if (handle_status != task_success) - { - 
return parseHandleSaiStatusFailure(handle_status); - } + return handleSaiSetStatus(SAI_API_PORT, status); } - return true; + return task_success; } bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index) @@ -2065,33 +2065,22 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin return true; } -bool PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) +task_process_status PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) { SWSS_LOG_ENTER(); sai_attribute_t attr; attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; - switch(an) { - case 1: - attr.value.booldata = true; - break; - default: - attr.value.booldata = false; - break; - } + attr.value.booldata = (an == 1 ? true : false); sai_status_t status = sai_port_api->set_port_attribute(id, &attr); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set AutoNeg %u to port pid:%" PRIx64, attr.value.booldata, id); - task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } + return handleSaiSetStatus(SAI_API_PORT, status); } SWSS_LOG_INFO("Set AutoNeg %u to port pid:%" PRIx64, attr.value.booldata, id); - return true; + return task_success; } bool PortsOrch::setHostIntfsOperStatus(const Port& port, bool isUp) const @@ -2827,18 +2816,23 @@ void PortsOrch::doPortTask(Consumer &consumer) m_portList[alias] = p; } - if (setPortAutoNeg(p.m_port_id, an)) - { - SWSS_LOG_NOTICE("Set port %s AutoNeg from %d to %d", alias.c_str(), p.m_autoneg, an); - p.m_autoneg = an; - m_portList[alias] = p; - } - else + auto status = setPortAutoNeg(p.m_port_id, an); + if (status != task_success) { SWSS_LOG_ERROR("Failed to set port %s AN from %d to %d", alias.c_str(), p.m_autoneg, an); - it++; + if (status == task_need_retry) + { + it++; + } + else + { + it = consumer.m_toSync.erase(it); + } continue; } + SWSS_LOG_NOTICE("Set port %s AutoNeg from %d to %d", alias.c_str(), p.m_autoneg, an); + p.m_autoneg = an; + m_portList[alias] = p; } } @@ -2869,10 +2863,18 @@ void PortsOrch::doPortTask(Consumer &consumer) m_portList[alias] = p; } - if (!setPortSpeed(p, speed)) + auto status = setPortSpeed(p, speed); + if (status != task_success) { SWSS_LOG_ERROR("Failed to set port %s speed from %u to %u", alias.c_str(), p.m_speed, speed); - it++; + if (status == task_need_retry) + { + it++; + } + else + { + it = consumer.m_toSync.erase(it); + } continue; } @@ -2914,13 +2916,21 @@ void PortsOrch::doPortTask(Consumer &consumer) } auto ori_adv_speeds = swss::join(',', p.m_adv_speeds.begin(), p.m_adv_speeds.end()); - if (!setPortAdvSpeeds(p.m_port_id, adv_speeds)) + auto status = setPortAdvSpeeds(p.m_port_id, adv_speeds); + if (status != task_success) { SWSS_LOG_ERROR("Failed to set port %s advertised speed from %s to %s", alias.c_str(), ori_adv_speeds.c_str(), adv_speeds_str.c_str()); - it++; + if (status == task_need_retry) + { + it++; + } + else + { + it = consumer.m_toSync.erase(it); + } continue; } SWSS_LOG_NOTICE("Set port %s advertised speed from %s to %s", alias.c_str(), @@ -2957,10 +2967,18 @@ void PortsOrch::doPortTask(Consumer &consumer) m_portList[alias] = p; } - if (!setPortInterfaceType(p.m_port_id, interface_type)) + auto status = setPortInterfaceType(p.m_port_id, interface_type); + if (status != task_success) { SWSS_LOG_ERROR("Failed to set port %s interface type to %s", alias.c_str(), interface_type_str.c_str()); - it++; + if (status == task_need_retry) + { + it++; + } + else + { + it = 
consumer.m_toSync.erase(it); + } continue; } @@ -2996,10 +3014,18 @@ void PortsOrch::doPortTask(Consumer &consumer) m_portList[alias] = p; } - if (!setPortAdvInterfaceTypes(p.m_port_id, adv_interface_types)) + auto status = setPortAdvInterfaceTypes(p.m_port_id, adv_interface_types); + if (status != task_success) { SWSS_LOG_ERROR("Failed to set port %s advertised interface type to %s", alias.c_str(), adv_interface_types_str.c_str()); - it++; + if (status == task_need_retry) + { + it++; + } + else + { + it = consumer.m_toSync.erase(it); + } continue; } diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 22efce3561..9ae26242ce 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -278,12 +278,12 @@ class PortsOrch : public Orch, public Subject bool isSpeedSupported(const std::string& alias, sai_object_id_t port_id, sai_uint32_t speed); void getPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id, PortSupportedSpeeds &supported_speeds); void initPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id); - bool setPortSpeed(Port &port, sai_uint32_t speed); + task_process_status setPortSpeed(Port &port, sai_uint32_t speed); bool getPortSpeed(sai_object_id_t id, sai_uint32_t &speed); bool setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value); bool setGearboxPortAttr(Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value); - bool setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list); + task_process_status setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list); bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); @@ -296,10 +296,10 @@ class PortsOrch : public Orch, public Subject bool m_isPortCounterMapGenerated = false; bool m_isPortBufferDropCounterMapGenerated = false; - bool setPortAutoNeg(sai_object_id_t id, int an); + task_process_status setPortAutoNeg(sai_object_id_t id, int an); bool setPortFecMode(sai_object_id_t id, int fec); - bool setPortInterfaceType(sai_object_id_t id, sai_port_interface_type_t interface_type); - bool setPortAdvInterfaceTypes(sai_object_id_t id, std::vector &interface_types); + task_process_status setPortInterfaceType(sai_object_id_t id, sai_port_interface_type_t interface_type); + task_process_status setPortAdvInterfaceTypes(sai_object_id_t id, std::vector &interface_types); bool getPortOperStatus(const Port& port, sai_port_oper_status_t& status) const; void updatePortOperStatus(Port &port, sai_port_oper_status_t status); From a89d1f811bf60667873b270c34b3eb3583c71a6e Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Thu, 23 Sep 2021 14:28:58 -0700 Subject: [PATCH 11/19] Fix failing DPB LAG tests (#1919) *Fixed the failed DPB LAG tests --- tests/test_port_dpb_lag.py | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/tests/test_port_dpb_lag.py b/tests/test_port_dpb_lag.py index c56ec6134e..54d90ebc1a 100644 --- a/tests/test_port_dpb_lag.py +++ b/tests/test_port_dpb_lag.py @@ -2,6 +2,7 @@ import pytest from port_dpb import Port from port_dpb import DPB +from swsscommon import swsscommon @pytest.mark.usefixtures('dpb_setup_fixture') @pytest.mark.usefixtures('dvs_lag_manager') @@ -10,17 +11,39 @@ def check_syslog(self, dvs, marker, log, expected_cnt): (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, log)]) assert num.strip() >= str(expected_cnt) - @pytest.mark.skip(reason="Failing. 
Under investigation") + def create_port_channel(self, dvs, channel): + tbl = swsscommon.ProducerStateTable(dvs.pdb, "LAG_TABLE") + fvs = swsscommon.FieldValuePairs([("admin", "up"), ("mtu", "9100")]) + tbl.set("PortChannel" + channel, fvs) + time.sleep(1) + + def remove_port_channel(self, dvs, channel): + tbl = swsscommon.ProducerStateTable(dvs.pdb, "LAG_TABLE") + tbl._del("PortChannel" + channel) + time.sleep(1) + + def create_port_channel_member(self, dvs, channel, interface): + tbl = swsscommon.ProducerStateTable(dvs.pdb, "LAG_MEMBER_TABLE") + fvs = swsscommon.FieldValuePairs([("status", "enabled")]) + tbl.set("PortChannel" + channel + ":" + interface, fvs) + time.sleep(1) + + def remove_port_channel_member(self, dvs, channel, interface): + tbl = swsscommon.ProducerStateTable(dvs.pdb, "LAG_MEMBER_TABLE") + tbl._del("PortChannel" + channel + ":" + interface) + time.sleep(1) + def test_dependency(self, dvs): + dvs.setup_db() lag = "0001" p = Port(dvs, "Ethernet0") p.sync_from_config_db() # 1. Create PortChannel0001. - self.dvs_lag.create_port_channel(lag) + self.create_port_channel(dvs, lag) # 2. Add Ethernet0 to PortChannel0001. - self.dvs_lag.create_port_channel_member(lag, p.get_name()) + self.create_port_channel_member(dvs, lag, p.get_name()) time.sleep(2) # 3. Add log marker to syslog @@ -38,7 +61,7 @@ def test_dependency(self, dvs): assert(p.exists_in_asic_db() == True) # 7. Remove port from LAG - self.dvs_lag.remove_port_channel_member(lag, p.get_name()) + self.remove_port_channel_member(dvs, lag, p.get_name()) # 8. Verify that port is removed from ASIC DB assert(p.not_exists_in_asic_db() == True) @@ -51,8 +74,7 @@ def test_dependency(self, dvs): p.verify_asic_db() # 10. Remove PortChannel0001 and verify that its removed. - self.dvs_lag.remove_port_channel(lag) - time.sleep(30) + self.remove_port_channel(dvs, lag) self.dvs_lag.get_and_verify_port_channel(0) From a8fcadfb58f5d1fa3a68ff4814a8d48441410bdc Mon Sep 17 00:00:00 2001 From: Shi Su <67605788+shi-su@users.noreply.github.com> Date: Thu, 23 Sep 2021 20:36:26 -0700 Subject: [PATCH 12/19] Add sleep to ensure starting route perf test after the vs is stable (#1929) *Add sleep in route perf test to ensure the test starts when route table is stable. 
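The fixed sleeps are the simplest cure. An alternative pattern, sketched below under the assumption that a key-count callback is available (the wait_until_stable helper is hypothetical, not part of the test framework), is to poll until two consecutive reads of the route table size agree before starting the measurement:

```cpp
#include <chrono>
#include <cstddef>
#include <functional>
#include <thread>

// Sketch only: poll a size-reporting callable until two consecutive reads
// match (the table stopped changing) or the timeout expires.
bool wait_until_stable(const std::function<std::size_t()> &count,
                       std::chrono::seconds timeout)
{
    using clock = std::chrono::steady_clock;
    const auto deadline = clock::now() + timeout;

    std::size_t last = count();
    while (clock::now() < deadline)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(500));
        const std::size_t now = count();
        if (now == last)
        {
            return true; // stable: safe to start the perf measurement
        }
        last = now;
    }
    return false; // still changing when the timeout hit
}
```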
--- tests/test_route.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_route.py b/tests/test_route.py index 56d3af5c5b..b6dff14cfe 100644 --- a/tests/test_route.py +++ b/tests/test_route.py @@ -930,12 +930,14 @@ def test_PerfAddRemoveRoute(self, dvs, testlog): dvs.servers[1].runcmd("ip address add 10.0.0.3/31 dev eth0") dvs.servers[1].runcmd("ip route add default via 10.0.0.2") + time.sleep(2) fieldValues = [{"nexthop": "10.0.0.1", "ifname": "Ethernet0"}, {"nexthop": "10.0.0.3", "ifname": "Ethernet4"}] # get neighbor and arp entry dvs.servers[0].runcmd("ping -c 1 10.0.0.3") dvs.servers[1].runcmd("ping -c 1 10.0.0.1") + time.sleep(2) # get number of routes before adding new routes startNumRoutes = len(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY")) From a3fdaf41c363709404d484cd697099e82a637aa6 Mon Sep 17 00:00:00 2001 From: Ashok Daparthi-Dell Date: Fri, 24 Sep 2021 16:30:48 -0700 Subject: [PATCH 13/19] QOS fieldvalue reference ABNF format to string changes (#1754) * QOS field reference ABNF format to string changes --- cfgmgr/buffer_check_headroom_mellanox.lua | 6 +- cfgmgr/buffer_pool_mellanox.lua | 2 +- cfgmgr/buffermgr.cpp | 49 ++----------- cfgmgr/buffermgr.h | 1 - cfgmgr/buffermgrdyn.cpp | 60 ++-------------- cfgmgr/buffermgrdyn.h | 2 - doc/Configuration.md | 42 ++++++------ doc/swss-schema.md | 8 +-- orchagent/bufferorch.cpp | 30 ++++++-- orchagent/orch.cpp | 84 ++++++++++++++--------- orchagent/orch.h | 6 +- orchagent/qosorch.cpp | 25 +++++-- swssconfig/sample/sample.json | 10 +-- swssconfig/sample/sample.json.output.txt | 8 +-- tests/buffer_model.py | 2 +- tests/mock_tests/portsorch_ut.cpp | 14 ++-- tests/test_buffer_dynamic.py | 83 ++++++++++++---------- tests/test_buffer_traditional.py | 2 +- tests/test_qos_map.py | 2 +- tests/test_speed.py | 2 +- 20 files changed, 202 insertions(+), 236 deletions(-) diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index d700862448..73d720668e 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -91,11 +91,11 @@ end table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size) local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. ':*') for i = 1, #pg_keys do - local profile = string.sub(redis.call('HGET', pg_keys[i], 'profile'), 2, -2) + local profile = redis.call('HGET', pg_keys[i], 'profile') local current_profile_size - if profile ~= 'BUFFER_PROFILE_TABLE:ingress_lossy_profile' and (no_input_pg or new_pg ~= pg_keys[i]) then + if profile ~= 'ingress_lossy_profile' and (no_input_pg or new_pg ~= pg_keys[i]) then if profile ~= input_profile_name and not no_input_pg then - local referenced_profile = redis.call('HGETALL', profile) + local referenced_profile = redis.call('HGETALL', 'BUFFER_PROFILE_TABLE:' .. profile) for j = 1, #referenced_profile, 2 do if referenced_profile[j] == 'size' then current_profile_size = tonumber(referenced_profile[j+1]) diff --git a/cfgmgr/buffer_pool_mellanox.lua b/cfgmgr/buffer_pool_mellanox.lua index 76316936bc..e49032fdf5 100644 --- a/cfgmgr/buffer_pool_mellanox.lua +++ b/cfgmgr/buffer_pool_mellanox.lua @@ -44,7 +44,7 @@ local function iterate_all_items(all_items, check_lossless) if not profile_name then return 1 end - profile_name = string.sub(profile_name, 2, -2) + profile_name = "BUFFER_PROFILE_TABLE:" .. 
profile_name local profile_ref_count = profiles[profile_name] if profile_ref_count == nil then -- Indicate an error in case the referenced profile hasn't been inserted or has been removed diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 684b17f31d..14d4caa3a8 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -107,7 +107,7 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( "BUFFER_PROFILE": { "pg_lossless_100G_300m_profile": { - "pool":"[BUFFER_POOL_TABLE:ingress_lossless_pool]", + "pool":"ingress_lossless_pool", "xon":"18432", "xon_offset":"2496", "xoff":"165888", @@ -117,7 +117,7 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( } "BUFFER_PG" :{ Ethernet44|3-4": { - "profile" : "[BUFFER_PROFILE:pg_lossless_100000_300m_profile]" + "profile" : "pg_lossless_100000_300m_profile" } } */ @@ -168,11 +168,8 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) // profile threshold field name mode += "_th"; - string pg_pool_reference = string(CFG_BUFFER_POOL_TABLE_NAME) + - m_cfgBufferProfileTable.getTableNameSeparator() + - INGRESS_LOSSLESS_PG_POOL_NAME; - fvVector.push_back(make_pair("pool", "[" + pg_pool_reference + "]")); + fvVector.push_back(make_pair("pool", INGRESS_LOSSLESS_PG_POOL_NAME)); fvVector.push_back(make_pair("xon", m_pgProfileLookup[speed][cable].xon)); if (m_pgProfileLookup[speed][cable].xon_offset.length() > 0) { fvVector.push_back(make_pair("xon_offset", @@ -192,11 +189,7 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + LOSSLESS_PGS; - string profile_ref = string("[") + - CFG_BUFFER_PROFILE_TABLE_NAME + - m_cfgBufferPgTable.getTableNameSeparator() + - buffer_profile_key + - "]"; + string profile_ref = buffer_profile_key; /* Check if PG Mapping is already then log message and return. */ m_cfgBufferPgTable.get(buffer_pg_key, fvVector); @@ -224,32 +217,6 @@ void BufferMgr::transformSeperator(string &name) name.replace(pos, 1, ":"); } -void BufferMgr::transformReference(string &name) -{ - auto references = tokenize(name, list_item_delimiter); - int ref_index = 0; - - name = ""; - - for (auto &reference : references) - { - if (ref_index != 0) - name += list_item_delimiter; - ref_index ++; - - auto keys = tokenize(reference, config_db_key_delimiter); - int key_index = 0; - for (auto &key : keys) - { - if (key_index == 0) - name += key + "_TABLE"; - else - name += delimiter + key; - key_index ++; - } - } -} - /* * This function copies the data from tables in CONFIG_DB to APPL_DB. 
* With dynamically buffer calculation supported, the following tables @@ -292,14 +259,6 @@ void BufferMgr::doBufferTableTask(Consumer &consumer, ProducerStateTable &applTa for (auto i : kfvFieldsValues(t)) { - SWSS_LOG_INFO("Inserting field %s value %s", fvField(i).c_str(), fvValue(i).c_str()); - //transform the separator in values from "|" to ":" - if (fvField(i) == "pool") - transformReference(fvValue(i)); - if (fvField(i) == "profile") - transformReference(fvValue(i)); - if (fvField(i) == "profile_list") - transformReference(fvValue(i)); fvVector.emplace_back(FieldValueTuple(fvField(i), fvValue(i))); SWSS_LOG_INFO("Inserting field %s value %s", fvField(i).c_str(), fvValue(i).c_str()); } diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index c9777d2918..652e84dafb 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -61,7 +61,6 @@ class BufferMgr : public Orch void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable); void transformSeperator(std::string &name); - void transformReference(std::string &name); void doTask(Consumer &consumer); }; diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 071fec6f78..f1bbcd395a 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -202,32 +202,6 @@ void BufferMgrDynamic::transformSeperator(string &name) name.replace(pos, 1, ":"); } -void BufferMgrDynamic::transformReference(string &name) -{ - auto references = tokenize(name, list_item_delimiter); - int ref_index = 0; - - name = ""; - - for (auto &reference : references) - { - if (ref_index != 0) - name += list_item_delimiter; - ref_index ++; - - auto keys = tokenize(reference, config_db_key_delimiter); - int key_index = 0; - for (auto &key : keys) - { - if (key_index == 0) - name += key + "_TABLE"; - else - name += delimiter + key; - key_index ++; - } - } -} - // For string "TABLE_NAME|objectname", returns "objectname" string BufferMgrDynamic::parseObjectNameFromKey(const string &key, size_t pos = 0) { @@ -240,13 +214,6 @@ string BufferMgrDynamic::parseObjectNameFromKey(const string &key, size_t pos = return keys[pos]; } -// For string "[foo]", returns "foo" -string BufferMgrDynamic::parseObjectNameFromReference(const string &reference) -{ - auto objName = reference.substr(1, reference.size() - 2); - return parseObjectNameFromKey(objName, 1); -} - string BufferMgrDynamic::getDynamicProfileName(const string &speed, const string &cable, const string &mtu, const string &threshold, const string &gearbox_model, long lane_count) { string buffer_profile_key; @@ -619,9 +586,6 @@ void BufferMgrDynamic::updateBufferProfileToDb(const string &name, const buffer_ // profile threshold field name mode += "_th"; - string pg_pool_reference = string(APP_BUFFER_POOL_TABLE_NAME) + - m_applBufferProfileTable.getTableNameSeparator() + - INGRESS_LOSSLESS_PG_POOL_NAME; fvVector.emplace_back("xon", profile.xon); if (!profile.xon_offset.empty()) { @@ -629,7 +593,7 @@ void BufferMgrDynamic::updateBufferProfileToDb(const string &name, const buffer_ } fvVector.emplace_back("xoff", profile.xoff); fvVector.emplace_back("size", profile.size); - fvVector.emplace_back("pool", "[" + pg_pool_reference + "]"); + fvVector.emplace_back("pool", INGRESS_LOSSLESS_PG_POOL_NAME); fvVector.emplace_back(mode, profile.threshold); m_applBufferProfileTable.set(name, fvVector); @@ -646,15 +610,7 @@ void BufferMgrDynamic::updateBufferPgToDb(const string &key, const string &profi fvVector.clear(); - string profile_ref = string("[") + - APP_BUFFER_PROFILE_TABLE_NAME + - 
m_applBufferPgTable.getTableNameSeparator() + - profile + - "]"; - - fvVector.clear(); - - fvVector.push_back(make_pair("profile", profile_ref)); + fvVector.push_back(make_pair("profile", profile)); m_applBufferPgTable.set(key, fvVector); } else @@ -1779,8 +1735,7 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues { if (!value.empty()) { - transformReference(value); - auto poolName = parseObjectNameFromReference(value); + auto poolName = value; if (poolName.empty()) { SWSS_LOG_ERROR("BUFFER_PROFILE: Invalid format of reference to pool: %s", value.c_str()); @@ -1953,8 +1908,7 @@ task_process_status BufferMgrDynamic::handleOneBufferPgEntry(const string &key, { // Headroom override pureDynamic = false; - transformReference(value); - string profileName = parseObjectNameFromReference(value); + string profileName = value; if (profileName.empty()) { SWSS_LOG_ERROR("BUFFER_PG: Invalid format of reference to profile: %s", value.c_str()); @@ -2170,12 +2124,6 @@ task_process_status BufferMgrDynamic::doBufferTableTask(KeyOpFieldsValuesTuple & for (auto i : kfvFieldsValues(tuple)) { // Transform the separator in values from "|" to ":" - if (fvField(i) == "pool") - transformReference(fvValue(i)); - if (fvField(i) == "profile") - transformReference(fvValue(i)); - if (fvField(i) == "profile_list") - transformReference(fvValue(i)); fvVector.emplace_back(fvField(i), fvValue(i)); SWSS_LOG_INFO("Inserting field %s value %s", fvField(i).c_str(), fvValue(i).c_str()); } diff --git a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h index 3b82a27bb2..68a614bbf8 100644 --- a/cfgmgr/buffermgrdyn.h +++ b/cfgmgr/buffermgrdyn.h @@ -222,9 +222,7 @@ class BufferMgrDynamic : public Orch // Tool functions to parse keys and references std::string getPgPoolMode(); void transformSeperator(std::string &name); - void transformReference(std::string &name); std::string parseObjectNameFromKey(const std::string &key, size_t pos/* = 1*/); - std::string parseObjectNameFromReference(const std::string &reference); std::string getDynamicProfileName(const std::string &speed, const std::string &cable, const std::string &mtu, const std::string &threshold, const std::string &gearbox_model, long lane_count); inline bool isNonZero(const std::string &value) const { diff --git a/doc/Configuration.md b/doc/Configuration.md index fcec60be43..a619531f70 100644 --- a/doc/Configuration.md +++ b/doc/Configuration.md @@ -343,13 +343,13 @@ When the system is running in traditional buffer model, profiles needs to explic { "BUFFER_PG": { "Ethernet0|3-4": { - "profile": "[BUFFER_PROFILE|pg_lossless_40000_5m_profile]" + "profile": "pg_lossless_40000_5m_profile" }, "Ethernet1|3-4": { - "profile": "[BUFFER_PROFILE|pg_lossless_40000_5m_profile]" + "profile": "pg_lossless_40000_5m_profile" }, "Ethernet2|3-4": { - "profile": "[BUFFER_PROFILE|pg_lossless_40000_5m_profile]" + "profile": "pg_lossless_40000_5m_profile" } } } @@ -371,7 +371,7 @@ When the system is running in dynamic buffer model, profiles can be: "profile": "NULL" }, "Ethernet2|3-4": { - "profile": "[BUFFER_PROFILE|static_profile]" + "profile": "static_profile" } } } @@ -437,17 +437,17 @@ When the system is running in dynamic buffer model, the size of some of the buff "BUFFER_PROFILE": { "egress_lossless_profile": { "static_th": "3995680", - "pool": "[BUFFER_POOL|egress_lossless_pool]", + "pool": "egress_lossless_pool", "size": "1518" }, "egress_lossy_profile": { "dynamic_th": "3", - "pool": "[BUFFER_POOL|egress_lossy_pool]", + "pool": "egress_lossy_pool", "size": 
"1518" }, "ingress_lossy_profile": { "dynamic_th": "3", - "pool": "[BUFFER_POOL|ingress_lossless_pool]", + "pool": "ingress_lossless_pool", "size": "0" }, "pg_lossless_40000_5m_profile": { @@ -455,7 +455,7 @@ When the system is running in dynamic buffer model, the size of some of the buff "dynamic_th": "-3", "xon": "2288", "xoff": "66560", - "pool": "[BUFFER_POOL|ingress_lossless_pool]", + "pool": "ingress_lossless_pool", "size": "1248" }, "pg_lossless_40000_40m_profile": { @@ -463,7 +463,7 @@ When the system is running in dynamic buffer model, the size of some of the buff "dynamic_th": "-3", "xon": "2288", "xoff": "71552", - "pool": "[BUFFER_POOL|ingress_lossless_pool]", + "pool": "ingress_lossless_pool", "size": "1248" } } @@ -491,13 +491,13 @@ This kind of profiles will be handled by buffer manager and won't be applied to { "BUFFER_QUEUE": { "Ethernet50,Ethernet52,Ethernet54,Ethernet56|0-2": { - "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + "profile": "egress_lossy_profile" }, "Ethernet50,Ethernet52,Ethernet54,Ethernet56|3-4": { - "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + "profile": "egress_lossless_profile" }, "Ethernet50,Ethernet52,Ethernet54,Ethernet56|5-6": { - "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + "profile": "egress_lossy_profile" } } } @@ -1104,12 +1104,12 @@ name as object key and member list as attribute. { "PORT_QOS_MAP": { "Ethernet50,Ethernet52,Ethernet54,Ethernet56": { - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE", "pfc_enable": "3,4", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "scheduler": "[SCHEDULER|scheduler.port]" + "pfc_to_queue_map": "AZURE", + "dscp_to_tc_map": "AZURE", + "scheduler": "scheduler.port" } } } @@ -1120,14 +1120,14 @@ name as object key and member list as attribute. { "QUEUE": { "Ethernet56|4": { - "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", - "scheduler": "[SCHEDULER|scheduler.1]" + "wred_profile": "AZURE_LOSSLESS", + "scheduler": "scheduler.1" }, "Ethernet56|5": { - "scheduler": "[SCHEDULER|scheduler.0]" + "scheduler": "scheduler.0" }, "Ethernet56|6": { - "scheduler": "[SCHEDULER|scheduler.0]" + "scheduler": "scheduler.0" } } } diff --git a/doc/swss-schema.md b/doc/swss-schema.md index 7f25803a28..3ccc7b74af 100644 --- a/doc/swss-schema.md +++ b/doc/swss-schema.md @@ -35,9 +35,9 @@ Stores information for physical switch ports managed by the switch chip. 
Ports t Example: 127.0.0.1:6379> hgetall PORT_TABLE:ETHERNET4 1) "dscp_to_tc_map" - 2) "[DSCP_TO_TC_MAP_TABLE:AZURE]" + 2) "AZURE" 3) "tc_to_queue_map" - 4) "[TC_TO_QUEUE_MAP_TABLE:AZURE]" + 4) "AZURE" --------------------------------------------- ### INTF_TABLE @@ -209,9 +209,9 @@ and reflects the LAG ports into the redis under: `LAG_TABLE::port` Example: 127.0.0.1:6379> hgetall QUEUE_TABLE:ETHERNET4:1 1) "scheduler" - 2) "[SCHEDULER_TABLE:BEST_EFFORT]" + 2) "BEST_EFFORT" 3) "wred_profile" - 4) "[WRED_PROFILE_TABLE:AZURE]" + 4) "AZURE" --------------------------------------------- ### TC\_TO\_QUEUE\_MAP\_TABLE diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index 5e9eacba98..d91da13136 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -46,6 +46,12 @@ type_map BufferOrch::m_buffer_type_maps = { {APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, new object_reference_map()} }; +map buffer_to_ref_table_map = { + {buffer_pool_field_name, APP_BUFFER_POOL_TABLE_NAME}, + {buffer_profile_field_name, APP_BUFFER_PROFILE_TABLE_NAME}, + {buffer_profile_list_field_name, APP_BUFFER_PROFILE_TABLE_NAME} +}; + BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *stateDb, vector &tableNames) : Orch(applDb, tableNames), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), @@ -562,7 +568,9 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } sai_object_id_t sai_pool; - ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_pool_field_name, tuple, sai_pool, pool_name); + ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_pool_field_name, + buffer_to_ref_table_map.at(buffer_pool_field_name), + tuple, sai_pool, pool_name); if (ref_resolve_status::success != resolve_result) { if(ref_resolve_status::not_resolved == resolve_result) @@ -754,7 +762,9 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) if (op == SET_COMMAND) { - ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, tuple, sai_buffer_profile, buffer_profile_name); + ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, + buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { if (ref_resolve_status::not_resolved == resolve_result) @@ -877,7 +887,9 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup if (op == SET_COMMAND) { - ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, tuple, sai_buffer_profile, buffer_profile_name); + ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, + buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { if (ref_resolve_status::not_resolved == resolve_result) @@ -981,7 +993,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup } /* -Input sample:"[BUFFER_PROFILE_TABLE:i_port.profile0],[BUFFER_PROFILE_TABLE:i_port.profile1]" +Input sample:"i_port.profile0,i_port.profile1" */ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValuesTuple &tuple) { @@ -996,7 +1008,9 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue vector 
profile_list; string profile_name_list; - ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, tuple, profile_list, profile_name_list); + ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, + buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, + profile_list, profile_name_list); if (ref_resolve_status::success != resolve_status) { if(ref_resolve_status::not_resolved == resolve_status) @@ -1037,7 +1051,7 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue } /* -Input sample:"[BUFFER_PROFILE_TABLE:e_port.profile0],[BUFFER_PROFILE_TABLE:e_port.profile1]" +Input sample:"e_port.profile0,e_port.profile1" */ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValuesTuple &tuple) { @@ -1050,7 +1064,9 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues vector<sai_object_id_t> profile_list; string profile_name_list; - ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, tuple, profile_list, profile_name_list); + ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, + buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, + profile_list, profile_name_list); if (ref_resolve_status::success != resolve_status) { if(ref_resolve_status::not_resolved == resolve_status) diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 5cc9ac0335..1015a30c5b 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -301,7 +301,7 @@ bool Orch::bake() } /* -- Validates reference has proper format which is [table_name:object_name] +- Validates reference has proper format which is object_name - validates table_name exists - validates object with object_name exists @@ -310,59 +310,77 @@ bool Orch::bake() - both type_name and object_name are cleared to empty strings as an - indication to the caller of the special case */ -bool Orch::parseReference(type_map &type_maps, string &ref_in, string &type_name, string &object_name) +bool Orch::parseReference(type_map &type_maps, string &ref_in, const string &type_name, string &object_name) { SWSS_LOG_ENTER(); SWSS_LOG_DEBUG("input:%s", ref_in.c_str()); - if (ref_in.size() < 2) - { - SWSS_LOG_ERROR("invalid reference received:%s\n", ref_in.c_str()); - return false; - } - if ((ref_in[0] != ref_start) || (ref_in[ref_in.size()-1] != ref_end)) - { - SWSS_LOG_ERROR("malformed reference:%s. Must be surrounded by [ ]\n", ref_in.c_str()); - return false; - } - if (ref_in.size() == 2) + + if (ref_in.size() == 0) { - // value set by user is "[]" + // value set by user is "" // Deem it as a valid format // clear both type_name and object_name // as an indication to the caller that // such a case has been encountered - type_name.clear(); object_name.clear(); return true; } - string ref_content = ref_in.substr(1, ref_in.size() - 2); - vector<string> tokens; - tokens = tokenize(ref_content, delimiter); - if (tokens.size() != 2) + + if ((ref_in[0] == ref_start) || (ref_in[ref_in.size()-1] == ref_end)) { - tokens = tokenize(ref_content, config_db_key_delimiter); + SWSS_LOG_ERROR("malformed reference:%s. Must not be surrounded by [ ]\n", ref_in.c_str()); + /* + * Accept the old format until the corresponding sonic-buildimage changes are merged; the swss tests + * still depend on generated qos configs that use the old format. 
If we skipped the old format here, + * isPortAllReady() would fail, since readiness is determined by checking whether the buffer config in CONFIG_DB has been + * applied to ASIC_DB. + * Due to this, all swss test cases would fail. + * This is kept to avoid test case failures until the merge happens. + * + */ + if (ref_in.size() == 2) + { + // value set by user is "[]" + // Deem it as a valid format + // clear both type_name and object_name + // as an indication to the caller that + // such a case has been encountered + // type_name.clear(); + object_name.clear(); + return true; + } + string ref_content = ref_in.substr(1, ref_in.size() - 2); + vector<string> tokens; + tokens = tokenize(ref_content, delimiter); if (tokens.size() != 2) { - SWSS_LOG_ERROR("malformed reference:%s. Must contain 2 tokens\n", ref_content.c_str()); - return false; + tokens = tokenize(ref_content, config_db_key_delimiter); + if (tokens.size() != 2) + { + SWSS_LOG_ERROR("malformed reference:%s. Must contain 2 tokens\n", ref_content.c_str()); + return false; + } } + object_name = tokens[1]; + SWSS_LOG_ERROR("parsed: type_name:%s, object_name:%s", type_name.c_str(), object_name.c_str()); + + return true; } - auto type_it = type_maps.find(tokens[0]); + auto type_it = type_maps.find(type_name); if (type_it == type_maps.end()) { - SWSS_LOG_ERROR("not recognized type:%s\n", tokens[0].c_str()); + SWSS_LOG_ERROR("not recognized type:%s\n", type_name.c_str()); return false; } - auto obj_map = type_maps[tokens[0]]; - auto obj_it = obj_map->find(tokens[1]); + auto obj_map = type_maps[type_name]; + auto obj_it = obj_map->find(ref_in); if (obj_it == obj_map->end()) { - SWSS_LOG_INFO("map:%s does not contain object with name:%s\n", tokens[0].c_str(), tokens[1].c_str()); + SWSS_LOG_INFO("map:%s does not contain object with name:%s\n", type_name.c_str(), ref_in.c_str()); return false; } - type_name = tokens[0]; - object_name = tokens[1]; + object_name = ref_in; SWSS_LOG_DEBUG("parsed: type_name:%s, object_name:%s", type_name.c_str(), object_name.c_str()); return true; } @@ -370,6 +388,7 @@ ref_resolve_status Orch::resolveFieldRefValue( type_map &type_maps, const string &field_name, + const string &ref_type_name, KeyOpFieldsValuesTuple &tuple, sai_object_id_t &sai_object, string &referenced_object_name) @@ -387,7 +406,7 @@ ref_resolve_status Orch::resolveFieldRefValue( SWSS_LOG_ERROR("Multiple same fields %s", field_name.c_str()); return ref_resolve_status::multiple_instances; } - string ref_type_name, object_name; + string object_name; if (!parseReference(type_maps, fvValue(*i), ref_type_name, object_name)) { return ref_resolve_status::not_resolved; } @@ -568,11 +587,12 @@ string Orch::dumpTuple(Consumer &consumer, const KeyOpFieldsValuesTuple &tuple) ref_resolve_status Orch::resolveFieldRefArray( type_map &type_maps, const string &field_name, + const string &ref_type_name, KeyOpFieldsValuesTuple &tuple, vector<sai_object_id_t> &sai_object_arr, string &object_name_list) { - // example: [BUFFER_PROFILE_TABLE:e_port.profile0],[BUFFER_PROFILE_TABLE:e_port.profile1] + // example: e_port.profile0,e_port.profile1 SWSS_LOG_ENTER(); size_t count = 0; sai_object_arr.clear(); @@ -585,7 +605,7 @@ ref_resolve_status Orch::resolveFieldRefArray( SWSS_LOG_ERROR("Singleton field with name:%s must have only 1 instance, actual count:%zd\n", field_name.c_str(), count); return ref_resolve_status::multiple_instances; } - string ref_type_name, object_name; + string object_name; string list = fvValue(*i); vector<string> list_items; if 
(list.find(list_item_delimiter) != string::npos) diff --git a/orchagent/orch.h b/orchagent/orch.h index 766d02c766..7fe99cc6ac 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -223,10 +223,10 @@ class Orch static void logfileReopen(); std::string dumpTuple(Consumer &consumer, const swss::KeyOpFieldsValuesTuple &tuple); - ref_resolve_status resolveFieldRefValue(type_map&, const std::string&, swss::KeyOpFieldsValuesTuple&, sai_object_id_t&, std::string&); + ref_resolve_status resolveFieldRefValue(type_map&, const std::string&, const std::string&, swss::KeyOpFieldsValuesTuple&, sai_object_id_t&, std::string&); bool parseIndexRange(const std::string &input, sai_uint32_t &range_low, sai_uint32_t &range_high); - bool parseReference(type_map &type_maps, std::string &ref, std::string &table_name, std::string &object_name); - ref_resolve_status resolveFieldRefArray(type_map&, const std::string&, swss::KeyOpFieldsValuesTuple&, std::vector<sai_object_id_t>&, std::string&); + bool parseReference(type_map &type_maps, std::string &ref, const std::string &table_name, std::string &object_name); + ref_resolve_status resolveFieldRefArray(type_map&, const std::string&, const std::string&, swss::KeyOpFieldsValuesTuple&, std::vector<sai_object_id_t>&, std::string&); void setObjectReference(type_map&, const std::string&, const std::string&, const std::string&, const std::string&); void removeObject(type_map&, const std::string&, const std::string&); bool isObjectBeingReferenced(type_map&, const std::string&, const std::string&); diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index c2e15aa763..771ffbcd8a 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -71,6 +71,18 @@ type_map QosOrch::m_qos_maps = { {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()} }; +map<string, string> qos_to_ref_table_map = { + {dscp_to_tc_field_name, CFG_DSCP_TO_TC_MAP_TABLE_NAME}, + {dot1p_to_tc_field_name, CFG_DOT1P_TO_TC_MAP_TABLE_NAME}, + {tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME}, + {tc_to_pg_map_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, + {pfc_to_pg_map_name, CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, + {pfc_to_queue_map_name, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME}, + {scheduler_field_name, CFG_SCHEDULER_TABLE_NAME}, + {wred_profile_field_name, CFG_WRED_PROFILE_TABLE_NAME} +}; + + task_process_status QosMapHandler::processWorkItem(Consumer& consumer) { SWSS_LOG_ENTER(); @@ -1138,7 +1150,9 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) SWSS_LOG_DEBUG("processing queue:%zd", queue_ind); sai_object_id_t sai_scheduler_profile; string scheduler_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, tuple, sai_scheduler_profile, scheduler_profile_name); + resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, + qos_to_ref_table_map.at(scheduler_field_name), tuple, + sai_scheduler_profile, scheduler_profile_name); if (ref_resolve_status::success == resolve_result) { if (op == SET_COMMAND) @@ -1175,7 +1189,9 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) sai_object_id_t sai_wred_profile; string wred_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, tuple, sai_wred_profile, wred_profile_name); + resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, + qos_to_ref_table_map.at(wred_profile_field_name), tuple, + sai_wred_profile, wred_profile_name); if (ref_resolve_status::success == resolve_result) { if (op == SET_COMMAND) @@ -1263,7 +1279,8 @@ 
task_process_status QosOrch::ResolveMapAndApplyToPort( sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; string object_name; bool result; - ref_resolve_status resolve_result = resolveFieldRefValue(m_qos_maps, field_name, tuple, sai_object, object_name); + ref_resolve_status resolve_result = resolveFieldRefValue(m_qos_maps, field_name, + qos_to_ref_table_map.at(field_name), tuple, sai_object, object_name); if (ref_resolve_status::success == resolve_result) { if (op == SET_COMMAND) @@ -1319,7 +1336,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) sai_object_id_t id; string object_name; string map_type_name = fvField(*it), map_name = fvValue(*it); - ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, tuple, id, object_name); + ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); if (status != ref_resolve_status::success) { diff --git a/swssconfig/sample/sample.json b/swssconfig/sample/sample.json index 7fb0ff0ac2..c00cdb0b9f 100644 --- a/swssconfig/sample/sample.json +++ b/swssconfig/sample/sample.json @@ -18,8 +18,8 @@ }, { "QOS_TABLE:PORT_TABLE:ETHERNET4": { - "dscp_to_tc_map" : "[DSCP_TO_TC_MAP_TABLE:AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP_TABLE:AZURE]" + "dscp_to_tc_map" : "AZURE", + "tc_to_queue_map": "AZURE" }, "OP": "SET" }, @@ -46,9 +46,9 @@ }, { "QUEUE_TABLE:ETHERNET4:1" : { - "scheduler" : "[SCHEDULER_TABLE:BEST_EFFORT]", - "wred_profile" : "[WRED_PROFILE_TABLE:AZURE]" + "scheduler" : "BEST_EFFORT", + "wred_profile" : "AZURE" }, "OP": "SET" } - ] \ No newline at end of file + ] diff --git a/swssconfig/sample/sample.json.output.txt b/swssconfig/sample/sample.json.output.txt index 8508de60c6..11f4203771 100644 --- a/swssconfig/sample/sample.json.output.txt +++ b/swssconfig/sample/sample.json.output.txt @@ -67,14 +67,14 @@ hgetall WRED_PROFILE_TABLE:AZURE 10) "8" 127.0.0.1:6379> hgetall QUEUE_TABLE:ETHERNET4:1 1) "scheduler" -2) "[SCHEDULER_TABLE:BEST_EFFORT]" +2) "BEST_EFFORT" 3) "wred_profile" -4) "[WRED_PROFILE_TABLE:AZURE]" +4) "AZURE" 127.0.0.1:6379> hgetall PORT_TABLE:ETHERNET4 1) "dscp_to_tc_map" -2) "[DSCP_TO_TC_MAP_TABLE:AZURE]" +2) "AZURE" 3) "tc_to_queue_map" -4) "[TC_TO_QUEUE_MAP_TABLE:AZURE]" +4) "AZURE" 127.0.0.1:6379> hgetall TC_TO_QUEUE_MAP_TABLE:AZURE diff --git a/tests/buffer_model.py b/tests/buffer_model.py index 51f6305c33..ae2d1ecb79 100644 --- a/tests/buffer_model.py +++ b/tests/buffer_model.py @@ -69,7 +69,7 @@ def disable_dynamic_buffer(config_db, cmd_runner): pgs = config_db.get_keys('BUFFER_PG') for key in pgs: pg = config_db.get_entry('BUFFER_PG', key) - if pg['profile'] != '[BUFFER_PROFILE|ingress_lossy_profile]': + if pg['profile'] != 'ingress_lossy_profile': config_db.delete_entry('BUFFER_PG', key) # Remove all the non-default profiles diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 594c1f5dfc..853fdbfb69 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -187,7 +187,7 @@ namespace portsorch_test }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "[BUFFER_POOL_TABLE:test_pool]" }, + profileTable.set("test_profile", { { "pool", "test_pool" }, { "xon", "14832" }, { "xoff", "14832" }, { "size", "35000" }, @@ -198,9 +198,9 @@ namespace portsorch_test { std::ostringstream ossAppl, ossCfg; ossAppl << it.first << ":3-4"; - pgTable.set(ossAppl.str(), { { "profile", "[BUFFER_PROFILE_TABLE:test_profile]" } }); + 
pgTable.set(ossAppl.str(), { { "profile", "test_profile" } }); ossCfg << it.first << "|3-4"; - pgTableCfg.set(ossCfg.str(), { { "profile", "[BUFFER_PROFILE|test_profile]" } }); + pgTableCfg.set(ossCfg.str(), { { "profile", "test_profile" } }); } // Recreate buffer orch to read populated data @@ -292,7 +292,7 @@ namespace portsorch_test }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "[BUFFER_POOL_TABLE:test_pool]" }, + profileTable.set("test_profile", { { "pool", "test_pool" }, { "xon", "14832" }, { "xoff", "14832" }, { "size", "35000" }, @@ -303,7 +303,7 @@ namespace portsorch_test { std::ostringstream oss; oss << it.first << ":3-4"; - pgTable.set(oss.str(), { { "profile", "[BUFFER_PROFILE_TABLE:test_profile]" } }); + pgTable.set(oss.str(), { { "profile", "test_profile" } }); } // Populate pot table with SAI ports @@ -410,7 +410,7 @@ namespace portsorch_test }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "[BUFFER_POOL_TABLE:test_pool]" }, + profileTable.set("test_profile", { { "pool", "test_pool" }, { "xon", "14832" }, { "xoff", "14832" }, { "size", "35000" }, @@ -421,7 +421,7 @@ namespace portsorch_test { std::ostringstream oss; oss << it.first << ":3-4"; - pgTable.set(oss.str(), { { "profile", "[BUFFER_PROFILE_TABLE:test_profile]" } }); + pgTable.set(oss.str(), { { "profile", "test_profile" } }); } gBufferOrch->addExistingData(&pgTable); gBufferOrch->addExistingData(&poolTable); diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 26f423d58a..a098d8dec5 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -141,6 +141,7 @@ def change_cable_length(self, cable_length): cable_lengths['Ethernet0'] = cable_length self.config_db.update_entry('CABLE_LENGTH', 'AZURE', cable_lengths) + @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_changeSpeed(self, dvs, testlog): self.setup_db(dvs) @@ -158,7 +159,7 @@ def test_changeSpeed(self, dvs, testlog): self.check_new_profile_in_asic_db(dvs, expectedProfile) # Check whether buffer pg align - bufferPg = self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + bufferPg = self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Remove lossless PG self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -170,7 +171,7 @@ def test_changeSpeed(self, dvs, testlog): # Re-add another lossless PG self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'NULL'}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Remove the lossless PG 6 self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') @@ -183,7 +184,7 @@ def test_changeSpeed(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.check_new_profile_in_asic_db(dvs, expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ 
-192,6 +193,7 @@ def test_changeSpeed(self, dvs, testlog): # Shutdown interface dvs.runcmd('config interface shutdown Ethernet0') + @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_changeCableLen(self, dvs, testlog): self.setup_db(dvs) @@ -206,7 +208,7 @@ def test_changeCableLen(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.cableLenTest1) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.check_new_profile_in_asic_db(dvs, expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Remove the lossless PGs self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -223,7 +225,7 @@ def test_changeCableLen(self, dvs, testlog): # Check the BUFFER_PROFILE_TABLE and BUFFER_PG_TABLE self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.check_new_profile_in_asic_db(dvs, expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Revert the cable length self.change_cable_length(self.originalCableLen) @@ -234,7 +236,7 @@ def test_changeCableLen(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -242,6 +244,7 @@ def test_changeCableLen(self, dvs, testlog): # Shutdown interface dvs.runcmd('config interface shutdown Ethernet0') + @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_MultipleLosslessPg(self, dvs, testlog): self.setup_db(dvs) @@ -254,14 +257,14 @@ def test_MultipleLosslessPg(self, dvs, testlog): # Add another lossless PG self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'NULL'}) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Change speed and check dvs.runcmd("config interface speed Ethernet0 " + self.speedToTest1) expectedProfile = self.make_lossless_profile_name(self.speedToTest1, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Change cable length and check 
self.change_cable_length(self.cableLenTest1) @@ -269,8 +272,8 @@ def test_MultipleLosslessPg(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.speedToTest1, self.cableLenTest1) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.check_new_profile_in_asic_db(dvs, expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Revert the speed and cable length and check self.change_cable_length(self.originalCableLen) @@ -279,8 +282,8 @@ def test_MultipleLosslessPg(self, dvs, testlog): self.asic_db.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE", self.newProfileInAsicDb) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Remove lossless PG 3-4 and 6 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -289,6 +292,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): # Shutdown interface dvs.runcmd('config interface shutdown Ethernet0') + @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_headroomOverride(self, dvs, testlog): self.setup_db(dvs) @@ -301,11 +305,11 @@ def test_headroomOverride(self, dvs, testlog): 'xoff': '16384', 'size': '34816', 'dynamic_th': '0', - 'pool': '[BUFFER_POOL|ingress_lossless_pool]'}) + 'pool': 'ingress_lossless_pool'}) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", "test") self.app_db.wait_for_exact_match("BUFFER_PROFILE_TABLE", "test", - { "pool" : "[BUFFER_POOL_TABLE:ingress_lossless_pool]", + { "pool" : "ingress_lossless_pool", "xon" : "18432", "xoff" : "16384", "size" : "34816", @@ -319,14 +323,14 @@ def test_headroomOverride(self, dvs, testlog): self.change_cable_length(self.cableLenTest1) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.cableLenTest1) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # configure lossless PG 3-4 with headroom override - self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': '[BUFFER_PROFILE|test]'}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:test]"}) + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) # configure lossless PG 6 with headroom override 
- self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': '[BUFFER_PROFILE|test]'}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:test]"}) + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'test'}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "test"}) # update the profile self.config_db.update_entry('BUFFER_PROFILE', 'test', @@ -334,9 +338,9 @@ def test_headroomOverride(self, dvs, testlog): 'xoff': '18432', 'size': '36864', 'dynamic_th': '0', - 'pool': '[BUFFER_POOL|ingress_lossless_pool]'}) + 'pool': 'ingress_lossless_pool'}) self.app_db.wait_for_exact_match("BUFFER_PROFILE_TABLE", "test", - { "pool" : "[BUFFER_POOL_TABLE:ingress_lossless_pool]", + { "pool" : "ingress_lossless_pool", "xon" : "18432", "xoff" : "18432", "size" : "36864", @@ -353,7 +357,7 @@ def test_headroomOverride(self, dvs, testlog): # readd lossless PG with dynamic profile self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # remove the headroom override profile self.config_db.delete_entry('BUFFER_PROFILE', 'test') @@ -364,7 +368,7 @@ def test_headroomOverride(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfile) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -372,6 +376,7 @@ def test_headroomOverride(self, dvs, testlog): # Shutdown interface dvs.runcmd('config interface shutdown Ethernet0') + @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_mtuUpdate(self, dvs, testlog): self.setup_db(dvs) @@ -392,13 +397,13 @@ def test_mtuUpdate(self, dvs, testlog): self.app_db.wait_for_entry("BUFFER_PG_TABLE", "Ethernet0:3-4") self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfileMtu) self.check_new_profile_in_asic_db(dvs, expectedProfileMtu) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(expectedProfileMtu)}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfileMtu}) dvs.runcmd("config interface mtu Ethernet0 {}".format(default_mtu)) self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfileMtu) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfileNormal) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(expectedProfileNormal)}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfileNormal}) # clear configuration self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -406,6 +411,7 @@ def test_mtuUpdate(self, dvs, testlog): # Shutdown interface dvs.runcmd('config interface shutdown Ethernet0') + @pytest.mark.skip("Skip to be removed after sonic-buildimage 
changes get merged") def test_nonDefaultAlpha(self, dvs, testlog): self.setup_db(dvs) @@ -421,25 +427,25 @@ def test_nonDefaultAlpha(self, dvs, testlog): self.config_db.update_entry('BUFFER_PROFILE', 'non-default-dynamic', {'dynamic_th': test_dynamic_th_1, 'headroom_type': 'dynamic', - 'pool': '[BUFFER_POOL|ingress_lossless_pool]'}) + 'pool': 'ingress_lossless_pool'}) # configure lossless PG 3-4 on interface - self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': '[BUFFER_PROFILE|non-default-dynamic]'}) + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'non-default-dynamic'}) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile_th1) self.check_new_profile_in_asic_db(dvs, expectedProfile_th1) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile_th1 + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile_th1}) # modify the profile to another dynamic_th self.config_db.update_entry('BUFFER_PROFILE', 'non-default-dynamic', {'dynamic_th': test_dynamic_th_2, 'headroom_type': 'dynamic', - 'pool': '[BUFFER_POOL|ingress_lossless_pool]'}) + 'pool': 'ingress_lossless_pool'}) self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfile_th1) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile_th2) self.check_new_profile_in_asic_db(dvs, expectedProfile_th2) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile_th2 + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile_th2}) # clear configuration self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -448,6 +454,7 @@ def test_nonDefaultAlpha(self, dvs, testlog): # Shutdown interface dvs.runcmd('config interface shutdown Ethernet0') + @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_sharedHeadroomPool(self, dvs, testlog): self.setup_db(dvs) @@ -459,7 +466,7 @@ def test_sharedHeadroomPool(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) self.check_new_profile_in_asic_db(dvs, expectedProfile) profileInApplDb = self.app_db.get_entry('BUFFER_PROFILE_TABLE', expectedProfile) @@ -546,11 +553,12 @@ def test_sharedHeadroomPool(self, dvs, testlog): # Shutdown interface dvs.runcmd('config interface shutdown Ethernet0') + @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_shutdownPort(self, dvs, testlog): self.setup_db(dvs) - lossy_pg_reference_config_db = '[BUFFER_PROFILE|ingress_lossy_profile]' - lossy_pg_reference_appl_db = '[BUFFER_PROFILE_TABLE:ingress_lossy_profile]' + lossy_pg_reference_config_db = 'ingress_lossy_profile' + lossy_pg_reference_appl_db = 'ingress_lossy_profile' # Startup interface dvs.runcmd('config interface startup Ethernet0') @@ -558,7 +566,7 @@ def test_shutdownPort(self, dvs, testlog): # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) expectedProfile = 
self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Shutdown port and check whether all the PGs have been removed dvs.runcmd("config interface shutdown Ethernet0") @@ -579,8 +587,8 @@ def test_shutdownPort(self, dvs, testlog): dvs.runcmd("config interface startup Ethernet0") self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": lossy_pg_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:1", {"profile": lossy_pg_reference_appl_db}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile }) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -589,6 +597,7 @@ def test_shutdownPort(self, dvs, testlog): # Shutdown interface dvs.runcmd("config interface shutdown Ethernet0") + @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged") def test_autoNegPort(self, dvs, testlog): self.setup_db(dvs) diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 44bea70620..3defae0c80 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -119,7 +119,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) # buffer pgs should still point to the original buffer profile - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":3-4", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(orig_lossless_profile)}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":3-4", {"profile": orig_lossless_profile}) fvs = dict() for pg in self.pg_name_map: fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 32a8b396aa..1970d5f60b 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -63,7 +63,7 @@ def find_dot1p_profile(self): def apply_dot1p_profile_on_all_ports(self): tbl = swsscommon.Table(self.config_db, CFG_PORT_QOS_MAP_TABLE_NAME) - fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_MAP_FIELD, "[" + CFG_DOT1P_TO_TC_MAP_TABLE_NAME + "|" + CFG_DOT1P_TO_TC_MAP_KEY + "]")]) + fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_MAP_FIELD, CFG_DOT1P_TO_TC_MAP_KEY)]) ports = swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys() for port in ports: tbl.set(port, fvs) diff --git a/tests/test_speed.py b/tests/test_speed.py index 44d4932e58..bf44685989 100644 --- a/tests/test_speed.py +++ b/tests/test_speed.py @@ -72,7 +72,7 @@ def test_SpeedAndBufferSet(self, dvs, testlog): expected_pg_table = "Ethernet{}|3-4".format(i * 4) assert expected_pg_table in pg_tables - expected_fields = {"profile": "[BUFFER_PROFILE|{}]".format(expected_new_profile_name)} + expected_fields = {"profile": expected_new_profile_name} cdb.wait_for_field_match("BUFFER_PG", expected_pg_table, 
expected_fields) From f1aca8a7f2a4aa931ab5ef447cf8a291dc327eb3 Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Mon, 27 Sep 2021 08:12:40 -0700 Subject: [PATCH 14/19] Cache routes for single nexthop for faster retrieval (#1922) * Created a map to store single-nexthop routes. This allows faster retrieval when the routes for a particular nexthop need to be fetched. No changes to ECMP route handling. --- orchagent/routeorch.cpp | 122 +++++++++++++++++++++++++++++--------- orchagent/routeorch.h | 17 ++++++ tests/test_mux.py | 23 ++++++++ 3 files changed, 134 insertions(+), 28 deletions(-) diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index d54e205ded..9e9bbe9891 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -1317,47 +1317,94 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) return true; } +void RouteOrch::addNextHopRoute(const NextHopKey& nextHop, const RouteKey& routeKey) +{ + auto it = m_nextHops.find((nextHop)); + + if (it != m_nextHops.end()) + { + if (it->second.find(routeKey) != it->second.end()) + { + SWSS_LOG_INFO("Route already present in nh table %s", + routeKey.prefix.to_string().c_str()); + return; + } + + it->second.insert(routeKey); + } + else + { + set<RouteKey> routes; + routes.insert(routeKey); + m_nextHops.insert(make_pair(nextHop, routes)); + } +} + +void RouteOrch::removeNextHopRoute(const NextHopKey& nextHop, const RouteKey& routeKey) +{ + auto it = m_nextHops.find((nextHop)); + + if (it != m_nextHops.end()) + { + if (it->second.find(routeKey) == it->second.end()) + { + SWSS_LOG_INFO("Route not present in nh table %s", routeKey.prefix.to_string().c_str()); + return; + } + + it->second.erase(routeKey); + if (it->second.empty()) + { + m_nextHops.erase(nextHop); + } + } + else + { + SWSS_LOG_INFO("Nexthop %s not found in nexthop table", nextHop.to_string().c_str()); + } +} + bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRoutes) { numRoutes = 0; + auto it = m_nextHops.find((nextHop)); + + if (it == m_nextHops.end()) + { + SWSS_LOG_INFO("No routes found for NH %s", nextHop.ip_address.to_string().c_str()); + return true; + } + sai_route_entry_t route_entry; sai_attribute_t route_attr; sai_object_id_t next_hop_id; - for (auto rt_table : m_syncdRoutes) + auto rt = it->second.begin(); + while(rt != it->second.end()) { - for (auto rt_entry : rt_table.second) - { - // Skip routes with ecmp nexthops - if (rt_entry.second.getSize() > 1) - { - continue; - } + SWSS_LOG_INFO("Updating route %s", (*rt).prefix.to_string().c_str()); + next_hop_id = m_neighOrch->getNextHopId(nextHop); - if (rt_entry.second.contains(nextHop)) - { - SWSS_LOG_INFO("Updating route %s during nexthop status change", - rt_entry.first.to_string().c_str()); - next_hop_id = m_neighOrch->getNextHopId(nextHop); - - route_entry.vr_id = rt_table.first; - route_entry.switch_id = gSwitchId; - copy(route_entry.destination, rt_entry.first); + route_entry.vr_id = (*rt).vrf_id; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, (*rt).prefix); - route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - route_attr.value.oid = next_hop_id; + route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + route_attr.value.oid = next_hop_id; - sai_status_t status = sai_route_api->set_route_entry_attribute(&route_entry, &route_attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to update route %s, rv:%d", - rt_entry.first.to_string().c_str(), status); - return false; - } - - ++numRoutes; + sai_status_t status = 
---
 orchagent/routeorch.cpp | 122 +++++++++++++++++++++++++++++---------
 orchagent/routeorch.h   |  17 ++++++
 tests/test_mux.py       |  23 ++++++++
 3 files changed, 134 insertions(+), 28 deletions(-)

diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp
index d54e205ded..9e9bbe9891 100644
--- a/orchagent/routeorch.cpp
+++ b/orchagent/routeorch.cpp
@@ -1317,47 +1317,94 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops)
     return true;
 }
 
+void RouteOrch::addNextHopRoute(const NextHopKey& nextHop, const RouteKey& routeKey)
+{
+    auto it = m_nextHops.find((nextHop));
+
+    if (it != m_nextHops.end())
+    {
+        if (it->second.find(routeKey) != it->second.end())
+        {
+            SWSS_LOG_INFO("Route already present in nh table %s",
+                          routeKey.prefix.to_string().c_str());
+            return;
+        }
+
+        it->second.insert(routeKey);
+    }
+    else
+    {
+        set<RouteKey> routes;
+        routes.insert(routeKey);
+        m_nextHops.insert(make_pair(nextHop, routes));
+    }
+}
+
+void RouteOrch::removeNextHopRoute(const NextHopKey& nextHop, const RouteKey& routeKey)
+{
+    auto it = m_nextHops.find((nextHop));
+
+    if (it != m_nextHops.end())
+    {
+        if (it->second.find(routeKey) == it->second.end())
+        {
+            SWSS_LOG_INFO("Route not present in nh table %s", routeKey.prefix.to_string().c_str());
+            return;
+        }
+
+        it->second.erase(routeKey);
+        if (it->second.empty())
+        {
+            m_nextHops.erase(nextHop);
+        }
+    }
+    else
+    {
+        SWSS_LOG_INFO("Nexthop %s not found in nexthop table", nextHop.to_string().c_str());
+    }
+}
+
 bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRoutes)
 {
     numRoutes = 0;
+    auto it = m_nextHops.find((nextHop));
+
+    if (it == m_nextHops.end())
+    {
+        SWSS_LOG_INFO("No routes found for NH %s", nextHop.ip_address.to_string().c_str());
+        return true;
+    }
+
     sai_route_entry_t route_entry;
     sai_attribute_t route_attr;
     sai_object_id_t next_hop_id;
 
-    for (auto rt_table : m_syncdRoutes)
+    auto rt = it->second.begin();
+    while(rt != it->second.end())
     {
-        for (auto rt_entry : rt_table.second)
-        {
-            // Skip routes with ecmp nexthops
-            if (rt_entry.second.getSize() > 1)
-            {
-                continue;
-            }
+        SWSS_LOG_INFO("Updating route %s", (*rt).prefix.to_string().c_str());
+        next_hop_id = m_neighOrch->getNextHopId(nextHop);
 
-            if (rt_entry.second.contains(nextHop))
-            {
-                SWSS_LOG_INFO("Updating route %s during nexthop status change",
-                        rt_entry.first.to_string().c_str());
-                next_hop_id = m_neighOrch->getNextHopId(nextHop);
-
-                route_entry.vr_id = rt_table.first;
-                route_entry.switch_id = gSwitchId;
-                copy(route_entry.destination, rt_entry.first);
+        route_entry.vr_id = (*rt).vrf_id;
+        route_entry.switch_id = gSwitchId;
+        copy(route_entry.destination, (*rt).prefix);
 
-                route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID;
-                route_attr.value.oid = next_hop_id;
+        route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID;
+        route_attr.value.oid = next_hop_id;
 
-                sai_status_t status = sai_route_api->set_route_entry_attribute(&route_entry, &route_attr);
-                if (status != SAI_STATUS_SUCCESS)
-                {
-                    SWSS_LOG_ERROR("Failed to update route %s, rv:%d",
-                            rt_entry.first.to_string().c_str(), status);
-                    return false;
-                }
-
-                ++numRoutes;
+        sai_status_t status = sai_route_api->set_route_entry_attribute(&route_entry, &route_attr);
+        if (status != SAI_STATUS_SUCCESS)
+        {
+            SWSS_LOG_ERROR("Failed to update route %s, rv:%d", (*rt).prefix.to_string().c_str(), status);
+            task_process_status handle_status = handleSaiSetStatus(SAI_API_ROUTE, status);
+            if (handle_status != task_success)
+            {
+                return parseHandleSaiStatusFailure(handle_status);
+            }
+        }
+
+        ++numRoutes;
+        ++rt;
     }
 
     return true;
@@ -1856,6 +1903,12 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey
             SWSS_LOG_NOTICE("Update overlay Nexthop %s", ol_nextHops.to_string().c_str());
             m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id);
         }
+        else if (ol_nextHops.getSize() == 1)
+        {
+            RouteKey r_key = { vrf_id, ipPrefix };
+            auto nexthop = NextHopKey(ol_nextHops.to_string());
+            removeNextHopRoute(nexthop, r_key);
+        }
     }
 
     if (blackhole)
@@ -1878,6 +1931,16 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey
                 ipPrefix.to_string().c_str(), nextHops.to_string().c_str());
     }
 
+    if (nextHops.getSize() == 1 && !nextHops.is_overlay_nexthop())
+    {
+        RouteKey r_key = { vrf_id, ipPrefix };
+        auto nexthop = NextHopKey(nextHops.to_string());
+        if (!nexthop.ip_address.isZero())
+        {
+            addNextHopRoute(nexthop, r_key);
+        }
+    }
+
     m_syncdRoutes[vrf_id][ipPrefix] = nextHops;
 
     notifyNextHopChangeObservers(vrf_id, ipPrefix, nextHops, true);
@@ -2048,6 +2111,9 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx)
             {
                 m_neighOrch->removeMplsNextHop(nexthop);
             }
+
+            RouteKey r_key = { vrf_id, ipPrefix };
+            removeNextHopRoute(nexthop, r_key);
         }
     }
 
diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h
index 20e79699d5..4f74db62dc 100644
--- a/orchagent/routeorch.h
+++ b/orchagent/routeorch.h
@@ -42,6 +42,18 @@ struct NextHopUpdate
 
 struct NextHopObserverEntry;
 
+/* Route destination key for a nexthop */
+struct RouteKey
+{
+    sai_object_id_t vrf_id;
+    IpPrefix prefix;
+
+    bool operator < (const RouteKey& rhs) const
+    {
+        return std::tie(vrf_id, prefix) < std::tie(rhs.vrf_id, rhs.prefix);
+    }
+};
+
 /* NextHopGroupTable: NextHopGroupKey, NextHopGroupEntry */
 typedef std::map<NextHopGroupKey, NextHopGroupEntry> NextHopGroupTable;
 /* RouteTable: destination network, NextHopGroupKey */
@@ -56,6 +68,8 @@ typedef std::map<sai_object_id_t, LabelRouteTable> LabelRouteTables;
 /* Host: vrf_id, IpPrefix */
 typedef std::pair<sai_object_id_t, IpPrefix> Host;
 /* NextHopObserverTable: Host, next hop observer entry */
 typedef std::map<Host, NextHopObserverEntry> NextHopObserverTable;
+/* Single Nexthop to Routemap */
+typedef std::map<NextHopKey, std::set<RouteKey>> NextHopRouteTable;
 
 struct NextHopObserverEntry
 {
@@ -138,6 +152,8 @@ class RouteOrch : public Orch, public Subject
     bool addNextHopGroup(const NextHopGroupKey&);
     bool removeNextHopGroup(const NextHopGroupKey&);
 
+    void addNextHopRoute(const NextHopKey&, const RouteKey&);
+    void removeNextHopRoute(const NextHopKey&, const RouteKey&);
     bool updateNextHopRoutes(const NextHopKey&, uint32_t&);
 
     bool validnexthopinNextHopGroup(const NextHopKey&, uint32_t&);
@@ -170,6 +186,7 @@ class RouteOrch : public Orch, public Subject
     RouteTables m_syncdRoutes;
     LabelRouteTables m_syncdLabelRoutes;
     NextHopGroupTable m_syncdNextHopGroups;
+    NextHopRouteTable m_nextHops;
 
     std::set<std::pair<NextHopGroupKey, sai_object_id_t>> m_bulkNhgReducedRefCnt;
     /* m_bulkNhgReducedRefCnt: nexthop, vrf_id */
diff --git a/tests/test_mux.py b/tests/test_mux.py
index 84458a841d..c9aa578f79 100644
--- a/tests/test_mux.py
+++ b/tests/test_mux.py
@@ -298,6 +298,29 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route):
 
         self.check_nexthop_in_asic_db(asicdb, rtkeys[0])
 
+        # Check route set flow and changing nexthop
+        self.set_mux_state(appdb, "Ethernet4", "active")
+
+        ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE")
+        fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV2_IPV4), ("ifname", "Vlan1000")])
+        ps.set(rtprefix, fvs)
+
+        # Check if route was propagated to ASIC DB
+        rtkeys = dvs_route.check_asicdb_route_entries([rtprefix])
+
+        # Change Mux status for Ethernet0 and expect no change to replaced route
+        self.set_mux_state(appdb, "Ethernet0", "standby")
+        self.check_nexthop_in_asic_db(asicdb, rtkeys[0])
+
+        self.set_mux_state(appdb, "Ethernet4", "standby")
+        self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True)
+
+        # Delete the route
+        ps._del(rtprefix)
+
+        self.set_mux_state(appdb, "Ethernet4", "active")
+        dvs_route.check_asicdb_deleted_route_entries([rtprefix])
+
         # Test ECMP routes
 
         self.set_mux_state(appdb, "Ethernet0", "active")

From a875d60ed12886ce8c6677c572ec7c8c74cb7101 Mon Sep 17 00:00:00 2001
From: Shi Su <67605788+shi-su@users.noreply.github.com>
Date: Mon, 27 Sep 2021 15:25:21 -0700
Subject: [PATCH 15/19] Reduce route count for route perf test (#1928)

* [VS Test] Reduce route count for route perf test
---
 tests/test_route.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_route.py b/tests/test_route.py
index b6dff14cfe..bae9865a65 100644
--- a/tests/test_route.py
+++ b/tests/test_route.py
@@ -905,7 +905,7 @@ class TestRoutePerf(TestRouteBase):
     def test_PerfAddRemoveRoute(self, dvs, testlog):
         self.setup_db(dvs)
        self.clear_srv_config(dvs)
-        numRoutes = 10000   # number of routes to add/remove
+        numRoutes = 5000   # number of routes to add/remove
 
         # generate ip prefixes of routes
         prefixes = []

From a0315428d1bdb1aeaf76c529809f8cc59328878b Mon Sep 17 00:00:00 2001
From: Lawrence Lee
Date: Mon, 27 Sep 2021 17:41:39 -0700
Subject: [PATCH 16/19] [pytest]: Re-use DVS container when possible (#1816)

- If the fake_platform does not change between test modules, re-use the same
  DVS container (but still restart it between modules and do some
  cleanup/re-init to ensure a consistent start state for each test module)
- Add a CLI option --force-recreate-dvs to revert to the previous behavior of
  creating a new DVS per test module

Signed-off-by: Lawrence Lee
---
 tests/conftest.py          | 125 +++++++++++++++++++++++++++++--------
 tests/dvslib/dvs_common.py |   2 +-
 2 files changed, 101 insertions(+), 26 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index d77a2e17e1..ba295ce7c4 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -47,13 +47,18 @@ def pytest_addoption(parser):
     parser.addoption("--dvsname",
                      action="store",
                      default=None,
-                     help="Name of a persistent DVS container to run the tests with")
+                     help="Name of a persistent DVS container to run the tests with. Mutually exclusive with --force-recreate-dvs")
 
     parser.addoption("--forcedvs",
                      action="store_true",
                      default=False,
                      help="Force tests to run in persistent DVS containers with <32 ports")
 
+    parser.addoption("--force-recreate-dvs",
+                     action="store_true",
+                     default=False,
+                     help="Force the DVS container to be recreated between each test module. Mutually exclusive with --dvsname")
+
     parser.addoption("--keeptb",
                      action="store_true",
                      default=False,
@@ -192,6 +197,9 @@ def __init__(self, ctn_name: str, pid: int, i: int):
         ensure_system(f"nsenter -t {pid} -n ip link set arp off dev {self.pifname}")
         ensure_system(f"nsenter -t {pid} -n sysctl -w net.ipv6.conf.{self.pifname}.disable_ipv6=1")
 
+    def __repr__(self):
+        return f'<VirtualServer> {self.nsname}'
+
     def kill_all_processes(self) -> None:
         pids = subprocess.check_output(f"ip netns pids {self.nsname}", shell=True).decode("utf-8")
         if pids:
@@ -339,9 +347,7 @@ def __init__(
 
         # create virtual server
         self.servers = []
-        for i in range(NUM_PORTS):
-            server = VirtualServer(self.ctn_sw.name, self.ctn_sw_pid, i)
-            self.servers.append(server)
+        self.create_servers()
 
         if self.vct:
             self.vct_connect(newctnname)
@@ -377,13 +383,7 @@ def __init__(
         self.redis_sock = os.path.join(self.mount, "redis.sock")
         self.redis_chassis_sock = os.path.join(self.mount, "redis_chassis.sock")
 
-        # DB wrappers are declared here, lazy-loaded in the tests
-        self.app_db = None
-        self.asic_db = None
-        self.counters_db = None
-        self.config_db = None
-        self.flex_db = None
-        self.state_db = None
+        self.reset_dbs()
 
         # Make sure everything is up and running before turning over control to the caller
         self.check_ready_status_and_init_db()
@@ -392,23 +392,40 @@ def __init__(
         if buffer_model == 'dynamic':
             enable_dynamic_buffer(self.get_config_db(), self.runcmd)
 
+    def create_servers(self):
+        for i in range(NUM_PORTS):
+            server = VirtualServer(self.ctn_sw.name, self.ctn_sw_pid, i)
+            self.servers.append(server)
+
+    def reset_dbs(self):
+        # DB wrappers are declared here, lazy-loaded in the tests
+        self.app_db = None
+        self.asic_db = None
+        self.counters_db = None
+        self.config_db = None
+        self.flex_db = None
+        self.state_db = None
+
     def destroy(self) -> None:
-        if self.appldb:
+        if getattr(self, 'appldb', False):
             del self.appldb
 
         # In case persistent dvs was used removed all the extra server link
         # that were created
         if self.persistent:
-            for s in self.servers:
-                s.destroy()
+            self.destroy_servers()
 
         # persistent and clean-up flag are mutually exclusive
        elif self.cleanup:
             self.ctn.remove(force=True)
             self.ctn_sw.remove(force=True)
             os.system(f"rm -rf {self.mount}")
-            for s in self.servers:
-                s.destroy()
+            self.destroy_servers()
+
+    def destroy_servers(self):
+        for s in self.servers:
+            s.destroy()
+        self.servers = []
 
     def check_ready_status_and_init_db(self) -> None:
         try:
@@ -423,6 +440,7 @@ def check_ready_status_and_init_db(self) -> None:
             # Initialize the databases.
             self.init_asic_db_validator()
             self.init_appl_db_validator()
+            self.reset_dbs()
 
             # Verify that SWSS has finished initializing.
             self.check_swss_ready()
@@ -451,9 +469,9 @@ def _polling_function():
 
             for pname in self.alld:
                 if process_status.get(pname, None) != "RUNNING":
-                    return (False, None)
+                    return (False, process_status)
 
-            return (process_status.get("start.sh", None) == "EXITED", None)
+            return (process_status.get("start.sh", None) == "EXITED", process_status)
 
         wait_for_result(_polling_function, service_polling_config)
 
@@ -1556,8 +1574,15 @@ def verify_vct(self):
         print("vct verifications passed ? %s" % (ret1 and ret2))
         return ret1 and ret2
 
-@pytest.yield_fixture(scope="module")
-def dvs(request) -> DockerVirtualSwitch:
+@pytest.fixture(scope="session")
+def manage_dvs(request) -> str:
+    """
+    Main fixture to manage the lifecycle of the DVS (Docker Virtual Switch) for testing
+
+    Returns:
+        (func) update_dvs function which can be called on a per-module basis
+               to handle re-creating the DVS if necessary
+    """
     if sys.version_info[0] < 3:
         raise NameError("Python 2 is not supported, please install python 3")
 
@@ -1565,26 +1590,76 @@ def dvs(request) -> DockerVirtualSwitch:
         raise NameError("Cannot install kernel team module, please install a generic kernel")
 
     name = request.config.getoption("--dvsname")
+    using_persistent_dvs = name is not None
     forcedvs = request.config.getoption("--forcedvs")
     keeptb = request.config.getoption("--keeptb")
     imgname = request.config.getoption("--imgname")
     max_cpu = request.config.getoption("--max_cpu")
     buffer_model = request.config.getoption("--buffer_model")
-    fakeplatform = getattr(request.module, "DVS_FAKE_PLATFORM", None)
-    log_path = name if name else request.module.__name__
+    force_recreate = request.config.getoption("--force-recreate-dvs")
+    dvs = None
+    curr_fake_platform = None  # lgtm[py/unused-local-variable]
+
+    if using_persistent_dvs and force_recreate:
+        pytest.fail("Options --dvsname and --force-recreate-dvs are mutually exclusive")
+
+    def update_dvs(log_path, new_fake_platform=None):
+        """
+        Decides whether or not to create a new DVS
+
+        Create a new DVS in the following cases:
+        1. CLI option `--force-recreate-dvs` was specified (recreate for every module)
+        2. The fake_platform has changed (this can only be set at container creation,
+           so it is necessary to spin up a new DVS)
+        3. No DVS currently exists (i.e. first time startup)
+
+        Otherwise, restart the existing DVS (to get to a clean state)
+
+        Returns:
+            (DockerVirtualSwitch) a DVS object
+        """
+        nonlocal curr_fake_platform, dvs
+        if force_recreate or \
+           new_fake_platform != curr_fake_platform or \
+           dvs is None:
 
-    dvs = DockerVirtualSwitch(name, imgname, keeptb, fakeplatform, log_path, max_cpu, forcedvs, buffer_model = buffer_model)
+            if dvs is not None:
+                dvs.get_logs()
+                dvs.destroy()
 
-    yield dvs
+            dvs = DockerVirtualSwitch(name, imgname, keeptb, new_fake_platform, log_path, max_cpu, forcedvs, buffer_model = buffer_model)
+
+            curr_fake_platform = new_fake_platform
+
+        else:
+            # First generate GCDA files for GCov
+            dvs.runcmd('killall5 -15')
+            # If not re-creating the DVS, restart container
+            # between modules to ensure a consistent start state
+            dvs.net_cleanup()
+            dvs.destroy_servers()
+            dvs.create_servers()
+            dvs.restart()
+
+        return dvs
+
+    yield update_dvs
 
     dvs.get_logs()
     dvs.destroy()
 
-    # restore original config db
     if dvs.persistent:
         dvs.runcmd("mv /etc/sonic/config_db.json.orig /etc/sonic/config_db.json")
         dvs.ctn_restart()
 
+@pytest.yield_fixture(scope="module")
+def dvs(request, manage_dvs) -> DockerVirtualSwitch:
+    fakeplatform = getattr(request.module, "DVS_FAKE_PLATFORM", None)
+    name = request.config.getoption("--dvsname")
+    log_path = name if name else request.module.__name__
+
+    return manage_dvs(log_path, fakeplatform)
+
 @pytest.yield_fixture(scope="module")
 def vct(request):
diff --git a/tests/dvslib/dvs_common.py b/tests/dvslib/dvs_common.py
index 7edae4e792..b2a09d5da7 100644
--- a/tests/dvslib/dvs_common.py
+++ b/tests/dvslib/dvs_common.py
@@ -56,7 +56,7 @@ def wait_for_result(
             time.sleep(polling_config.polling_interval)
 
     if polling_config.strict:
-        message = failure_message or f"Operation timed out after {polling_config.timeout} seconds"
+        message = failure_message or f"Operation timed out after {polling_config.timeout} seconds with result {result}"
         assert False, message
 
     return (False, result)
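As an aside, the reuse-versus-recreate decision introduced above reduces to a
single predicate; a standalone Python sketch (names mirror conftest.py,
simplified for illustration):

    def should_recreate_dvs(force_recreate, new_fake_platform, curr_fake_platform, dvs):
        # 1. --force-recreate-dvs always recreates
        # 2. fake_platform can only be set at container creation time
        # 3. no container exists yet (first module in the session)
        return force_recreate or new_fake_platform != curr_fake_platform or dvs is None

    assert should_recreate_dvs(False, "broadcom", "mellanox", dvs=object())
    assert not should_recreate_dvs(False, "broadcom", "broadcom", dvs=object())

Modules whose DVS_FAKE_PLATFORM matches the previous module's value therefore
get a restarted and cleaned-up container rather than a rebuilt one.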
From 3249cdb1124c1c4297b8b85bab991b196c5c84e1 Mon Sep 17 00:00:00 2001
From: tomer-israel <76040066+tomer-israel@users.noreply.github.com>
Date: Wed, 29 Sep 2021 21:07:44 +0300
Subject: [PATCH 17/19] [PORTSYNCD] when no ports on config db on init,
 continue and set PortConfigDone (#1861)

* Continue the execution of handlePortConfigFromConfigDB when there are no
  ports in config DB, so that PortConfigDone is still set
* handlePortConfigFromConfigDB no longer returns a status; change its return
  type from bool to void

Signed-off-by: tomeri
---
 portsyncd/portsyncd.cpp | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/portsyncd/portsyncd.cpp b/portsyncd/portsyncd.cpp
index beaa008449..c55c1685af 100644
--- a/portsyncd/portsyncd.cpp
+++ b/portsyncd/portsyncd.cpp
@@ -43,7 +43,7 @@ void usage()
 }
 
 void handlePortConfigFile(ProducerStateTable &p, string file, bool warm);
-bool handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, bool warm);
+void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, bool warm);
 void handleVlanIntfFile(string file);
 void handlePortConfig(ProducerStateTable &p, map<string, KeyOpFieldsValuesTuple> &port_cfg_map);
 void checkPortInitDone(DBConnector *appl_db);
@@ -86,11 +86,7 @@ int main(int argc, char **argv)
         netlink.dumpRequest(RTM_GETLINK);
 
         cout << "Listen to link messages..." << endl;
 
-        if (!handlePortConfigFromConfigDB(p, cfgDb, warm))
-        {
-            SWSS_LOG_NOTICE("ConfigDB does not have port information, "
-                            "however ports can be added later on, continuing...");
-        }
+        handlePortConfigFromConfigDB(p, cfgDb, warm);
 
         LinkSync sync(&appl_db, &state_db);
         NetDispatcher::getInstance().registerMessageHandler(RTM_NEWLINK, &sync);
@@ -191,7 +187,7 @@ static void notifyPortConfigDone(ProducerStateTable &p)
     p.set("PortConfigDone", attrs);
 }
 
-bool handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, bool warm)
+void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, bool warm)
 {
     SWSS_LOG_ENTER();
 
@@ -204,8 +200,8 @@ bool handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, boo
 
     if (keys.empty())
     {
-        cout << "No port configuration in ConfigDB" << endl;
-        return false;
+        SWSS_LOG_NOTICE("ConfigDB does not have port information, "
+                        "however ports can be added later on, continuing...");
     }
 
     for ( auto &k : keys )
@@ -228,7 +224,6 @@ bool handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, boo
         notifyPortConfigDone(p);
     }
 
-    return true;
 }
 
 void handlePortConfig(ProducerStateTable &p, map<string, KeyOpFieldsValuesTuple> &port_cfg_map)

From d23924f86abb9a100255eee52c26a497b2a15e97 Mon Sep 17 00:00:00 2001
From: Junhua Zhai
Date: Thu, 30 Sep 2021 17:54:03 +0800
Subject: [PATCH 18/19] [gearbox] Add gearbox unit test (#1920)

* Add test_gearbox
* Add DVS_ENV for module specific dvs env variables
---
 tests/conftest.py                  |  36 ++++----
 tests/test_gearbox.py              | 143 +++++++++++++++++++++++++++++
 tests/test_mirror_ipv6_combined.py |   2 +-
 tests/test_mirror_ipv6_separate.py |   2 +-
 4 files changed, 162 insertions(+), 21 deletions(-)
 create mode 100644 tests/test_gearbox.py

diff --git a/tests/conftest.py b/tests/conftest.py
index ba295ce7c4..485f0a0a8b 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -244,7 +244,7 @@ def __init__(
         name: str = None,
         imgname: str = None,
         keeptb: bool = False,
-        fakeplatform: str = None,
+        env: list = [],
         log_path: str = None,
         max_cpu: int = 2,
         forcedvs: bool = None,
@@ -356,8 +356,6 @@ def __init__(
         self.mount = f"/var/run/redis-vs/{self.ctn_sw.name}"
         ensure_system(f"mkdir -p {self.mount}")
 
-        self.environment = [f"fake_platform={fakeplatform}"] if fakeplatform else []
-
         kwargs = {}
         if newctnname:
             kwargs["name"] = newctnname
@@ -372,7 +370,7 @@ def __init__(
             self.ctn = self.client.containers.run(imgname,
                                                   privileged=True,
                                                   detach=True,
-                                                  environment=self.environment,
+                                                  environment=env,
                                                   network_mode=f"container:{self.ctn_sw.name}",
                                                   cpu_count=max_cpu,
                                                   **kwargs)
@@ -1235,7 +1233,7 @@ def __init__(
         namespace=None,
         imgname=None,
         keeptb=False,
-        fakeplatform=None,
+        env=[],
         log_path=None,
         max_cpu=2,
         forcedvs=None,
@@ -1244,7 +1242,7 @@ def __init__(
         self.ns = namespace
         self.chassbr = "br4chs"
         self.keeptb = keeptb
-        self.fakeplatform = fakeplatform
+        self.env = env
         self.topoFile = topoFile
         self.imgname = imgname
         self.ctninfo = {}
@@ -1303,7 +1301,7 @@ def find_all_ctns(self):
         for ctn in docker.from_env().containers.list():
             if ctn.name.endswith(suffix):
                 self.dvss[ctn.name] = DockerVirtualSwitch(ctn.name, self.imgname, self.keeptb,
-                                                          self.fakeplatform, log_path=ctn.name,
+                                                          self.env, log_path=ctn.name,
                                                           max_cpu=self.max_cpu, forcedvs=self.forcedvs,
                                                           vct=self)
         if self.chassbr is None and len(self.dvss) > 0:
@@ -1421,7 +1419,7 @@ def create_vct_ctn(self, ctndir):
         if ctnname not in self.dvss:
             self.dvss[ctnname] = DockerVirtualSwitch(name=None,
                                                      imgname=self.imgname,
                                                      keeptb=self.keeptb,
-                                                     fakeplatform=self.fakeplatform,
+                                                     env=self.env,
                                                      log_path=self.log_path,
                                                      max_cpu=self.max_cpu,
                                                      forcedvs=self.forcedvs,
@@ -1598,18 +1596,18 @@ def manage_dvs(request) -> str:
     buffer_model = request.config.getoption("--buffer_model")
     force_recreate = request.config.getoption("--force-recreate-dvs")
     dvs = None
-    curr_fake_platform = None  # lgtm[py/unused-local-variable]
+    curr_dvs_env = []  # lgtm[py/unused-local-variable]
 
     if using_persistent_dvs and force_recreate:
         pytest.fail("Options --dvsname and --force-recreate-dvs are mutually exclusive")
 
-    def update_dvs(log_path, new_fake_platform=None):
+    def update_dvs(log_path, new_dvs_env=[]):
         """
         Decides whether or not to create a new DVS
 
         Create a new DVS in the following cases:
         1. CLI option `--force-recreate-dvs` was specified (recreate for every module)
-        2. The fake_platform has changed (this can only be set at container creation,
+        2. The dvs_env has changed (this can only be set at container creation,
            so it is necessary to spin up a new DVS)
         3. No DVS currently exists (i.e. first time startup)
@@ -1618,18 +1616,18 @@ def update_dvs(log_path, new_fake_platform=None):
         Returns:
             (DockerVirtualSwitch) a DVS object
         """
-        nonlocal curr_fake_platform, dvs
+        nonlocal curr_dvs_env, dvs
         if force_recreate or \
-           new_fake_platform != curr_fake_platform or \
+           new_dvs_env != curr_dvs_env or \
            dvs is None:
 
             if dvs is not None:
                 dvs.get_logs()
                 dvs.destroy()
 
-            dvs = DockerVirtualSwitch(name, imgname, keeptb, new_fake_platform, log_path, max_cpu, forcedvs, buffer_model = buffer_model)
+            dvs = DockerVirtualSwitch(name, imgname, keeptb, new_dvs_env, log_path, max_cpu, forcedvs, buffer_model = buffer_model)
 
-            curr_fake_platform = new_fake_platform
+            curr_dvs_env = new_dvs_env
 
         else:
             # First generate GCDA files for GCov
@@ -1654,11 +1652,11 @@ def update_dvs(log_path, new_fake_platform=None):
 
 @pytest.yield_fixture(scope="module")
 def dvs(request, manage_dvs) -> DockerVirtualSwitch:
-    fakeplatform = getattr(request.module, "DVS_FAKE_PLATFORM", None)
+    dvs_env = getattr(request.module, "DVS_ENV", [])
     name = request.config.getoption("--dvsname")
     log_path = name if name else request.module.__name__
 
-    return manage_dvs(log_path, fakeplatform)
+    return manage_dvs(log_path, dvs_env)
 
 @pytest.yield_fixture(scope="module")
 def vct(request):
@@ -1669,11 +1667,11 @@ def vct(request):
     imgname = request.config.getoption("--imgname")
     max_cpu = request.config.getoption("--max_cpu")
     log_path = vctns if vctns else request.module.__name__
-    fakeplatform = getattr(request.module, "DVS_FAKE_PLATFORM", None)
+    dvs_env = getattr(request.module, "DVS_ENV", [])
     if not topo:
         # use ecmp topology as default
         topo = "virtual_chassis/chassis_with_ecmp_neighbors.json"
-    vct = DockerVirtualChassisTopology(vctns, imgname, keeptb, fakeplatform, log_path, max_cpu,
+    vct = DockerVirtualChassisTopology(vctns, imgname, keeptb, dvs_env, log_path, max_cpu,
                                        forcedvs, topo)
     yield vct
     vct.get_logs(request.module.__name__)
diff --git a/tests/test_gearbox.py b/tests/test_gearbox.py
new file mode 100644
index 0000000000..00a87c2f96
--- /dev/null
+++ b/tests/test_gearbox.py
@@ -0,0 +1,143 @@
+# This test suite covers the functionality of gearbox feature
+import time
+import os
+import pytest
+from swsscommon import swsscommon
+from dvslib.dvs_database import DVSDatabase
+from dvslib.dvs_common import PollingConfig, wait_for_result
+
+# module specific dvs env variables
+DVS_ENV = ["HWSKU=brcm_gearbox_vs"]
+
+class Gearbox(object):
+    def __init__(self, dvs):
+        db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
+        t = swsscommon.Table(db, "_GEARBOX_TABLE")
+        assert len(t.getKeys()) > 0
+        sr = t.getTableNameSeparator()
+
+        # "_GEARBOX_TABLE:phy:1"
+        # "_GEARBOX_TABLE:phy:1:ports:0"
+        # "_GEARBOX_TABLE:phy:1:lanes:200"
+        self.phys = {}
+        phy_table = swsscommon.Table(db, sr.join([t.getKeyName(""), "phy"]))
+        for i in [x for x in phy_table.getKeys() if sr not in x]:
+            (status, fvs) = phy_table.get(i)
+            assert status == True
+            self.phys[i] = {"attrs" : dict(fvs)}
+
+            port_table = swsscommon.Table(db, sr.join([phy_table.getKeyName(i), "ports"]))
+            port_list = [x for x in port_table.getKeys() if sr not in x]
+            self.phys[i]["port_table"] = port_table
+            self.phys[i]["ports"] = {}
+            for j in port_list:
+                (status, fvs) = port_table.get(j)
+                assert status == True
+                self.phys[i]["ports"][j] = dict(fvs)
+
+            lane_table = swsscommon.Table(db, sr.join([phy_table.getKeyName(i), "lanes"]))
+            lane_list = [x for x in lane_table.getKeys() if sr not in x]
+            self.phys[i]["lanes"] = {}
+            for j in lane_list:
+                (status, fvs) = lane_table.get(j)
+                assert status == True
+                self.phys[i]["lanes"][j] = dict(fvs)
+
+        # "_GEARBOX_TABLE:interface:0"
+        self.interfaces = {}
+        intf_table = swsscommon.Table(db, sr.join([t.getKeyName(""), "interface"]))
+        for i in [x for x in intf_table.getKeys() if sr not in x]:
+            (status, fvs) = intf_table.get(i)
+            assert status == True
+            self.interfaces[i] = {"attrs" : dict(fvs)}
+
+    def SanityCheck(self, dvs, testlog):
+        """
+        Verify data integrity of Gearbox objects in APPL_DB
+        """
+        for i in self.interfaces:
+            phy_id = self.interfaces[i]["attrs"]["phy_id"]
+            assert phy_id in self.phys
+            assert self.interfaces[i]["attrs"]["index"] in self.phys[phy_id]["ports"]
+
+            for lane in self.interfaces[i]["attrs"]["system_lanes"].split(','):
+                assert lane in self.phys[phy_id]["lanes"]
+            for lane in self.interfaces[i]["attrs"]["line_lanes"].split(','):
+                assert lane in self.phys[phy_id]["lanes"]
+
+class GBAsic(DVSDatabase):
+    def __init__(self, db_id: int, connector: str, gearbox: Gearbox):
+        DVSDatabase.__init__(self, db_id, connector)
+        self.gearbox = gearbox
+        self.ports = {}
+        self._wait_for_gb_asic_db_to_initialize()
+
+        for connector in self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT_CONNECTOR"):
+            fvs = self.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT_CONNECTOR", connector)
+            system_port_oid = fvs.get("SAI_PORT_CONNECTOR_ATTR_SYSTEM_SIDE_PORT_ID")
+            line_port_oid = fvs.get("SAI_PORT_CONNECTOR_ATTR_LINE_SIDE_PORT_ID")
+
+            fvs = self.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", system_port_oid)
+            system_lanes = fvs.get("SAI_PORT_ATTR_HW_LANE_LIST").split(":")[-1]
+
+            fvs = self.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", line_port_oid)
+            line_lanes = fvs.get("SAI_PORT_ATTR_HW_LANE_LIST").split(":")[-1]
+
+            for i in self.gearbox.interfaces:
+                intf = self.gearbox.interfaces[i]
+                if intf["attrs"]["system_lanes"] == system_lanes:
+                    assert intf["attrs"]["line_lanes"] == line_lanes
+                    self.ports[intf["attrs"]["index"]] = (system_port_oid, line_port_oid)
+
+        assert len(self.ports) == len(self.gearbox.interfaces)
+
+    def _wait_for_gb_asic_db_to_initialize(self) -> None:
+        """Wait up to 30 seconds for the default fields to appear in ASIC DB."""
+        def _verify_db_contents():
+            if len(self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_SWITCH")) != \
+                len(self.gearbox.phys):
+                return (False, None)
+
+            if len(self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) != \
+                2 * len(self.gearbox.interfaces):
+                return (False, None)
+
+            if len(self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT_CONNECTOR")) != \
+                len(self.gearbox.interfaces):
+                return (False, None)
+
+            return (True, None)
+
+        # Verify that GB ASIC DB has been fully initialized
+        init_polling_config = PollingConfig(2, 30, strict=True)
+        wait_for_result(_verify_db_contents, init_polling_config)
+
+
+class TestGearbox(object):
+    def test_GearboxSanity(self, dvs, testlog):
+        Gearbox(dvs).SanityCheck(dvs, testlog)
+
+    def test_GbAsicFEC(self, dvs, testlog):
+        gbasic = GBAsic(swsscommon.GB_ASIC_DB, dvs.redis_sock, Gearbox(dvs))
+
+        # set fec rs on port 0 of phy 1
+        fvs = swsscommon.FieldValuePairs([("system_fec","rs")])
+        gbasic.gearbox.phys["1"]["port_table"].set("0", fvs)
+        fvs = swsscommon.FieldValuePairs([("line_fec","rs")])
+        gbasic.gearbox.phys["1"]["port_table"].set("0", fvs)
+
+        """FIXME: uncomment it after GearboxOrch is implemented
+        # validate if fec rs is pushed to system/line port in gb asic db
+        system_port_oid, line_port_oid = gbasic.ports["0"]
+        expected_fields = {"SAI_PORT_ATTR_FEC_MODE":"SAI_PORT_FEC_MODE_RS"}
+        gbasic.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", \
+            system_port_oid, expected_fields)
+        gbasic.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", \
+            line_port_oid, expected_fields)
+        """
+
+
+# Add dummy always-pass test at end as a workaround for an issue where,
+# if Flaky fails on the final test, it invokes module tear-down before retrying
+def test_nonflaky_dummy():
+    pass
diff --git a/tests/test_mirror_ipv6_combined.py b/tests/test_mirror_ipv6_combined.py
index b2514f3ae9..3eb5f7ab51 100644
--- a/tests/test_mirror_ipv6_combined.py
+++ b/tests/test_mirror_ipv6_combined.py
@@ -3,7 +3,7 @@
 
 from swsscommon import swsscommon
 
-DVS_FAKE_PLATFORM = "broadcom"
+DVS_ENV = ["fake_platform=broadcom"]
 
 
 class TestMirror(object):
diff --git a/tests/test_mirror_ipv6_separate.py b/tests/test_mirror_ipv6_separate.py
index 50e40236a4..8d96633325 100644
--- a/tests/test_mirror_ipv6_separate.py
+++ b/tests/test_mirror_ipv6_separate.py
@@ -3,7 +3,7 @@
 
 from swsscommon import swsscommon
 
-DVS_FAKE_PLATFORM = "mellanox"
+DVS_ENV = ["fake_platform=mellanox"]
 
 
 class TestMirror(object):
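With the change above, a test module opts into container environment variables
by declaring a DVS_ENV list instead of DVS_FAKE_PLATFORM. A hypothetical module
for illustration (file and test names invented):

    # tests/test_example.py -- hypothetical module, not part of this series
    # manage_dvs() recreates the DVS container whenever this list differs from
    # the previous module's; a fake platform is now just another env entry.
    DVS_ENV = ["fake_platform=broadcom"]

    class TestExample(object):
        def test_something(self, dvs, testlog):
            # dvs is provided by the module-scoped fixture in conftest.py
            pass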
From da49332b80853d5dd1e88f92f266e4970d8112a8 Mon Sep 17 00:00:00 2001
From: Ashok Daparthi-Dell
Date: Thu, 30 Sep 2021 12:04:58 -0700
Subject: [PATCH 19/19] Reverted skipped test_buffer_dynamic test cases (#1937)

What I did
Reverted the test_buffer_dynamic test cases skipped as part of #1754

Why I did it

How I verified it
sudo pytest --dvsname=vs --forcedvs -sv --keeptb test_buffer_dynamic.py

======================================================= test session starts ========================================================
platform linux -- Python 3.6.9, pytest-6.2.4, py-1.10.0, pluggy-0.13.1 -- /usr/bin/python3
cachedir: .pytest_cache
rootdir: /home/ashokd/swss-vs/ashok-swss/sonic-swss/tests
plugins: flaky-3.7.0
collected 9 items

test_buffer_dynamic.py::TestBufferMgrDyn::test_changeSpeed remove extra link dummy PASSED
test_buffer_dynamic.py::TestBufferMgrDyn::test_changeCableLen PASSED
test_buffer_dynamic.py::TestBufferMgrDyn::test_MultipleLosslessPg PASSED
test_buffer_dynamic.py::TestBufferMgrDyn::test_headroomOverride PASSED
test_buffer_dynamic.py::TestBufferMgrDyn::test_mtuUpdate PASSED
test_buffer_dynamic.py::TestBufferMgrDyn::test_nonDefaultAlpha PASSED
test_buffer_dynamic.py::TestBufferMgrDyn::test_sharedHeadroomPool PASSED
test_buffer_dynamic.py::TestBufferMgrDyn::test_shutdownPort PASSED
test_buffer_dynamic.py::TestBufferMgrDyn::test_autoNegPort PASSED
---
 orchagent/orch.cpp           | 37 +------------------------------------
 tests/test_buffer_dynamic.py | 17 ++++-------------
 2 files changed, 5 insertions(+), 49 deletions(-)

diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp
index 1015a30c5b..d970c83f07 100644
--- a/orchagent/orch.cpp
+++ b/orchagent/orch.cpp
@@ -330,42 +330,7 @@ bool Orch::parseReference(type_map &type_maps, string &ref_in, const string &type_name, string &object_name)
     if ((ref_in[0] == ref_start) || (ref_in[ref_in.size()-1] == ref_end))
     {
         SWSS_LOG_ERROR("malformed reference:%s. Must not be surrounded by [ ]\n", ref_in.c_str());
-        /*
-         * Accepting old format until sonic-buildimage changes merged, swss tests depends on
-         * generate qos configs which are with old format. If we skip the old format
-         * isPortAllReady() will fail whcih is set ready by checking buffer config exists in CONFIG_DB are
-         * applied to ASIC_DB or not.
-         * Due to this All swss test cases are failing.
-         * This to avoid test case failures until merge happens.
-         *
-         */
-        if (ref_in.size() == 2)
-        {
-            // value set by user is "[]"
-            // Deem it as a valid format
-            // clear both type_name and object_name
-            // as an indication to the caller that
-            // such a case has been encountered
-            // type_name.clear();
-            object_name.clear();
-            return true;
-        }
-        string ref_content = ref_in.substr(1, ref_in.size() - 2);
-        vector<string> tokens;
-        tokens = tokenize(ref_content, delimiter);
-        if (tokens.size() != 2)
-        {
-            tokens = tokenize(ref_content, config_db_key_delimiter);
-            if (tokens.size() != 2)
-            {
-                SWSS_LOG_ERROR("malformed reference:%s. Must contain 2 tokens\n", ref_content.c_str());
-                return false;
-            }
-        }
-        object_name = tokens[1];
-        SWSS_LOG_ERROR("parsed: type_name:%s, object_name:%s", type_name.c_str(), object_name.c_str());
-
-        return true;
+        return false;
     }
     auto type_it = type_maps.find(type_name);
     if (type_it == type_maps.end())
diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py
index a098d8dec5..b3ce795ff3 100644
--- a/tests/test_buffer_dynamic.py
+++ b/tests/test_buffer_dynamic.py
@@ -141,7 +141,6 @@ def change_cable_length(self, cable_length):
         cable_lengths['Ethernet0'] = cable_length
         self.config_db.update_entry('CABLE_LENGTH', 'AZURE', cable_lengths)
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_changeSpeed(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -193,7 +192,6 @@ def test_changeSpeed(self, dvs, testlog):
         # Shutdown interface
         dvs.runcmd('config interface shutdown Ethernet0')
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_changeCableLen(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -244,7 +242,6 @@ def test_changeCableLen(self, dvs, testlog):
         # Shutdown interface
         dvs.runcmd('config interface shutdown Ethernet0')
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_MultipleLosslessPg(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -292,7 +289,6 @@ def test_MultipleLosslessPg(self, dvs, testlog):
         # Shutdown interface
         dvs.runcmd('config interface shutdown Ethernet0')
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_headroomOverride(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -376,7 +372,6 @@ def test_headroomOverride(self, dvs, testlog):
         # Shutdown interface
         dvs.runcmd('config interface shutdown Ethernet0')
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_mtuUpdate(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -411,7 +406,6 @@ def test_mtuUpdate(self, dvs, testlog):
         # Shutdown interface
         dvs.runcmd('config interface shutdown Ethernet0')
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_nonDefaultAlpha(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -454,7 +448,6 @@ def test_nonDefaultAlpha(self, dvs, testlog):
         # Shutdown interface
         dvs.runcmd('config interface shutdown Ethernet0')
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_sharedHeadroomPool(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -553,7 +546,6 @@ def test_sharedHeadroomPool(self, dvs, testlog):
         # Shutdown interface
         dvs.runcmd('config interface shutdown Ethernet0')
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_shutdownPort(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -597,7 +589,6 @@ def test_shutdownPort(self, dvs, testlog):
         # Shutdown interface
         dvs.runcmd("config interface shutdown Ethernet0")
 
-    @pytest.mark.skip("Skip to be removed after sonic-buildimage changes get merged")
     def test_autoNegPort(self, dvs, testlog):
         self.setup_db(dvs)
 
@@ -623,11 +614,11 @@ def test_autoNegPort(self, dvs, testlog):
         self.app_db.wait_for_entry("BUFFER_PG_TABLE", "Ethernet0:3-4")
         self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile)
         self.check_new_profile_in_asic_db(dvs, expectedProfile)
-        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(expectedProfile)})
+        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile})
 
         # Configure another lossless PG on the interface
         self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'NULL'})
-        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(expectedProfile)})
+        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile})
 
         # Disable port auto negotiation
         dvs.runcmd('config interface autoneg Ethernet0 disabled')
@@ -636,8 +627,8 @@ def test_autoNegPort(self, dvs, testlog):
         expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen)
         self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile)
         self.check_new_profile_in_asic_db(dvs, expectedProfile)
-        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(expectedProfile)})
-        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(expectedProfile)})
+        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile})
+        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile})
 
         # Remove lossless PGs on the interface
         self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4')
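The test updates in this final patch all track the same schema change: BUFFER_PG
profile references in APPL_DB now hold the bare profile name rather than the
bracketed "[BUFFER_PROFILE_TABLE:...]" form, which parseReference now rejects.
A small sketch of asserting against the new form (helper name hypothetical):

    def check_lossless_pg_profile(app_db, port, pgs, profile_name):
        # After this series the APPL_DB field holds the bare profile name,
        # e.g. "pg_lossless_100000_5m_profile", rather than
        # "[BUFFER_PROFILE_TABLE:pg_lossless_100000_5m_profile]"
        app_db.wait_for_field_match("BUFFER_PG_TABLE",
                                    "{}:{}".format(port, pgs),
                                    {"profile": profile_name})

    # e.g. check_lossless_pg_profile(self.app_db, "Ethernet0", "3-4", expectedProfile)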