From 283cc511781f9e076baf8564dae234de52cb290a Mon Sep 17 00:00:00 2001
From: Donald Sharp <sharpd@nvidia.com>
Date: Wed, 15 Jan 2025 11:16:10 -0500
Subject: [PATCH 1/4] ospfd: Fix Coverity SA #1617470, 76 and 78

msg_new takes a uint16_t, but the length variable
passed down is an unsigned int, thus 32 bits.
It's possible, though highly unlikely, that the
msglen could be greater than 16 bits can hold.
Let's add some checks to ensure that
this cannot happen.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
---
 ospfd/ospf_api.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/ospfd/ospf_api.c b/ospfd/ospf_api.c
index 213ee8c1fd16..cfc13fcc53a5 100644
--- a/ospfd/ospf_api.c
+++ b/ospfd/ospf_api.c
@@ -514,6 +514,12 @@ struct msg *new_msg_originate_request(uint32_t seqnum, struct in_addr ifaddr,
 	omsglen += sizeof(struct msg_originate_request)
 		   - sizeof(struct lsa_header);
 
+	if (omsglen > UINT16_MAX) {
+		zlog_warn("%s: LSA specified is bigger than maximum LSA size, something is wrong",
+			  __func__);
+		omsglen = UINT16_MAX;
+	}
+
 	return msg_new(MSG_ORIGINATE_REQUEST, omsg, seqnum, omsglen);
 }
 
@@ -639,6 +645,12 @@ struct msg *new_msg_lsa_change_notify(uint8_t msgtype, uint32_t seqnum,
 	memcpy(nmsg_data, data, len);
 	len += sizeof(struct msg_lsa_change_notify) - sizeof(struct lsa_header);
 
+	if (len > UINT16_MAX) {
+		zlog_warn("%s: LSA specified is bigger than maximum LSA size, something is wrong",
+			  __func__);
+		len = UINT16_MAX;
+	}
+
 	return msg_new(msgtype, nmsg, seqnum, len);
 }
 
@@ -666,6 +678,12 @@ struct msg *new_msg_reachable_change(uint32_t seqnum, uint16_t nadd,
 	nmsg->nremove = htons(nremove);
 	len = sizeof(*nmsg) + insz * (nadd + nremove);
 
+	if (len > UINT16_MAX) {
+		zlog_warn("%s: LSA specified is bigger than maximum LSA size, something is wrong",
+			  __func__);
+		len = UINT16_MAX;
+	}
+
 	return msg_new(MSG_REACHABLE_CHANGE, nmsg, seqnum, len);
 }
 

From 4b967527376db4f08c50f9c32cc77556700d0eec Mon Sep 17 00:00:00 2001
From: Donald Sharp <sharpd@nvidia.com>
Date: Wed, 15 Jan 2025 13:03:52 -0500
Subject: [PATCH 2/4] zebra: Add some documentation on when zserv_open should
 be used

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
---
 zebra/zserv.c | 4 ++++
 zebra/zserv.h | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/zebra/zserv.c b/zebra/zserv.c
index 7ef358232991..6965c285cdc0 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -937,6 +937,10 @@ void zserv_close(void)
 /*
  * Open zebra's ZAPI listener socket. This is done early during startup,
  * before zebra is ready to listen and accept client connections.
+ *
+ * This function should only ever be called from the startup pthread
+ * in main.c.  Calling it multiple times will cause problems because
+ * it re-initializes the zsock global variable.
  */
 void zserv_open(const char *path)
 {
diff --git a/zebra/zserv.h b/zebra/zserv.h
index ce47ef19fa36..1ff7ccd9810c 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -262,6 +262,9 @@ extern void zserv_close(void);
  *
  * path
  *    where to place the Unix domain socket
+ *
+ * This function *should* only ever be called from
+ * main() and only ever from a single pthread.
  */
 extern void zserv_open(const char *path);
 

From f94ad538cf93d2b18fa4181e8508f08f94f0a2cc Mon Sep 17 00:00:00 2001
From: Donald Sharp <sharpd@nvidia.com>
Date: Wed, 15 Jan 2025 13:26:58 -0500
Subject: [PATCH 3/4] bgpd: Ensure ibuf count is protected by mutex

Grab the count of streams in ibuf while it is protected
by a mutex, since this data is written by another pthread.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
---
 bgpd/bgp_fsm.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 0e3ed9f0d1fb..62b5a84e8798 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -525,8 +525,9 @@ static void bgp_holdtime_timer(struct event *thread)
 	 * for systems where we are heavily loaded for one
 	 * reason or another.
 	 */
-	inq_count = atomic_load_explicit(&connection->ibuf->count,
-					 memory_order_relaxed);
+	frr_with_mutex (&connection->io_mtx) {
+		inq_count = atomic_load_explicit(&connection->ibuf->count, memory_order_relaxed);
+	}
 	if (inq_count)
 		BGP_TIMER_ON(connection->t_holdtime, bgp_holdtime_timer,
 			     peer->v_holdtime);

From 19af3f3d7af0f8904794dae3c36f60ed1d5a3cc8 Mon Sep 17 00:00:00 2001
From: Donald Sharp <sharpd@nvidia.com>
Date: Thu, 16 Jan 2025 11:17:11 -0500
Subject: [PATCH 4/4] zebra: Ensure that changes to dg_update_list are
 protected by mutex

Access to dg_update_list is protected by dg_mutex in all
other locations.  Let's add mutex usage around the initialization
of dg_update_list as well, even though it happens during startup,
just to keep things consistent.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
---
 zebra/zebra_dplane.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index b57c930154dc..548ea7255847 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -7698,7 +7698,10 @@ static void zebra_dplane_init_internal(void)
 
 	dplane_prov_list_init(&zdplane_info.dg_providers);
 
-	dplane_ctx_list_init(&zdplane_info.dg_update_list);
+	frr_with_mutex (&zdplane_info.dg_mutex) {
+		dplane_ctx_list_init(&zdplane_info.dg_update_list);
+	}
+
 	zns_info_list_init(&zdplane_info.dg_zns_list);
 
 	zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;