From a8de843fad8b9bb43a77a48e13b75bf5138aa515 Mon Sep 17 00:00:00 2001
From: Srujana
Date: Tue, 30 Jul 2024 20:39:33 +0000
Subject: [PATCH] lib: Memory spike reduction for sh cmds at scale
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The output buffer vty->obuf is a linked list where each element is of
4KB. Currently, when a huge sh command like “show ip route json” is
executed on a large scale, all the vty_out’s are processed and the
entire data is accumulated. After the entire vty execution, vtysh_flush
processes and puts this data in the socket (131KB at a time). Problem
here is the memory spike for such heavy duty show commands.

The fix here is to chunkify the output on VTY shell by flushing it
intermediately for every 128 KB of output accumulated and free the
memory allocated for the buffer data. This way, we achieve ~25-30%
reduction in the memory spike.

Signed-off-by: Srujana
Signed-off-by: Rajasekar Raja
---
 lib/vty.c     | 24 ++++++++++++++++++++++++
 lib/vty.h     |  1 +
 vtysh/vtysh.c | 17 +++++++++++++++++
 3 files changed, 42 insertions(+)

diff --git a/lib/vty.c b/lib/vty.c
index d0bbf0e61a5c..da17a444e222 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -345,8 +345,16 @@ int vty_out(struct vty *vty, const char *format, ...)
 	case VTY_SHELL_SERV:
 	case VTY_FILE:
 	default:
+		vty->vty_buf_size_written += strlen(filtered);
 		/* print without crlf replacement */
 		buffer_put(vty->obuf, (uint8_t *)filtered, strlen(filtered));
+		/* For every 32 * 4 KB invoke vtysh_flush where we
+		 * put the data of collective vty->obuf Linked List items on the
+		 * socket and free the vty->obuf data. This way, we are flushing
+		 * data every 128KB for the show command.
+		 */
+		if (vty->vty_buf_size_written >= 32 * 4096)
+			vtysh_flush(vty);
 		break;
 	}
 
@@ -2141,6 +2149,21 @@ static void vtysh_accept(struct event *thread)
 		close(sock);
 		return;
 	}
+	/* Setting the SND buffer size to 16MB to increase the buffer size
+	 * the socket can hold before sending it to VTY shell
+	 */
+	uint32_t sndbufsize = 4096*4096;
+	int ret;
+
+	ret = setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (char *)&sndbufsize,
+			 sizeof(sndbufsize));
+	if (ret < 0) {
+		flog_err(EC_LIB_SOCKET,
+			 "Cannot set socket %d send buffer size, %s", sock,
+			 safe_strerror(errno));
+		close(sock);
+		return;
+	}
 
 	set_cloexec(sock);
 
 #ifdef VTYSH_DEBUG
@@ -2227,6 +2250,7 @@ static int vtysh_flush(struct vty *vty)
 		vty_close(vty);
 		return -1;
 	case BUFFER_EMPTY:
+		vty->vty_buf_size_written = 0;
 		break;
 	}
 	return 0;
diff --git a/lib/vty.h b/lib/vty.h
index c336a816cc1f..712b983817b4 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -236,6 +236,7 @@ struct vty {
 	uintptr_t mgmt_req_pending_data;
 	bool mgmt_locked_candidate_ds;
 	bool mgmt_locked_running_ds;
+	uint64_t vty_buf_size_written;
 };
 
 static inline void vty_push_context(struct vty *vty, int node, uint64_t id)
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index e657aa8af0ac..f50290b9202b 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -4721,6 +4721,23 @@ static int vtysh_connect(struct vtysh_client *vclient)
 		close(sock);
 		return -1;
 	}
+	/* Setting the RCV buffer size to 16MB to increase the buffer size
+	 * the socket can hold after receiving from other process
+	 */
+	uint32_t rcvbufsize = 4096 * 4096;
+
+	ret = setsockopt(sock, SOL_SOCKET, SO_RCVBUF, (char *)&rcvbufsize,
+			 sizeof(rcvbufsize));
+	if (ret < 0) {
+#ifdef DEBUG
+		fprintf(stderr,
+			"Cannot set socket %d rcv buffer size, %s\n",
+			sock, safe_strerror(errno));
+#endif /* DEBUG */
+		close(sock);
+		return -1;
+	}
+
 	vclient->fd = sock;
 
 	return 0;