
net/nfp: fix resource leak for exit of flower firmware
Fix a resource leak in the exit logic of the flower firmware. The previous PF close path released only part of the allocated resources and never freed the representor ports. Move the close logic into the representor dev_close callback: each representor frees its own resources there, and the shared application and PF resources are released only after the last representor has been closed.

Fixes: e1124c4 ("net/nfp: add flower representor framework")
Cc: [email protected]

Signed-off-by: Chaoyong He <[email protected]>
Reviewed-by: Long Wu <[email protected]>
Reviewed-by: Peng Zhang <[email protected]>
hechaoyong authored and ferruhy committed Dec 4, 2023
1 parent fbf1454 · commit 39b3951
Showing 3 changed files with 80 additions and 58 deletions.
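
The diffs below implement the fix as a "last one out" teardown: nfp_flower_repr_dev_close() first frees the representor being closed, then returns early while any other VF, physical-port or PF representor is still open, so only the final close reaches the shared nfp_uninit_app_fw_flower() and nfp_pf_uninit() calls. The following is a minimal, self-contained C sketch of that pattern; the names (app_state, repr_close, MAX_REPRS) are illustrative and are not the driver's actual structures:

#include <stdio.h>
#include <stdlib.h>

#define MAX_REPRS 4

struct app_state {
	void *reprs[MAX_REPRS];   /* per-representor private data */
	void *shared_resource;    /* resource owned by the whole application */
};

static void
repr_close(struct app_state *app, int idx)
{
	int i;

	/* Free this representor's own resources first. */
	free(app->reprs[idx]);
	app->reprs[idx] = NULL;

	/* While any representor is still open, keep the shared state alive. */
	for (i = 0; i < MAX_REPRS; i++) {
		if (app->reprs[i] != NULL)
			return;
	}

	/* Last one out: now it is safe to free the shared resources. */
	printf("last representor closed, freeing shared resources\n");
	free(app->shared_resource);
	app->shared_resource = NULL;
}

int
main(void)
{
	int i;
	struct app_state app = { .shared_resource = malloc(64) };

	for (i = 0; i < MAX_REPRS; i++)
		app.reprs[i] = malloc(16);

	/* Close in any order; only the final close releases the shared state. */
	for (i = MAX_REPRS - 1; i >= 0; i--)
		repr_close(&app, i);

	return 0;
}

Closing the representors in any order gives the same result: the shared resources are freed exactly once, by whichever close call observes that every slot is already empty.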
drivers/net/nfp/flower/nfp_flower.c (73 changes: 15 additions & 58 deletions)
@@ -82,71 +82,13 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
	return 0;
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_flower_pf_close(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	struct nfp_flower_representor *repr;
	struct nfp_app_fw_flower *app_fw_flower;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	repr = dev->data->dev_private;
	hw = repr->app_fw_flower->pf_hw;
	pf_dev = hw->pf_dev;
	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);

	nfp_mtr_priv_uninit(pf_dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);

	/* Now it is safe to free all PF resources */
	PMD_DRV_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->qc_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(app_fw_flower);
	rte_free(pf_dev);

	return 0;
}

static const struct eth_dev_ops nfp_flower_pf_vnic_ops = {
	.dev_infos_get = nfp_net_infos_get,
	.link_update = nfp_net_link_update,
	.dev_configure = nfp_net_configure,

	.dev_start = nfp_flower_pf_start,
	.dev_stop = nfp_net_stop,
	.dev_close = nfp_flower_pf_close,
};

static inline struct nfp_flower_representor *
@@ -858,6 +800,21 @@ nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
	return ret;
}

void
nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev)
{
	struct nfp_app_fw_flower *app_fw_flower;

	app_fw_flower = pf_dev->app_fw_priv;
	nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw);
	nfp_cpp_area_free(app_fw_flower->ctrl_hw->ctrl_area);
	nfp_cpp_area_free(pf_dev->ctrl_area);
	rte_free(app_fw_flower->pf_hw);
	nfp_mtr_priv_uninit(pf_dev);
	nfp_flow_priv_uninit(pf_dev);
	rte_free(app_fw_flower);
}

int
nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
{
drivers/net/nfp/flower/nfp_flower.h (1 change: 1 addition & 0 deletions)
@@ -106,6 +106,7 @@ nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower)

int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
		const struct nfp_dev_info *dev_info);
void nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev);
int nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
		struct rte_mbuf *mbuf,
drivers/net/nfp/flower/nfp_flower_representor.c (64 changes: 64 additions & 0 deletions)
@@ -328,12 +328,75 @@ nfp_flower_repr_free(struct nfp_flower_representor *repr,
	}
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_flower_repr_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	struct nfp_flower_representor *repr;
	struct nfp_app_fw_flower *app_fw_flower;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	repr = dev->data->dev_private;
	app_fw_flower = repr->app_fw_flower;
	hw = app_fw_flower->pf_hw;
	pf_dev = hw->pf_dev;

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	if (pf_dev->app_fw_id != NFP_APP_FW_FLOWER_NIC)
		return -EINVAL;

	nfp_flower_repr_free(repr, repr->repr_type);

	for (i = 0; i < MAX_FLOWER_VFS; i++) {
		if (app_fw_flower->vf_reprs[i] != NULL)
			return 0;
	}

	for (i = 0; i < NFP_MAX_PHYPORTS; i++) {
		if (app_fw_flower->phy_reprs[i] != NULL)
			return 0;
	}

	if (app_fw_flower->pf_repr != NULL)
		return 0;

	/* Now it is safe to free all PF resources */
	nfp_uninit_app_fw_flower(pf_dev);
	nfp_pf_uninit(pf_dev);

	return 0;
}

static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = {
	.dev_infos_get = nfp_flower_repr_dev_infos_get,

	.dev_start = nfp_flower_pf_start,
	.dev_configure = nfp_net_configure,
	.dev_stop = nfp_net_stop,
	.dev_close = nfp_flower_repr_dev_close,

	.rx_queue_setup = nfp_net_rx_queue_setup,
	.tx_queue_setup = nfp_net_tx_queue_setup,
@@ -356,6 +419,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
	.dev_start = nfp_flower_repr_dev_start,
	.dev_configure = nfp_net_configure,
	.dev_stop = nfp_flower_repr_dev_stop,
	.dev_close = nfp_flower_repr_dev_close,

	.rx_queue_setup = nfp_flower_repr_rx_queue_setup,
	.tx_queue_setup = nfp_flower_repr_tx_queue_setup,
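Compared with the removed nfp_flower_pf_close(), which released only the PF-level areas and tables (ctrl_area, qc_area, hwinfo, sym_tbl, cpp), the new teardown also covers the ctrl vNIC and its ctrl_area, the pf_hw structure, the flow private data and the representors themselves via nfp_flower_repr_free(); the remaining PF-level frees presumably move into nfp_pf_uninit(), whose body is not part of this diff.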
