From 7e107d4c88622e5e35c48b7bdbaad40f33d193e3 Mon Sep 17 00:00:00 2001 From: Dong Yibo Date: Fri, 11 Oct 2024 14:15:00 +0800 Subject: [PATCH] net: mucse: initial support for rnp drivers from Mucse Technology This driver is to support mucse n10-2ports ethernet card Signed-off-by: Dong Yibo --- .../configs/deepin_arm64_desktop_defconfig | 6 + .../deepin_loongarch_desktop_defconfig | 6 + arch/x86/configs/deepin_x86_desktop_defconfig | 6 + drivers/net/ethernet/mucse/Kconfig | 55 + drivers/net/ethernet/mucse/Makefile | 1 + drivers/net/ethernet/mucse/rnp/Makefile | 25 + drivers/net/ethernet/mucse/rnp/rnp.h | 1169 +++ drivers/net/ethernet/mucse/rnp/rnp_common.c | 17 + drivers/net/ethernet/mucse/rnp/rnp_common.h | 383 + drivers/net/ethernet/mucse/rnp/rnp_dcb.c | 351 + drivers/net/ethernet/mucse/rnp/rnp_dcb.h | 36 + drivers/net/ethernet/mucse/rnp/rnp_debugfs.c | 522 ++ drivers/net/ethernet/mucse/rnp/rnp_ethtool.c | 1927 ++++ drivers/net/ethernet/mucse/rnp/rnp_ethtool.h | 125 + drivers/net/ethernet/mucse/rnp/rnp_lib.c | 1334 +++ drivers/net/ethernet/mucse/rnp/rnp_main.c | 7943 +++++++++++++++++ drivers/net/ethernet/mucse/rnp/rnp_mbx.c | 650 ++ drivers/net/ethernet/mucse/rnp/rnp_mbx.h | 238 + drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c | 1495 ++++ drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h | 1135 +++ drivers/net/ethernet/mucse/rnp/rnp_mpe.c | 220 + drivers/net/ethernet/mucse/rnp/rnp_mpe.h | 12 + drivers/net/ethernet/mucse/rnp/rnp_n10.c | 4813 ++++++++++ drivers/net/ethernet/mucse/rnp/rnp_param.c | 346 + drivers/net/ethernet/mucse/rnp/rnp_pcs.c | 33 + drivers/net/ethernet/mucse/rnp/rnp_pcs.h | 9 + drivers/net/ethernet/mucse/rnp/rnp_phy.h | 73 + drivers/net/ethernet/mucse/rnp/rnp_ptp.c | 688 ++ drivers/net/ethernet/mucse/rnp/rnp_ptp.h | 99 + drivers/net/ethernet/mucse/rnp/rnp_regs.h | 820 ++ drivers/net/ethernet/mucse/rnp/rnp_sriov.c | 1731 ++++ drivers/net/ethernet/mucse/rnp/rnp_sriov.h | 41 + drivers/net/ethernet/mucse/rnp/rnp_sysfs.c | 2239 +++++ .../net/ethernet/mucse/rnp/rnp_tc_u32_parse.h | 56 + drivers/net/ethernet/mucse/rnp/rnp_type.h | 1298 +++ drivers/net/ethernet/mucse/rnp/version.h | 4 + 36 files changed, 29906 insertions(+) create mode 100644 drivers/net/ethernet/mucse/rnp/Makefile create mode 100644 drivers/net/ethernet/mucse/rnp/rnp.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_common.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_common.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_dcb.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_dcb.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_debugfs.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_ethtool.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_ethtool.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_lib.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_main.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mbx.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mbx.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mpe.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mpe.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_n10.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_param.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_pcs.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_pcs.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_phy.h create mode 100644 
drivers/net/ethernet/mucse/rnp/rnp_ptp.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_ptp.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_regs.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_sriov.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_sriov.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_sysfs.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_type.h create mode 100644 drivers/net/ethernet/mucse/rnp/version.h diff --git a/arch/arm64/configs/deepin_arm64_desktop_defconfig b/arch/arm64/configs/deepin_arm64_desktop_defconfig index 358fda65842f..575190b3fa5c 100644 --- a/arch/arm64/configs/deepin_arm64_desktop_defconfig +++ b/arch/arm64/configs/deepin_arm64_desktop_defconfig @@ -1411,6 +1411,12 @@ CONFIG_ICE=m CONFIG_FM10K=m CONFIG_IGC=m CONFIG_MGBE=m +CONFIG_MXGBE=m +CONFIG_MXGBE_FIX_VF_QUEUE=y +CONFIG_MXGBE_FIX_MAC_PADDING=y +# CONFIG_MXGBE_OPTM_WITH_LARGE is not set +CONFIG_MXGBE_MSIX_COUNT=64 +CONFIG_MXGBE_DCB=y CONFIG_JME=m CONFIG_ADIN1110=m CONFIG_LITEX_LITEETH=m diff --git a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig index 8f8044cb66f1..65489c92b67d 100644 --- a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig +++ b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig @@ -1355,6 +1355,12 @@ CONFIG_ICE=m CONFIG_FM10K=m CONFIG_IGC=m CONFIG_MGBE=m +CONFIG_MXGBE=m +CONFIG_MXGBE_FIX_VF_QUEUE=y +CONFIG_MXGBE_FIX_MAC_PADDING=y +# CONFIG_MXGBE_OPTM_WITH_LARGE is not set +CONFIG_MXGBE_MSIX_COUNT=64 +CONFIG_MXGBE_DCB=y CONFIG_JME=m CONFIG_ADIN1110=m CONFIG_LITEX_LITEETH=m diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index cb48541a0705..91a8b0d78fbe 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -1314,6 +1314,12 @@ CONFIG_ICE=m CONFIG_FM10K=m CONFIG_IGC=m CONFIG_MGBE=m +CONFIG_MXGBE=m +CONFIG_MXGBE_FIX_VF_QUEUE=y +CONFIG_MXGBE_FIX_MAC_PADDING=y +# CONFIG_MXGBE_OPTM_WITH_LARGE is not set +CONFIG_MXGBE_MSIX_COUNT=64 +CONFIG_MXGBE_DCB=y CONFIG_JME=m CONFIG_ADIN1110=m CONFIG_MVMDIO=m diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig index f86972332d9f..52d1d88463ab 100644 --- a/drivers/net/ethernet/mucse/Kconfig +++ b/drivers/net/ethernet/mucse/Kconfig @@ -44,5 +44,60 @@ config MGBE_MSIX_COUNT help MXGBE range [2,26]. +config MXGBE + tristate "Mucse(R) 1/10/25/40GbE PCI Express adapters support" + depends on PCI + imply PTP_1588_CLOCK + help + This driver supports Mucse(R) 1/10/25/40GbE PCI Express family of + adapters. + + To compile this driver as a module, choose M here. The module + will be called rnp. + +config MXGBE_FIX_VF_QUEUE + bool "Fix VF Queue Used(pf)" + default y + depends on MXGBE + help + Say Y here if you want to fix vf queue order in the driver. + + If unsure, say N. + +config MXGBE_FIX_MAC_PADDING + bool "Close Mac Padding Function(pf)" + default y + depends on MXGBE + help + Say Y here if you want to fix close mac padding in the driver. + + If unsure, say N. + +config MXGBE_OPTM_WITH_LARGE + bool "Reduce Memory Cost In Large PAGE_SIZE(>8192)" + default n + depends on MXGBE + help + Say Y here if you want to reduce memory cost in large PAGE_SIZE. + + If unsure, say N. + +config MXGBE_MSIX_COUNT + int "Number of msix count" + default "64" + depends on MXGBE + help + MXGBE range [2,64]. 
+ +config MXGBE_DCB + bool "Data Center Bridging (DCB) Support" + default y + depends on MXGBE && DCB + help + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. + + If unsure, say N. + endif # NET_VENDOR_MUCSE diff --git a/drivers/net/ethernet/mucse/Makefile b/drivers/net/ethernet/mucse/Makefile index 5d4eff7b5b6c..58f8f9188f90 100644 --- a/drivers/net/ethernet/mucse/Makefile +++ b/drivers/net/ethernet/mucse/Makefile @@ -4,3 +4,4 @@ # obj-$(CONFIG_MGBE) += rnpgbe/ +obj-$(CONFIG_MXGBE) += rnp/ diff --git a/drivers/net/ethernet/mucse/rnp/Makefile b/drivers/net/ethernet/mucse/rnp/Makefile new file mode 100644 index 000000000000..62d3acbba458 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2022 - 2024 Mucse Corporation +# +# Makefile for the Mucse(R) 10GbE-2ports PCI Express ethernet driver +# +# + +obj-$(CONFIG_MXGBE) += rnp.o +rnp-objs := \ + rnp_main.o \ + rnp_common.o \ + rnp_ethtool.o \ + rnp_lib.o \ + rnp_mbx.o \ + rnp_pcs.o \ + rnp_n10.o \ + rnp_mbx_fw.o\ + rnp_sriov.o \ + rnp_param.o \ + rnp_sysfs.o \ + rnp_ptp.o \ + rnp_mpe.o + +rnp-$(CONFIG_DCB) += rnp_dcb.o +rnp-$(CONFIG_DEBUG_FS) += rnp_debugfs.o diff --git a/drivers/net/ethernet/mucse/rnp/rnp.h b/drivers/net/ethernet/mucse/rnp/rnp.h new file mode 100644 index 000000000000..87da29f489e7 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp.h @@ -0,0 +1,1169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_H_ +#define _RNP_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp_type.h" +#include "rnp_common.h" +#include "rnp_dcb.h" + +/* common prefix used by pr_<> macros */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define RNP_ALLOC_PAGE_ORDER 0 +#define RNP_PAGE_BUFFER_NUMS(ring) \ + ((1 << RNP_ALLOC_PAGE_ORDER) * PAGE_SIZE / \ + ALIGN((rnp_rx_offset(ring) + rnp_rx_bufsz(ring) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ + RNP_RX_HWTS_OFFSET), \ + 1024)) + +#define RNP_DEFAULT_TX_WORK (128) +#define RNP_MIN_TX_WORK (32) +#define RNP_MAX_TX_WORK (512) +#define RNP_MIN_RX_WORK (32) +#define RNP_MAX_RX_WORK (512) +#define RNP_WORK_ALIGN (2) +#define RNP_MIN_TX_FRAME (1) +#define RNP_MAX_TX_FRAME (256) +#define RNP_MIN_TX_USEC (30) +#define RNP_MAX_TX_USEC (10000) + +#define RNP_MIN_RX_FRAME (1) +#define RNP_MAX_RX_FRAME (256) +#define RNP_MIN_RX_USEC (10) +#define RNP_MAX_RX_USEC (10000) + +#define RNP_MAX_TXD (4096) +#define RNP_MIN_TXD (64) + +#define ACTION_TO_MPE (130) +#define MPE_PORT (10) +#define AUTO_ALL_MODES 0 +/* TX/RX descriptor defines */ +#ifdef FEITENG +#define RNP_DEFAULT_TXD 4096 +#else +#define RNP_DEFAULT_TXD 512 +#endif + +#define RNP_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define RNP_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#ifdef FEITENG +#define RNP_DEFAULT_RXD 4096 +#else +#define RNP_DEFAULT_RXD 512 +#endif +#define RNP_MAX_RXD 4096 +#define RNP_MIN_RXD 64 + +/* flow control */ +#define RNP_MIN_FCRTL 0x40 +#define RNP_MAX_FCRTL 0x7FF80 +#define RNP_MIN_FCRTH 0x600 +#define RNP_MAX_FCRTH 0x7FFF0 +#define RNP_DEFAULT_FCPAUSE 0xFFFF +#define RNP10_DEFAULT_HIGH_WATER 0x320 +#define RNP10_DEFAULT_LOW_WATER 0x270 +#define RNP500_DEFAULT_HIGH_WATER 400 +#define RNP500_DEFAULT_LOW_WATER 256 +#define RNP_MIN_FCPAUSE 0 +#define RNP_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define RNP_RXBUFFER_256 256 /* Used for skb receive header */ 
+#define RNP_RXBUFFER_1536 1536 +#define RNP_RXBUFFER_2K 2048 +#define RNP_RXBUFFER_3K 3072 +#define RNP_RXBUFFER_4K 4096 +#define RNP_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define RNP_RXBUFFER_MAX (RNP_RXBUFFER_2K) + +#define MAX_Q_VECTORS 128 + +#define RNP_RING_COUNTS_PEER_PF 8 +#define RNP_GSO_PARTIAL_FEATURES \ + (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define RNP_RX_HDR_SIZE RNP_RXBUFFER_256 + +#define RNP_ITR_ADAPTIVE_MIN_INC 2 +#define RNP_ITR_ADAPTIVE_MIN_USECS 5 +#define RNP_ITR_ADAPTIVE_MAX_USECS 800 +#define RNP_ITR_ADAPTIVE_LATENCY 0x400 +#define RNP_ITR_ADAPTIVE_BULK 0x00 +#define RNP_ITR_ADAPTIVE_MASK_USECS \ + (RNP_ITR_ADAPTIVE_LATENCY - RNP_ITR_ADAPTIVE_MIN_INC) + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#ifdef OPTM_WITH_LPAGE +#define RNP_RX_BUFFER_WRITE (PAGE_SIZE / 2048) /* Must be power of 2 */ +#else +#define RNP_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#endif +enum rnp_tx_flags { + /* cmd_type flags */ + RNP_TX_FLAGS_HW_VLAN = 0x01, + RNP_TX_FLAGS_TSO = 0x02, + RNP_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + RNP_TX_FLAGS_CC = 0x08, + RNP_TX_FLAGS_IPV4 = 0x10, + RNP_TX_FLAGS_CSUM = 0x20, + + /* software defined flags */ + RNP_TX_FLAGS_SW_VLAN = 0x40, + RNP_TX_FLAGS_FCOE = 0x80, +}; +#ifndef RNP_MAX_VF_CNT +#define RNP_MAX_VF_CNT 64 +#endif + +#define RNP_RX_RATE_HIGH 450000 +#define RNP_RX_COAL_TIME_HIGH 128 +#define RNP_RX_SIZE_THRESH 1024 +#define RNP_RX_RATE_THRESH (1000000 / RNP_RX_COAL_TIME_HIGH) +#define RNP_SAMPLE_INTERVAL 0 +#define RNP_AVG_PKT_SMALL 256 + +#define RNP_MAX_VF_MC_ENTRIES 30 +#define RNP_MAX_VF_FUNCTIONS RNP_MAX_VF_CNT +#define RNP_MAX_VFTA_ENTRIES 128 +#define MAX_EMULATION_MAC_ADDRS 16 +#define RNP_MAX_PF_MACVLANS_N10 15 +//#define RNP_MAX_PF_MACVLANS 15 +#define PF_RING_CNT_WHEN_IOV_ENABLED 2 +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) + +enum vf_link_state { + rnp_link_state_on, + rnp_link_state_auto, + rnp_link_state_off, + +}; + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[RNP_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 vf_vlan; // vf just can set 1 vlan + u16 pf_qos; + u16 tx_rate; + int link_state; + u16 vlan_count; + u8 spoofchk_enabled; + u8 trusted; + bool promisc_mode; + unsigned long status; + unsigned int vf_api; +}; + +enum vf_state_t { + __VF_MBX_USED, +}; + +struct vf_macvlans { + struct list_head l; + int vf; + int rar_entry; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +/* now tx max 4k for one desc */ +// feiteng use 12k can get better netperf performance +#define RNP_MAX_TXD_PWR 12 +#define RNP_MAX_DATA_PER_TXD (1 << RNP_MAX_TXD_PWR) +//#define RNP_MAX_DATA_PER_TXD (12 * 1024) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), RNP_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffers + */ +struct rnp_tx_buffer { + struct rnp_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + bool gso_need_padding; + + __be16 protocol; + __be16 priv_tags; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + union { + u32 mss_len_vf_num; + struct { + __le16 mss_len; + u8 vf_num; + u8 l4_hdr_len; + }; + }; + union { + u32 inner_vlan_tunnel_len; + struct { + u8 tunnel_hdr_len; + u8 inner_vlan_l; + u8 inner_vlan_h; + u8 resv; + }; + }; + bool ctx_flag; +}; + +struct rnp_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +struct rnp_queue_stats { + u64 packets; + u64 bytes; +}; + +struct rnp_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; + u64 clean_desc; + u64 poll_count; + u64 irq_more_count; + u64 send_bytes; + u64 send_bytes_to_hw; + u64 todo_update; + u64 send_done_bytes; + u64 vlan_add; + u64 tx_next_to_clean; + u64 tx_irq_miss; + u64 tx_equal_count; + u64 tx_clean_times; + u64 tx_clean_count; +}; + +struct rnp_rx_queue_stats { + u64 driver_drop_packets; + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 alloc_rx_page; + u64 csum_err; + u64 csum_good; + u64 poll_again_count; + u64 vlan_remove; + u64 rx_next_to_clean; + u64 rx_irq_miss; + u64 rx_equal_count; + u64 rx_clean_times; + u64 rx_clean_count; +}; + +enum rnp_ring_state_t { + __RNP_RX_3K_BUFFER, + __RNP_RX_BUILD_SKB_ENABLED, + __RNP_TX_FDIR_INIT_DONE, + __RNP_TX_XPS_INIT_DONE, + __RNP_TX_DETECT_HANG, + __RNP_HANG_CHECK_ARMED, + __RNP_RX_CSUM_UDP_ZERO_ERR, + __RNP_RX_FCOE, +}; + +#define ring_uses_build_skb(ring) \ + test_bit(__RNP_RX_BUILD_SKB_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) test_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +struct rnp_ring { + struct rnp_ring *next; /* pointer to next ring in q_vector */ + struct rnp_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + void *desc; /* descriptor ring memory */ + union { + struct rnp_tx_buffer *tx_buffer_info; + struct rnp_rx_buffer *rx_buffer_info; + }; + unsigned long last_rx_timestamp; + unsigned long state; + u8 __iomem *ring_addr; + u8 __iomem *tail; + u8 __iomem *dma_int_stat; + u8 
__iomem *dma_int_mask; + u8 __iomem *dma_int_clr; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + u32 ring_flags; +#define RNP_RING_FLAG_DELAY_SETUP_RX_LEN ((u32)(1 << 0)) +#define RNP_RING_FLAG_CHANGE_RX_LEN ((u32)(1 << 1)) +#define RNP_RING_FLAG_DO_RESET_RX_LEN ((u32)(1 << 2)) +#define RNP_RING_SKIP_TX_START ((u32)(1 << 3)) +#define RNP_RING_NO_TUNNEL_SUPPORT ((u32)(1 << 4)) +#define RNP_RING_SIZE_CHANGE_FIX ((u32)(1 << 5)) +#define RNP_RING_SCATER_SETUP ((u32)(1 << 6)) +#define RNP_RING_STAGS_SUPPORT ((u32)(1 << 7)) +#define RNP_RING_DOUBLE_VLAN_SUPPORT ((u32)(1 << 8)) +#define RNP_RING_VEB_MULTI_FIX ((u32)(1 << 9)) +#define RNP_RING_IRQ_MISS_FIX ((u32)(1 << 10)) +#define RNP_RING_OUTER_VLAN_FIX ((u32)(1 << 11)) +#define RNP_RING_CHKSM_FIX ((u32)(1 << 12)) +#define RNP_RING_LOWER_ITR ((u32)(1 << 13)) + u8 pfvfnum; + + u16 count; /* amount of descriptors */ + u16 temp_count; + u16 reset_count; + + u8 queue_index; /* queue_index needed for multiqueue queue management */ + u8 rnp_queue_idx; /* the real ring,used by dma */ + u16 next_to_use; + u16 next_to_clean; + + u16 device_id; +#ifdef OPTM_WITH_LPAGE + u16 rx_page_buf_nums; + u32 rx_per_buf_mem; + struct sk_buff *skb; +#endif + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct rnp_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct rnp_tx_queue_stats tx_stats; + struct rnp_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +#define RING2ADAPT(ring) netdev_priv((ring)->netdev) + +enum rnp_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, + + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define RNP_MAX_RSS_INDICES 128 +#define RNP_MAX_RSS_INDICES_UV3P 8 +#define RNP_MAX_VMDQ_INDICES 64 +#define RNP_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +#define RNP_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (128) +#define MAX_TX_QUEUES (128) +struct rnp_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +} ____cacheline_internodealigned_in_smp; + +#define RNP_n10_VMDQ_8Q_MASK 0x78 +#define RNP_n10_VMDQ_4Q_MASK 0x7C +#define RNP_n10_VMDQ_2Q_MASK 0x7E + +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. 
+ */ +static inline unsigned int rnp_rx_bufsz(struct rnp_ring *ring) +{ + return (RNP_RXBUFFER_1536 - NET_IP_ALIGN); +} + +static inline unsigned int rnp_rx_pg_order(struct rnp_ring *ring) +{ + /* fixed 1 page */ + /* we don't support 3k buffer */ + return 0; +} +#define rnp_rx_pg_size(_ring) (PAGE_SIZE << rnp_rx_pg_order(_ring)) + +struct rnp_ring_container { + struct rnp_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + unsigned int total_packets_old; + u16 work_limit; /* total work allowed per interrupt */ + u16 count; /* total number of rings in vector */ + u16 itr; /* current ITR/MSIX vector setting for ring */ + u16 add_itr; +}; + +/* iterator for handling rings in ring container */ +#define rnp_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & RNP_FLAG_DCB_ENABLED) ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ + +#define SUPPORT_IRQ_AFFINITY_CHANGE +struct rnp_q_vector { + int old_rx_count; + int new_rx_count; + int new_tx_count; + int large_times; + int small_times; + int too_small_times; + int middle_time; + int large_times_tx; + int small_times_tx; + int too_small_times_tx; + int middle_time_tx; + struct rnp_adapter *adapter; + int v_idx; + /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr_rx; + u16 itr_tx; + struct rnp_ring_container rx, tx; + + struct napi_struct napi; + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + + u32 vector_flags; +#define RNP_QVECTOR_FLAG_IRQ_MISS_CHECK ((u32)(1 << 0)) +#define RNP_QVECTOR_FLAG_ITR_FEATURE ((u32)(1 << 1)) +#define RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS ((u32)(1 << 2)) + int irq_check_usecs; + struct hrtimer irq_miss_check_timer; + + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct rnp_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +static inline __le16 rnp_test_ext_cmd(union rnp_rx_desc *rx_desc, + const u16 stat_err_bits) +{ + return rx_desc->wb.rev1 & cpu_to_le16(stat_err_bits); +} + +#ifdef RNP_HWMON + +#define RNP_HWMON_TYPE_LOC 0 +#define RNP_HWMON_TYPE_TEMP 1 +#define RNP_HWMON_TYPE_CAUTION 2 +#define RNP_HWMON_TYPE_MAX 3 +#define RNP_HWMON_TYPE_NAME 4 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct rnp_hw *hw; + struct rnp_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[RNP_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[RNP_MAX_SENSORS * 4]; + unsigned int n_hwmon; +}; +#endif /* RNPM_HWMON */ + +/* + rnp_test_staterr - tests bits in Rx descriptor status and error fields +*/ +static inline __le16 rnp_test_staterr(union rnp_rx_desc *rx_desc, + const u16 stat_err_bits) +{ + return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits); +} + +static inline __le16 rnp_get_stat(union rnp_rx_desc *rx_desc, + const u16 stat_mask) +{ + return rx_desc->wb.cmd & cpu_to_le16(stat_mask); +} + +static inline u16 rnp_desc_unused(struct rnp_ring 
*ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +static inline u16 rnp_desc_unused_rx(struct rnp_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define RNP_RX_DESC(R, i) (&(((union rnp_rx_desc *)((R)->desc))[i])) +#define RNP_TX_DESC(R, i) (&(((struct rnp_tx_desc *)((R)->desc))[i])) +#define RNP_TX_CTXTDESC(R, i) (&(((struct rnp_tx_ctx_desc *)((R)->desc))[i])) + +#define RNP_MAX_JUMBO_FRAME_SIZE 9590 /* Maximum Supported Size 9.5KB */ +#define RNP_MIN_MTU 68 +#define RNP500_MAX_JUMBO_FRAME_SIZE 9722 /* Maximum Supported Size 9728 */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +/* default to trying for four seconds */ +#define RNP_TRY_LINK_TIMEOUT (4 * HZ) + +#define RNP_MAX_USER_PRIO (8) +#define RNP_MAX_TCS_NUM (4) +struct rnp_pfc_cfg { + u8 pfc_max; /* hardware can enabled max pfc channel */ + u8 hw_pfc_map; /* enable the prio channel bit */ + u8 pfc_num; /* at present enabled the pfc-channel num */ + u8 pfc_en; /* enabled the pfc feature or not */ +}; + +struct rnp_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct rnp_dcb_cfg { + u8 tc_num; + u16 delay; /* pause time */ + u8 dcb_en; /* enabled the dcb feature or not */ + u8 dcbx_mode; + struct rnp_pfc_cfg pfc_cfg; + struct rnp_dcb_num_tcs num_tcs; + /* statistic info */ + u64 requests[RNP_MAX_TCS_NUM]; + u64 indications[RNP_MAX_TCS_NUM]; + enum rnp_fc_mode last_lfc_mode; +}; + +struct rnp_pps_cfg { + bool available; + struct timespec64 start; + struct timespec64 period; +}; + +enum rss_func_mode_enum { + rss_func_top, + rss_func_xor, + rss_func_order, +}; + +enum outer_vlan_type_enum { + outer_vlan_type_88a8, + outer_vlan_type_9100, + outer_vlan_type_9200, + outer_vlan_type_max, +}; + +enum irq_mode_enum { + irq_mode_legency, + irq_mode_msi, + irq_mode_msix, +}; + +/* board specific private data structure */ +struct rnp_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long active_vlans_stags[BITS_TO_LONGS(VLAN_N_VID)]; + /* OS defined structs */ + u16 vf_vlan; + u16 vlan_count; + int miss_time; + struct net_device *netdev; + struct pci_dev *pdev; + bool quit_poll_thread; + struct task_struct *rx_poll_thread; + unsigned long state; + spinlock_t link_stat_lock; + + /* this var is used for auto itr modify */ + /* hw not Supported well */ + unsigned long last_moder_packets[MAX_RX_QUEUES]; + unsigned long last_moder_tx_packets; + unsigned long last_moder_bytes[MAX_RX_QUEUES]; + unsigned long last_moder_jiffies; + int last_moder_time[MAX_RX_QUEUES]; + /* only rx itr is Supported */ + int usecendcount; + u16 rx_usecs; + u16 rx_usecs_usr_set; + u16 rx_frames; + u16 usecstocount; + u16 tx_frames; + u16 tx_usecs; + u16 tx_usecs_usr_set; + u32 pkt_rate_low; + u16 rx_usecs_low; + u32 pkt_rate_high; + u16 rx_usecs_high; + u32 sample_interval; + u32 adaptive_rx_coal; + u32 adaptive_tx_coal; + u32 auto_rx_coal; + int napi_budge; + union { + int phy_addr; + struct { + u8 mod_abs : 1; + u8 fault : 1; + u8 tx_dis : 1; + u8 los : 1; + } sfp; + }; + + struct { + u32 main; + u32 pre; + u32 post; + u32 tx_boost; + } si; + + int speed; + + u8 an : 1; + u8 fec : 1; + u8 link_traing : 1; + u8 duplex : 1; + u8 rpu_inited : 1; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. 
+ */ + u32 vf_num_for_pf; + u32 flags; +#define RNP_FLAG_MSI_CAPABLE ((u32)(1 << 0)) +#define RNP_FLAG_MSI_ENABLED ((u32)(1 << 1)) +#define RNP_FLAG_MSIX_CAPABLE ((u32)(1 << 2)) +#define RNP_FLAG_MSIX_ENABLED ((u32)(1 << 3)) +#define RNP_FLAG_RX_1BUF_CAPABLE ((u32)(1 << 4)) +#define RNP_FLAG_RX_PS_CAPABLE ((u32)(1 << 5)) +#define RNP_FLAG_RX_PS_ENABLED ((u32)(1 << 6)) +#define RNP_FLAG_IN_NETPOLL ((u32)(1 << 7)) +#define RNP_FLAG_DCA_ENABLED ((u32)(1 << 8)) +#define RNP_FLAG_DCA_CAPABLE ((u32)(1 << 9)) +#define RNP_FLAG_IMIR_ENABLED ((u32)(1 << 10)) +#define RNP_FLAG_MQ_CAPABLE ((u32)(1 << 11)) +#define RNP_FLAG_DCB_ENABLED ((u32)(1 << 12)) +#define RNP_FLAG_VMDQ_CAPABLE ((u32)(1 << 13)) +#define RNP_FLAG_VMDQ_ENABLED ((u32)(1 << 14)) +#define RNP_FLAG_FAN_FAIL_CAPABLE ((u32)(1 << 15)) +#define RNP_FLAG_NEED_LINK_UPDATE ((u32)(1 << 16)) +#define RNP_FLAG_NEED_LINK_CONFIG ((u32)(1 << 17)) +#define RNP_FLAG_FDIR_HASH_CAPABLE ((u32)(1 << 18)) +#define RNP_FLAG_FDIR_PERFECT_CAPABLE ((u32)(1 << 19)) +#define RNP_FLAG_FCOE_CAPABLE ((u32)(1 << 20)) +#define RNP_FLAG_FCOE_ENABLED ((u32)(1 << 21)) +#define RNP_FLAG_SRIOV_CAPABLE ((u32)(1 << 22)) +#define RNP_FLAG_SRIOV_ENABLED ((u32)(1 << 23)) +#define RNP_FLAG_VXLAN_OFFLOAD_CAPABLE ((u32)(1 << 24)) +#define RNP_FLAG_VXLAN_OFFLOAD_ENABLE ((u32)(1 << 25)) +#define RNP_FLAG_SWITCH_LOOPBACK_EN ((u32)(1 << 26)) +#define RNP_FLAG_SRIOV_INIT_DONE ((u32)(1 << 27)) +#define RNP_FLAG_IN_IRQ ((u32)(1 << 28)) +#define RNP_FLAG_VF_INIT_DONE ((u32)(1 << 29)) +#define RNP_FLAG_LEGACY_CAPABLE ((u32)(1 << 30)) +#define RNP_FLAG_LEGACY_ENABLED ((u32)(1 << 31)) + u32 flags2; +#define RNP_FLAG2_RSC_CAPABLE ((u32)(1 << 0)) +#define RNP_FLAG2_RSC_ENABLED ((u32)(1 << 1)) +#define RNP_FLAG2_TEMP_SENSOR_CAPABLE ((u32)(1 << 2)) +#define RNP_FLAG2_TEMP_SENSOR_EVENT ((u32)(1 << 3)) +#define RNP_FLAG2_SEARCH_FOR_SFP ((u32)(1 << 4)) +#define RNP_FLAG2_SFP_NEEDS_RESET ((u32)(1 << 5)) +#define RNP_FLAG2_RESET_REQUESTED ((u32)(1 << 6)) +#define RNP_FLAG2_FDIR_REQUIRES_REINIT ((u32)(1 << 7)) +#define RNP_FLAG2_RSS_FIELD_IPV4_UDP ((u32)(1 << 8)) +#define RNP_FLAG2_RSS_FIELD_IPV6_UDP ((u32)(1 << 9)) +#define RNP_FLAG2_PTP_ENABLED ((u32)(1 << 10)) +#define RNP_FLAG2_PTP_PPS_ENABLED ((u32)(1 << 11)) +#define RNP_FLAG2_BRIDGE_MODE_VEB ((u32)(1 << 12)) +#define RNP_FLAG2_VLAN_STAGS_ENABLED ((u32)(1 << 13)) +#define RNP_FLAG2_UDP_TUN_REREG_NEEDED ((u32)(1 << 14)) +#define RNP_FLAG2_RESET_PF ((u32)(1 << 15)) +#define RNP_FLAG2_CHKSM_FIX ((u32)(1 << 16)) + + u32 priv_flags; +#define RNP_PRIV_FLAG_MAC_LOOPBACK BIT(0) +#define RNP_PRIV_FLAG_SWITCH_LOOPBACK BIT(1) +#define RNP_PRIV_FLAG_VEB_ENABLE BIT(2) +#define RNP_PRIV_FLAG_FT_PADDING BIT(3) +#define RNP_PRIV_FLAG_PADDING_DEBUG BIT(4) +#define RNP_PRIV_FLAG_PTP_DEBUG BIT(5) +#define RNP_PRIV_FLAG_SIMUATE_DOWN BIT(6) +#define RNP_PRIV_FLAG_VXLAN_INNER_MATCH BIT(7) +#define RNP_PRIV_FLAG_ULTRA_SHORT BIT(8) +#define RNP_PRIV_FLAG_DOUBLE_VLAN BIT(9) +#define RNP_PRIV_FLAG_TCP_SYNC BIT(10) +#define RNP_PRIV_FLAG_PAUSE_OWN BIT(11) +#define RNP_PRIV_FLAG_JUMBO BIT(12) +#define RNP_PRIV_FLAG_TX_PADDING BIT(13) +#define RNP_PRIV_FLAG_RX_ALL BIT(14) +#define RNP_PRIV_FLAG_REC_HDR_LEN_ERR BIT(15) +#define RNP_PRIV_FLAG_RX_FCS BIT(16) +#define RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE BIT(17) +#define RNP_PRIV_FLGA_TEST_TX_HANG BIT(18) +#define RNP_PRIV_FLAG_RX_SKIP_EN BIT(19) +#define RNP_PRIV_FLAG_TCP_SYNC_PRIO BIT(20) +#define RNP_PRIV_FLAG_REMAP_PRIO BIT(21) +#define RNP_PRIV_FLAG_8023_PRIO BIT(22) +#define RNP_PRIV_FLAG_SRIOV_VLAN_MODE BIT(23) +#define 
RNP_PRIV_FLAG_REMAP_MODE BIT(24) +#define RNP_PRIV_FLAG_LLDP_EN_STAT BIT(25) +#define RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE BIT(26) +#define RNP_PRIV_FLAG_LINK_DOWN_BEFORE BIT(27) + +#define PRIV_DATA_EN BIT(7) + int rss_func_mode; + int outer_vlan_type; + int tcp_sync_queue; + int priv_skip_count; + u64 rx_drop_status; + int drop_time; + /* Tx fast path data */ + unsigned int num_tx_queues; + unsigned int max_ring_pair_counts; + u16 tx_work_limit; + __be16 vxlan_port; + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u32 eth_queue_idx; + u32 max_rate[MAX_TX_QUEUES]; + /* TX */ + struct rnp_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + int tx_ring_item_count; + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + /* RX */ + struct rnp_ring *rx_ring[MAX_RX_QUEUES]; + int rx_ring_item_count; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + int num_other_vectors; + int irq_mode; + struct rnp_q_vector *q_vector[MAX_Q_VECTORS]; + /* used for IEEE 1588 ptp clock start */ + u8 __iomem *ptp_addr; + int gmac4; + const struct rnp_hwtimestamp *hwts_ops; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_ops; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + u32 ptp_config_value; + spinlock_t ptp_lock; /* Used to protect the SYSTIME registers. */ + u64 clk_ptp_rate; /* uint is HZ 1MHz=1 000 000Hz */ + u32 sub_second_inc; + u32 systime_flags; + struct timespec64 ptp_prev_hw_time; + unsigned int default_addend; + bool ptp_tx_en; + bool ptp_rx_en; + struct work_struct tx_hwtstamp_work; + unsigned long tx_hwtstamp_start; + unsigned long tx_hwtstamp_skipped; + unsigned long tx_timeout_factor; + u64 tx_hwtstamp_timeouts; + /*used for IEEE 1588 ptp clock end */ + /* DCB parameters */ + struct rnp_dcb_cfg dcb_cfg; + u8 prio_tc_map[RNP_MAX_USER_PRIO * 2]; + u8 num_tc; + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* true count of q_vectors for device */ + struct rnp_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + u32 test_icr; + struct rnp_ring test_tx_ring; + struct rnp_ring test_rx_ring; + /* structs defined in rnp_hw.h */ + struct rnp_hw hw; + u16 msg_enable; + struct rnp_hw_stats hw_stats; + u64 tx_busy; + u32 link_speed; + bool link_up; + bool duplex_status; + u32 link_speed_old; + bool link_up_old; + bool duplex_old; + unsigned long link_check_timeout; + struct timer_list service_timer; + struct work_struct service_task; + /* fdir relative */ + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union rnp_atr_input fdir_mask; + int fdir_mode; + int fdir_filter_count; + int layer2_count; + int tuple_5_count; + u32 fdir_pballoc; + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + u8 __iomem *io_addr_bar0; + u8 __iomem *io_addr; + u32 wol; + u16 bd_number; + u16 q_vector_off; + u16 eeprom_verh; + u16 eeprom_verl; + u16 eeprom_cap; + u16 stags_vid; + u32 sysfs_tx_ring_num; + u32 sysfs_rx_ring_num; + u32 sysfs_tx_desc_num; + u32 sysfs_rx_desc_num; + u32 interrupt_event; + u32 led_reg; + /* maintain */ + char *maintain_buf; + int maintain_buf_len; + void *maintain_dma_buf; + dma_addr_t maintain_dma_phy; + int maintain_dma_size; + int maintain_in_bytes; + /* SR-IOV */ + DECLARE_BITMAP(active_vfs, RNP_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct 
vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + u32 timer_event_accumulator; + u32 vferr_refcount; + struct kobject *info_kobj; +#ifdef RNP_SYSFS +#ifdef RNP_HWMON + struct hwmon_buff *rnp_hwmon_buff; +#endif /* RNP_HWMON */ +#endif /* RNPM_SYSFS */ +#ifdef CONFIG_DEBUG_FS + struct dentry *rnp_dbg_adapter; +#endif /*CONFIG_DEBUG_FS*/ + u8 default_up; + u8 port; /* nr_pf_port: 0 or 1 */ + u8 portid_of_card; /* port num in card*/ +#define RNP_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[RNP_MAX_RETA_ENTRIES]; +#define RNP_MAX_TC_ENTRIES 8 + u8 rss_tc_tbl[RNP_MAX_TC_ENTRIES]; + int rss_indir_tbl_num; + int rss_tc_tbl_num; + u32 rss_tbl_setup_flag; +#define RNP_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u8 rss_key[RNP_RSS_KEY_SIZE]; + u32 rss_key_setup_flag; + u32 sysfs_is_phy_ext_reg; + u32 sysfs_phy_reg; + u32 sysfs_bar4_reg_val; + u32 sysfs_bar4_reg_addr; + u32 sysfs_pcs_lane_num; + int sysfs_input_arg_cnt; + bool dma2_in_1pf; + char name[60]; + void *csl_dma_buf; + dma_addr_t csl_dma_phy; + int csl_dma_size; +}; + +struct device_list_own { + unsigned short vendor; + unsigned short device; +}; + +struct rnp_fdir_filter { + struct hlist_node fdir_node; + union rnp_atr_input filter; + u16 sw_idx; + u16 hw_idx; + u32 vf_num; + u64 action; +}; + +enum rnp_state_t { + __RNP_TESTING, + __RNP_RESETTING, + __RNP_DOWN, + __RNP_SERVICE_SCHED, + __RNP_IN_SFP_INIT, + __RNP_READ_I2C, + __RNP_PTP_TX_IN_PROGRESS, + __RNP_USE_VFINFI, + __RNP_IN_IRQ, + __RNP_REMOVE, + __RNP_SERVICE_CHECK, +}; + +struct rnp_cb { + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; + dma_addr_t dma; + u16 append_cnt; + bool page_released; +}; +#define RNP_CB(skb) ((struct rnp_cb *)(skb)->cb) + +enum rnp_boards { + board_n10_709_1pf_2x10G, + board_vu440s, + board_n10, + board_n400, +}; + +#if IS_ENABLED(CONFIG_DCB) +extern const struct dcbnl_rtnl_ops dcbnl_ops; +#endif + +extern char rnp_driver_name[]; +extern const char rnp_driver_version[]; + +extern void rnp_up(struct rnp_adapter *adapter); +extern void rnp_down(struct rnp_adapter *adapter); +extern void rnp_reinit_locked(struct rnp_adapter *adapter); +extern void rnp_reset(struct rnp_adapter *adapter); +extern void rnp_set_ethtool_ops(struct net_device *netdev); +extern int rnp_setup_rx_resources(struct rnp_ring *, struct rnp_adapter *); +extern int rnp_setup_tx_resources(struct rnp_ring *, struct rnp_adapter *); +extern void rnp_free_rx_resources(struct rnp_ring *); +extern void rnp_free_tx_resources(struct rnp_ring *); +extern void rnp_configure_rx_ring(struct rnp_adapter *, struct rnp_ring *); +extern void rnp_configure_tx_ring(struct rnp_adapter *, struct rnp_ring *); +extern void rnp_disable_rx_queue(struct rnp_adapter *adapter, + struct rnp_ring *); +extern void rnp_update_stats(struct rnp_adapter *adapter); +extern int rnp_init_interrupt_scheme(struct rnp_adapter *adapter); +extern int rnp_wol_supported(struct rnp_adapter *adapter, u16 device_id, + u16 subdevice_id); +extern void rnp_clear_interrupt_scheme(struct rnp_adapter *adapter); +extern netdev_tx_t rnp_xmit_frame_ring(struct sk_buff *, struct rnp_adapter *, + struct rnp_ring *, bool); +extern void rnp_alloc_rx_buffers(struct rnp_ring *, u16); +extern int rnp_poll(struct napi_struct *napi, int budget); +extern int ethtool_ioctl(struct ifreq *ifr); +extern void rnp_release_hw_control(struct rnp_adapter *adapter); +extern void rnp_get_hw_control(struct rnp_adapter *adapter); +extern s32 
rnp_fdir_write_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *filter, u16 hw_id, + u8 queue, bool prio_flag); +extern void rnp_set_rx_mode(struct net_device *netdev); +#ifdef CONFIG_RNP_DCB +extern void rnp_set_rx_drop_en(struct rnp_adapter *adapter); +#endif +extern int rnp_setup_tx_maxrate(struct rnp_ring *tx_ring, u64 max_rate, + int sample_interval); +extern int rnp_setup_tc(struct net_device *dev, u8 tc); +void rnp_check_options(struct rnp_adapter *adapter); +extern int rnp_open(struct net_device *netdev); +extern int rnp_close(struct net_device *netdev); +void rnp_tx_ctxtdesc(struct rnp_ring *tx_ring, u32 mss_len_vf_num, + u32 inner_vlan_tunnel_len, int ignore_vlan, bool crc_pad); +void rnp_maybe_tx_ctxtdesc(struct rnp_ring *tx_ring, + struct rnp_tx_buffer *first, u32 type_tucmd); +extern void rnp_store_reta(struct rnp_adapter *adapter); +extern void rnp_store_key(struct rnp_adapter *adapter); +extern int rnp_init_rss_key(struct rnp_adapter *adapter); +extern int rnp_init_rss_table(struct rnp_adapter *adapter); +extern void rnp_setup_dma_rx(struct rnp_adapter *adapter, int count_in_dw); +extern s32 rnp_fdir_erase_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *input, u16 hw_id); +extern u32 rnp_rss_indir_tbl_entries(struct rnp_adapter *adapter); +extern u32 rnp_tx_desc_unused_sw(struct rnp_ring *tx_ring); +extern u32 rnp_tx_desc_unused_hw(struct rnp_hw *hw, struct rnp_ring *tx_ring); +extern s32 rnp_disable_rxr_maxrate(struct net_device *netdev, u8 queue_index); +extern s32 rnp_enable_rxr_maxrate(struct net_device *netdev, u8 queue_index, + u32 maxrate); +extern u32 rnp_rx_desc_used_hw(struct rnp_hw *hw, struct rnp_ring *rx_ring); +extern void rnp_do_reset(struct net_device *netdev); +#ifdef RNP_HWMON +extern void rnp_sysfs_exit(struct rnp_adapter *adapter); +extern int rnp_sysfs_init(struct rnp_adapter *adapter); +#endif /* CONFIG_RNP_HWMON */ +#ifdef CONFIG_DEBUG_FS +extern void rnp_dbg_adapter_init(struct rnp_adapter *adapter); +extern void rnp_dbg_adapter_exit(struct rnp_adapter *adapter); +extern void rnp_dbg_init(void); +extern void rnp_dbg_exit(void); +#else +static inline void rnp_dbg_adapter_init(struct rnp_adapter *adapter) +{ +} +static inline void rnp_dbg_adapter_exit(struct rnp_adapter *adapter) +{ +} +static inline void rnp_dbg_init(void) +{ +} +static inline void rnp_dbg_exit(void) +{ +} +#endif /* CONFIG_DEBUG_FS */ +static inline struct netdev_queue *txring_txq(const struct rnp_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +void rnp_service_event_schedule(struct rnp_adapter *adapter); +extern void rnp_ptp_init(struct rnp_adapter *adapter); +extern void rnp_ptp_stop(struct rnp_adapter *adapter); +extern void rnp_ptp_overflow_check(struct rnp_adapter *adapter); +extern void rnp_ptp_rx_hang(struct rnp_adapter *adapter); +extern void __rnp_ptp_rx_hwtstamp(struct rnp_q_vector *q_vector, + struct sk_buff *skb); +static inline void rnp_ptp_rx_hwtstamp(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(!rnp_test_staterr(rx_desc, RNP_RXD_STAT_PTP))) + return; + /* + * Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. 
+ */ + rx_ring->last_rx_timestamp = jiffies; +} + +static inline int ignore_veb_vlan(struct rnp_adapter *adapter, + union rnp_rx_desc *rx_desc) +{ + if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN))) { + return 1; + } + return 0; +} + +static inline int ignore_veb_pkg_err(struct rnp_adapter *adapter, + union rnp_rx_desc *rx_desc) +{ + if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_PKG))) { + return 1; + } + return 0; +} + +int rnp_update_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct rnp_fdir_filter *input, u16 sw_idx); + +static inline int rnp_is_pf1(struct rnp_hw *hw) +{ + return !!(hw->pfvfnum & BIT(PF_BIT)); +} + +static inline int rnp_is_pf0(struct rnp_hw *hw) +{ + return !rnp_is_pf1(hw); +} + +static inline int rnp_get_fuc(struct pci_dev *pdev) +{ + return pdev->devfn; +} + +extern void rnp_service_task(struct work_struct *work); +extern void rnp_sysfs_exit(struct rnp_adapter *adapter); +extern int rnp_sysfs_init(struct rnp_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +void rnp_sriov_reinit(struct rnp_adapter *adapter); +#endif + +#define SET_BIT(n, var) (var = (var | (1 << n))) +#define CLR_BIT(n, var) (var = (var & (~(1 << n)))) +#define CHK_BIT(n, var) (var & (1 << n)) +#define RNP_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +static inline bool rnp_removed(void __iomem *addr) +{ + return unlikely(!addr); +} +#define RNP_REMOVED(a) rnp_removed(a) +int rnp_fw_msg_handler(struct rnp_adapter *adapter); + +int rnp500_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, + int bytes); + +int rnp_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, + int bytes); +#define RNPM_FW_VERSION_NEW_ETHTOOL 0x00050010 +static inline bool rnp_fw_is_old_ethtool(struct rnp_hw *hw) +{ + return hw->fw_version >= RNPM_FW_VERSION_NEW_ETHTOOL ? false : true; +} + +#endif /* _RNP_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_common.c b/drivers/net/ethernet/mucse/rnp/rnp_common.c new file mode 100644 index 000000000000..70e1686c4c84 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_common.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_common.h" +#include "rnp_mbx.h" + +unsigned int rnp_loglevel; +module_param(rnp_loglevel, uint, S_IRUSR | S_IWUSR); + + diff --git a/drivers/net/ethernet/mucse/rnp/rnp_common.h b/drivers/net/ethernet/mucse/rnp/rnp_common.h new file mode 100644 index 000000000000..5de180c61b48 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_common.h @@ -0,0 +1,383 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_COMMON_H_ +#define _RNP_COMMON_H_ + +#include +#include +#include "rnp_type.h" +#include "rnp.h" +#include "rnp_regs.h" + +struct rnp_adapter; + +#define TRACE() printk(KERN_DEBUG "==[ %s %d ] ==\n", __func__, __LINE__) + +#ifdef CONFIG_RNP_RX_DEBUG +#define rx_debug_printk printk +#define rx_buf_dump buf_dump +#define rx_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define rx_debug_printk(fmt, args...) +#define rx_buf_dump(a, b, c) +#define rx_dbg(fmt, args...) 
+#endif //CONFIG_RNP_RX_DEBUG + +#ifdef CONFIG_RNP_TX_DEBUG +#define desc_hex_dump(msg, buf, len) \ + print_hex_dump(KERN_WARNING, msg, DUMP_PREFIX_OFFSET, 16, 1, (buf), \ + (len), false) +#define rnp_skb_dump _rnp_skb_dump + +#define tx_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define desc_hex_dump(msg, buf, len) +#define rnp_skb_dump(skb, full_pkt) +#define tx_dbg(fmt, args...) +#endif //CONFIG_RNP_TX_DEBUG + +#ifdef DEBUG +#define dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define dbg(fmt, args...) +#endif + +#ifdef CONFIG_RNP_VF_DEBUG +#define vf_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define vf_dbg(fmt, args...) +#endif + +int rnp_acquire_msix_vectors(struct rnp_adapter *adapter, int vectors); + +//================= registers read/write helper ===== +#define p_rnp_wr_reg(reg, val) \ + do { \ + printk(KERN_DEBUG " wr-reg: %p <== 0x%08x \t#%-4d %s\n", \ + (reg), (val), __LINE__, __FILE__); \ + iowrite32((val), (void *)(reg)); \ + } while (0) + +static inline unsigned int prnp_rd_reg(void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + printk(KERN_DEBUG " %p => 0x%08x\n", reg, v); + return v; +} + +#ifdef IO_PRINT +static inline unsigned int rnp_rd_reg(void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + dbg(" rd-reg: %p <== 0x%08x\n", reg, v); + return v; +} +#define rnp_wr_reg(reg, val) \ + do { \ + dbg(" wr-reg: %p <== 0x%08x \t#%-4d %s\n", (reg), (val), \ + __LINE__, __FILE__); \ + iowrite32((val), (void *)(reg)); \ + } while (0) +#else +#define rnp_rd_reg(reg) readl((void *)(reg)) +#define rnp_wr_reg(reg, val) writel((val), (void *)(reg)) +#endif + +#define rd32(hw, off) rnp_rd_reg((hw)->hw_addr + (off)) +#define wr32(hw, off, val) rnp_wr_reg((hw)->hw_addr + (off), (val)) + +#define nic_rd32(nic, off) rnp_rd_reg((nic)->nic_base_addr + (off)) +#define nic_wr32(nic, off, val) rnp_wr_reg((nic)->nic_base_addr + (off), (val)) + +#define dma_rd32(dma, off) rnp_rd_reg((dma)->dma_base_addr + (off)) +#define dma_wr32(dma, off, val) rnp_wr_reg((dma)->dma_base_addr + (off), (val)) + +#define dma_ring_rd32(dma, off) rnp_rd_reg((dma)->dma_ring_addr + (off)) +#define dma_ring_wr32(dma, off, val) \ + rnp_wr_reg((dma)->dma_ring_addr + (off), (val)) + +#define eth_rd32(eth, off) rnp_rd_reg((eth)->eth_base_addr + (off)) +#define eth_wr32(eth, off, val) rnp_wr_reg((eth)->eth_base_addr + (off), (val)) + +#define mac_rd32(mac, off) rnp_rd_reg((mac)->mac_addr + (off)) +#define mac_wr32(mac, off, val) rnp_wr_reg((mac)->mac_addr + (off), (val)) +#ifdef debug_ring +static inline unsigned int rnp_rd_reg_1(int ring, u32 off, void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + printk(KERN_DEBUG "%d rd-reg: %x <== 0x%08x\n", ring, off, v); + return v; +} + +#define ring_rd32(ring, off) \ + rnp_rd_reg_1(ring->rnp_queue_idx, off, (ring)->ring_addr + (off)) +#define ring_wr32(ring, off, val) rnp_wr_reg((ring)->ring_addr + (off), (val)) +#else +#define ring_rd32(ring, off) rnp_rd_reg((ring)->ring_addr + (off)) +#define ring_wr32(ring, off, val) rnp_wr_reg((ring)->ring_addr + (off), (val)) +#endif + +#define pwr32(hw, off, val) p_rnp_wr_reg((hw)->hw_addr + (off), (val)) + +#define rnp_mbx_rd(hw, off) rnp_rd_reg((hw)->ring_msix_base + (off)) +#define rnp_mbx_wr(hw, off, val) rnp_wr_reg((hw)->ring_msix_base + (off), val) + +static inline void hw_queue_strip_rx_vlan(struct rnp_hw *hw, u8 ring_num, + bool enable) +{ + u32 reg = 
RNP_ETH_VLAN_VME_REG(ring_num / 32); + u32 offset = ring_num % 32; + u32 data = rd32(hw, reg); + + if (enable == true) + data |= (1 << offset); + else + data &= ~(1 << offset); + wr32(hw, reg, data); +} + +#define rnp_set_reg_bit(hw, reg_def, bit) \ + do { \ + u32 reg = reg_def; \ + u32 value = rd32(hw, reg); \ + dbg("before set %x %x\n", reg, value); \ + value |= (0x01 << bit); \ + dbg("after set %x %x\n", reg, value); \ + wr32(hw, reg, value); \ + } while (0) + +#define rnp_clr_reg_bit(hw, reg_def, bit) \ + do { \ + u32 reg = reg_def; \ + u32 value = rd32(hw, reg); \ + dbg("before clr %x %x\n", reg, value); \ + value &= (~(0x01 << bit)); \ + dbg("after clr %x %x\n", reg, value); \ + wr32(hw, reg, value); \ + } while (0) + +#define rnp_vlan_filter_on(hw) \ + rnp_set_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30) +#define rnp_vlan_filter_off(hw) \ + rnp_clr_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30) + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((NETIF_MSG_##nlevel & adapter->msg_enable) ? \ + (void)(netdev_printk(KERN_##klevel, adapter->netdev, fmt, \ + ##args)) : \ + NULL) + +//==== log helper === +#ifdef HW_DEBUG +#define hw_dbg(hw, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args) +#define eth_dbg(eth, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args) +#else +#define hw_dbg(hw, fmt, args...) +#define eth_dbg(hw, fmt, args...) +#endif + +//#define RNP_DEBUG_OPEN +#ifdef RNP_DEBUG_OPEN +#define rnp_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) +#else +#define rnp_dbg(fmt, args...) +#endif +#define rnp_info(fmt, args...) printk(KERN_DEBUG "rnp-info: " fmt, ##args) +#define rnp_warn(fmt, args...) printk(KERN_DEBUG "rnp-warn: " fmt, ##args) +#define rnp_err(fmt, args...) printk(KERN_ERR "rnp-err : " fmt, ##args) + +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ##arg) + +#define e_dev_info(format, arg...) dev_info(&adapter->pdev->dev, format, ##arg) +#define e_dev_warn(format, arg...) dev_warn(&adapter->pdev->dev, format, ##arg) +#define e_dev_err(format, arg...) 
dev_err(&adapter->pdev->dev, format, ##arg) + +#ifdef CONFIG_RNP_TX_DEBUG +static inline void buf_dump_line(const char *msg, int line, void *buf, int len) +{ + int i, offset = 0; + int msg_len = 1024; + u8 msg_buf[1024]; + u8 *ptr = (u8 *)buf; + + offset += snprintf(msg_buf + offset, msg_len, + "=== %s #%d line:%d buf:%p==\n000: ", msg, len, line, + buf); + + for (i = 0; i < len; ++i) { + if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) { + printk(KERN_DEBUG "%s\n", msg_buf); + offset = 0; + } + + if ((i != 0) && (i % 16) == 0) { + offset += snprintf(msg_buf + offset, msg_len, + "\n%03x: ", i); + } + offset += snprintf(msg_buf + offset, msg_len, "%02x ", ptr[i]); + } + + offset += snprintf(msg_buf + offset, msg_len, "\n"); + printk(KERN_DEBUG "%s\n", msg_buf); +} +#else +#define buf_dump_line(msg, line, buf, len) +#endif + +static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size) +{ + return cpu_to_le64(((u64)vlan_cmd << 32) | ((u64)mac_ip_len << 16) | + ((u64)size)); +} + +static inline void buf_dump(const char *msg, void *buf, int len) +{ + int i, offset = 0; + int msg_len = 1024; + u8 msg_buf[1024]; + u8 *ptr = (u8 *)buf; + + offset += snprintf(msg_buf + offset, msg_len, + "=== %s #%d ==\n000: ", msg, len); + + for (i = 0; i < len; ++i) { + if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) { + printk(KERN_DEBUG "%s\n", msg_buf); + offset = 0; + } + + if ((i != 0) && (i % 16) == 0) { + offset += snprintf(msg_buf + offset, msg_len, + "\n%03x: ", i); + } + offset += snprintf(msg_buf + offset, msg_len, "%02x ", ptr[i]); + } + + offset += snprintf(msg_buf + offset, msg_len, "\n=== done ==\n"); + printk(KERN_DEBUG "%s\n", msg_buf); +} + +#ifndef NO_SKB_DUMP +static inline void _rnp_skb_dump(const struct sk_buff *skb, bool full_pkt) +{ + static atomic_t can_dump_full = ATOMIC_INIT(5); +#ifdef DEBUG + struct skb_shared_info *sh = skb_shinfo(skb); +#endif + struct net_device *dev = skb->dev; + //struct sock *sk = skb->sk; + struct sk_buff *list_skb; + bool has_mac, has_trans; + int headroom, tailroom; + int i, len, seg_len; + const char *level = KERN_WARNING; + + if (full_pkt) + full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0; + + if (full_pkt) + len = skb->len; + else + len = min_t(int, skb->len, MAX_HEADER + 128); + + headroom = skb_headroom(skb); + tailroom = skb_tailroom(skb); + + has_mac = skb_mac_header_was_set(skb); + has_trans = skb_transport_header_was_set(skb); + + dbg("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" + "mac=(%d,%d) net=(%d,%d) trans=%d\n" + "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" + "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" + "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", + level, skb->len, headroom, skb_headlen(skb), tailroom, + has_mac ? skb->mac_header : -1, + has_mac ? (skb->network_header - skb->mac_header) : -1, + skb->network_header, has_trans ? skb_network_header_len(skb) : -1, + has_trans ? 
skb->transport_header : -1, sh->tx_flags, sh->nr_frags, + sh->gso_size, sh->gso_type, sh->gso_segs, skb->csum, skb->ip_summed, + skb->csum_complete_sw, skb->csum_valid, skb->csum_level, skb->hash, + skb->sw_hash, skb->l4_hash, ntohs(skb->protocol), skb->pkt_type, + skb->skb_iif); + + if (dev) + dbg("%sdev name=%s feat=0x%pNF\n", level, dev->name, + &dev->features); + + seg_len = min_t(int, skb_headlen(skb), len); + if (seg_len) + print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 16, + 1, skb->data, seg_len, false); + len -= seg_len; + + for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 p_len; + struct page *p; + u8 *vaddr; + + p = skb_frag_address(frag); + p_len = skb_frag_size(frag); + seg_len = min_t(int, p_len, len); + vaddr = kmap_atomic(p); + print_hex_dump(level, "skb frag: ", DUMP_PREFIX_OFFSET, 16, + 1, vaddr, seg_len, false); + kunmap_atomic(vaddr); + len -= seg_len; + if (!len) + break; + } + + if (full_pkt && skb_has_frag_list(skb)) { + dbg("skb fraglist:\n"); + skb_walk_frags(skb, list_skb) _rnp_skb_dump(list_skb, true); + } +} +#endif + +enum RNP_LOG_EVT { + LOG_MBX_IN, + LOG_MBX_OUT, + LOG_MBX_MSG_IN, + LOG_MBX_MSG_OUT, + LOG_LINK_EVENT, + LOG_ADPT_STAT, + LOG_MBX_ABLI, + LOG_MBX_LINK_STAT, + LOG_MBX_IFUP_DOWN, + LOG_MBX_LOCK, + LOG_ETHTOOL, + LOG_PHY, + +}; + +#define MII_BUSY 0x00000001 +#define MII_WRITE 0x00000002 +#define MII_DATA_MASK GENMASK(15, 0) + +extern unsigned int rnp_loglevel; + +#define rnp_logd(evt, fmt, args...) \ + do { \ + if (BIT(evt) & rnp_loglevel) { \ + printk(KERN_DEBUG fmt, ##args); \ + } \ + } while (0) + +#endif /* _RNP_COMMON_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_dcb.c b/drivers/net/ethernet/mucse/rnp/rnp_dcb.c new file mode 100644 index 000000000000..65580d02a0a6 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_dcb.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include + +#ifdef CONFIG_DCB +#include "rnp.h" +#include "rnp_dcb.h" +#include "rnp_sriov.h" +#include "rnp_common.h" + +static void rnp_config_prio_map(struct rnp_adapter *adapter, u8 pfc_map) +{ + int i, j; + u32 prio_map = 0; + u8 port = adapter->port; + u8 *prio_tc = adapter->prio_tc_map; + void __iomem *ioaddr = adapter->hw.hw_addr; + u8 num_tc = adapter->num_tc; + + for (i = 0; i < num_tc; i++) { + if (i > RNP_MAX_TCS_NUM) + break; + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + dbg("prio_tc[%d]==%d tc_num[%d] pfc_map 0x%.2x\n", j, + prio_tc[j], i, pfc_map); + if ((prio_tc[j] == i) && (pfc_map & BIT(j))) { + dbg("match rule tc_num %d prio_%d\n", i, j); + prio_map |= (i << (2 * j)); + dbg("match prio_tc change to 0x%.2x\n", + prio_map); + } + } + } + /* config untage pkt fifo */ + /* we just have four tc fifo and one fifo is must belong to untage-pkt + * so untage need map to the remain tc fifio + */ + prio_map |= i << RNP_FC_UNCTAGS_MAP_OFFSET; + prio_map |= (1 << 30) | (1 << 31); + rnp_wr_reg(ioaddr + RNP_FC_PORT_PRIO_MAP(port), prio_map); + dbg("tc_prio_map[%d] 0x%.2x\n", i, prio_map); + + /* enable port prio_map config */ + rnp_wr_reg(ioaddr + RNP_FC_EN_CONF_AVAILABLE, 1); +} + +static int rnp_dcb_hw_pfc_config(struct rnp_adapter *adapter, u8 pfc_map) +{ + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + void __iomem *ioaddr = adapter->hw.hw_addr; + u8 i = 0, j = 0; + u32 reg = 0; + u8 num_tc = adapter->num_tc; + + if (!(adapter->flags & RNP_FLAG_DCB_ENABLED) || + adapter->num_rx_queues <= 1) { + dev_warn(&adapter->pdev->dev, "%s DCB_FLAG%d", + "don't support pfc when rx quene less" + "than 1 or disable dcb feature \n", + adapter->flags & RNP_FLAG_DCB_ENABLED); + return 0; + } + /* 1.Enable Receive Priority Flow Control */ + reg = RNP_RX_RFE | RNP_PFCE; + rnp_wr_reg(ioaddr + RNP_MAC_RX_FLOW_CTRL, reg); + /* 2.Configure which port will in pfc mode*/ + reg = rnp_rd_reg(ioaddr + RNP_FC_PORT_ENABLE); + /* 3.For Now just support two port Version So just enabled + * PF port 0 to enable flow control + */ + reg |= 1 << adapter->port; + rnp_wr_reg(ioaddr + RNP_FC_PORT_ENABLE, reg); + + for (i = 0; i < num_tc; i++) { + int enabled = 0; + + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + if ((adapter->prio_tc_map[j] == i) && + (pfc_map & BIT(j))) { + enabled = 1; + dcb->pfc_cfg.hw_pfc_map |= BIT(j); + dcb->pfc_cfg.pfc_num++; + break; + } + } + if (enabled) { + /* 4.Enable Transmit Priority Flow Control */ + reg = RNP_TX_TFE | + (RNP_PAUSE_28_SLOT_TIME + << RNP_FC_TX_PLTH_OFFSET) | + (RNP_DEFAULT_PAUSE_TIME << RNP_FC_TX_PT_OFFSET); + + rnp_wr_reg(ioaddr + RNP_MAC_Q0_TX_FLOW_CTRL(j), reg); + } + } + /* the below configure can just use default config */ + /* 5.config for pri_map */ + rnp_config_prio_map(adapter, pfc_map); + /* 6.Configure PFC Rx high/low thresholds per TC */ + + /* 7.Configure Rx full/empty thresholds per tc*/ + + /* 8.Configure pause time (3 TCs per register) */ + /* 9.Configure flow control pause low threshold value */ + + return 0; +} + +__maybe_unused static int rnp_dcb_hw_fc_enable(struct rnp_adapter *adapter) +{ + void __iomem *ioaddr = adapter->hw.hw_addr; + + /* 1. Enabled Transmit Flow Control */ + rnp_wr_reg(ioaddr + RNP_MAC_Q0_TX_FLOW_CTRL(0), RNP_TX_TFE); + /* 2. Enabled Recvive Flow Control */ + rnp_wr_reg(ioaddr + RNP_MAC_RX_FLOW_CTRL, RNP_RX_RFE); + /* 3. Configure Fc Pause Time And Pause Low Threshold + * just use default value? 
+ */ + return 0; +} + +static int rnp_dcbnl_getpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + u8 i = 0, j = 0; + + memset(pfc, 0, sizeof(*pfc)); + pfc->pfc_cap = dcb->pfc_cfg.pfc_max; + /* Pfc setting is based on TC */ + for (i = 0; i < adapter->num_tc; i++) { + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + if ((adapter->prio_tc_map[j] == i) && + (dcb->pfc_cfg.hw_pfc_map & BIT(i))) + pfc->pfc_en |= BIT(j); + } + } + /* do we need to get the pfc statistic*/ + /* 1. get the tc channel send and recv pfc pkts*/ + /* + *for (i = 0; i < TSRN10_MAX_TC_NUM; i++) { + * pfc->requests[i] = dcb->requests[i]; + * pfc->indications[i] = dcb->indications[i]; + } + */ + + return 0; +} + +/* rnp Support IEEE 802.3 flow-control and + * Priority base flow control (PFC) + */ +static u8 rnp_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap) +{ + struct rnp_adapter *priv = netdev_priv(net_dev); + + switch (capid) { + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcb_cfg.dcbx_mode; + break; + default: + *cap = false; + break; + } + + return 0; +} + +static u8 rnp_dcbnl_getstate(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + return !!(adapter->flags & RNP_FLAG_DCB_ENABLED); +} + +static u8 rnp_dcbnl_setstate(struct net_device *netdev, u8 state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int err = 0; + + /* verify there is something to do, if not then exit */ + if (!state == !(adapter->flags & RNP_FLAG_DCB_ENABLED)) + goto out; + + err = rnp_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pfc_tcs : 0); +out: + return !!err; +} + +static u8 rnp_dcbnl_getdcbx(struct net_device *net_dev) +{ + struct rnp_adapter *adapter = netdev_priv(net_dev); + + return adapter->dcb_cfg.dcbx_mode; +} + +static u8 rnp_dcbnl_setdcbx(struct net_device *net_dev, u8 mode) +{ + struct rnp_adapter *adapter = netdev_priv(net_dev); + + adapter->dcb_cfg.dcbx_mode = mode; + + return 0; + return (mode != (adapter->dcb_cfg.dcbx_mode)) ? 
1 : 0; +} + +static int rnp_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PFC: + if (adapter->dcb_cfg.num_tcs.pfc_tcs > + RNP_MAX_TCS_NUM) { + rval = -EINVAL; + break; + } + *num = adapter->dcb_cfg.num_tcs.pfc_tcs; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +static int rnp_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PFC: + adapter->dcb_cfg.num_tcs.pfc_tcs = num; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +static int rnp_dcb_parse_config(struct rnp_dcb_cfg *dcb, struct ieee_pfc *pfc) +{ + u8 j = 0, pfc_en_num = 0, pfc_map = 0; + + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + if ((pfc->pfc_en & BIT(j))) { + pfc_map |= BIT(j); + pfc_en_num++; + } + } + dcb->pfc_cfg.pfc_num = pfc_en_num; + dcb->pfc_cfg.hw_pfc_map = pfc_map; + dbg("pfc_map 0x%.2x pfc->pfc_en 0x%.2x\n", pfc_map, pfc->pfc_en); + /* tc resource rebuild */ + /* we need to decide tx_ring bind to tc 4 fifo-mac*/ + return pfc_map; +} + +static int rnp_dcbnl_setpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + u8 pfc_map = 0; + + dbg("%s:%d pfc enabled %d\n", __func__, __LINE__, pfc->pfc_en); + if (pfc->pfc_en) { + /*set PFC Priority mask */ + pfc_map = rnp_dcb_parse_config(dcb, pfc); + rnp_dcb_hw_pfc_config(adapter, pfc_map); + } else { + /* set PAUSE mode */ + // fc is controlled by ethtool + //rnp_dcb_hw_fc_enable(adapter); + } + + return 0; +} + +static u8 rnp_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_pfc_cfg *pfc_cfg = &adapter->dcb_cfg.pfc_cfg; + + return pfc_cfg->pfc_en; +} + +static void rnp_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + adapter->dcb_cfg.pfc_cfg.pfc_en = state; +} + +const struct dcbnl_rtnl_ops rnp_dcbnl_ops = { + /*DCB PFC*/ + /*IEEE*/ + .ieee_getpfc = rnp_dcbnl_getpfc, + .ieee_setpfc = rnp_dcbnl_setpfc, + .getcap = rnp_dcbnl_getcap, + .setdcbx = rnp_dcbnl_setdcbx, + .getdcbx = rnp_dcbnl_getdcbx, + .getnumtcs = rnp_dcbnl_getnumtcs, + .setnumtcs = rnp_dcbnl_setnumtcs, + + /*CEE*/ + .getstate = rnp_dcbnl_getstate, + .setstate = rnp_dcbnl_setstate, + + .getpfcstate = rnp_dcbnl_getpfcstate, + .setpfcstate = rnp_dcbnl_setpfcstate, +}; + +int rnp_dcb_init(struct net_device *dev, struct rnp_adapter *adapter) +{ + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + struct rnp_hw *hw = &adapter->hw; + + if (hw->hw_type != rnp_hw_n10) + return 0; + + dcb->dcb_en = false; + dcb->pfc_cfg.pfc_max = RNP_MAX_TCS_NUM; + dcb->num_tcs.pfc_tcs = RNP_MAX_TCS_NUM; + dcb->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + dev->dcbnl_ops = &rnp_dcbnl_ops; + + return 0; +} +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_dcb.h b/drivers/net/ethernet/mucse/rnp/rnp_dcb.h new file mode 100644 index 000000000000..23941b81eef5 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_dcb.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
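+ *
+ * Flow-control register bit definitions and pause-time/threshold
+ * constants shared with rnp_dcb.c.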
*/ + +#ifndef __RNP_DCB_H__ +#define __RNP_DCB_H__ +#include "rnp.h" + +enum rnp_pause_low_thrsh { + RNP_PAUSE_4_SLOT_TIME = 0, + RNP_PAUSE_28_SLOT_TIME, + RNP_PAUSE_36_SLOT_TIME, + RNP_PAUSE_144_SLOT_TIME, + RNP_PAUSE_256_SLOT_TIME, +}; +/*Rx Flow Ctrl */ +#define RNP_RX_RFE BIT(0) /* Receive Flow Control Enable */ +#define RNP_UP BIT(1) /* Unicast Pause Packet Detect */ +#define RNP_PFCE BIT(8) /* Priority Based Flow Control Enable. */ + +/*Tx Flow Ctrl */ +#define RNP_TX_FCB BIT(0) /* Tx Flow Control Busy. */ +#define RNP_TX_TFE BIT(1) /* Transmit Flow Control Enable.*/ +#define RNP_TX_PLT GENMASK(6, 4) /* Pause Low Threshold. */ +#define RNP_DZPQ BIT(7) /*Disable Zero-Quanta Pause.*/ +#define RNP_PT GENMASK(31, 16) /* Pause Time. */ + +#define RNP_DEFAULT_PAUSE_TIME (0x100) /* */ +#define RNP_FC_TX_PLTH_OFFSET (4) /* Pause Low Threshold */ +#define RNP_FC_TX_PT_OFFSET (16) /* Pause Time */ + +#define RNP_DCB_MAX_TCS_NUM (4) +#define RNP_DCB_MAX_PFC_NUM (4) + +struct rnp_adapter; +int rnp_dcb_init(struct net_device *dev, struct rnp_adapter *adapter); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c b/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c new file mode 100644 index 000000000000..e47758b58bcf --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include + +#include "rnp.h" +#include "rnp_type.h" + +#ifdef HAVE_RNP_DEBUG_FS +static struct dentry *rnp_dbg_root; +static char rnp_dbg_reg_ops_buf[256] = ""; + +#define bus_to_virt phys_to_virt + +static int rnp_dbg_csl_open(struct inode *inode, struct file *filp) +{ + void *dma_buf = NULL; + dma_addr_t dma_phy; + int err, bytes = 4096; + struct rnp_adapter *adapter; + const char *name; + struct rnp_hw *hw; + + if (inode->i_private) { + filp->private_data = inode->i_private; + } else { + return -EIO; + } + + adapter = filp->private_data; + + if (adapter == NULL) { + return -EIO; + } + + if (adapter->csl_dma_buf != NULL) { + return 0; + } + hw = &adapter->hw; + name = adapter->name; + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC); + if (!dma_buf) { + e_dev_err("%s: no dma buf", name); + return -ENOMEM; + } + memset(dma_buf, 0, bytes); + + adapter->csl_dma_buf = dma_buf; + adapter->csl_dma_phy = dma_phy; + adapter->csl_dma_size = bytes; + + err = rnp_mbx_ddr_csl_enable(hw, 1, dma_phy, bytes); + if (err) { + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + adapter->csl_dma_buf = NULL; + return -EIO; + } + + return 0; +} + +static int rnp_dbg_csl_release(struct inode *inode, struct file *filp) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->csl_dma_buf) { + rnp_mbx_ddr_csl_enable(hw, 0, 0, 0); + dma_free_coherent(&hw->pdev->dev, adapter->csl_dma_size, + adapter->csl_dma_buf, adapter->csl_dma_phy); + adapter->csl_dma_buf = NULL; + } + + return 0; +} + +static int rnp_dbg_csl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long length; + struct rnp_adapter *adapter = filp->private_data; + void *dma_buf = adapter->csl_dma_buf; + dma_addr_t dma_phy = adapter->csl_dma_phy; + int dma_bytes = adapter->csl_dma_size; + int ret = 0; + + length = (unsigned long)(vma->vm_end - vma->vm_start); + + if (length > dma_bytes) { + return -EIO; + } + if (vma->vm_pgoff == 0) { + ret = dma_mmap_coherent(&adapter->pdev->dev, vma, dma_buf, dma_phy, length); + } else { + vma->vm_page_prot = 
pgprot_noncached(vma->vm_page_prot);
+ ret = remap_pfn_range(
+ vma, vma->vm_start,
+ PFN_DOWN(virt_to_phys(bus_to_virt(dma_phy))) +
+ vma->vm_pgoff,
+ length, vma->vm_page_prot);
+ }
+
+ if (ret < 0) {
+ printk(KERN_ERR "%s: remap failed (%d)\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct file_operations rnp_dbg_csl_fops = {
+ .owner = THIS_MODULE,
+ .open = rnp_dbg_csl_open,
+ .release = rnp_dbg_csl_release,
+ .mmap = rnp_dbg_csl_mmap,
+};
+
+static ssize_t rnp_dbg_eth_info_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rnp_adapter *adapter = filp->private_data;
+ char *buf = NULL;
+ int len;
+
+ if (adapter == NULL) {
+ return -EIO;
+ }
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL, "bd:%d port%d %s %s\n", adapter->bd_number,
+ 0, adapter->netdev->name, pci_name(adapter->pdev));
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+ kfree(buf);
+ return len;
+}
+
+static const struct file_operations rnp_dbg_eth_info_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = rnp_dbg_eth_info_read,
+};
+
+static ssize_t rnp_dbg_mbx_cookies_info_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rnp_adapter *adapter = filp->private_data;
+ char *buf = NULL;
+ int len, i;
+ struct mbx_req_cookie_pool *cookie_pool = &(adapter->hw.mbx.cookie_pool);
+ struct mbx_req_cookie *cookie;
+ int free_cnt = 0, wait_timout_cnt = 0, alloced_cnt = 0;
+
+ if (adapter == NULL) {
+ return -EIO;
+ }
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ for (i = 0; i < MAX_COOKIES_ITEMS; i++) {
+ cookie = &(cookie_pool->cookies[i]);
+ if (cookie->stat == COOKIE_FREE) {
+ free_cnt++;
+ } else if (cookie->stat == COOKIE_FREE_WAIT_TIMEOUT) {
+ wait_timout_cnt++;
+ } else if (cookie->stat == COOKIE_ALLOCED) {
+ alloced_cnt++;
+ }
+ }
+
+ buf = kasprintf(GFP_KERNEL, "pool items:cur:%d total: %d.
free:%d wait_free:%d alloced:%d \n", cookie_pool->next_idx, MAX_COOKIES_ITEMS, + free_cnt, wait_timout_cnt, alloced_cnt); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static const struct file_operations rnp_dbg_mbx_cookies_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_mbx_cookies_info_read, +}; + +/** + * rnp_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name, + rnp_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * rnp_dbg_reg_ops_write - write into reg_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(rnp_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(rnp_dbg_reg_ops_buf, + sizeof(rnp_dbg_reg_ops_buf) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + rnp_dbg_reg_ops_buf[len] = '\0'; + + if (strncmp(rnp_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + + cnt = sscanf(&rnp_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + if (reg >= 0x30000000) { + rnp_mbx_reg_write(hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, + value); + } else { + rnp_wr_reg(hw->hw_addr + reg, value); + value = rnp_rd_reg(hw->hw_addr + reg); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, + value); + } + } else { + e_dev_info("write \n"); + } + } else if (strncmp(rnp_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + + cnt = sscanf(&rnp_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + if (reg >= 0x30000000) { + value = rnp_mbx_fw_reg_read(hw, reg); + } else { + value = rnp_rd_reg(hw->hw_addr + reg); + } + snprintf(rnp_dbg_reg_ops_buf, + sizeof(rnp_dbg_reg_ops_buf), "0x%08x: 0x%08x", + reg, value); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", rnp_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + return count; +} + +static const struct file_operations rnp_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_reg_ops_read, + .write = rnp_dbg_reg_ops_write, +}; + +static char rnp_dbg_netdev_ops_buf[256] = ""; + +/** + * rnp_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: 
file position offset + **/ +static ssize_t rnp_dbg_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name, + rnp_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * rnp_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(rnp_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(rnp_dbg_netdev_ops_buf, + sizeof(rnp_dbg_netdev_ops_buf) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + rnp_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(rnp_dbg_netdev_ops_buf, "stat", 4) == 0) { + rnp_info("adapter->stat=0x%lx\n", adapter->state); + rnp_info("adapter->tx_timeout_count=%d\n", + adapter->tx_timeout_count); + } else if (strncmp(rnp_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, + UINT_MAX); + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", rnp_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static const struct file_operations rnp_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_netdev_ops_read, + .write = rnp_dbg_netdev_ops_write, +}; + +static ssize_t rnp_dbg_netdev_temp_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + char *buf; + int len; + int temp = 0, voltage = 0; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + temp = rnp_mbx_get_temp(hw, &voltage); + + buf = kasprintf(GFP_KERNEL, "%s: temp: %d oC voltage:%d mV\n", + adapter->name, temp, voltage); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} +static const struct file_operations rnp_dbg_netdev_temp = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_netdev_temp_read, +}; + +/** + * rnp_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void rnp_dbg_adapter_init(struct rnp_adapter *adapter) +{ + const char *name = adapter->name; + struct dentry *pfile; + + adapter->rnp_dbg_adapter = debugfs_create_dir(name, rnp_dbg_root); + if (adapter->rnp_dbg_adapter) { + pfile = debugfs_create_file("reg_ops", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", 
name); + + pfile = debugfs_create_file("temp", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_netdev_temp); + if (!pfile) + e_dev_err("debugfs temp for %s failed\n", name); + if (rnp_is_pf1(&adapter->hw) == 0) { + pfile = debugfs_create_file_unsafe("csl", 0755, + adapter->rnp_dbg_adapter, + adapter, &rnp_dbg_csl_fops); + if (!pfile) + e_dev_err("debugfs csl failed\n"); + } + pfile = debugfs_create_file("info", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_eth_info_fops); + if (!pfile) + e_dev_err("debugfs info failed\n"); + pfile = debugfs_create_file("mbx_cookies_info", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_mbx_cookies_info_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for mbx_cookies_info failed\n"); + } else { + e_dev_err("debugfs entry for %s failed\n", name); + } +} + +/** + * rnp_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void rnp_dbg_adapter_exit(struct rnp_adapter *adapter) +{ + debugfs_remove_recursive(adapter->rnp_dbg_adapter); + adapter->rnp_dbg_adapter = NULL; +} + +/** + * rnp_dbg_init - start up debugfs for the driver + **/ +void rnp_dbg_init(void) +{ + rnp_dbg_root = debugfs_create_dir(rnp_driver_name, NULL); + if (rnp_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * rnp_dbg_exit - clean out the driver's debugfs entries + **/ +void rnp_dbg_exit(void) +{ + debugfs_remove_recursive(rnp_dbg_root); +} +#endif /* HAVE_RNP_DEBUG_FS */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c new file mode 100644 index 000000000000..8f1c7b236e45 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c @@ -0,0 +1,1927 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
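+ *
+ * Ethtool support for the rnp PF driver: Wake-on-LAN, self-test,
+ * coalesce and ring parameters, SFP/QSFP module EEPROM access and
+ * ntuple flow-steering callbacks.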
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rnp.h" +#include "rnp_sriov.h" +#include "rnp_phy.h" +#include "rnp_mbx_fw.h" +#include "rnp_ethtool.h" + +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol) +{ + struct rnp_hw *hw = &adapter->hw; + int retval = 0; + + /* WOL not supported for all devices */ + if (!rnp_wol_supported(adapter, hw->device_id, + hw->subsystem_device_id)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +void rnp_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + wol->wolopts = 0; + + /* we now can't wol */ + if (rnp_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* Only support magic */ + if (RNP_WOL_GET_SUPPORTED(adapter)) + wol->supported = hw->wol_supported; + if (RNP_WOL_GET_STATUS(adapter)) + wol->wolopts |= hw->wol_supported; +} + +int rnp_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (!!wol->wolopts) { + if ((wol->wolopts & (~hw->wol_supported)) || + !RNP_WOL_GET_SUPPORTED(adapter)) + return -EOPNOTSUPP; + } + + RNP_WOL_SET_SUPPORTED(adapter); + if (wol->wolopts & WAKE_MAGIC) { + RNP_WOL_SET_SUPPORTED(adapter); + RNP_WOL_SET_STATUS(adapter); + } else { + RNP_WOL_CLEAR_STATUS(adapter); + } + + rnp_mbx_wol_set(hw, RNP_WOL_GET_STATUS(adapter)); + device_set_wakeup_enable(&adapter->pdev->dev, !!wol->wolopts); + + return 0; +} + +/* ethtool register test data */ +struct rnp_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
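+ *
+ * A PATTERN_TEST entry, for example, walks the patterns 0x5A5A5A5A,
+ * 0xA5A5A5A5, 0x00000000 and 0xFFFFFFFF through the register (masked
+ * by the entry's write/mask fields), fails if the value read back
+ * differs, and restores the original register value afterwards.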
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default n10 register test */ +static struct rnp_reg_test reg_test_n10[] = { + //{RNP_DMA_CONFIG, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF}, + /* + * { RNP_FCRTL_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_FCRTH_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + * { RNP_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + * { RNP_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + * { RNP_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + * { RNP_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + * { RNP_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + * { RNP_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + * { RNP_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + * { RNP_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, + * { RNP_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + */ + { .reg = 0 }, +}; + +/* write and read check */ +static bool reg_pattern_test(struct rnp_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, + 0xFFFFFFFF }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = readl(adapter->hw.hw_addr + reg); + printk("before reg %x is %x\n", reg, before); + writel((test_pattern[pat] & write), + (adapter->hw.hw_addr + reg)); + val = readl(adapter->hw.hw_addr + reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (test_pattern[pat] & write & mask)); + *data = reg; + writel(before, adapter->hw.hw_addr + reg); + return 1; + } + writel(before, adapter->hw.hw_addr + reg); + } + return 0; +} + +static bool reg_set_and_check(struct rnp_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + before = readl(adapter->hw.hw_addr + reg); + writel((write & mask), (adapter->hw.hw_addr + reg)); + val = readl(adapter->hw.hw_addr + reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + writel(before, (adapter->hw.hw_addr + reg)); + return 1; + } + writel(before, (adapter->hw.hw_addr + reg)); + return 0; +} + +static bool rnp_reg_test(struct rnp_adapter *adapter, u64 *data) +{ + struct rnp_reg_test *test; + struct rnp_hw *hw = &adapter->hw; + u32 i; + + if (RNP_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_n10; + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
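+ * The table is terminated by the { .reg = 0 } sentinel entry, which
+ * ends the while (test->reg) loop below.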
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static int rnp_link_test(struct rnp_adapter *adapter, u64 *data) +{ + struct rnp_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + bool duplex; + *data = 0; + + hw->ops.check_link(hw, &link_speed, &link_up, &duplex, true); + if (!link_up) + *data = 1; + return *data; +} + +void rnp_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__RNP_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + netdev_warn( + netdev, "%s", + "offline diagnostic is not supported when VFs " + "are present\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__RNP_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* bringing adapter down disables SFP+ optics */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (rnp_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e_info(hw, "register testing starting\n"); + if (rnp_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[1] = 0; + data[2] = 0; + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. 
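+ * The loopback result slot data[3] is still reported (as 0/passed)
+ * so the self-test output keeps its fixed layout.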
+ */ + if (adapter->flags & + (RNP_FLAG_SRIOV_ENABLED | RNP_FLAG_VMDQ_ENABLED)) { + e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + data[3] = 0; +skip_loopback: + /* clear testing bit and return adapter to previous state */ + clear_bit(__RNP_TESTING, &adapter->state); + } else { + e_info(hw, "online testing starting\n"); + + /* if adapter is down, SFP+ optics will be disabled */ + if (!if_running && hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* Online tests */ + if (rnp_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__RNP_TESTING, &adapter->state); + } + + /* if adapter was down, ensure SFP+ optics are disabled again */ + if (!if_running && hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +int rnp_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int err; + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + err = rnp_mbx_get_lane_stat(hw); + if (err) + return err; + + if (adapter->fec) { + fecparam->active_fec = ETHTOOL_FEC_BASER; + } else { + fecparam->active_fec = ETHTOOL_FEC_NONE; + } + fecparam->fec = ETHTOOL_FEC_BASER; + + return 0; +} + +int rnp_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (fecparam->fec & ETHTOOL_FEC_OFF) { + return rnp_set_lane_fun(hw, LANE_FUN_FEC, 0, 0, 0, 0); + } else if (fecparam->fec & ETHTOOL_FEC_BASER) { + return rnp_set_lane_fun(hw, LANE_FUN_FEC, 1, 0, 0, 0); + } + + return -EINVAL; +} + +u32 rnp_get_msglevel(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +void rnp_set_msglevel(struct net_device *netdev, u32 data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +int rnp_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + rnp_mbx_led_set(hw, 1); + return 2; + + case ETHTOOL_ID_ON: + rnp_mbx_led_set(hw, 2); + break; + + case ETHTOOL_ID_OFF: + rnp_mbx_led_set(hw, 3); + break; + + case ETHTOOL_ID_INACTIVE: + rnp_mbx_led_set(hw, 0); + break; + } + return 0; +} + +int rnp_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + /* For we just set it as pf0 */ + if (!(adapter->flags2 & RNP_FLAG2_PTP_ENABLED)) + return ethtool_op_get_ts_info(dev, info); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + dbg("phc_index is %d\n", info->phc_index); + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + 
BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static unsigned int rnp_max_channels(struct rnp_adapter *adapter) +{ + unsigned int max_combined; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows 2 queue on the PF */ + max_combined = hw->sriov_ring_limit; + } else if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + /* dcb on max support 32 */ + max_combined = 32; + } else { + /* support up to 16 queues with RSS */ + max_combined = adapter->max_ring_pair_counts; + /* should not large than q_vectors ? */ + } + + return max_combined; +} + +void rnp_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = rnp_max_channels(adapter); + + /* report info for other vector */ + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; +} + +int rnp_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + return -EINVAL; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + dbg("call set channels %d %d %d \n", count, ch->rx_count, ch->tx_count); + dbg("max channels %d\n", rnp_max_channels(adapter)); + /* verify the number of channels does not exceed hardware limits */ + if (count > rnp_max_channels(adapter)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + if (count > adapter->max_ring_pair_counts) + count = adapter->max_ring_pair_counts; + adapter->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return rnp_setup_tc(dev, netdev_get_num_tc(dev)); +} + +int rnp_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + u8 module_id, diag_supported; + int rc; + + rnp_mbx_get_lane_stat(hw); + + if (hw->is_sgmii) + return -EIO; + + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, SFF_MODULE_ID_OFFSET, 1, + &module_id); + if (rc || module_id == 0xff) { + return -EIO; + } + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, SFF_DIAG_SUPPORT_OFFSET, + 1, &diag_supported); + if (!rc) { + switch (module_id) { + case SFF_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + if (!diag_supported) + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP: + case SFF_MODULE_ID_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + printk("%s: module_id:0x%x 
diag_supported:0x%x\n", + __func__, module_id, diag_supported); + rc = -EOPNOTSUPP; + break; + } + } + + return rc; +} + +int rnp_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + u16 start = eeprom->offset, length = eeprom->len; + int rc = 0; + + rnp_mbx_get_lane_stat(hw); + + if (hw->is_sgmii) + return -EIO; + + memset(data, 0, eeprom->len); + + /* Read A0 portion of the EEPROM */ + if (start < ETH_MODULE_SFF_8436_LEN) { + if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) + length = ETH_MODULE_SFF_8436_LEN - start; + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, start, length, + data); + if (rc) + return rc; + start += length; + data += length; + length = eeprom->len - length; + } + + /* Read A2 portion of the EEPROM */ + if (length) { + start -= ETH_MODULE_SFF_8436_LEN; + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA2, start, length, + data); + } + + return rc; +} + +void rnp_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + /* all ring share the same status*/ + + ring->rx_max_pending = RNP_MAX_RXD; + ring->tx_max_pending = RNP_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_item_count; + ring->tx_pending = adapter->tx_ring_item_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +int rnp_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + /* sriov mode can't change ring param */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + return -EINVAL; + } + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if ((ring->tx_pending < RNP_MIN_TXD) || + (ring->tx_pending > RNP_MAX_TXD) || + (ring->rx_pending < RNP_MIN_RXD) || + (ring->rx_pending > RNP_MAX_RXD)) { + netdev_info( + netdev, + "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, RNP_MIN_TXD, + RNP_MAX_TXD); + return -EINVAL; + } + + new_tx_count = clamp_t(u32, ring->tx_pending, RNP_MIN_TXD, RNP_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, RNP_REQ_TX_DESCRIPTOR_MULTIPLE); + new_rx_count = clamp_t(u32, ring->rx_pending, RNP_MIN_RXD, RNP_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, RNP_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_item_count) && + (new_rx_count == adapter->rx_ring_item_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__RNP_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_item_count = new_tx_count; + adapter->rx_ring_item_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct rnp_ring)); + if (!temp_ring) { + err = -ENOMEM; + goto 
clear_reset; + } + memset(temp_ring, 0x00, i * sizeof(struct rnp_ring)); + + if (new_rx_count != adapter->rx_ring_item_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->rx_ring[i]->reset_count = new_rx_count; + if (!(adapter->rx_ring[i]->ring_flags & + RNP_RING_SIZE_CHANGE_FIX)) + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_CHANGE_RX_LEN; + } + } + rnp_down(adapter); + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_item_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct rnp_ring)); + + temp_ring[i].count = new_tx_count; + err = rnp_setup_tx_resources(&temp_ring[i], adapter); + if (err) { + while (i) { + i--; + rnp_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + rnp_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct rnp_ring)); + } + + adapter->tx_ring_item_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_item_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct rnp_ring)); + /* setup ring count */ + if (!(adapter->rx_ring[i]->ring_flags & + RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) { + temp_ring[i].count = new_rx_count; + } else { + /* setup temp count */ + temp_ring[i].count = temp_ring[i].temp_count; + adapter->rx_ring[i]->reset_count = new_rx_count; + new_rx_count = temp_ring[i].temp_count; + } + err = rnp_setup_rx_resources(&temp_ring[i], adapter); + if (err) { + while (i) { + i--; + rnp_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + rnp_free_rx_resources(adapter->rx_ring[i]); + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct rnp_ring)); + } + adapter->rx_ring_item_count = new_rx_count; + } + +err_setup: + rnp_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__RNP_RESETTING, &adapter->state); + return err; +} + +int rnp_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + rnp_mbx_get_dump(&adapter->hw, 0, NULL, 0); + + dump->flag = adapter->hw.dump.flag; + dump->len = adapter->hw.dump.len; + dump->version = adapter->hw.dump.version; + + return 0; +} + +int rnp_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + int err; + struct rnp_adapter *adapter = netdev_priv(netdev); + + err = rnp_mbx_get_dump(&adapter->hw, dump->flag, buffer, dump->len); + if (err) + return err; + + dump->flag = adapter->hw.dump.flag; + dump->len = adapter->hw.dump.len; + dump->version = adapter->hw.dump.version; + + return 0; +} + +int rnp_set_dump(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + rnp_mbx_set_dump(&adapter->hw, dump->flag); + + return 0; +} + +int rnp_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + coal->use_adaptive_tx_coalesce = adapter->adaptive_tx_coal; + 
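+ /* Illustrative mapping to the standard ethtool interface (device
+ * name is a placeholder): "ethtool -c ethX" reports the values
+ * filled in here, while "ethtool -C ethX rx-usecs 8 tx-frames 32"
+ * (with adaptive-rx/tx left on, as rnp_set_coalesce() requires)
+ * updates adapter->rx_usecs_usr_set / adapter->tx_frames via
+ * rnp_set_coalesce().
+ */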
coal->tx_coalesce_usecs = adapter->tx_usecs_usr_set; + coal->tx_coalesce_usecs_irq = 0; + coal->tx_max_coalesced_frames = adapter->tx_frames; + coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + + coal->use_adaptive_rx_coalesce = adapter->adaptive_rx_coal; + coal->rx_coalesce_usecs_irq = 0; + coal->rx_coalesce_usecs = adapter->rx_usecs_usr_set; + coal->rx_max_coalesced_frames = adapter->rx_frames; + coal->rx_max_coalesced_frames_irq = adapter->napi_budge; + + /* this is not support */ + coal->pkt_rate_low = 0; + coal->pkt_rate_high = 0; + coal->rx_coalesce_usecs_low = 0; + coal->rx_max_coalesced_frames_low = 0; + coal->tx_coalesce_usecs_low = 0; + coal->tx_max_coalesced_frames_low = 0; + coal->rx_coalesce_usecs_high = 0; + coal->rx_max_coalesced_frames_high = 0; + coal->tx_coalesce_usecs_high = 0; + coal->tx_max_coalesced_frames_high = 0; + coal->rate_sample_interval = 0; + + return 0; +} + +int rnp_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + int reset = 0; + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 value; + /* we don't support close tx and rx coalesce */ + if (!(ec->use_adaptive_tx_coalesce) || !(ec->use_adaptive_rx_coalesce)) + return -EINVAL; + + /* check coalesce frame irq */ + if ((ec->tx_max_coalesced_frames_irq < RNP_MIN_TX_WORK) || + (ec->tx_max_coalesced_frames_irq > RNP_MAX_TX_WORK)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_max_coalesced_frames_irq, RNP_MIN_TX_WORK, + RNP_MAX_TX_WORK); + value = ALIGN(value, RNP_WORK_ALIGN); + + if (adapter->tx_work_limit != value) { + reset = 1; + adapter->tx_work_limit = value; + } + + if ((ec->tx_max_coalesced_frames < RNP_MIN_TX_FRAME) || + (ec->tx_max_coalesced_frames > RNP_MAX_TX_FRAME)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_max_coalesced_frames, RNP_MIN_TX_FRAME, + RNP_MAX_TX_FRAME); + if (adapter->tx_frames != value) { + reset = 1; + adapter->tx_frames = value; + } + + /* check vlaue */ + if ((ec->tx_coalesce_usecs < RNP_MIN_TX_USEC) || + (ec->tx_coalesce_usecs > RNP_MAX_TX_USEC)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_coalesce_usecs, RNP_MIN_TX_USEC, + RNP_MAX_TX_USEC); + if (adapter->tx_usecs != value) { + reset = 1; + adapter->tx_usecs = value; + adapter->tx_usecs_usr_set = value; + } + + if ((ec->rx_max_coalesced_frames_irq < RNP_MIN_RX_WORK) || + (ec->rx_max_coalesced_frames_irq > RNP_MAX_RX_WORK)) + return -EINVAL; + + value = clamp_t(u32, ec->rx_max_coalesced_frames_irq, RNP_MIN_RX_WORK, + RNP_MAX_RX_WORK); + value = ALIGN(value, RNP_WORK_ALIGN); + + if (adapter->napi_budge != value) { + reset = 1; + adapter->napi_budge = value; + } + + if ((ec->rx_max_coalesced_frames < RNP_MIN_RX_FRAME) || + (ec->rx_max_coalesced_frames > RNP_MAX_RX_FRAME)) + return -EINVAL; + + value = clamp_t(u32, ec->rx_max_coalesced_frames, RNP_MIN_RX_FRAME, + RNP_MAX_RX_FRAME); + if (adapter->rx_frames != value) { + reset = 1; + adapter->rx_frames = value; + } + + /* check vlaue */ + if ((ec->rx_coalesce_usecs < RNP_MIN_RX_USEC) || + (ec->rx_coalesce_usecs > RNP_MAX_RX_USEC)) + return -EINVAL; + + value = clamp_t(u32, ec->rx_coalesce_usecs, RNP_MIN_RX_USEC, + RNP_MAX_RX_USEC); + + if (adapter->rx_usecs != value) { + reset = 1; + adapter->rx_usecs = value; + adapter->rx_usecs_usr_set = value; + } + /* other setup is not supported */ + if ((ec->pkt_rate_low) || (ec->pkt_rate_high) || + (ec->rx_coalesce_usecs_low) || (ec->rx_max_coalesced_frames_low) || + (ec->tx_coalesce_usecs_low) || 
(ec->tx_max_coalesced_frames_low) || + (ec->rx_coalesce_usecs_high) || + (ec->rx_max_coalesced_frames_high) || + (ec->tx_coalesce_usecs_high) || + (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval) || + (ec->tx_coalesce_usecs_irq) || (ec->rx_coalesce_usecs_irq)) + return -EINVAL; + + if (reset) + return rnp_setup_tc(netdev, netdev_get_num_tc(netdev)); + + return 0; +} + +static int rnp_get_rss_hash_opts(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on rnp */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + return 0; +} + +static int rnp_get_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node2; + struct rnp_fdir_filter *rule = NULL; + + /* report total rule count */ + cmd->data = adapter->fdir_pballoc; + + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) + if (fsp->location <= rule->sw_idx) + break; + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + if (adapter->fdir_mode == fdir_mode_tuple5) { + fsp->h_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac[0]; + fsp->m_u.usr_ip4_spec.proto = 0xff; + } else { + fsp->h_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac[0] & + rule->filter.formatted.inner_mac_mask[0]; + fsp->m_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac_mask[0]; + } + break; + case RNP_ATR_FLOW_TYPE_ETHER: + fsp->flow_type = ETHER_FLOW; + /* support proto and mask only in this mode */ + fsp->h_u.ether_spec.h_proto = rule->filter.layer2_formate.proto; + fsp->m_u.ether_spec.h_proto = 0xffff; + break; + default: + return -EINVAL; + } + if (rule->filter.formatted.flow_type != RNP_ATR_FLOW_TYPE_ETHER) { + /* not support mask in tuple 5 mode */ + if (adapter->fdir_mode == fdir_mode_tuple5) { + fsp->h_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.psrc = 0xffff; + fsp->m_u.tcp_ip4_spec.pdst = 0xffff; + fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; + fsp->m_u.tcp_ip4_spec.ip4dst = 0xffffffff; + } else { + fsp->h_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port & 
+ rule->filter.formatted.src_port_mask; + fsp->m_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port_mask; + fsp->h_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port & + rule->filter.formatted.dst_port_mask; + fsp->m_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port_mask; + + fsp->h_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip[0] & + rule->filter.formatted.src_ip_mask[0]; + fsp->m_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip_mask[0]; + + fsp->h_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip[0] & + rule->filter.formatted.dst_ip_mask[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip_mask[0]; + } + } + + /* record action */ + if (rule->action == RNP_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else { + int add = 0; + + if (rule->action & 0x1) + add = 1; + + if (rule->vf_num != 0) { + fsp->ring_cookie = ((u64)rule->vf_num << 32) | (add); + } else { + fsp->ring_cookie = rule->action; + } + } + + return 0; +} + +static int rnp_get_ethtool_fdir_all(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hlist_node *node2; + struct rnp_fdir_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = adapter->fdir_pballoc; + + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +int rnp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + struct rnp_hw *hw = &adapter->hw; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* we fix 2 when srio on */ + cmd->data = hw->sriov_ring_limit; + } else { + cmd->data = adapter->num_rx_queues; + } + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = rnp_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = rnp_get_ethtool_fdir_all(adapter, cmd, (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + ret = rnp_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} +#define UDP_RSS_FLAGS \ + (RNP_FLAG2_RSS_FIELD_IPV4_UDP | RNP_FLAG2_RSS_FIELD_IPV6_UDP) +static int rnp_set_rss_hash_opt(struct rnp_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + return 0; +} + +static int rnp_flowspec_to_flow_type(struct rnp_adapter *adapter, + struct ethtool_rx_flow_spec *fsp, + uint8_t *flow_type, + struct rnp_fdir_filter *input) +{ + int i; + int ret = 1; + /* not support flow_ext */ + 
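+ /* Illustrative examples of how standard "ethtool -N/-U" flow specs
+ * map to the RNP_ATR_FLOW_TYPE_* values resolved below (addresses
+ * and protocol numbers are placeholders):
+ * flow-type tcp4 ... -> RNP_ATR_FLOW_TYPE_TCPV4
+ * flow-type ether proto 0x0800 ... -> RNP_ATR_FLOW_TYPE_ETHER
+ * flow-type ip4 dst-ip 192.168.0.1 ... -> RNP_ATR_FLOW_TYPE_IPV4
+ * Flow specs carrying FLOW_EXT fields are rejected here.
+ */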
if (fsp->flow_type & FLOW_EXT) + return 0; + + switch (fsp->flow_type & ~FLOW_EXT) { + /* todo ipv6 is not considered*/ + case TCP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_SCTPV4; + break; + case ETHER_FLOW: + /* layer 2 flow */ + *flow_type = RNP_ATR_FLOW_TYPE_ETHER; + input->filter.layer2_formate.proto = + fsp->h_u.ether_spec.h_proto; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = RNP_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = RNP_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = RNP_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + /* if only ip4 no src no dst*/ + if (!(fsp->h_u.tcp_ip4_spec.ip4src) && + (!(fsp->h_u.tcp_ip4_spec.ip4dst))) { + /* if have no l4 proto, use layer2 */ + *flow_type = RNP_ATR_FLOW_TYPE_ETHER; + input->filter.layer2_formate.proto = + htons(0x0800); + } else { + /* may only src or dst input */ + *flow_type = RNP_ATR_FLOW_TYPE_IPV4; + } + break; + default: + /* other unknown l4 proto ip */ + *flow_type = RNP_ATR_FLOW_TYPE_IPV4; + } + break; + default: + return 0; + } + /* layer2 flow */ + if (*flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + if (adapter->layer2_count < 0) { + e_err(drv, "layer2 count full\n"); + ret = 0; + } + /* should check dst mac filter */ + /* should check src dst all zeros */ + for (i = 0; i < ETH_ALEN; i++) { + if (fsp->h_u.ether_spec.h_source[i] != 0) + ret = 0; + + if (fsp->h_u.ether_spec.h_dest[i] != 0) + ret = 0; + + if (fsp->m_u.ether_spec.h_source[i] != 0) + ret = 0; + + if (fsp->m_u.ether_spec.h_dest[i] != 0) + ret = 0; + } + } else if (*flow_type == RNP_ATR_FLOW_TYPE_IPV4) { + if (adapter->fdir_mode == fdir_mode_tuple5) { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tuple 5 count full\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.ip4src != 0) && + (fsp->m_u.usr_ip4_spec.ip4src != 0xffffffff)) { + e_err(drv, "ip src mask error\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.ip4dst != 0) && + (fsp->m_u.usr_ip4_spec.ip4dst != 0xffffffff)) { + e_err(drv, "ip dst mask error\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.proto != 0) && + (fsp->m_u.usr_ip4_spec.proto != 0xff)) { + e_err(drv, "ip l4 proto mask error\n"); + ret = 0; + } + } else { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tcam count full\n"); + ret = 0; + } + /* tcam mode can support mask */ + } + /* not support l4_4_bytes */ + if ((fsp->h_u.usr_ip4_spec.l4_4_bytes != 0)) { + e_err(drv, "ip l4_4_bytes error\n"); + ret = 0; + } + } else { + if (adapter->fdir_mode == fdir_mode_tuple5) { + /* should check mask all ff */ + if (adapter->tuple_5_count < 0) { + e_err(drv, "tuple 5 count full\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.ip4src != 0) && + (fsp->m_u.tcp_ip4_spec.ip4src != 0xffffffff)) { + e_err(drv, "src mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.ip4dst != 0) && + (fsp->m_u.tcp_ip4_spec.ip4dst != 0xffffffff)) { + e_err(drv, "dst mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.psrc != 0) && + (fsp->m_u.tcp_ip4_spec.psrc != 0xffff)) { + e_err(drv, "src port mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.pdst != 0) && + (fsp->m_u.tcp_ip4_spec.pdst != 0xffff)) { + e_err(drv, "src port mask error\n"); + ret = 0; + } + } else { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tcam count full\n"); + ret = 0; + } + } + /* l4 tos is not supported */ + if (fsp->h_u.tcp_ip4_spec.tos != 0) { + 
e_err(drv, "tos error\n"); + ret = 0; + } + } + + return ret; +} + +int rnp_update_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct rnp_fdir_filter *input, u16 sw_idx) +{ + struct rnp_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct rnp_fdir_filter *rule, *parent; + bool deleted = false; + u16 hw_idx_layer2 = 0; + u16 hw_idx_tuple5 = 0; + + s32 err; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + /* only clear hw enable bits */ + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(adapter->netdev) && (!input)) { + err = rnp_fdir_erase_perfect_filter(adapter->fdir_mode, + hw, &rule->filter, + rule->hw_idx); + if (err) + return -EINVAL; + } + + adapter->fdir_filter_count--; + if (rule->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + adapter->layer2_count++; + } else { + adapter->tuple_5_count++; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + deleted = true; + } + + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. + */ + if (!input) + return deleted ? 0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, &parent->fdir_node); + else + hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list); + + /* we must setup all */ + /* should first earase all tcam and l2 rule */ + if (adapter->fdir_mode != fdir_mode_tcam) { + hw->ops.clr_all_layer2_remapping(hw); + /* earase all layer2 */ + } else { + hw->ops.clr_all_tuple5_remapping(hw); + /* earase all tcam */ + } + + /* setup hw */ + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) { + if (netif_running(adapter->netdev)) { + /* hw_idx */ + if (rule->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_ETHER) { + rule->hw_idx = hw_idx_layer2++; + } else { + rule->hw_idx = hw_idx_tuple5++; + } + + if ((!rule->vf_num) && + (rule->action != ACTION_TO_MPE)) { + int idx = rule->action; + + err = rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &rule->filter, + rule->hw_idx, + (rule->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + adapter->rx_ring[idx] + ->rnp_queue_idx, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? + true : + false); + } else { + /* ACTION_TO_MPE use this */ + err = rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &rule->filter, + rule->hw_idx, + (rule->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + rule->action, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? 
+ true : + false); + } + if (err) + return -EINVAL; + } + } + + /* update counts */ + adapter->fdir_filter_count++; + if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + adapter->layer2_count--; + } else { + adapter->tuple_5_count--; + } + return 0; +} + +/* used to dbg flo_spec info */ +static void print_fsp(struct ethtool_rx_flow_spec *fsp) +{ + int i; + + switch (fsp->flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + for (i = 0; i < ETH_ALEN; i++) + dbg("src 0x%02x\n", fsp->h_u.ether_spec.h_source[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("dst 0x%02x\n", fsp->h_u.ether_spec.h_dest[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("src mask 0x%02x\n", + fsp->m_u.ether_spec.h_source[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("dst mask 0x%02x\n", fsp->m_u.ether_spec.h_dest[i]); + + dbg("proto type is %x\n", fsp->h_u.ether_spec.h_proto); + + break; + + default: + dbg("flow type is %x\n", fsp->flow_type); + + dbg("ip4 src ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4src); + dbg("ip4 src ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4src); + + dbg("ip4 dst ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4dst); + dbg("ip4 dst ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4dst); + + dbg("ip4 src port is %x\n", fsp->h_u.tcp_ip4_spec.psrc); + dbg("ip4 src port mask is %x\n", fsp->m_u.tcp_ip4_spec.psrc); + + dbg("ip4 dst port is %x\n", fsp->h_u.tcp_ip4_spec.pdst); + dbg("ip4 dst port mask is %x\n", fsp->m_u.tcp_ip4_spec.pdst); + + dbg("l4 proto type is %x\n", fsp->h_u.usr_ip4_spec.proto); + break; + } +} + +static int rnp_add_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct rnp_fdir_filter *input; + struct rnp_hw *hw = &adapter->hw; + /* we don't support mask */ + int err; + int vf_fix = 0; + + u32 ring_cookie_high = fsp->ring_cookie >> 32; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + vf_fix = 1; + + if (!(adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* + * Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
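+ *
+ * Rules are typically installed from user space with something like
+ * (interface name is only an example):
+ *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 3
+ * which reaches this function through the ETHTOOL_SRXCLSRLINS command.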
+ */ + /* is sriov is on, allow vf and queue */ + /* vf should smaller than num_vfs */ + print_fsp(fsp); + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (((ring_cookie_high & 0xff) > adapter->num_vfs) || + ((fsp->ring_cookie & (u64)0xffffffff) >= + hw->sriov_ring_limit))) + /* return error if not mpe */ + if (fsp->ring_cookie != ACTION_TO_MPE) + return -EINVAL; + + } else { + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= adapter->num_rx_queues)) { + /* ACTION_TO_MPE to mpe special */ + if (fsp->ring_cookie != ACTION_TO_MPE) + return -EINVAL; + } + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= (adapter->fdir_pballoc)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!rnp_flowspec_to_flow_type( + adapter, fsp, &input->filter.formatted.flow_type, input)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + } else if (input->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_IPV4) { + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = + fsp->h_u.usr_ip4_spec.ip4src; + input->filter.formatted.src_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = + fsp->h_u.usr_ip4_spec.ip4dst; + input->filter.formatted.dst_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4dst; + input->filter.formatted.src_port = 0; + input->filter.formatted.src_port_mask = 0xffff; + input->filter.formatted.dst_port = 0; + input->filter.formatted.dst_port_mask = 0xffff; + input->filter.formatted.inner_mac[0] = + fsp->h_u.usr_ip4_spec.proto; + input->filter.formatted.inner_mac_mask[0] = + fsp->m_u.usr_ip4_spec.proto; + } else { + /* tcp or udp or sctp*/ + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = + fsp->h_u.tcp_ip4_spec.ip4src; + input->filter.formatted.src_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = + fsp->h_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.dst_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + input->filter.formatted.src_port_mask = + fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + input->filter.formatted.dst_port_mask = + fsp->m_u.tcp_ip4_spec.pdst; + } + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = RNP_FDIR_DROP_QUEUE; + else { + input->vf_num = (fsp->ring_cookie >> 32) & 0xff; + if (input->vf_num) { + /* in vf mode input->action is the real queue nums */ + if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_MODE) { + input->action = (fsp->ring_cookie & 0xffffffff); + } else { + input->action = + 2 * (((fsp->ring_cookie >> 32) & 0xff) + + vf_fix - 1) + + (fsp->ring_cookie & 0xffffffff); + } + } else + input->action = fsp->ring_cookie; + } + + spin_lock(&adapter->fdir_perfect_lock); + err = rnp_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out: + kfree(input); + return -EINVAL; +} + +static int rnp_del_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec 
*fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = rnp_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +int rnp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = rnp_add_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = rnp_del_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = rnp_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +u32 rnp_rss_indir_size(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + return rnp_rss_indir_tbl_entries(adapter); +} + +u32 rnp_get_rxfh_key_size(struct net_device *netdev) +{ + return RNP_RSS_KEY_SIZE; +} + +void rnp_get_reta(struct rnp_adapter *adapter, u32 *indir) +{ + int i, reta_size = rnp_rss_indir_tbl_entries(adapter); + u16 rss_m = adapter->ring_feature[RING_F_RSS].mask; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + rss_m = adapter->ring_feature[RING_F_RSS].indices - 1; + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i] & rss_m; +} + +int rnp_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (hfunc) { + switch (adapter->rss_func_mode) { + case rss_func_top: + *hfunc = ETH_RSS_HASH_TOP; + break; + case rss_func_xor: + *hfunc = ETH_RSS_HASH_XOR; + break; + case rss_func_order: + *hfunc = ETH_RSS_HASH_TOP; + break; + } + } + + if (indir) + rnp_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, rnp_get_rxfh_key_size(netdev)); + + return 0; +} + +enum { + PART_FW, + PART_CFG, + PART_MACSN, + PART_PCSPHY, + PART_PXE, +}; + +#define UCFG_OFF 0x41000 +#define UCFG_SZ (4096) +#define PXE_OFF 0x4a000 +#define PXE_SZ (512 * 1024) + +static int rnp_flash_firmware(struct rnp_adapter *adapter, int region, + const u8 *data, int bytes) +{ + struct rnp_hw *hw = &adapter->hw; + + switch (region) { + case PART_FW: { + if (*((u32 *)(data + 28)) != 0xA51BBEAF) { + return -EINVAL; + } + if (bytes > PXE_OFF) { + int err; + int wbytes_seg1 = bytes - PXE_OFF; + if (wbytes_seg1 > PXE_SZ) { + wbytes_seg1 = PXE_SZ; + } + + err = rnp_fw_update(hw, PART_FW, data, UCFG_OFF); + if (err) { + return err; + } + /* skip ucfg flush only pxe */ + err = rnp_fw_update(hw, PART_PXE, data + PXE_OFF, + wbytes_seg1); + if (err) { + return err; + } + return 0; + } + break; + } + case PART_CFG: { + if (*((u32 *)(data)) != 0x00010cf9) { + return -EINVAL; + } + break; + } + case PART_MACSN: { + break; + } + case PART_PCSPHY: { + if (*((u16 *)(data)) != 0x081d) { + return -EINVAL; + } + break; + } + case PART_PXE: { + if ((*((u16 *)(data)) != 0xaa55) && + (*((u16 *)(data)) != 0x5a4d)) { + return -EINVAL; + } + break; + } + default: { + return -EINVAL; + } + } + return rnp_fw_update(hw, region, data, bytes); +} + +static int rnp_flash_firmware_from_file(struct net_device *dev, + struct rnp_adapter *adapter, int region, + const char *filename) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, filename, &dev->dev); + if (rc != 0) { + netdev_err(dev, "Error %d requesting firmware file: %s\n", rc, + filename); + return rc; + } + + rc = rnp_flash_firmware(adapter, region, fw->data, fw->size); + release_firmware(fw); + return rc; +} + +int 
rnp_flash_device(struct net_device *dev, struct ethtool_flash *flash) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + if (IS_VF(adapter->hw.pfvfnum)) { + netdev_err(dev, + "flashdev not supported from a virtual function\n"); + return -EINVAL; + } + + return rnp_flash_firmware_from_file(dev, adapter, flash->region, + flash->data); +} +static int rnp_rss_indir_tbl_max(struct rnp_adapter *adapter) +{ + if (adapter->hw.rss_type == rnp_rss_uv3p) + return 8; + else if (adapter->hw.rss_type == rnp_rss_uv440) + return 128; + else if (adapter->hw.rss_type == rnp_rss_n10) + return 128; + else + return 128; +} + +int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + if ((indir) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + return -EINVAL; + } + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + rnp_rss_indir_tbl_max(adapter)); + + /* Allow max 2 queues w/ SR-IOV. */ + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (max_queues > 2)) + max_queues = 2; + + /* Verify user input. */ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + /* store rss tbl */ + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + rnp_store_reta(adapter); + } + + /* Fill out the rss hash key */ + if (key) { + memcpy(adapter->rss_key, key, rnp_get_rxfh_key_size(netdev)); + rnp_store_key(adapter); + } + + return 0; +} + +void rnp_set_ethtool_ops(struct net_device *netdev) +{ +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h new file mode 100644 index 000000000000..929c06f6e833 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_ETHTOOL_H_ +#define _RNP_ETHTOOL_H_ + +enum { NETDEV_STATS, RNP_STATS }; + +struct rnp_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* rnp allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. 
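+ * (With this driver both macros below simply read the netdev
+ * real_num_rx_queues/real_num_tx_queues counts.)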
+ */ +#define RNP_NUM_RX_QUEUES netdev->real_num_rx_queues +#define RNP_NUM_TX_QUEUES netdev->real_num_tx_queues + +#define RNP_NETDEV_STAT(_net_stat) \ + { \ + .stat_string = #_net_stat, \ + .sizeof_stat = \ + sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ + } + +#define RNP_HW_STAT(_name, _stat) \ + { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct rnp_adapter, _stat), \ + .stat_offset = offsetof(struct rnp_adapter, _stat) \ + } + +struct rnp_tx_queue_ring_stat { + u64 hw_head; + u64 hw_tail; + u64 sw_to_clean; + u64 sw_to_next_to_use; +}; + +struct rnp_rx_queue_ring_stat { + u64 hw_head; + u64 hw_tail; + u64 sw_to_use; + u64 sw_to_clean; +}; + +#define RNP_QUEUE_STATS_LEN \ + (RNP_NUM_TX_QUEUES * \ + (sizeof(struct rnp_tx_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_tx_queue_ring_stat) / sizeof(u64)) + \ + RNP_NUM_RX_QUEUES * \ + (sizeof(struct rnp_rx_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_rx_queue_ring_stat) / sizeof(u64))) + +#define RNP_STATS_LEN \ + (RNP_GLOBAL_STATS_LEN + RNP_HWSTRINGS_STATS_LEN + RNP_QUEUE_STATS_LEN) + +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol); +void rnp_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol); +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol); +int rnp_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol); +void rnp_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data); +void rnp_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int rnp_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int rnp_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam); +int rnp_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam); +u32 rnp_get_msglevel(struct net_device *netdev); +void rnp_set_msglevel(struct net_device *netdev, u32 data); +int rnp_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state); +int rnp_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info); +void rnp_get_channels(struct net_device *dev, struct ethtool_channels *ch); +int rnp_set_channels(struct net_device *dev, struct ethtool_channels *ch); +int rnp_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo); +int rnp_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data); +void rnp_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack); +int rnp_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack); +int rnp_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump); +int rnp_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer); +int rnp_set_dump(struct net_device *netdev, struct ethtool_dump *dump); +int rnp_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +int rnp_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack 
*extack); +int rnp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs); +int rnp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); +u32 rnp_rss_indir_size(struct net_device *netdev); +u32 rnp_get_rxfh_key_size(struct net_device *netdev); +void rnp_get_reta(struct rnp_adapter *adapter, u32 *indir); +int rnp_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); +int rnp_flash_device(struct net_device *dev, struct ethtool_flash *flash); +int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc); +#define RNP_WOL_GET_SUPPORTED(adapter) (!!(adapter->wol & GENMASK(3, 0))) +#define RNP_WOL_GET_STATUS(adapter) (!!(adapter->wol & GENMASK(7, 4))) +#define RNP_WOL_SET_SUPPORTED(adapter) (adapter->wol |= BIT(0)) +#define RNP_WOL_SET_STATUS(adapter) (adapter->wol |= BIT(4)) +#define RNP_WOL_CLEAR_STATUS(adapter) (adapter->wol &= ~BIT(4)) + +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_lib.c b/drivers/net/ethernet/mucse/rnp/rnp_lib.c new file mode 100644 index 000000000000..be5c6fddfb4b --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_lib.c @@ -0,0 +1,1334 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include "rnp.h" +#include "rnp_sriov.h" +#include "rnp_common.h" + +#if IS_ENABLED(CONFIG_DCB) +/** + * rnp_cache_ring_dcb_sriov - Descriptor ring to register mapping for SRIOV + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool rnp_cache_ring_dcb_sriov(struct rnp_adapter *adapter) +{ + u8 tcs = netdev_get_num_tc(adapter->netdev); + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + return true; +} +#endif + +/** + * rnp_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. 
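+ * Hardware rings are interleaved with a stride of 4, so TC n uses
+ * rings n, n + 4, n + 8, ... as sketched in the table in the body below.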
+ * + **/ +static bool rnp_cache_ring_dcb(struct rnp_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + unsigned int tx_idx, rx_idx; + int tc, offset, rss_i, i, step; + u8 num_tcs = netdev_get_num_tc(dev); + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* verify we have DCB queueing enabled before proceeding */ + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + step = 4; + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + /* + * we from tc start + * tc0 0 4 8 c + * tc1 1 5 9 d + * tc2 2 6 a e + * tc3 3 7 b f + */ + tx_idx = tc; + rx_idx = tc; + for (i = 0; i < rss_i; i++, tx_idx += step, rx_idx += step) { + ring = adapter->tx_ring[offset + i]; + + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(tx_idx); + ring->rnp_queue_idx = tx_idx; + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + + ring = adapter->rx_ring[offset + i]; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(rx_idx); + ring->rnp_queue_idx = rx_idx; + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + } + + return true; +} + +/** + * rnp_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. + * + */ +static bool rnp_cache_ring_sriov(struct rnp_adapter *adapter) +{ + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & RNP_FLAG_VMDQ_ENABLED)) + return false; + + return true; +} + +/** + * rnp_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. + * + **/ +static bool rnp_cache_ring_rss(struct rnp_adapter *adapter) +{ + int i; + /* setup here */ + int ring_step = 1; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* n400 use 0 4 8 c */ + if (hw->hw_type == rnp_hw_n400) + ring_step = 4; + + /* some ring alloc rules can be added here */ + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->tx_ring[i]; + ring->rnp_queue_idx = i * ring_step; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(ring->rnp_queue_idx); + + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->rx_ring[i]; + ring->rnp_queue_idx = i * ring_step; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(ring->rnp_queue_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + + return true; +} + +/** + * rnp_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. 
It must start + * with the "most" features enabled at the same time, then trickle down to + * the least amount of features turned on at once. + **/ +static void rnp_cache_ring_register(struct rnp_adapter *adapter) +{ + /* start with default case */ + +#if IS_ENABLED(CONFIG_DCB) + if (rnp_cache_ring_dcb_sriov(adapter)) + return; + +#endif + if (rnp_cache_ring_dcb(adapter)) + return; + + /* sriov ring alloc is added before, this maybe no use */ + if (rnp_cache_ring_sriov(adapter)) + return; + + rnp_cache_ring_rss(adapter); +} + +#define RNP_RSS_128Q_MASK 0x7F +#define RNP_RSS_64Q_MASK 0x3F +#define RNP_RSS_16Q_MASK 0xF +#define RNP_RSS_32Q_MASK 0x1F +#define RNP_RSS_8Q_MASK 0x7 +#define RNP_RSS_4Q_MASK 0x3 +#define RNP_RSS_2Q_MASK 0x1 +#define RNP_RSS_DISABLED_MASK 0x0 + +#if IS_ENABLED(CONFIG_DCB) + +/** + * rnp_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. + * + **/ +static bool rnp_set_dcb_sriov_queues(struct rnp_adapter *adapter) +{ + int i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = RNP_n10_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = RNP_n10_VMDQ_4Q_MASK; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 2; + adapter->ring_feature[RING_F_RSS].mask = RNP_RSS_DISABLED_MASK; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, i, 1, i); + + return true; +} +#endif + +static bool rnp_set_dcb_queues(struct rnp_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct rnp_ring_feature *f; + int rss_i, rss_m, i; + int tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + + /* we only support 4 tc , rss_i max is 32 */ + + /* 4 TC w/ 32 queues per TC */ + rss_i = min_t(u16, rss_i, 32); + rss_m = RNP_RSS_32Q_MASK; + + /* set RSS mask and indices */ + /* f->limit is relative with cpu_vector */ + f = &adapter->ring_feature[RING_F_RSS]; + /* use f->limit to change rss */ + rss_i = min_t(int, rss_i, f->limit); + 
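/* e.g. 4 TCs with rss_i capped to 8 here gives 32 tx/rx queues in total */ +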
f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when multiple TCs are enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + /* setup queue tc num */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + /* set the true queues */ + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +/** + * rnp_set_sriov_queues - Allocate queues for SR-IOV devices + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. + * + **/ +static bool rnp_set_sriov_queues(struct rnp_adapter *adapter) +{ + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = RNP_RSS_DISABLED_MASK; + struct rnp_hw *hw = &adapter->hw; + + /* only proceed if SR-IOV is enabled */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = + adapter->max_ring_pair_counts - 1; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->num_rx_queues = hw->sriov_ring_limit; + adapter->num_tx_queues = hw->sriov_ring_limit; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + return true; +} + +u32 rnp_rss_indir_tbl_entries(struct rnp_adapter *adapter) +{ + if (adapter->hw.rss_type == rnp_rss_uv3p) + return 8; + else if (adapter->hw.rss_type == rnp_rss_uv440) + return 128; + else if (adapter->hw.rss_type == rnp_rss_n10) + return 128; + else + return 128; +} + +/** + * rnp_set_rss_queues - Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool rnp_set_rss_queues(struct rnp_adapter *adapter) +{ + struct rnp_ring_feature *f; + u16 rss_i; + + f = &adapter->ring_feature[RING_F_RSS]; + /* use thid to change ring num */ + rss_i = f->limit; + /* set limit -> indices */ + f->indices = rss_i; + + /* should init rss mask */ + switch (adapter->hw.rss_type) { + case rnp_rss_uv3p: + f->mask = RNP_RSS_8Q_MASK; + break; + case rnp_rss_uv440: + f->mask = RNP_RSS_64Q_MASK; + break; + case rnp_rss_n10: + /* maybe not good */ + f->mask = RNP_RSS_128Q_MASK; + break; + /* maybe not good */ + default: + f->mask = 0; + + break; + } + + adapter->num_tx_queues = + min_t(int, rss_i, adapter->max_ring_pair_counts); + adapter->num_rx_queues = adapter->num_tx_queues; + + rnp_dbg("[%s] limit:%d indices:%d queues:%d\n", adapter->name, f->limit, + f->indices, adapter->num_tx_queues); + + return true; +} + +/** + * rnp_set_num_queues - Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. 
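+ * The order tried here is DCB with SR-IOV, then DCB, then SR-IOV,
+ * and finally plain RSS as the fallback.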
+ * + **/ +static void rnp_set_num_queues(struct rnp_adapter *adapter) +{ + /* Start with base case */ + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + +#if IS_ENABLED(CONFIG_DCB) + if (rnp_set_dcb_sriov_queues(adapter)) + return; + +#endif + if (rnp_set_dcb_queues(adapter)) + return; + + if (rnp_set_sriov_queues(adapter)) + return; + /* at last we support rss */ + rnp_set_rss_queues(adapter); +} + +int rnp_acquire_msix_vectors(struct rnp_adapter *adapter, int vectors) +{ + int err; + + err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vectors, vectors); + if (err < 0) { + rnp_err("pci_enable_msix failed: req:%d err:%d\n", vectors, + err); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return -EINVAL; + } + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + vectors -= adapter->num_other_vectors; + adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); + /* in dcb we use max 32 q-vectors */ + /* each vectors for max 4 tcs */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + adapter->num_q_vectors = min(32, adapter->num_q_vectors); + + return 0; +} + +static void rnp_add_ring(struct rnp_ring *ring, struct rnp_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +static inline void rnp_irq_enable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->rx) + rnp_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK)); +} + +static inline void rnp_irq_disable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->tx) + rnp_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK)); +} + +static enum hrtimer_restart irq_miss_check(struct hrtimer *hrtimer) +{ + struct rnp_q_vector *q_vector; + struct rnp_ring *ring; + struct rnp_tx_desc *eop_desc; + struct rnp_adapter *adapter; + + int tx_next_to_clean; + int tx_next_to_use; + + struct rnp_tx_buffer *tx_buffer; + union rnp_rx_desc *rx_desc; + + q_vector = container_of(hrtimer, struct rnp_q_vector, + irq_miss_check_timer); + adapter = q_vector->adapter; + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + goto do_self_napi; + rnp_irq_disable_queues(q_vector); + /* check tx irq miss */ + rnp_for_each_ring(ring, q_vector->tx) { + tx_next_to_clean = ring->next_to_clean; + tx_next_to_use = ring->next_to_use; + /* have work to do */ + if (tx_next_to_use == tx_next_to_clean) + continue; + /* have tx done */ + tx_buffer = &ring->tx_buffer_info[tx_next_to_clean]; + eop_desc = tx_buffer->next_to_watch; + /* next_to_watch maybe null in some condition */ + if (eop_desc) { + if ((eop_desc->vlan_cmd & + cpu_to_le32(RNP_TXD_STAT_DD))) { + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32(ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + napi_schedule_irqoff(&q_vector->napi); + goto do_self_napi; + } + } + } + + /* check rx irq */ + rnp_for_each_ring(ring, q_vector->rx) { + rx_desc = RNP_RX_DESC(ring, ring->next_to_clean); + if (rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) { + int size; + + size = le16_to_cpu(rx_desc->wb.len); + + if (size) { + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + 
q_vector->new_rx_count; + } + napi_schedule_irqoff(&q_vector->napi); + } else { + /* in sriov mode set reset pf flags */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + adapter->flags2 |= RNP_FLAG2_RESET_PF; + else + adapter->flags2 |= + RNP_FLAG2_RESET_REQUESTED; + } + goto do_self_napi; + } + } + /* open irq again */ + rnp_irq_enable_queues(q_vector); +do_self_napi: + return HRTIMER_NORESTART; +} + +/** + * rnp_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int rnp_alloc_q_vector(struct rnp_adapter *adapter, int eth_queue_idx, + int v_idx, int r_idx, int r_count, int step) +{ + struct rnp_q_vector *q_vector; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + int node = NUMA_NO_NODE; + int cpu = -1; + int ring_count, size; + int txr_count, rxr_count, idx; + int rxr_idx = r_idx, txr_idx = r_idx; + int cpu_offset = 0; + + DPRINTK(PROBE, INFO, + "eth_queue_idx:%d v_idx:%d(off:%d) ring:%d ring_cnt:%d, " + "step:%d\n", + eth_queue_idx, v_idx, adapter->q_vector_off, r_idx, r_count, + step); + + txr_count = rxr_count = r_count; + + ring_count = txr_count + rxr_count; + size = sizeof(struct rnp_q_vector) + + (sizeof(struct rnp_ring) * ring_count); + + /* should minis adapter->q_vector_off */ + if (cpu_online(cpu_offset + v_idx - adapter->q_vector_off)) { + /* cpu 1 - 7 */ + cpu = cpu_offset + v_idx - adapter->q_vector_off; + node = cpu_to_node(cpu); + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + + q_vector->numa_node = node; + + /* initialize nap */ + netif_napi_add_weight(adapter->netdev, &q_vector->napi, rnp_poll, + adapter->napi_budge); + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx - adapter->q_vector_off] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + for (idx = 0; idx < txr_count; idx++) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + rnp_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_item_count; + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + int rss_i; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + /* in dcb mode should assign rss */ + ring->queue_index = eth_queue_idx + idx * rss_i; + } else { + ring->queue_index = eth_queue_idx + idx; + } + /* rnp_queue_idx can be changed after */ + /* it is used to location hw reg */ + ring->rnp_queue_idx = txr_idx; + ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(txr_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr 
+ RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + ring->device_id = adapter->pdev->device; + ring->pfvfnum = hw->pfvfnum; + /* n10 should skip tx start control */ + if (hw->hw_type == rnp_hw_n10) + ring->ring_flags |= RNP_RING_SKIP_TX_START; + + if (hw->hw_type == rnp_hw_n400) + ring->ring_flags |= RNP_RING_SKIP_TX_START; + + /* assign ring to adapter */ + adapter->tx_ring[ring->queue_index] = ring; + + /* update count and index */ + txr_idx += step; + + rnp_dbg("\t\t%s:vector[%d] <--RNP TxRing:%d, eth_queue:%d\n", + adapter->name, v_idx, ring->rnp_queue_idx, + ring->queue_index); + + /* push pointer to next ring */ + ring++; + } + + for (idx = 0; idx < rxr_count; idx++) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + rnp_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_item_count; + /* rnp_queue_idx can be changed after */ + /* it is used to location hw reg */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + int rss_i; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + /* in dcb mode should assign rss */ + ring->queue_index = eth_queue_idx + idx * rss_i; + } else { + ring->queue_index = eth_queue_idx + idx; + } + ring->rnp_queue_idx = rxr_idx; + ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(rxr_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + ring->device_id = adapter->pdev->device; + ring->pfvfnum = hw->pfvfnum; + if (hw->hw_type == rnp_hw_n10) { + } else if (hw->hw_type == rnp_hw_n400) { + } + + /* assign ring to adapter */ + adapter->rx_ring[ring->queue_index] = ring; + rnp_dbg("\t\t%s:vector[%d] <--RNP RxRing:%d, eth_queue:%d\n", + adapter->name, v_idx, ring->rnp_queue_idx, + ring->queue_index); + + /* update count and index */ + rxr_idx += step; + + /* push pointer to next ring */ + ring++; + } + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + q_vector->vector_flags |= RNP_QVECTOR_FLAG_IRQ_MISS_CHECK; + q_vector->vector_flags |= RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS; + /* initialize timer */ + q_vector->irq_check_usecs = 1000; + hrtimer_init(&q_vector->irq_miss_check_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + q_vector->irq_miss_check_timer.function = irq_miss_check; + q_vector->new_rx_count = adapter->rx_frames; + q_vector->old_rx_count = adapter->rx_frames; + } + + return 0; +} + +/** + * rnp_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
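+ * The per-vector irq-miss-check hrtimer is cancelled before the memory
+ * is released via kfree_rcu(), since rnp_get_stats64() may still be
+ * walking the rings of this vector.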
+ **/ +static void rnp_free_q_vector(struct rnp_adapter *adapter, int v_idx) +{ + struct rnp_q_vector *q_vector = adapter->q_vector[v_idx]; + struct rnp_ring *ring; + + rnp_dbg("v_idx:%d\n", v_idx); + + rnp_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + rnp_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + + /* must stop timer */ + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + hrtimer_cancel(&q_vector->irq_miss_check_timer); + + /* + * rnp_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); +} + +/** + * rnp_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int rnp_alloc_q_vectors(struct rnp_adapter *adapter) +{ + int v_idx = adapter->q_vector_off; + int ring_idx = 0; + int r_remaing = + min_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + int ring_step = 1; + int err, ring_cnt, v_remaing = adapter->num_q_vectors; + int q_vector_nums = 0; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_idx = 0; + /* only 2 rings when sriov enabled */ + /* from back */ + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + ring_idx = 0; + r_remaing = hw->sriov_ring_limit; + } else { + ring_idx = adapter->max_ring_pair_counts - + ring_step * hw->sriov_ring_limit; + r_remaing = hw->sriov_ring_limit; + } + } + + adapter->eth_queue_idx = 0; + BUG_ON(adapter->num_q_vectors == 0); + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + rnp_dbg("in dcb mode r_remaing %d, num_q_vectors %d\n", + r_remaing, v_remaing); + } + + rnp_dbg("r_remaing:%d, ring_step:%d num_q_vectors:%d\n", r_remaing, + ring_step, v_remaing); + + /* can support muti rings in one q_vector */ + for (; r_remaing > 0 && v_remaing > 0; v_remaing--) { + ring_cnt = DIV_ROUND_UP(r_remaing, v_remaing); + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + BUG_ON(ring_cnt != adapter->num_tc); + + err = rnp_alloc_q_vector(adapter, adapter->eth_queue_idx, v_idx, + ring_idx, ring_cnt, ring_step); + if (err) + goto err_out; + ring_idx += ring_step * ring_cnt; + r_remaing -= ring_cnt; + v_idx++; + q_vector_nums++; + /* dcb mode only add 1 */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + adapter->eth_queue_idx += 1; + else + adapter->eth_queue_idx += ring_cnt; + } + /* should fix the real used q_vectors_nums */ + adapter->num_q_vectors = q_vector_nums; + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + rnp_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * rnp_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void rnp_free_q_vectors(struct rnp_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_rx_queues = 0; + adapter->num_tx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + rnp_free_q_vector(adapter, v_idx); +} + +static void rnp_reset_interrupt_capability(struct rnp_adapter *adapter) +{ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & RNP_FLAG_MSI_CAPABLE) + pci_disable_msi(adapter->pdev); + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + adapter->q_vector_off = 0; + + /* frist clean msix flags */ + adapter->flags &= (~RNP_FLAG_MSIX_ENABLED); + adapter->flags &= (~RNP_FLAG_MSI_ENABLED); +} + +/** + * rnp_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int rnp_set_interrupt_capability(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int vector, v_budget, err = 0; + int irq_mode_back = adapter->irq_mode; + + v_budget = min_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + /* in one ring mode should reset v_budget */ + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += adapter->num_other_vectors; + v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); + + if (adapter->irq_mode == irq_mode_msix) { + adapter->msix_entries = kcalloc( + v_budget, sizeof(struct msix_entry), GFP_KERNEL); + + if (!adapter->msix_entries) { + rnp_err("alloc msix_entries failed!\n"); + return -EINVAL; + } + dbg("[%s] adapter:%p msix_entry:%p\n", __func__, adapter, + adapter->msix_entries); + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + err = rnp_acquire_msix_vectors(adapter, v_budget); + if (!err) { + if (adapter->num_other_vectors) + adapter->q_vector_off = 1; + rnp_dbg("adapter%d alloc vectors: cnt:%d [%d~%d] num_q_vectors:%d\n", + adapter->bd_number, v_budget, + adapter->q_vector_off, + adapter->q_vector_off + v_budget - 1, + adapter->num_q_vectors); + adapter->flags |= RNP_FLAG_MSIX_ENABLED; + + goto out; + } + /* if has msi capability try it */ + if (adapter->flags & RNP_FLAG_MSI_CAPABLE) + adapter->irq_mode = irq_mode_msi; + kfree(adapter->msix_entries); + rnp_dbg("acquire msix failed, try to use msi\n"); + } else { + rnp_dbg("adapter%d not in msix mode\n", adapter->bd_number); + } + /* if has msi capability or set irq_mode */ + if (adapter->irq_mode == irq_mode_msi) { + err = pci_enable_msi(adapter->pdev); + if (err) { + rnp_dbg("Failed to allocate MSI interrupt, falling back to legacy. 
Error"); + } else { + /* msi mode use only 1 irq */ + adapter->flags |= RNP_FLAG_MSI_ENABLED; + } + } + /* write back origin irq_mode */ + adapter->irq_mode = irq_mode_back; + /* legacy and msi only 1 vectors */ + adapter->num_q_vectors = 1; +out: + return err; +} + +static void rnp_print_ring_info(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *ring; + struct rnp_q_vector *q_vector; + + rnp_dbg("tx_queue count %d\n", adapter->num_tx_queues); + rnp_dbg("queue-mapping :\n"); + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + rnp_dbg(" queue %d , physical ring %d\n", i, + ring->rnp_queue_idx); + } + rnp_dbg("rx_queue count %d\n", adapter->num_rx_queues); + rnp_dbg("queue-mapping :\n"); + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + rnp_dbg(" queue %d , physical ring %d\n", i, + ring->rnp_queue_idx); + } + rnp_dbg("q_vector count %d\n", adapter->num_q_vectors); + rnp_dbg("vector-queue mapping:\n"); + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + rnp_dbg("vector %d\n", i); + rnp_for_each_ring(ring, q_vector->tx) + rnp_dbg(" tx physical ring %d\n", ring->rnp_queue_idx); + + rnp_for_each_ring(ring, q_vector->rx) + rnp_dbg(" rx physical ring %d\n", ring->rnp_queue_idx); + } +} + +/** + * rnp_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int rnp_init_interrupt_scheme(struct rnp_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + rnp_set_num_queues(adapter); + + /* Set interrupt mode */ + err = rnp_set_interrupt_capability(adapter); + if (err) { + e_dev_err("Unable to get interrupt\n"); + goto err_set_interrupt; + } + + err = rnp_alloc_q_vectors(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + rnp_cache_ring_register(adapter); + + DPRINTK(PROBE, INFO, + "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + rnp_print_ring_info(adapter); + + set_bit(__RNP_DOWN, &adapter->state); + + return 0; + +err_alloc_q_vectors: + rnp_reset_interrupt_capability(adapter); +err_set_interrupt:; + return err; +} + +/** + * rnp_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void rnp_clear_interrupt_scheme(struct rnp_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + rnp_free_q_vectors(adapter); + rnp_reset_interrupt_capability(adapter); +} + +/** + * rnp_tx_ctxtdesc - Send a control desc to hw + * @tx_ring: target ring of this control desc + * @mss_seg_len: mss length + * @l4_hdr_len: l4 length + * @tunnel_hdr_len: tunnel_hdr_len + * @inner_vlan_tag: inner_vlan_tag + * @type_tucmd: cmd + * + **/ +void rnp_tx_ctxtdesc(struct rnp_ring *tx_ring, u32 mss_len_vf_num, + u32 inner_vlan_tunnel_len, int ignore_vlan, bool crc_pad) +{ + struct rnp_tx_ctx_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct rnp_adapter *adapter = RING2ADAPT(tx_ring); + u32 type_tucmd = 0; + + context_desc = RNP_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= RNP_TXD_CTX_CTRL_DESC; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + if (!crc_pad) + type_tucmd |= RNP_TXD_MTI_CRC_PAD_CTRL; + /* close mac padding */ + } + + if (tx_ring->ring_flags & RNP_RING_OUTER_VLAN_FIX) { +#define VLAN_MASK (0x0000ffff) +#define VLAN_INSERT (0x00800000) + if (inner_vlan_tunnel_len & VLAN_MASK) + type_tucmd |= VLAN_INSERT; + + } else { + if (inner_vlan_tunnel_len & 0x00ffff00) { + /* if a inner vlan */ + type_tucmd |= RNP_TXD_CMD_INNER_VLAN; + } + } + + context_desc->mss_len_vf_num = cpu_to_le32(mss_len_vf_num); + context_desc->inner_vlan_tunnel_len = + cpu_to_le32(inner_vlan_tunnel_len); + context_desc->resv_cmd = cpu_to_le32(type_tucmd); + if (tx_ring->q_vector->adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + if (ignore_vlan) + context_desc->inner_vlan_tunnel_len |= + VF_VEB_IGNORE_VLAN; + } + buf_dump_line("ctx ", __LINE__, context_desc, sizeof(*context_desc)); +} + +void rnp_maybe_tx_ctxtdesc(struct rnp_ring *tx_ring, + struct rnp_tx_buffer *first, u32 ignore_vlan) +{ + /* sriov mode pf use the last vf */ + if (first->ctx_flag) { + rnp_tx_ctxtdesc(tx_ring, first->mss_len_vf_num, + first->inner_vlan_tunnel_len, ignore_vlan, + first->gso_need_padding); + } +} + +void rnp_store_reta(struct rnp_adapter *adapter) +{ + u32 i, reta_entries = rnp_rss_indir_tbl_entries(adapter); + struct rnp_hw *hw = &adapter->hw; + u32 reta = 0; + /* relative with rss table */ + struct rnp_ring *rx_ring; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + reta = adapter->rss_indir_tbl[i]; + } else { + rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]]; + reta = rx_ring->rnp_queue_idx; + } + hw->rss_indir_tbl[i] = reta; + } + hw->ops.set_rss_table(hw); +} + +void rnp_store_key(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + hw->ops.set_rss_key(hw, sriov_flag); +} + +int rnp_init_rss_key(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + bool 
sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + /* only init rss key once */ + /* no change rss key if user input one */ + if (!adapter->rss_key_setup_flag) { + netdev_rss_key_fill(adapter->rss_key, RNP_RSS_KEY_SIZE); + adapter->rss_key_setup_flag = 1; + } + hw->ops.set_rss_key(hw, sriov_flag); + + return 0; +} + +int rnp_init_rss_table(struct rnp_adapter *adapter) +{ + int rx_nums = adapter->num_rx_queues; + int i, j; + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring; + u32 reta = 0; + u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + rx_nums = rx_nums / adapter->num_tc; + for (i = 0, j = 0; i < 8; i++) { + adapter->rss_tc_tbl[i] = j; + hw->rss_tc_tbl[i] = j; + j = (j + 1) % adapter->num_tc; + } + } else { + for (i = 0, j = 0; i < 8; i++) { + hw->rss_tc_tbl[i] = 0; + adapter->rss_tc_tbl[i] = 0; + } + } + + /* adapter->num_q_vectors is not correct */ + for (i = 0, j = 0; i < reta_entries; i++) { + /* init with default value */ + if (!adapter->rss_tbl_setup_flag) + adapter->rss_indir_tbl[i] = j; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* in sriov mode reta in [0, rx_nums] */ + reta = j; + } else { + /* in no sriov, reta is real ring number */ + rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]]; + reta = rx_ring->rnp_queue_idx; + } + /* store rss_indir_tbl */ + hw->rss_indir_tbl[i] = reta; + + j = (j + 1) % rx_nums; + } + /* tbl only init once */ + adapter->rss_tbl_setup_flag = 1; + + hw->ops.set_rss_table(hw); + return 0; +} + +void rnp_setup_dma_rx(struct rnp_adapter *adapter, int count_in_dw) +{ + struct rnp_hw *hw = &adapter->hw; + u32 data; + + data = rd32(hw, RNP_DMA_CONFIG); + data &= (0x00000ffff); + data |= (count_in_dw << 16); + wr32(hw, RNP_DMA_CONFIG, data); +} + +/* setup to the hw */ +s32 rnp_fdir_write_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *filter, u16 hw_id, + u8 queue, bool prio_flag) +{ + if (filter->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) + hw->ops.set_layer2_remapping(hw, filter, hw_id, queue, + prio_flag); + else + hw->ops.set_tuple5_remapping(hw, filter, hw_id, queue, + prio_flag); + + return 0; +} + +s32 rnp_fdir_erase_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *input, u16 pri_id) +{ + /* just disable filter */ + if (input->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + hw->ops.clr_layer2_remapping(hw, pri_id); + dbg("disable layer2 %d\n", pri_id); + } else { + hw->ops.clr_tuple5_remapping(hw, pri_id); + dbg("disable tuple5 %d\n", pri_id); + } + + return 0; +} + +u32 rnp_tx_desc_unused_sw(struct rnp_ring *tx_ring) +{ + u16 ntu = tx_ring->next_to_use; + u16 ntc = tx_ring->next_to_clean; + u16 count = tx_ring->count; + + return ((ntu >= ntc) ? (count - ntu + ntc) : (ntc - ntu)); +} + +u32 rnp_rx_desc_used_hw(struct rnp_hw *hw, struct rnp_ring *rx_ring) +{ + u32 head = ring_rd32(rx_ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + u32 tail = ring_rd32(rx_ring, RNP_DMA_REG_RX_DESC_BUF_TAIL); + u16 count = rx_ring->count; + + return ((tail >= head) ? (count - tail + head) : (head - tail)); +} + +u32 rnp_tx_desc_unused_hw(struct rnp_hw *hw, struct rnp_ring *tx_ring) +{ + u32 head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + u32 tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + u16 count = tx_ring->count; + + return ((tail >= head) ? 
(count - tail + head) : (head - tail)); +} + +s32 rnp_disable_rxr_maxrate(struct net_device *netdev, u8 queue_index) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring = adapter->rx_ring[queue_index]; + u32 reg_idx = rx_ring->rnp_queue_idx; + + /* disable which dma ring in maxrate limit mode */ + wr32(hw, RNP_SELECT_RING_EN(reg_idx), 0); + /* Clear Tx Ring maxrate */ + wr32(hw, RNP_RX_RING_MAXRATE(reg_idx), 0); + + return 0; +} + +s32 rnp_enable_rxr_maxrate(struct net_device *netdev, u8 queue_index, + u32 maxrate) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring = adapter->rx_ring[queue_index]; + u32 reg_idx = rx_ring->rnp_queue_idx; + u32 real_rate = maxrate / 16; + + if (!real_rate) + return -EINVAL; + + wr32(hw, RNP_RING_FC_ENABLE, true); + /* disable which dma ring in maxrate limit mode */ + wr32(hw, RNP_SELECT_RING_EN(reg_idx), true); + /* Clear Tx Ring maxrate */ + wr32(hw, RNP_RX_RING_MAXRATE(reg_idx), real_rate); + + return 0; +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_main.c b/drivers/net/ethernet/mucse/rnp/rnp_main.c new file mode 100644 index 000000000000..59d7873bf017 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_main.c @@ -0,0 +1,7943 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp_tc_u32_parse.h" +#include "rnp_common.h" +#include "rnp.h" +#include "rnp_dcb.h" +#include "rnp_sriov.h" +#include "rnp_ptp.h" +#include "rnp_ethtool.h" +#include "rnp_mpe.h" + +#ifdef CONFIG_ARM64 +#define NO_BQL_TEST +#endif + +char rnp_driver_name[] = "rnp"; +static const char rnp_driver_string[] = + "mucse 1/10/25/40 Gigabit PCI Express Network Driver"; +#define DRV_VERSION "1.0.1-rc2" +#include "version.h" + +const char rnp_driver_version[] = DRV_VERSION; +static const char rnp_copyright[] = + "Copyright (c) 2020-2024 mucse Corporation."; + +extern struct rnp_info rnp_n10_info; +extern struct rnp_info rnp_n400_info; + +static struct rnp_info *rnp_info_tbl[] = { + [board_n10] = &rnp_n10_info, + [board_n400] = &rnp_n400_info, +}; + +static int register_mbx_irq(struct rnp_adapter *adapter); +static void remove_mbx_irq(struct rnp_adapter *adapter); + +static void rnp_pull_tail(struct sk_buff *skb); +#ifdef OPTM_WITH_LPAGE +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi, + union rnp_rx_desc *rx_desc, u16 bufsz, + u64 fun_id); + +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer); +#else +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi); +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb); +#endif + +static struct pci_device_id rnp_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10), + .driver_data = board_n10 }, /* n10 40G 10G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10_X1), + .driver_data = board_n10 }, /* n10 40G 10G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10_TP), + .driver_data = board_n10 }, /* n10 10G TP */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400), + 
.driver_data = board_n400 }, /* n400 4port 1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400C), + .driver_data = board_n400 }, /* n400 4port 1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400_X1), + .driver_data = board_n10 }, /* n400 1port 10G/1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400C_X1), + .driver_data = board_n10 }, /* n400 1port 10G/1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10C), + .driver_data = board_n10 }, /* n10c 40G 10G */ + /* required last entry */ + { + 0, + }, +}; + +MODULE_DEVICE_TABLE(pci, rnp_pci_tbl); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0000); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static unsigned int fix_eth_name; +module_param(fix_eth_name, uint, 0000); +MODULE_PARM_DESC(fix_eth_name, "set eth adapter name to rnpXX"); + +static int module_enable_ptp = 1; +module_param(module_enable_ptp, uint, 0000); +MODULE_PARM_DESC(module_enable_ptp, "enable ptp feature, disabled default"); + +unsigned int mpe_src_port; +module_param(mpe_src_port, uint, 0000); +MODULE_PARM_DESC(mpe_src_port, "mpe src port"); + +unsigned int mpe_pkt_version; +module_param(mpe_pkt_version, uint, 0000); +MODULE_PARM_DESC(mpe_pkt_version, "ipv4 or ipv6 src port"); + +MODULE_AUTHOR("Mucse Corporation, "); +MODULE_DESCRIPTION("Mucse(R) 1/10/25/40 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static struct workqueue_struct *rnp_wq; +static int enable_hi_dma; +extern void rnp_service_timer(struct timer_list *t); + +void rnp_service_event_schedule(struct rnp_adapter *adapter) +{ + if (!test_bit(__RNP_DOWN, &adapter->state) && + !test_and_set_bit(__RNP_SERVICE_SCHED, &adapter->state)) + queue_work(rnp_wq, &adapter->service_task); +} + +static void rnp_service_event_complete(struct rnp_adapter *adapter) +{ + BUG_ON(!test_bit(__RNP_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__RNP_SERVICE_SCHED, &adapter->state); +} + + +/** + * rnp_set_ring_vector - set the ring_vector registers, mapping interrupt + * causes to vectors + * + * @adapter: pointer to adapter struct + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void rnp_set_ring_vector(struct rnp_adapter *adapter, u8 rnp_queue, + u8 rnp_msix_vector) +{ + struct rnp_hw *hw = &adapter->hw; + u32 data = 0; + + data = hw->pfvfnum << 24; + data |= (rnp_msix_vector << 8); + data |= (rnp_msix_vector << 0); + + DPRINTK(IFUP, INFO, + "Set Ring-Vector queue:%d (reg:0x%x) <-- Rx-MSIX:%d, Tx-MSIX:%d\n", + rnp_queue, RING_VECTOR(rnp_queue), rnp_msix_vector, + rnp_msix_vector); + + rnp_wr_reg(hw->ring_msix_base + RING_VECTOR(rnp_queue), data); +} + +static void rnp_unmap_and_free_tx_resource(struct rnp_ring *ring, + struct rnp_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit 
path */ +} + +static u64 rnp_get_tx_completed(struct rnp_ring *ring) +{ + return ring->stats.packets; +} + +static u64 rnp_get_tx_pending(struct rnp_ring *ring) +{ + u32 head = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + u32 tail = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + + if (head != tail) + return (head < tail) ? tail - head : + (tail + ring->count - head); + + return 0; +} + +static inline bool rnp_check_tx_hang(struct rnp_ring *tx_ring) +{ + u32 tx_done = rnp_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = rnp_get_tx_pending(tx_ring); + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state); + } else { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state); + } + return ret; +} + +/** + * rnp_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void rnp_tx_timeout_reset(struct rnp_adapter *adapter) +{ + /* Do the reset outside of interrupt context */ + if (!test_bit(__RNP_DOWN, &adapter->state)) { + adapter->flags2 |= RNP_FLAG2_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + rnp_service_event_schedule(adapter); + } +} + +static void rnp_check_restart_tx(struct rnp_q_vector *q_vector, + struct rnp_ring *tx_ring) +{ + struct rnp_adapter *adapter = q_vector->adapter; +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (likely(netif_carrier_ok(tx_ring->netdev) && + (rnp_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
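+ * The barrier below is assumed to pair with a matching barrier in the
+ * transmit path after it stops the subqueue, so that either the
+ * transmit side observes the updated next_to_clean or this side
+ * observes the stopped queue and wakes it.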
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__RNP_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } +} + +/** + * rnp_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool rnp_clean_tx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *tx_ring, int napi_budget) +{ + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *tx_desc; + u64 total_bytes = 0, total_packets = 0; + int budget = q_vector->tx.work_limit; + int i = tx_ring->next_to_clean; + + if (test_bit(__RNP_DOWN, &adapter->state)) + return true; + tx_ring->tx_stats.poll_count++; + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = RNP_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct rnp_tx_desc *eop_desc = tx_buffer->next_to_watch; + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + rmb(); + + /* if eop DD is not set pending work has not been completed */ + if (!(eop_desc->vlan_cmd & cpu_to_le32(RNP_TXD_STAT_DD))) + break; + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + tx_ring->tx_stats.tx_clean_count += total_packets; + tx_ring->tx_stats.tx_clean_times++; + if (tx_ring->tx_stats.tx_clean_times > 10) { + tx_ring->tx_stats.tx_clean_times = 0; + tx_ring->tx_stats.tx_clean_count = 0; + } + + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + tx_ring->tx_stats.send_done_bytes += total_bytes; +#ifdef NO_BQL_TEST +#else + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, + total_bytes); +#endif + + if (!(q_vector->vector_flags & RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS)) { +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if 
(likely(netif_carrier_ok(tx_ring->netdev) && + (rnp_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__RNP_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + } + + /* now we start tx queue later */ + return !!budget; +} + +static inline void rnp_rx_hash(struct rnp_ring *ring, + union rnp_rx_desc *rx_desc, struct sk_buff *skb) +{ + int rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; +#define RNP_RSS_TYPE_MASK 0xc0 + rss_type = rx_desc->wb.cmd & RNP_RSS_TYPE_MASK; + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash), + rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +/** + * rnp_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void rnp_rx_checksum(struct rnp_ring *ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + bool encap_pkt = false; + + skb_checksum_none_assert(skb); + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (!(ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT)) { + if (rnp_get_stat(rx_desc, RNP_RXD_STAT_TUNNEL_MASK) == + RNP_RXD_STAT_TUNNEL_VXLAN) { + encap_pkt = true; + skb->encapsulation = 1; + skb->ip_summed = CHECKSUM_NONE; + } + } + /* if outer L3/L4 error */ + /* must in promisc mode or rx-all mode */ + if (rnp_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK)) { + return; + } + ring->rx_stats.csum_good++; + /* at least it is a ip packet which has ip checksum */ + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (encap_pkt) { + /* If we checked the outer header let the stack know */ + skb->csum_level = 1; + } +} + +static inline void rnp_update_rx_tail(struct rnp_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + rnp_wr_reg(rx_ring->tail, val); +} + +#if (PAGE_SIZE < 8192) +#define RNP_MAX_2K_FRAME_BUILD_SKB (RNP_RXBUFFER_1536 - NET_IP_ALIGN) +#define RNP_2K_TOO_SMALL_WITH_PADDING \ + ((NET_SKB_PAD + RNP_RXBUFFER_1536) > SKB_WITH_OVERHEAD(RNP_RXBUFFER_2K)) + +static inline int rnp_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int rnp_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. 
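+ *
+ * In outline, the headroom that ends up being used is
+ *
+ *   pad = SKB_WITH_OVERHEAD(ALIGN(rx_buf_len, PAGE_SIZE / 2)) - rx_buf_len
+ *
+ * as computed by rnp_compute_pad() above, with rx_buf_len already
+ * reduced by NET_IP_ALIGN.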
+ */ + if (RNP_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = RNP_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = RNP_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + return rnp_compute_pad(rx_buf_len); +} + +#define RNP_SKB_PAD rnp_skb_pad() +#else /* PAGE_SIZE < 8192 */ +#define RNP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/** + * rnp_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void rnp_process_skb_fields(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(dev); + + rnp_rx_hash(rx_ring, rx_desc, skb); + rnp_rx_checksum(rx_ring, rx_desc, skb); + if (((dev->features & NETIF_F_HW_VLAN_CTAG_RX) + || (dev->features & NETIF_F_HW_VLAN_STAG_RX)) && + rnp_test_staterr(rx_desc, RNP_RXD_STAT_VLAN_VALID) && + !ignore_veb_vlan(rx_ring->q_vector->adapter, rx_desc)) { + + if (rx_ring->ring_flags & RNP_RING_DOUBLE_VLAN_SUPPORT) { + /* check outer vlan first */ + if (rnp_test_ext_cmd(rx_desc, REV_OUTER_VLAN)) { + u16 vid_inner = le16_to_cpu(rx_desc->wb.vlan); + u16 vid_outer; + u16 vlan_tci = htons(ETH_P_8021Q); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vid_inner); + /* check outer vlan type */ + if (rx_ring->ring_flags & + RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr( + rx_desc, + RNP_RXD_STAT_STAG)) { + switch (rx_ring->q_vector + ->adapter + ->outer_vlan_type) { + case outer_vlan_type_88a8: + vlan_tci = htons( + ETH_P_8021AD); + break; + case outer_vlan_type_9100: + vlan_tci = htons( + ETH_P_QINQ1); + break; + case outer_vlan_type_9200: + vlan_tci = htons( + ETH_P_QINQ2); + break; + default: + vlan_tci = htons( + ETH_P_8021AD); + break; + } + } else { + vlan_tci = htons(ETH_P_8021Q); + } + } else { + vlan_tci = htons(ETH_P_8021Q); + } + vid_outer = le16_to_cpu(rx_desc->wb.mark); + /* if in stags mode should ignore only stags */ + if (adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) { + /* push outer in if not equal stags or cvlan */ + if ((vid_outer != adapter->stags_vid) || + (vlan_tci == htons(ETH_P_8021Q))) { + /* push outer inner */ + skb = __vlan_hwaccel_push_inside( + skb); + __vlan_hwaccel_put_tag( + skb, vlan_tci, + vid_outer); + } + } else { + /* push outer */ + skb = __vlan_hwaccel_push_inside(skb); + __vlan_hwaccel_put_tag(skb, vlan_tci, + vid_outer); + } + } else { + /* only inner vlan */ + u16 vid = le16_to_cpu(rx_desc->wb.vlan); + if (rx_ring->ring_flags & RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_STAG)) { + if ((adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid == + adapter->stags_vid)) { + } else + __vlan_hwaccel_put_tag( + skb, + htons(ETH_P_8021AD), + vid); + + } else { + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vid); + } + } else { + __vlan_hwaccel_put_tag( + skb, htons(ETH_P_8021Q), vid); + } + } + } else { + u16 vid = le16_to_cpu(rx_desc->wb.vlan); + if (rx_ring->ring_flags & RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_STAG)) { + __vlan_hwaccel_put_tag( + skb, htons(ETH_P_8021AD), vid); + } else { + __vlan_hwaccel_put_tag( + skb, 
htons(ETH_P_8021Q), vid); + } + } else { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vid); + } + } + rx_ring->rx_stats.vlan_remove++; + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, dev); +} + +static void rnp_rx_skb(struct rnp_q_vector *q_vector, struct sk_buff *skb) +{ + struct rnp_adapter *adapter = q_vector->adapter; + + if (!(adapter->flags & RNP_FLAG_IN_NETPOLL)) + napi_gro_receive(&q_vector->napi, skb); + else + netif_rx(skb); +} + +/* drop this packets if error */ +static bool rnp_check_csum_error(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, unsigned int size, + unsigned int *driver_drop_packets) +{ + bool err = false; + + struct net_device *netdev = rx_ring->netdev; + + if (netdev->features & NETIF_F_RXCSUM) { + if (unlikely( + rnp_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK))) { + rx_debug_printk("rx error: VEB:%s mark:0x%x cmd:0x%x\n", + (rx_ring->q_vector->adapter->flags & + RNP_FLAG_SRIOV_ENABLED) ? + "On" : + "Off", + rx_desc->wb.mark, rx_desc->wb.cmd); + /* push this packet to stack if in promisc mode */ + rx_ring->rx_stats.csum_err++; + + if ((!(netdev->flags & IFF_PROMISC) && + (!(netdev->features & NETIF_F_RXALL)))) { + if (rx_ring->ring_flags & RNP_RING_CHKSM_FIX) { + err = true; + goto skip_fix; + } + if (unlikely(rnp_test_staterr( + rx_desc, + RNP_RXD_STAT_L4_MASK) && + (!(rx_desc->wb.rev1 & + RNP_RX_L3_TYPE_MASK)))) { + rx_ring->rx_stats.csum_err--; + goto skip_fix; + } + /* we ignore sctp csum erro small than 60 */ + if (unlikely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_SCTP_MASK))) { + if ((size > 60) && + (rx_desc->wb.rev1 & + RNP_RX_L3_TYPE_MASK)) { + err = true; + } else { + /* sctp less than 60 hw report err by mistake */ + rx_ring->rx_stats.csum_err--; + } + } else { + err = true; + } + } + } + } +skip_fix: + if (err) { + u32 ntc = rx_ring->next_to_clean + 1; + struct rnp_rx_buffer *rx_buffer; +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + /* if eop add drop_packets */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + *driver_drop_packets = *driver_drop_packets + 1; + + /* we are reusing so sync this buffer for CPU use */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +#ifdef OPTM_WITH_LPAGE + rnp_put_rx_buffer(rx_ring, rx_buffer); +#else + rnp_put_rx_buffer(rx_ring, rx_buffer, NULL); +#endif + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + } + return err; +} + +/** + * rnp_rx_ring_reinit - just reinit rx_ring with new count in ->reset_count + * @rx_ring: rx descriptor ring to transact packets on + */ +static int rnp_rx_ring_reinit(struct rnp_adapter *adapter, struct rnp_ring *rx_ring) +{ + struct rnp_ring *temp_ring; + int err = 0; + + if (rx_ring->count == rx_ring->reset_count) + return 0; + /* stop rx queue */ + + temp_ring = vzalloc(array_size(1, sizeof(struct rnp_ring))); + if (!temp_ring) + goto err_setup; + + rnp_disable_rx_queue(adapter, rx_ring); + /* reinit for this ring */ + memcpy(temp_ring, rx_ring, sizeof(struct rnp_ring)); + /* setup new count */ + temp_ring->count = rx_ring->reset_count; + err = rnp_setup_rx_resources(temp_ring, adapter); + if (err) { + rnp_free_rx_resources(temp_ring); + vfree(temp_ring); + goto err_setup; + } + rnp_free_rx_resources(rx_ring); + memcpy(rx_ring, temp_ring, sizeof(struct rnp_ring)); + rnp_configure_rx_ring(adapter, rx_ring); + /* start rx */ + vfree(temp_ring); + ring_wr32(rx_ring, RNP_DMA_RX_START, 1); + return 0; +err_setup: + return -1; +} + +#ifndef OPTM_WITH_LPAGE +/** + * rnp_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void rnp_alloc_rx_buffers(struct rnp_ring *rx_ring, u16 cleaned_count) +{ + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + u16 bufsz; + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = RNP_RX_DESC(rx_ring, i); + + BUG_ON(rx_desc == NULL); + + bi = &rx_ring->rx_buffer_info[i]; + + BUG_ON(bi == NULL); + + i -= rx_ring->count; + bufsz = rnp_rx_bufsz(rx_ring); + + do { + if (!rnp_alloc_mapped_page(rx_ring, bi)) + break; + + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->pkt_addr = + cpu_to_le64(bi->dma + bi->page_offset + fun_id); + /* clean dd */ + rx_desc->resv_cmd = 0; + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the hdr_addr for the next_to_use descriptor */ + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + rnp_update_rx_tail(rx_ring, i); +} +#endif + +static inline unsigned int rnp_rx_offset(struct rnp_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
RNP_SKB_PAD : 0; +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void rnp_alloc_rx_buffers(struct rnp_ring *rx_ring, u16 cleaned_count) +{ + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + u16 bufsz; + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = RNP_RX_DESC(rx_ring, i); + + BUG_ON(rx_desc == NULL); + + bi = &rx_ring->rx_buffer_info[i]; + + BUG_ON(bi == NULL); + + i -= rx_ring->count; + bufsz = rnp_rx_bufsz(rx_ring); + + do { + int count = 1; + struct page *page; + + if (!rnp_alloc_mapped_page(rx_ring, bi, rx_desc, bufsz, fun_id)) + break; + page = bi->page; + + rx_desc->resv_cmd = 0; + + rx_desc++; + i++; + bi++; + + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + rx_desc->resv_cmd = 0; + + cleaned_count--; + + while (count < rx_ring->rx_page_buf_nums && cleaned_count) { + dma_addr_t dma; + + bi->page_offset = rx_ring->rx_per_buf_mem * count + + rnp_rx_offset(rx_ring); + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, + bi->page_offset, bufsz, + DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + if (dma_mapping_error(rx_ring->dev, dma)) { + printk("map second error\n"); + rx_ring->rx_stats.alloc_rx_page_failed++; + break; + } + + bi->dma = dma; + bi->page = page; + page_ref_add(page, USHRT_MAX); + bi->pagecnt_bias = USHRT_MAX; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + 0, bufsz, + DMA_FROM_DEVICE); + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id); + /* clean dd */ + rx_desc->resv_cmd = 0; + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + count++; + /* clear the hdr_addr for the next_to_use descriptor */ + cleaned_count--; + } + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + rnp_update_rx_tail(rx_ring, i); +} + +#endif /* OPTM_WITH_LPAGE */ +/** + * rnp_get_headlen - determine size of header for RSC/LRO/GRO/FCOE + * @data: pointer to the start of the headers + * @max_len: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, GRO, and RSC offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. 
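+ *
+ * For example, an untagged IPv4/TCP frame is walked as Ethernet header,
+ * then IPv4 header (length taken from ihl), then TCP header (length
+ * taken from doff), so the value returned covers those headers
+ * including any TCP options; an unrecognized protocol stops the walk
+ * at the last header that was parsed.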
+ **/ +static unsigned int rnp_get_headlen(unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 protocol; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + protocol = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + + /* handle any vlan tag if present */ + if (protocol == htons(ETH_P_8021Q)) { + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + protocol = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + } + + /* handle L3 protocols */ + if (protocol == htons(ETH_P_IP)) { + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hlen = sizeof(struct ipv6hdr); + } else { + return hdr.network - data; + } + + /* relocate pointer to start of L4 header */ + hdr.network += hlen; + + /* finally sort out TCP/UDP */ + if (nexthdr == IPPROTO_TCP) { + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[12] & 0xF0) >> 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return hdr.network - data; + + hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + if ((hdr.network - data) < max_len) + return hdr.network - data; + else + return max_len; +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool rnp_is_non_eop(struct rnp_ring *rx_ring, union rnp_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(RNP_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + return true; +} + +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi, + union rnp_rx_desc *rx_desc, u16 bufsz, + u64 fun_id) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = dev_alloc_pages(RNP_ALLOC_PAGE_ORDER); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_offset = rnp_rx_offset(rx_ring); + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, bi->page_offset, bufsz, + DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, RNP_ALLOC_PAGE_ORDER); + printk("map failed\n"); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + bi->dma = dma; + bi->page = page; + bi->page_offset = rnp_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + rx_ring->rx_stats.alloc_rx_page++; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, bufsz, + DMA_FROM_DEVICE); + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id); + + return true; +} + +#else +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = dev_alloc_pages(rnp_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, rnp_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, rnp_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + bi->dma = dma; + bi->page = page; + bi->page_offset = rnp_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + rx_ring->rx_stats.alloc_rx_page++; + + return true; +} + +/** + * rnp_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. 
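+ *
+ * For chained frames the in-progress skb is parked in
+ * rx_buffer_info[ntc].skb so that the next descriptor's buffer can be
+ * appended to it on a later pass through the clean loop.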
+ **/ +static bool rnp_is_non_eop(struct rnp_ring *rx_ring, union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(RNP_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + return true; +} + +#endif +/** + * rnp_pull_tail - rnp specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an rnp specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void rnp_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = rnp_get_headlen(va, RNP_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static bool rnp_check_src_mac(struct sk_buff *skb, struct net_device *netdev) +{ + char *data = (char *)skb->data; + bool ret = false; + struct netdev_hw_addr *ha; + + if (is_multicast_ether_addr(data)) { + if (0 == memcmp(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len)) { + dev_kfree_skb_any(skb); + ret = true; + } + /* if src mac equal own mac */ + netdev_for_each_uc_addr(ha, netdev) { + if (0 == memcmp(data + netdev->addr_len, ha->addr, + netdev->addr_len)) { + dev_kfree_skb_any(skb); + ret = true; + } + } + } + return ret; +} + +/** + * rnp_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check if the skb is valid. In the XDP case it will be an error pointer. + * Return true in this case to abort processing and advance to next + * descriptor. + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
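+ *
+ * Callers must therefore not touch the skb again once this returns
+ * true: either eth_skb_pad() failed and freed it, or the source MAC
+ * check under SR-IOV dropped it.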
+ **/ +static bool rnp_cleanup_headers(struct rnp_ring __maybe_unused *rx_ring, + union rnp_rx_desc *rx_desc, struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(netdev); +#ifdef OPTM_WITH_LPAGE +#else + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; +#endif + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + rnp_pull_tail(skb); + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (!(rx_ring->ring_flags & RNP_RING_VEB_MULTI_FIX))) + return rnp_check_src_mac(skb, rx_ring->netdev); + else + return false; +} + +/** + * rnp_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void rnp_reuse_rx_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *old_buff) +{ + struct rnp_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* + * Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static inline bool rnp_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static bool rnp_can_reuse_rx_page(struct rnp_rx_buffer *rx_buffer, int size) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + +#ifdef OPTM_WITH_LPAGE + return false; +#endif + /* avoid re-using remote pages */ + if (unlikely(rnp_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + return false; +#else + + /* + * The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define RNP_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE)) + if (rx_buffer->page_offset > (RNP_LAST_OFFSET - size)) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * rnp_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of data + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. 
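+ *
+ * On the data path below, pages smaller than 8K are split in half and
+ * page_offset is toggled between the two halves, while on larger pages
+ * page_offset simply advances by the aligned buffer size.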
+ * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static void rnp_add_rx_frag(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +#ifdef OPTM_WITH_LPAGE +static struct rnp_rx_buffer *rnp_get_rx_buffer(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + const unsigned int size) +{ + struct rnp_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + rx_buf_dump("rx buf", + page_address(rx_buffer->page) + rx_buffer->page_offset, + rx_desc->wb.len); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, 0, size, + DMA_FROM_DEVICE); + /* skip_sync: */ + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} +#else + +static struct rnp_rx_buffer *rnp_get_rx_buffer(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff **skb, + const unsigned int size) +{ + struct rnp_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + rx_buf_dump("rx buf", + page_address(rx_buffer->page) + rx_buffer->page_offset, + rx_desc->wb.len); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} +#endif + +#ifdef OPTM_WITH_LPAGE +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer) +{ + struct rnp_q_vector *q_vector = rx_ring->q_vector; + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + + if (rnp_can_reuse_rx_page(rx_buffer, hw->dma_split_size)) { + /* hand second half of page back to the ring */ + rnp_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_bufsz(rx_ring), DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +#else +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + struct rnp_q_vector *q_vector = rx_ring->q_vector; + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + + if (rnp_can_reuse_rx_page(rx_buffer, hw->dma_split_size)) { + /* hand second half of page back to the ring */ + rnp_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} +#endif + +#ifdef OPTM_WITH_LPAGE +static struct sk_buff 
*rnp_construct_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + union rnp_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + prefetchw(skb->data); + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > RNP_RX_HDR_SIZE) + headlen = rnp_get_headlen(va, RNP_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + + if (size) { + + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + rx_buffer->page_offset += truesize; + } else { + + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static struct sk_buff *rnp_build_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + union rnp_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(size + RNP_SKB_PAD); + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(va - RNP_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, RNP_SKB_PAD); + __skb_put(skb, size); + /* record DMA address if this is the start of a + * chain of buffers + */ + + return skb; +} + +#else + +static struct sk_buff *rnp_construct_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union rnp_rx_desc *rx_desc) +{ + unsigned int size = xdp->data_end - xdp->data; +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(xdp->data); +#if L1_CACHE_BYTES < 128 + prefetch(xdp->data + L1_CACHE_BYTES); +#endif + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + prefetchw(skb->data); + + if (size > RNP_RX_HDR_SIZE) { + + skb_add_rx_frag(skb, 0, rx_buffer->page, + xdp->data - page_address(rx_buffer->page), size, + truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), xdp->data, + ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static struct sk_buff *rnp_build_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union rnp_rx_desc *rx_desc) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 
+ SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +#endif + +#define RNP_XDP_PASS 0 +#define RNP_XDP_CONSUMED 1 +#define RNP_XDP_TX 2 + +#ifndef OPTM_WITH_LPAGE +static void rnp_rx_buffer_flip(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; + + rx_buffer->page_offset ^= truesize; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +#endif +} +#endif + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. + **/ + +static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + unsigned int err_packets = 0; + unsigned int driver_drop_packets = 0; + struct sk_buff *skb = rx_ring->skb; + struct rnp_adapter *adapter = q_vector->adapter; + u16 cleaned_count = rnp_desc_unused_rx(rx_ring); + + while (likely(total_rx_packets < budget)) { + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *rx_buffer; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= RNP_RX_BUFFER_WRITE) { + rnp_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc)); + rx_debug_printk(" dd set: %s\n", + (rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? 
"Yes" : + "No"); + + if (!rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_debug_printk( + "queue:%d rx-desc:%d has-data len:%d next_to_clean %d\n", + rx_ring->rnp_queue_idx, rx_ring->next_to_clean, + rx_desc->wb.len, rx_ring->next_to_clean); + + /* handle padding */ + if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) && + (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) { + if (likely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_EOP))) { + size = le16_to_cpu(rx_desc->wb.len) - + le16_to_cpu(rx_desc->wb.padding_len); + } else { + size = le16_to_cpu(rx_desc->wb.len); + } + } else { + /* size should not zero */ + size = le16_to_cpu(rx_desc->wb.len); + } + + if (!size) + break; + + /* + * should check csum err + * maybe one packet use multiple descs + * no problems hw set all csum_err in multiple descs + * maybe BUG if the last sctp desc less than 60 + */ + if (rnp_check_csum_error(rx_ring, rx_desc, size, + &driver_drop_packets)) { + cleaned_count++; + err_packets++; + if (err_packets + total_rx_packets > budget) + break; + continue; + } + + rx_buffer = rnp_get_rx_buffer(rx_ring, rx_desc, size); + + if (skb) { + rnp_add_rx_frag(rx_ring, rx_buffer, skb, size); + } else if (ring_uses_build_skb(rx_ring)) { + skb = rnp_build_skb(rx_ring, rx_buffer, rx_desc, size); + } else { + skb = rnp_construct_skb(rx_ring, rx_buffer, rx_desc, + size); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + if (module_enable_ptp && adapter->ptp_rx_en && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED) + rnp_ptp_get_rx_hwstamp(adapter, rx_desc, skb); + + rnp_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (rnp_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (rnp_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + rnp_process_skb_fields(rx_ring, rx_desc, skb); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + rnp_rx_skb(q_vector, skb); + skb = NULL; + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + rx_ring->rx_stats.driver_drop_packets += driver_drop_packets; + rx_ring->rx_stats.rx_clean_count += total_rx_packets; + rx_ring->rx_stats.rx_clean_times++; + if (rx_ring->rx_stats.rx_clean_times > 10) { + rx_ring->rx_stats.rx_clean_times = 0; + rx_ring->rx_stats.rx_clean_count = 0; + } + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (total_rx_packets >= budget) + rx_ring->rx_stats.poll_again_count++; + + return total_rx_packets; +} + +#else +/** + * rnp_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to 
process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. + **/ +static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + unsigned int err_packets = 0; + unsigned int driver_drop_packets = 0; + struct rnp_adapter *adapter = q_vector->adapter; + u16 cleaned_count = rnp_desc_unused_rx(rx_ring); + bool xdp_xmit = false; + struct xdp_buff xdp; + + xdp.data = NULL; + xdp.data_end = NULL; + + while (likely(total_rx_packets < budget)) { + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *rx_buffer; + struct sk_buff *skb; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= RNP_RX_BUFFER_WRITE) { + rnp_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc)); + rx_debug_printk(" dd set: %s\n", + (rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? "Yes" : + "No"); + + if (!rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_debug_printk( + "queue:%d rx-desc:%d has-data len:%d next_to_clean %d\n", + rx_ring->rnp_queue_idx, rx_ring->next_to_clean, + rx_desc->wb.len, rx_ring->next_to_clean); + + /* handle padding */ + if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) && + (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) { + if (likely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_EOP))) { + size = le16_to_cpu(rx_desc->wb.len) - + le16_to_cpu(rx_desc->wb.padding_len); + } else { + size = le16_to_cpu(rx_desc->wb.len); + } + } else { + /* size should not zero */ + size = le16_to_cpu(rx_desc->wb.len); + } + + if (!size) + break; + + /* + * should check csum err + * maybe one packet use multiple descs + * no problems hw set all csum_err in multiple descs + * maybe BUG if the last sctp desc less than 60 + */ + if (rnp_check_csum_error(rx_ring, rx_desc, size, + &driver_drop_packets)) { + cleaned_count++; + err_packets++; + if (err_packets + total_rx_packets > budget) + break; + continue; + } + + rx_buffer = rnp_get_rx_buffer(rx_ring, rx_desc, &skb, size); + + if (!skb) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - rnp_rx_offset(rx_ring); + xdp.data_end = xdp.data + size; + } + + if (IS_ERR(skb)) { + if (PTR_ERR(skb) == -RNP_XDP_TX) { + xdp_xmit = true; + rnp_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { + rnp_add_rx_frag(rx_ring, rx_buffer, skb, size); + } else if (ring_uses_build_skb(rx_ring)) { + skb = rnp_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); + } else { + skb = rnp_construct_skb(rx_ring, rx_buffer, &xdp, + rx_desc); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + if (module_enable_ptp && adapter->ptp_rx_en && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED) + 
rnp_ptp_get_rx_hwstamp(adapter, rx_desc, skb); + + rnp_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (rnp_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (rnp_cleanup_headers(rx_ring, rx_desc, skb)) { + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + rnp_process_skb_fields(rx_ring, rx_desc, skb); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + rnp_rx_skb(q_vector, skb); + + /* update budget accounting */ + total_rx_packets++; + } + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + rx_ring->rx_stats.driver_drop_packets += driver_drop_packets; + rx_ring->rx_stats.rx_clean_count += total_rx_packets; + rx_ring->rx_stats.rx_clean_times++; + if (rx_ring->rx_stats.rx_clean_times > 10) { + rx_ring->rx_stats.rx_clean_times = 0; + rx_ring->rx_stats.rx_clean_count = 0; + } + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (total_rx_packets >= budget) + rx_ring->rx_stats.poll_again_count++; + return total_rx_packets; +} +#endif + +/** + * rnp_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * rnp_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void rnp_configure_msix(struct rnp_adapter *adapter) +{ + struct rnp_q_vector *q_vector; + int i; + + /* + * configure ring-msix Registers table + */ + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_ring *ring; + + q_vector = adapter->q_vector[i]; + rnp_for_each_ring(ring, q_vector->rx) { + rnp_set_ring_vector(adapter, ring->rnp_queue_idx, + q_vector->v_idx); + } + } +} + +/** + * rnp_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void rnp_update_itr(struct rnp_q_vector *q_vector, + struct rnp_ring_container *ring_container, int type) +{ + unsigned int itr = RNP_ITR_ADAPTIVE_MIN_USECS | + RNP_ITR_ADAPTIVE_LATENCY; + unsigned int avg_wire_size, packets, bytes; + unsigned int packets_old; + unsigned long next_update = jiffies; + u32 old_itr; + u16 add_itr, add = 0; + /* 0 is tx ;1 is rx */ + if (type) + old_itr = q_vector->itr_rx; + else + old_itr = q_vector->itr_tx; + + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. 
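+ * (With no ring attached to this container there is no traffic to
+ * sample, so the previously programmed ITR value is simply kept.)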
+ */ + if (!ring_container->ring) + return; + + packets_old = ring_container->total_packets_old; + packets = ring_container->total_packets; + bytes = ring_container->total_bytes; + add_itr = ring_container->add_itr; + /* If Rx and there are 1 to 23 packets and bytes are less than + * 12112 assume insufficient data to use bulk rate limiting + * approach. Instead we will focus on simply trying to target + * receiving 8 times as much data in the next interrupt. + */ + if (!packets) + return; + + if (packets && packets < 24 && bytes < 12112) { + itr = RNP_ITR_ADAPTIVE_LATENCY; + + avg_wire_size = (bytes + packets * 24); + avg_wire_size = + clamp_t(unsigned int, avg_wire_size, 128, 12800); + + goto adjust_for_speed; + } + + /* Less than 48 packets we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 48) { + if (add_itr) { + if (packets_old < packets) { + itr = (old_itr >> 2) + RNP_ITR_ADAPTIVE_MIN_INC; + if (itr > RNP_ITR_ADAPTIVE_MAX_USECS) + itr = RNP_ITR_ADAPTIVE_MAX_USECS; + add = 1; + + if (packets < 8) + itr += RNP_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & + RNP_ITR_ADAPTIVE_LATENCY; + + } else { + /* we add itr before ,but not get more packets */ + itr = (old_itr >> 2) - RNP_ITR_ADAPTIVE_MIN_INC; + if (itr < RNP_ITR_ADAPTIVE_MIN_USECS) + itr = RNP_ITR_ADAPTIVE_MIN_USECS; + } + + } else { + /* we not add before, add itr */ + add = 1; + itr = (old_itr >> 2) + RNP_ITR_ADAPTIVE_MIN_INC; + if (itr > RNP_ITR_ADAPTIVE_MAX_USECS) + itr = RNP_ITR_ADAPTIVE_MAX_USECS; + + /* If sample size is 0 - 7 we should probably switch + * to latency mode instead of trying to control + * things as though we are in bulk. + * + * Otherwise if the number of packets is less than 48 + * we should maintain whatever mode we are currently + * in. The range between 8 and 48 is the cross-over + * point between latency and bulk traffic. + */ + if (packets < 8) + itr += RNP_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & + RNP_ITR_ADAPTIVE_LATENCY; + } + goto clear_counts; + } + + /* Between 48 and 96 is our "goldilocks" zone where we are working + * out "just right". Just report that our current ITR is good for us. + */ + if (packets < 96) { + itr = old_itr >> 2; + goto clear_counts; + } + /* If packet count is 96 or greater we are likely looking at a slight + * overrun of the delay we want. Try halving our delay to see if that + * will cut the number of packets in half per interrupt. + */ + if (packets < 256) { + itr = old_itr >> 3; + if (itr < RNP_ITR_ADAPTIVE_MIN_USECS) + itr = RNP_ITR_ADAPTIVE_MIN_USECS; + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since number + * of packets is 256 or greater. We are just going to have to compute + * a value and try to bring the count under control, though for smaller + * packet sizes there isn't much we can do as NAPI polling will likely + * be kicking in sooner rather than later. + */ + itr = RNP_ITR_ADAPTIVE_BULK; + + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. 
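+ *
+ * As a rough worked example against the ranges below: with ~1500 byte
+ * frames avg_wire_size becomes 1500 * 5 + 22420 = 29920, i.e. about
+ * 29920 / 256 ~= 117 usecs before the per-link-speed rounding, which
+ * sits just under the 8K ints/sec plateau of 32256 / 256 = 126 usecs.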
+ */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 50k ints/sec */ + avg_wire_size = 5120; + } else if (avg_wire_size <= 316) { + /* 50K ints/sec to 16K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 2720; + } else if (avg_wire_size <= 1084) { + /* 16K ints/sec to 9.2K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 9.2K ints/sec to 8K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 8K ints/sec */ + avg_wire_size = 32256; + } + +adjust_for_speed: + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. + */ + switch (q_vector->adapter->link_speed) { + case RNP_LINK_SPEED_10GB_FULL: + case RNP_LINK_SPEED_100_FULL: + default: + itr += DIV_ROUND_UP(avg_wire_size, + RNP_ITR_ADAPTIVE_MIN_INC * 256) * + RNP_ITR_ADAPTIVE_MIN_INC; + break; + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_10_FULL: + itr += DIV_ROUND_UP(avg_wire_size, + RNP_ITR_ADAPTIVE_MIN_INC * 64) * + RNP_ITR_ADAPTIVE_MIN_INC; + break; + } + + /* In the case of a latency specific workload only allow us to + * reduce the ITR by at most 2us. By doing this we should dial + * in so that our number of interrupts is no more than 2x the number + * of packets for the least busy workload. So for example in the case + * of a TCP worload the ack packets being received would set the + * the interrupt rate as they are a latency specific workload. + */ + if ((itr & RNP_ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr) + itr = ring_container->itr - RNP_ITR_ADAPTIVE_MIN_INC; + +clear_counts: + /* write back value */ + ring_container->itr = itr; + + /* next update should occur within next jiffy */ + ring_container->next_update = next_update + 1; + + ring_container->total_bytes = 0; + ring_container->total_packets_old = packets; + ring_container->add_itr = add; + ring_container->total_packets = 0; +} + +/** + * rnp_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
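+ *
+ * The per-vector value is stored shifted left by two bits, so it is
+ * shifted back down, converted to hardware ticks with hw->usecstocount
+ * and written to RNP_DMA_REG_RX_INT_DELAY_TIMER on every Rx ring owned
+ * by the vector.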
+ */ +static void rnp_write_eitr_rx(struct rnp_q_vector *q_vector) +{ + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + u32 itr_reg = q_vector->itr_rx >> 2; + struct rnp_ring *ring; + + itr_reg = itr_reg * hw->usecstocount; + + rnp_for_each_ring(ring, q_vector->rx) { + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, itr_reg); + } +} + +static void rnp_set_itr(struct rnp_q_vector *q_vector) +{ + u32 new_itr_rx; + + rnp_update_itr(q_vector, &q_vector->rx, 1); + new_itr_rx = q_vector->rx.itr; + new_itr_rx &= RNP_ITR_ADAPTIVE_MASK_USECS; + new_itr_rx <<= 2; + if (new_itr_rx != q_vector->itr_rx) { + /* save the algorithm value here */ + q_vector->itr_rx = new_itr_rx; + rnp_write_eitr_rx(q_vector); + } +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +static inline void rnp_irq_enable_queues(struct rnp_adapter *adapter, + struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->rx) + rnp_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK)); +} + +static inline void rnp_irq_disable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->tx) { + if (q_vector->new_rx_count != q_vector->old_rx_count) { + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = q_vector->new_rx_count; + } + rnp_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK)); + } +} + +/** + * rnp_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void rnp_irq_enable(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) + rnp_irq_enable_queues(adapter, adapter->q_vector[i]); +} + +static irqreturn_t rnp_msix_other(int irq, void *data) +{ + struct rnp_adapter *adapter = data; + set_bit(__RNP_IN_IRQ, &adapter->state); + + rnp_msg_task(adapter); + + clear_bit(__RNP_IN_IRQ, &adapter->state); + + return IRQ_HANDLED; +} + +static void rnp_htimer_start(struct rnp_q_vector *q_vector) +{ + unsigned long ns = q_vector->irq_check_usecs * NSEC_PER_USEC / 2; + + hrtimer_start_range_ns(&q_vector->irq_miss_check_timer, ns_to_ktime(ns), + ns, HRTIMER_MODE_REL); +} + +static void rnp_htimer_stop(struct rnp_q_vector *q_vector) +{ + hrtimer_cancel(&q_vector->irq_miss_check_timer); +} + +static irqreturn_t rnp_msix_clean_rings(int irq, void *data) +{ + struct rnp_q_vector *q_vector = data; + + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_stop(q_vector); + + /* disabled interrupts (on this vector) for us */ + rnp_irq_disable_queues(q_vector); + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void update_rx_count(int cleaned, struct rnp_q_vector *q_vector) +{ + struct rnp_adapter *adapter = q_vector->adapter; + u32 link_speed = adapter->link_speed; + struct rnp_ring *ring; + + if (link_speed == RNP_LINK_SPEED_10GB_FULL) { + if ((cleaned) && (cleaned != q_vector->new_rx_count)) { + if (cleaned < 5) { + q_vector->small_times = 0; + q_vector->large_times = 0; + q_vector->too_small_times++; + if (q_vector->too_small_times >= 2) { + q_vector->new_rx_count = 1; + } + } else if (cleaned < 30) { + q_vector->too_small_times = 0; + q_vector->middle_time++; + /* count is 5 -30 */ + if (cleaned < q_vector->new_rx_count) { + /* change small */ + q_vector->small_times = 0; + q_vector->new_rx_count -= + (1 << 
(q_vector->large_times++)); + if (q_vector->new_rx_count < 0) + q_vector->new_rx_count = 1; + + } else { + q_vector->large_times = 0; + + if (cleaned > 30) { + if (q_vector->new_rx_count == + (cleaned - 4)) { + } else { + q_vector->new_rx_count += + (1 + << (q_vector->small_times++)); + } + /* should no more than q_vector */ + if (q_vector->new_rx_count >= cleaned) { + q_vector->new_rx_count = + cleaned - 4; + q_vector->small_times = 0; + } + + } else { + if (q_vector->new_rx_count == + (cleaned - 1)) { + } else { + q_vector->new_rx_count += + (1 + << (q_vector->small_times++)); + } + /* should no more than q_vector */ + if (q_vector->new_rx_count >= cleaned) { + q_vector->new_rx_count = + cleaned - 1; + q_vector->small_times = 0; + } + } + } + } else { + q_vector->too_small_times = 0; + q_vector->new_rx_count = + max_t(int, 64, adapter->rx_frames); + q_vector->small_times = 0; + q_vector->large_times = 0; + } + } + } else { + rnp_for_each_ring(ring, q_vector->rx) { + if (ring->ring_flags & RNP_RING_LOWER_ITR) { + q_vector->new_rx_count = 1; + } else { + q_vector->new_rx_count = 32; + } + } + + + } +} + +/** + * rnp_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +int rnp_poll(struct napi_struct *napi, int budget) +{ + struct rnp_q_vector *q_vector = + container_of(napi, struct rnp_q_vector, napi); + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_ring *ring; + int per_ring_budget, work_done = 0; + bool clean_complete = true; + int cleaned_total = 0; + + rnp_for_each_ring(ring, q_vector->tx) { + if (!rnp_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget / q_vector->rx.count, 1); + else + per_ring_budget = budget; + + rnp_for_each_ring(ring, q_vector->rx) { + int cleaned = 0; + /* this ring is waitting to reset rx_len*/ + /* avoid to deal this ring until reset done */ + if (likely(!(ring->ring_flags & RNP_RING_FLAG_DO_RESET_RX_LEN))) + cleaned = rnp_clean_rx_irq(q_vector, ring, + per_ring_budget); + /* check delay rx setup */ + if (unlikely(ring->ring_flags & + RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) { + int head; + + rnp_disable_rx_queue(adapter, ring); + head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + if (head < RNP_MIN_RXD) { + /* it is time to delay set */ + /* stop rx */ + rnp_disable_rx_queue(adapter, ring); + ring->ring_flags &= + (~RNP_RING_FLAG_DELAY_SETUP_RX_LEN); + ring->ring_flags |= + RNP_RING_FLAG_DO_RESET_RX_LEN; + } else { + + ring_wr32(ring, RNP_DMA_RX_START, 1); + } + } + work_done += cleaned; + cleaned_total += cleaned; + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* force close irq */ + if (test_bit(__RNP_DOWN, &adapter->state)) { + clean_complete = true; + } + /* all work done, exit the polling mode */ + if (!(q_vector->vector_flags & RNP_QVECTOR_FLAG_ITR_FEATURE)) + update_rx_count(cleaned_total, q_vector); + + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. 
We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. + */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + if (!test_bit(__RNP_DOWN, &adapter->state)) { + rnp_irq_enable_queues(adapter, q_vector); + /* we need this to ensure irq start before tx start */ + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + smp_mb(); + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, + ring); + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + if (!test_bit(__RNP_DOWN, &adapter->state)) { + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_start(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } + } + return min(work_done, budget - 1); + } + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, ring); + /* update rx count if need */ + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32(ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + return budget; + } + + if (likely(napi_complete_done(napi, work_done))) { + /* try to do itr handle */ + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_ITR_FEATURE) + rnp_set_itr(q_vector); + + if (!test_bit(__RNP_DOWN, &adapter->state)) { + rnp_irq_enable_queues(adapter, q_vector); + smp_mb(); + /* we need this to ensure irq start before tx start */ + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, ring); + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_start(q_vector); + + } + } + + return min(work_done, budget - 1); +} + +/** + * rnp_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + **/ +static void rnp_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct rnp_q_vector *q_vector = + container_of(notify, struct rnp_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * rnp_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. 
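+ *
+ * The release callback below is left empty; only the notify callback
+ * above does any work, caching the new mask in q_vector->affinity_mask.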
+ **/ +static void rnp_irq_affinity_release(struct kref *ref) +{ +} + +static irqreturn_t rnp_intr(int irq, void *data) +{ + struct rnp_adapter *adapter = data; + struct rnp_q_vector *q_vector = adapter->q_vector[0]; + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_stop(q_vector); + + /* disabled interrupts (on this vector) for us */ + rnp_irq_disable_queues(q_vector); + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + /* handle other */ + rnp_msg_task(adapter); + + return IRQ_HANDLED; +} + +/** + * rnp_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * rnp_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int rnp_request_msix_irqs(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + int i = 0; + DPRINTK(IFUP, INFO, "[%s] num_q_vectors:%d\n", __func__, + adapter->num_q_vectors); + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_q_vector *q_vector = adapter->q_vector[i]; + struct msix_entry *entry = + &adapter->msix_entries[i + adapter->q_vector_off]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d-%d", netdev->name, "TxRx", i, + q_vector->v_idx); + } else { + WARN(!(q_vector->tx.ring && q_vector->rx.ring), + "%s vector%d tx rx is null, v_idx:%d\n", + netdev->name, i, q_vector->v_idx); + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &rnp_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, + "%s:request_irq failed for MSIX interrupt:%d " + "Error: %d\n", + netdev->name, entry->vector, err); + goto free_queue_irqs; + } + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = rnp_irq_affinity_notify; + q_vector->affinity_notify.release = rnp_irq_affinity_release; + irq_set_affinity_notifier(entry->vector, + &q_vector->affinity_notify); + DPRINTK(IFUP, INFO, "[%s] set %s affinity_mask\n", __func__, + q_vector->name); + + irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); + } + + return 0; + +free_queue_irqs: + while (i) { + i--; + irq_set_affinity_hint( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); + free_irq( + adapter->msix_entries[i + adapter->q_vector_off].vector, + adapter->q_vector[i]); + irq_set_affinity_notifier( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); + irq_set_affinity_hint( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); + } + return err; +} + +static int rnp_free_msix_irqs(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_q_vector *q_vector = adapter->q_vector[i]; + struct msix_entry *entry = + &adapter->msix_entries[i + adapter->q_vector_off]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(entry->vector, NULL); + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + DPRINTK(IFDOWN, INFO, "free irq %s\n", q_vector->name); + free_irq(entry->vector, q_vector); + } + + return 0; +} + +/** + * rnp_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
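+ *
+ * MSI-X is preferred; otherwise a single MSI or legacy interrupt is
+ * requested and rnp_intr() services both the queue vector and the
+ * mailbox ("other") events from that one line.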
+ **/ +static int rnp_request_irq(struct rnp_adapter *adapter) +{ + int err; + + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + pr_info("msix mode is used\n"); + err = rnp_request_msix_irqs(adapter); + + } else if (adapter->flags & RNP_FLAG_MSI_ENABLED) { + /* in this case one for all */ + pr_info("msi mode is used\n"); + err = request_irq(adapter->pdev->irq, rnp_intr, 0, + adapter->netdev->name, adapter); + adapter->hw.mbx.other_irq_enabled = true; + } else { + pr_info("legacy mode is used\n"); + err = request_irq(adapter->pdev->irq, rnp_intr, IRQF_SHARED, + adapter->netdev->name, adapter); + adapter->hw.mbx.other_irq_enabled = true; + } + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void rnp_free_irq(struct rnp_adapter *adapter) +{ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + rnp_free_msix_irqs(adapter); + } else if (adapter->flags & RNP_FLAG_MSI_ENABLED) { + /* in this case one for all */ + free_irq(adapter->pdev->irq, adapter); + adapter->hw.mbx.other_irq_enabled = false; + } else { + free_irq(adapter->pdev->irq, adapter); + adapter->hw.mbx.other_irq_enabled = false; + } + +} + +/** + * rnp_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void rnp_irq_disable(struct rnp_adapter *adapter) +{ + int i, j; + + for (i = 0; i < adapter->num_q_vectors; i++) { + rnp_irq_disable_queues(adapter->q_vector[i]); + j = i + adapter->q_vector_off; + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) + synchronize_irq(adapter->msix_entries[j].vector); + else + synchronize_irq(adapter->pdev->irq); + } +} + +int rnp_setup_tx_maxrate(struct rnp_ring *tx_ring, u64 max_rate, + int samples_1sec) +{ + /* set hardware samping internal 1S */ + ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TM, samples_1sec); + ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TH, max_rate); + + return 0; +} + +/** + * rnp_tx_maxrate_own - callback to set the maximum per-queue bitrate + * @netdev: network interface device structure + * @queue_index: Tx queue to set + * @maxrate: desired maximum transmit bitrate Mbps + **/ +static int rnp_tx_maxrate_own(struct rnp_adapter *adapter, int queue_index) +{ + struct rnp_ring *tx_ring = adapter->tx_ring[queue_index]; + u64 real_rate = 0; + u32 maxrate = adapter->max_rate[queue_index]; + + if (!maxrate) + return rnp_setup_tx_maxrate(tx_ring, 0, + adapter->hw.usecstocount * 1000000); + /* we need turn it to bytes/s */ + real_rate = ((u64)maxrate * 1024 * 1024) / 8; + rnp_setup_tx_maxrate(tx_ring, real_rate, + adapter->hw.usecstocount * 1000000); + + return 0; +} + +/** + * rnp_configure_tx_ring - Configure 8259x Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
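+ *
+ * The queue is stopped (unless RNP_RING_SKIP_TX_START is set), the
+ * descriptor base and length are programmed, next_to_use/next_to_clean
+ * are resynchronised from the hardware head, fetch control and
+ * interrupt moderation are written, and finally RNP_DMA_TX_READY is
+ * polled before the queue is started again.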
+ **/ +void rnp_configure_tx_ring(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + struct rnp_hw *hw = &adapter->hw; + + /* disable queue to avoid issues while updating state */ + + if (!(ring->ring_flags & RNP_RING_SKIP_TX_START)) + ring_wr32(ring, RNP_DMA_TX_START, 0); + + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO, (u32)ring->dma); + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI, + (u32)(((u64)ring->dma) >> 32) | (hw->pfvfnum << 24)); + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_LEN, ring->count); + + ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + ring->next_to_use = ring->next_to_clean; + ring->tail = ring->ring_addr + RNP_DMA_REG_TX_DESC_BUF_TAIL; + rnp_wr_reg(ring->tail, ring->next_to_use); + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL, + (8 << 0) /* max_water_flow */ + | (8 << 16) + /* max-num_descs_peer_read */ + ); + + } else { + ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL, + (64 << 0) /* max_water_flow */ + | (TSRN10_TX_DEFAULT_BURST << 16) + /* max-num_descs_peer_read */ + ); + } + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER, + adapter->tx_usecs * hw->usecstocount); + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_PKTCNT, adapter->tx_frames); + + rnp_tx_maxrate_own(adapter, ring->queue_index); + if (adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__RNP_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + /* initialize XPS */ + if (!test_and_set_bit(__RNP_TX_XPS_INIT_DONE, &ring->state)) { + struct rnp_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__RNP_HANG_CHECK_ARMED, &ring->state); + + if (!(ring->ring_flags & RNP_RING_SKIP_TX_START)) { + /* should wait tx_ready before open tx start */ + int timeout = 0; + u32 status = 0; + + do { + status = ring_rd32(ring, RNP_DMA_TX_READY); + usleep_range(100, 200); + timeout++; + rnp_dbg("wait %d tx ready to 1\n", ring->rnp_queue_idx); + } while ((status != 1) && (timeout < 100)); + + if (timeout >= 100) + printk("wait tx ready timeout\n"); + ring_wr32(ring, RNP_DMA_TX_START, 1); + } +} + +/** + * rnp_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
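+ *
+ * The AXI read/write enable for Tx is turned on first, then every ring
+ * in adapter->tx_ring[] is programmed through rnp_configure_tx_ring().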
+ **/ +static void rnp_configure_tx(struct rnp_adapter *adapter) +{ + u32 i, dma_axi_ctl; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* dma_axi_en.tx_en must be before Tx queues are enabled */ + dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN); + dma_axi_ctl |= TX_AXI_RW_EN; + dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < (adapter->num_tx_queues); i++) + rnp_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +void rnp_disable_rx_queue(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + ring_wr32(ring, RNP_DMA_RX_START, 0); +} + +void rnp_configure_rx_ring(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + struct rnp_hw *hw = &adapter->hw; + u64 desc_phy = ring->dma; + u16 q_idx = ring->queue_index; + + /* disable queue to avoid issues while updating state */ + rnp_disable_rx_queue(adapter, ring); + + /* set descripts registers*/ + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO, (u32)desc_phy); + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI, + ((u32)(desc_phy >> 32)) | (hw->pfvfnum << 24)); + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_LEN, ring->count); + + ring->tail = ring->ring_addr + RNP_DMA_REG_RX_DESC_BUF_TAIL; + ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + ring->next_to_use = ring->next_to_clean; + + if (ring->ring_flags & RNP_RING_SCATER_SETUP) + ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH, 96); + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL, + 0 | (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */ + | (TSRN10_RX_DEFAULT_BURST << 16) + /* max-read-desc-cnt */ + ); + + } else { + ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL, + 0 | (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */ + | (TSRN10_RX_DEFAULT_BURST << 16) + /* max-read-desc-cnt */ + ); + } + /* setup rx drop */ + if (adapter->rx_drop_status & BIT(q_idx)) { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, + adapter->drop_time); + } else { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0); + } + + if (ring->ring_flags & RNP_RING_IRQ_MISS_FIX) + ring_wr32(ring, RNP_DMA_INT_TRIG, TX_INT_MASK | RX_INT_MASK); + + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + adapter->rx_usecs * hw->usecstocount); + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, adapter->rx_frames); + rnp_alloc_rx_buffers(ring, rnp_desc_unused_rx(ring)); +} + +static void rnp_configure_virtualization(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + u32 ring, vfnum; + u64 real_rate = 0; + int i, vf_ring, j; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + hw->ops.set_sriov_status(hw, false); + return; + } + + /* Enable only the PF's pool for Tx/Rx */ + + if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB) { + dma_wr32(dma, RNP_DMA_CONFIG, + dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + } + ring = adapter->tx_ring[0]->rnp_queue_idx; + hw->ops.set_sriov_status(hw, true); + + /* store vfnum */ + vfnum = hw->max_vfs - 1; + hw->veb_ring = ring; + hw->vfnum = vfnum; + /* use last-vf's table entry. 
*/ + adapter->vf_num_for_pf = 0x80 | vfnum; + + /* setup vf tx rate setup here */ + for (i = 0; i < adapter->num_vfs; i++) { + real_rate = (adapter->vfinfo[i].tx_rate * 1024 * 128) / + hw->sriov_ring_limit; + for (j = 0; i < hw->sriov_ring_limit; i++) { + vf_ring = rnp_get_vf_ringnum(hw, i, j); + rnp_setup_ring_maxrate(adapter, vf_ring, real_rate); + } + } +} + +static void rnp_set_rx_buffer_len(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3; + struct rnp_ring *rx_ring; + int i; + + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + clear_bit(__RNP_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state); + set_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#ifdef OPTM_WITH_LPAGE + rx_ring->rx_page_buf_nums = RNP_PAGE_BUFFER_NUMS(rx_ring); + rx_ring->rx_per_buf_mem = + ALIGN((rnp_rx_offset(rx_ring) + rnp_rx_bufsz(rx_ring) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + RNP_RX_HWTS_OFFSET), + 1024); +#endif + + } +} + +/** + * rnp_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void rnp_configure_rx(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + int i; + u32 rxctrl = 0, dma_axi_ctl; + + /* disable receives while setting up the descriptors */ + /* set_rx_buffer_len must be called before ring initialization */ + rnp_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + rnp_configure_rx_ring(adapter, adapter->rx_ring[i]); + + if (adapter->num_rx_queues > 0) { + wr32(hw, RNP_ETH_DEFAULT_RX_RING, + adapter->rx_ring[0]->rnp_queue_idx); + } + + /* enable all receives */ + rxctrl |= 0; + + dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN); + dma_axi_ctl |= RX_AXI_RW_EN; + dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl); +} + +static int rnp_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + bool veb_setup = true; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + if (sriov_flag) { + if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) { + if (hw->ops.set_veb_vlan_mask) { + if (hw->ops.set_veb_vlan_mask( + hw, vid, hw->vfnum, true) != 0) { + dev_err(&adapter->pdev->dev, + "out of vlan entries in sriov mode \n"); + return -EACCES; + } + } + } else { + /* in sriov mode */ + if ((vid) && (adapter->vf_vlan) && + (vid != adapter->vf_vlan)) { + dev_err(&adapter->pdev->dev, + "only 1 vlan in sriov mode \n"); + return -EACCES; + } + + /* update this */ + if (vid) { + adapter->vf_vlan = vid; + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & + RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode( + hw, vid, 0, true); + else + hw->ops.set_vf_vlan_mode( + hw, vid, hw->vfnum, + true); + } + } + } + } + + if (vid) { + if (proto == htons(ETH_P_8021Q)) { + adapter->vlan_count++; + } + } + + if (vid < VLAN_N_VID) { + if (proto != htons(ETH_P_8021Q)) { + set_bit(vid, adapter->active_vlans_stags); + veb_setup = false; + } else { + set_bit(vid, adapter->active_vlans); + } + } + + if (hw->ops.set_vlan_filter) { + 
hw->ops.set_vlan_filter(hw, vid, true, + (sriov_flag && veb_setup)); + } + + return 0; +} + +static int rnp_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + bool veb_setup = true; + + if (!vid) + return 0; + + if (sriov_flag) { + if (vid) { + int true_remove = 1; + /* clean this */ + adapter->vf_vlan = 0; + for (i = 0; i < adapter->num_vfs; i++) { + if (vid == adapter->vfinfo[i].vf_vlan) { + true_remove = 0; + } + if (vid == adapter->vfinfo[i].pf_vlan) { + true_remove = 0; + } + /* setup pf_vlan */ + } + /* if no vf use this vid */ + if (true_remove) { + /* if remove stags */ + if (proto != htons(ETH_P_8021Q)) { + veb_setup = false; + if (!test_bit(vid, + adapter->active_vlans)) + true_remove = 1; + } else { + /* if remove ctags */ + if (!test_bit(vid, + adapter->active_vlans_stags)) + true_remove = 1; + } + /* if no other tags use this vid */ + if (true_remove) { + if ((adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid != adapter->stags_vid)) + /* should also check stags */ + hw->ops.set_vlan_filter( + hw, vid, false, + veb_setup); + } + } + /* always clean veb */ + hw->ops.set_vlan_filter(hw, vid, true, false); + + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & + RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode(hw, vid, 0, + false); + else + hw->ops.set_vf_vlan_mode( + hw, vid, hw->vfnum, false); + } + + /* remove veb */ + if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) { + if (hw->ops.set_veb_vlan_mask) { + hw->ops.set_veb_vlan_mask( + hw, vid, hw->vfnum, false); + } + } + } + } else { + int true_remove = 0; + if (proto != htons(ETH_P_8021Q)) { + veb_setup = false; + if (!test_bit(vid, adapter->active_vlans)) + true_remove = 1; + + } else { + /* if remove ctags */ + if (!test_bit(vid, adapter->active_vlans_stags)) + true_remove = 1; + } + if (true_remove) { + if ((adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid != adapter->stags_vid)) + /* should also check stags */ + hw->ops.set_vlan_filter(hw, vid, false, false); + } + } + + /* need set ncsi vfta again */ + if (hw->ncsi_en) + eth->ops.ncsi_set_vfta(eth); + + if (vid) { + if (proto == htons(ETH_P_8021Q)) { + /* should check proto todo */ + adapter->vlan_count--; + } + } + if (proto == htons(ETH_P_8021Q)) + clear_bit(vid, adapter->active_vlans); + /* clear stags */ + if (proto != htons(ETH_P_8021Q)) + clear_bit(vid, adapter->active_vlans_stags); + return 0; +} + +/** + * rnp_vlan_strip_disable - helper to disable hw vlan stripping + * @adapter: driver data + */ +static void rnp_vlan_strip_disable(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *tx_ring; + struct rnp_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->num_rx_queues; i++) { + tx_ring = adapter->rx_ring[i]; + hw->ops.set_vlan_strip(hw, tx_ring->rnp_queue_idx, false); + } +} + +/** + * rnp_vlan_strip_enable - helper to enable hw vlan stripping + * @adapter: driver data + */ +static void rnp_vlan_strip_enable(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *tx_ring; + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + tx_ring = adapter->rx_ring[i]; + + hw->ops.set_vlan_strip(hw, tx_ring->rnp_queue_idx, true); + } +} + +static void rnp_remove_vlan(struct rnp_adapter *adapter) +{ + adapter->vlan_count = 0; +} + +static void rnp_restore_vlan(struct 
rnp_adapter *adapter) +{ + u16 vid; + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + + /* in stags open, set stags_vid to vlan filter */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + eth->ops.set_vfta(eth, adapter->stags_vid, true); + + rnp_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + rnp_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); + /* config vlan mode for mac */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + for (i = 0; i < adapter->num_vfs; i++) { + vid = adapter->vfinfo[i].vf_vlan; + if (vid) { + rnp_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); + } + vid = adapter->vfinfo[i].pf_vlan; + if (vid) { + rnp_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); + } + } + } +} + +/** + * rnp_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. + **/ +void rnp_set_rx_mode(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + netdev_features_t features; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + hw->ops.set_rx_mode(hw, netdev, sriov_flag); + + if (sriov_flag) { + if (!test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + rnp_restore_vf_macvlans(adapter); + rnp_restore_vf_macs(adapter); + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + + features = netdev->features; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); + /* stags */ + /* only do this if hw support stags */ + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (features & NETIF_F_HW_VLAN_STAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); + } +} + +static void rnp_napi_enable_all(struct rnp_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_enable(&adapter->q_vector[q_idx]->napi); +} + +static void rnp_napi_disable_all(struct rnp_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_disable(&adapter->q_vector[q_idx]->napi); +} + +static void rnp_fdir_filter_restore(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct rnp_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + /* enable tcam if set tcam mode */ + if (adapter->fdir_mode == fdir_mode_tcam) { + wr32(hw, RNP_ETH_TCAM_EN, 1); + wr32(hw, RNP_TOP_ETH_TCAM_CONFIG_ENABLE, 1); + wr32(hw, RNP_TCAM_CACHE_ENABLE, 0); + } + + /* setup ntuple */ + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, + fdir_node) { + if ((!filter->vf_num) && + (filter->action != ACTION_TO_MPE)) { + rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &filter->filter, filter->hw_idx, + (filter->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + adapter->rx_ring[filter->action] + ->rnp_queue_idx, + (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO) ? + true : + false); + } else { + rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &filter->filter, + filter->hw_idx, + (filter->action == RNP_FDIR_DROP_QUEUE) ? 
+ RNP_FDIR_DROP_QUEUE : + filter->action, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? + true : + false); + } + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static void rnp_configure_pause(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + hw->ops.set_pause_mode(hw); +} + +static void rnp_vlan_stags_flag(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + /* stags is added */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + hw->ops.set_txvlan_mode(hw, false); + else + hw->ops.set_txvlan_mode(hw, true); +} + +static void rnp_configure(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); +#if (PAGE_SIZE < 8192) + struct rnp_ring *rx_ring = adapter->rx_ring[0]; +#endif + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + rnp_configure_virtualization(adapter); + + /* Unicast, Multicast and Promiscuous mode set */ + rnp_set_rx_mode(adapter->netdev); + /* reconfigure hw */ + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + /* in sriov mode vlan is not reset */ + rnp_restore_vlan(adapter); + + /* we first update rx_offset */ +#if (PAGE_SIZE < 8192) + /* setup before calculate dma_split_size */ + rnp_set_rx_buffer_len(adapter); + hw->dma_split_size = rnp_rx_pg_size(rx_ring) / 2 - + rnp_rx_offset(rx_ring) - + sizeof(struct skb_shared_info); +#else + /* if mtu more than this */ + hw->dma_split_size = SKB_WITH_OVERHEAD(PAGE_SIZE) - RNP_SKB_PAD; + + if (hw->max_length_current >= 1536) + hw->dma_split_size = min_t(int, hw->dma_split_size, hw->max_length_current); + /* up to 16-asign */ + hw->dma_split_size = (hw->dma_split_size + 15) & (~0xf); +#endif + hw->ops.update_hw_info(hw); + + /* init setup pause */ + rnp_configure_pause(adapter); + rnp_vlan_stags_flag(adapter); + rnp_init_rss_key(adapter); + rnp_init_rss_table(adapter); + + if (adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) { + + } else if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE) { + + rnp_fdir_filter_restore(adapter); + + } + + /* setup vxlan match mode */ + if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) + hw->ops.set_vxlan_mode(hw, true); + else + hw->ops.set_vxlan_mode(hw, false); + rnp_configure_tx(adapter); + rnp_configure_rx(adapter); +} + +static inline bool rnp_is_sfp(struct rnp_hw *hw) +{ + return true; +} + +/** + * rnp_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void rnp_sfp_link_config(struct rnp_adapter *adapter) +{ + /* + * We are assuming the worst case scenario here, and that + * is that an SFP was inserted/removed after the reset + * but before SFP detection was enabled. 
As such the best + * solution is to just start searching as soon as we start + */ + adapter->flags2 |= RNP_FLAG2_SFP_NEEDS_RESET; +} + +static void rnp_up_complete(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int i; + + rnp_configure_msix(adapter); + + /* enable the optics for n10 SFP+ fiber */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + smp_mb__before_atomic(); + clear_bit(__RNP_DOWN, &adapter->state); + rnp_napi_enable_all(adapter); + + if (rnp_is_sfp(hw)) { + rnp_sfp_link_config(adapter); + } + /*clear any pending interrupts*/ + rnp_irq_enable(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* enable rx transmit */ + for (i = 0; i < adapter->num_rx_queues; i++) + ring_wr32(adapter->rx_ring[i], RNP_DMA_RX_START, 1); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problems + */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + hw->link = 0; + rnp_mbx_force_speed(hw, hw->saved_force_link_speed); + hw->ops.set_mbx_link_event(hw, 1); + hw->ops.set_mbx_ifup(hw, 1); +} + +void rnp_reinit_locked(struct rnp_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + while (test_and_set_bit(__RNP_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + rnp_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + msleep(2000); + rnp_up(adapter); + + clear_bit(__RNP_RESETTING, &adapter->state); +} + +void rnp_up(struct rnp_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + rnp_configure(adapter); + rnp_up_complete(adapter); +} + +void rnp_reset(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + rnp_logd(LOG_ADPT_STAT, "%s\n", __func__); + + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= + ~(RNP_FLAG2_SEARCH_FOR_SFP | RNP_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~RNP_FLAG_NEED_LINK_CONFIG; + + err = hw->ops.init_hw(hw); + + if (err) { + e_dev_err("init_hw: Hardware Error: err:%d. line:%d\n", err, + __LINE__); + } + + clear_bit(__RNP_IN_SFP_INIT, &adapter->state); + + /* reprogram the RAR[0] in case user changed it. 
*/ + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + if (module_enable_ptp) { + if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED && + (adapter->ptp_rx_en || adapter->ptp_tx_en)) + rnp_ptp_reset(adapter); + } +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void rnp_clean_rx_ring(struct rnp_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct rnp_rx_buffer *rx_buffer; + + if (!rx_ring->rx_buffer_info) + return; + + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (!rx_buffer->page) + goto next_buffer; + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + rnp_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + /* now this page is not used */ + rx_buffer->page = NULL; +next_buffer: + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +#else +/** + * rnp_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void rnp_clean_rx_ring(struct rnp_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct rnp_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
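+		 *
+		 * Sync for the CPU first, then unmap with RNP_RX_DMA_ATTR and
+		 * drop the pagecnt_bias references via __page_frag_cache_drain()
+		 * so the page is returned with the correct refcount.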
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + rnp_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + /* now this page is not used */ + rx_buffer->page = NULL; + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} +#endif + +/** + * rnp_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void rnp_clean_tx_ring(struct rnp_ring *tx_ring) +{ + unsigned long size; + u16 i = tx_ring->next_to_clean; + struct rnp_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + BUG_ON(tx_ring == NULL); + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + while (i != tx_ring->next_to_use) { + struct rnp_tx_desc *eop_desc, *tx_desc; + + dev_kfree_skb_any(tx_buffer->skb); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + eop_desc = tx_buffer->next_to_watch; + tx_desc = RNP_TX_DESC(tx_ring, i); + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + size = sizeof(struct rnp_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * rnp_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void rnp_clean_all_rx_rings(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + rnp_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * rnp_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void rnp_clean_all_tx_rings(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + rnp_clean_tx_ring(adapter->tx_ring[i]); +} + +static void rnp_fdir_filter_exit(struct rnp_adapter *adapter) +{ + struct hlist_node *node2; + struct rnp_fdir_filter *filter; + struct rnp_hw *hw = &adapter->hw; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, + fdir_node) { + /* call earase to hw */ + rnp_fdir_erase_perfect_filter(adapter->fdir_mode, hw, + &filter->filter, filter->hw_idx); + + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + adapter->layer2_count = hw->layer2_count; + adapter->tuple_5_count = hw->tuple5_count; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static int rnp_xmit_nop_frame_ring(struct rnp_adapter *adapter, + 
struct rnp_ring *tx_ring) +{ + u16 i = tx_ring->next_to_use; + struct rnp_tx_desc *tx_desc; + + tx_desc = RNP_TX_DESC(tx_ring, i); + + /* set length to 0 */ + tx_desc->blen_mac_ip_len = 0; + tx_desc->vlan_cmd = cpu_to_le32(RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS); + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + /* update tail */ + rnp_wr_reg(tx_ring->tail, 0); + return 0; +} + +static void print_status(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + struct rnp_dma_info *dma = &hw->dma; + + printk("eth 0x120 %x\n", eth_rd32(eth, 0x120)); + printk("eth 0x124 %x\n", eth_rd32(eth, 0x124)); + + for (i = 0x300; i < 0x318; i = i + 4) { + printk("eth 0x%x %x\n", i, eth_rd32(eth, i)); + } + + printk("eth 0x%x %x\n", 0x98, eth_rd32(eth, 0x98)); + printk("eth 0x%x %x\n", 0x220, eth_rd32(eth, 0x220)); + + for (i = 0x138; i < 0x158; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } + i = 0x170; + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + i = 0x174; + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + for (i = 0x214; i < 0x220; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } + for (i = 0x234; i < 0x270; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } +} + +void rnp_down(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + int i; + int free_tx_ealay = 0; + int err = 0; + /* signal that we are down to the interrupt handler */ + set_bit(__RNP_DOWN, &adapter->state); + if ((!hw->ncsi_en) && (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))) + hw->ops.set_mac_rx(hw, false); + + if (hw->ncsi_en) { + /* if we false down, we should set mac loopback */ + hw->ops.set_mac_rx(hw, false); + } + + hw->ops.set_mbx_link_event(hw, 0); + hw->ops.set_mbx_ifup(hw, 0); + + if (hw->ops.clean_link) + hw->ops.clean_link(hw); + + /* if carrier on before */ + if (netif_carrier_ok(netdev)) + e_info(drv, "NIC Link is Down\n"); + + rnp_remove_vlan(adapter); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + usleep_range(5000, 10000); + /* if we have tx desc to clean */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + + if (!(tx_ring->ring_flags & RNP_RING_SKIP_TX_START)) { + int head, tail; + int timeout = 0; + + free_tx_ealay = 1; + + head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + + while (head != tail) { + usleep_range(30000, 50000); + head = ring_rd32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_HEAD); + tail = ring_rd32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_TAIL); + timeout++; + if ((timeout >= 100) && (timeout < 101)) { + e_info(drv, + "wait tx done timeout %x %x\n", + head, tail); + /* set this to hold hardware status */ + adapter->priv_flags |= + RNP_PRIV_FLGA_TEST_TX_HANG; + print_status(adapter); + err = 1; + } + if (timeout >= 200) { + e_info(drv, + "200 wait tx done timeout %x %x\n", + head, tail); + print_status(adapter); + break; + } + } + } + } + + { + int time = 0; + + while (test_bit(__RNP_SERVICE_CHECK, &adapter->state)) { + usleep_range(100, 200); + time++; + if (time > 100) + break; + } + } + + if (free_tx_ealay) + 
rnp_clean_all_tx_rings(adapter); + + usleep_range(2000, 5000); + + rnp_irq_disable(adapter); + + usleep_range(5000, 10000); + + netif_tx_disable(netdev); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rnp_disable_rx_queue(adapter, adapter->rx_ring[i]); + /* only handle when srio enable and change rx length setup */ + if ((((adapter->flags & RNP_FLAG_SRIOV_ENABLED) || + hw->ncsi_en)) && + (adapter->rx_ring[i]->ring_flags & + RNP_RING_FLAG_CHANGE_RX_LEN)) { + int head; + struct rnp_ring *ring = adapter->rx_ring[i]; + + head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + adapter->rx_ring[i]->ring_flags &= + (~RNP_RING_FLAG_CHANGE_RX_LEN); + /* we should delay setup rx length to + * wait rx head to 0 + */ + if (head >= adapter->rx_ring[i]->reset_count) { + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_DELAY_SETUP_RX_LEN; + /* set sw count to head + 1*/ + adapter->rx_ring[i]->temp_count = head + 1; + } + } + /* only down without rx_len change no need handle */ + } + /* call carrier off first to avoid false dev_watchdog timeouts */ + + rnp_napi_disable_all(adapter); + + adapter->flags2 &= + ~(RNP_FLAG2_FDIR_REQUIRES_REINIT | RNP_FLAG2_RESET_REQUESTED); + adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE; + + if (adapter->num_vfs) { + /* ping all the active vfs to let them know we are going down */ + rnp_ping_all_vfs(adapter); + /* Disable all VFTE/VFRE TX/RX */ + rnp_disable_tx_rx(adapter); + } + { + + u32 status = 0; + int timeout = 0; + + do { + status = rd32(hw, RNP_DMA_AXI_READY); + usleep_range(100, 200); + timeout++; + } while ((status != 0xffff) && (timeout < 100)); + + if (timeout > 100) + printk("wait axi ready timeout\n"); + } + + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + int count = tx_ring->count; + int head; + int timeout = 0; + + /* 1. stop queue */ + if (!err) { + if (!(tx_ring->ring_flags & RNP_RING_SKIP_TX_START)) { + ring_wr32(tx_ring, RNP_DMA_TX_START, 0); + } + } + /* 2. 
try to set tx head to 0 in sriov mode + * since we don't reset + */ + if ((((adapter->flags & RNP_FLAG_SRIOV_ENABLED) || + hw->ncsi_en)) && + (!(tx_ring->ring_flags & RNP_RING_SIZE_CHANGE_FIX))) { + /* only do this if hw not support tx head to zero auto */ + /* n10 should wait tx_ready */ + u32 status = 0; + + timeout = 0; + do { + status = ring_rd32(tx_ring, RNP_DMA_TX_READY); + usleep_range(100, 200); + timeout++; + rnp_dbg("wait %d tx ready to 1\n", + tx_ring->rnp_queue_idx); + } while ((status != 1) && (timeout < 100)); + + if (timeout >= 100) + printk("wait tx ready timeout\n"); + + head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + if (head != 0) { + u16 next_to_use = tx_ring->next_to_use; + + if (head != (count - 1)) { + /* 3 set len head + 1 */ + ring_wr32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_LEN, + head + 1); + } + /* set to use head */ + tx_ring->next_to_use = head; + /* 4 send a len zero packet */ + rnp_xmit_nop_frame_ring(adapter, tx_ring); + if (!(tx_ring->ring_flags & + RNP_RING_SKIP_TX_START)) + ring_wr32(tx_ring, RNP_DMA_TX_START, 1); + /* 5 wait head to zero */ + while ((head != 0) && (timeout < 1000)) { + head = ring_rd32( + tx_ring, + RNP_DMA_REG_TX_DESC_BUF_HEAD); + usleep_range(10000, 20000); + timeout++; + } + if (timeout >= 1000) { + printk("[%s] Wait Tx-ring %d head to zero time out\n", + netdev->name, + tx_ring->rnp_queue_idx); + } + /* 6 stop queue again*/ + if (!(tx_ring->ring_flags & + RNP_RING_SKIP_TX_START)) + ring_wr32(tx_ring, RNP_DMA_TX_START, 0); + /* 7 write back next_to_use maybe hw hang */ + tx_ring->next_to_use = next_to_use; + } + } + } + if (!err) { + if (!pci_channel_offline(adapter->pdev)) { + if (hw->ncsi_en == 0 && + !(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + rnp_reset(adapter); + } + } + } + /* power down the optics for n10 SFP+ fiber */ + if (hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); + + if (!free_tx_ealay) + rnp_clean_all_tx_rings(adapter); + + rnp_clean_all_rx_rings(adapter); + + if (hw->ncsi_en) + hw->ops.set_mac_rx(hw, true); +} + +/** + * rnp_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void rnp_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + /* Do the reset outside of interrupt context */ + int i; + bool real_tx_hang = false; + +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + + if (check_for_tx_hang(tx_ring) && rnp_check_tx_hang(tx_ring)) + real_tx_hang = true; + } + + if (real_tx_hang) { + printk("hw real hang!!!!"); + /* Do the reset outside of interrupt context */ + rnp_tx_timeout_reset(adapter); + } else { + printk("Fake Tx hang detected with timeout of %d " + "seconds\n", + netdev->watchdog_timeo / HZ); + + /* fake Tx hang - increase the kernel timeout */ + if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) + netdev->watchdog_timeo *= 2; + } +} + +/** + * rnp_sw_init - Initialize general software structures (struct rnp_adapter) + * @adapter: board private structure to initialize + * + * rnp_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
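+ *
+ * This also seeds the default moderation and ring parameters: adaptive
+ * Rx/Tx coalescing on, a NAPI budget of 64, and RNP_DEFAULT_TXD /
+ * RNP_DEFAULT_RXD descriptors per ring, before the adapter is marked
+ * __RNP_DOWN.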
+ **/ +static int rnp_sw_init(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned int rss = 0, fdir; + int rss_limit = num_online_cpus(); + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* if this hw can setup msix count */ + rss = min_t(int, adapter->max_ring_pair_counts, rss_limit); + rss = min_t(int, rss, + hw->mac.max_msix_vectors - adapter->num_other_vectors); + adapter->ring_feature[RING_F_RSS].limit = + min_t(int, rss, adapter->max_ring_pair_counts); + + adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_CAPABLE; + adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_ENABLE; + + adapter->max_q_vectors = hw->max_msix_vectors - 1; + adapter->atr_sample_rate = 20; + + fdir = min_t(int, adapter->max_q_vectors, rss_limit); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + + if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) { + spin_lock_init(&adapter->fdir_perfect_lock); + adapter->fdir_filter_count = 0; + adapter->fdir_mode = hw->fdir_mode; + /* fdir_pballoc not from zero, so add 2 */ + adapter->fdir_pballoc = 2 + hw->layer2_count + hw->tuple5_count; + adapter->layer2_count = hw->layer2_count; + adapter->tuple_5_count = hw->tuple5_count; + } + + /* itr sw setup here */ + adapter->sample_interval = 10; + adapter->adaptive_rx_coal = 1; + adapter->adaptive_tx_coal = 1; + adapter->auto_rx_coal = 0; + adapter->napi_budge = 64; + /* set default work limits */ + adapter->tx_work_limit = RNP_DEFAULT_TX_WORK; + adapter->rx_usecs = RNP_PKT_TIMEOUT; + adapter->rx_usecs_usr_set = RNP_PKT_TIMEOUT; + adapter->rx_frames = RNP_RX_PKT_POLL_BUDGET; + adapter->tx_usecs = RNP_PKT_TIMEOUT_TX; + adapter->tx_usecs_usr_set = RNP_PKT_TIMEOUT_TX; + adapter->tx_frames = RNP_TX_PKT_POLL_BUDGET; + + /* set default ring sizes */ + adapter->tx_ring_item_count = RNP_DEFAULT_TXD; + adapter->rx_ring_item_count = RNP_DEFAULT_RXD; + + set_bit(__RNP_DOWN, &adapter->state); + + return 0; +} + +/** + * rnp_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int rnp_setup_tx_resources(struct rnp_ring *tx_ring, + struct rnp_adapter *adapter) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + size = sizeof(struct rnp_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct rnp_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + DPRINTK(IFUP, INFO, + "TxRing:%d, vector:%d ItemCounts:%d " + "desc:%p(0x%llx) node:%d\n", + tx_ring->rnp_queue_idx, tx_ring->q_vector->v_idx, + tx_ring->count, tx_ring->desc, (u64)tx_ring->dma, numa_node); + return 0; + +err: + 
vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * rnp_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int rnp_setup_all_tx_resources(struct rnp_adapter *adapter) +{ + int i, err = 0; + + tx_dbg("adapter->num_tx_queues:%d, adapter->tx_ring[0]:%p\n", + adapter->num_tx_queues, adapter->tx_ring[0]); + + for (i = 0; i < (adapter->num_tx_queues); i++) { + BUG_ON(adapter->tx_ring[i] == NULL); + err = rnp_setup_tx_resources(adapter->tx_ring[i], adapter); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + rnp_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * rnp_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int rnp_setup_rx_resources(struct rnp_ring *rx_ring, + struct rnp_adapter *adapter) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + BUG_ON(rx_ring == NULL); + + size = sizeof(struct rnp_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union rnp_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + DPRINTK(IFUP, INFO, + "RxRing:%d, vector:%d ItemCounts:%d " + "desc:%p(0x%llx) node:%d\n", + rx_ring->rnp_queue_idx, rx_ring->q_vector->v_idx, + rx_ring->count, rx_ring->desc, (u64)rx_ring->dma, numa_node); + + return 0; +err: + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * rnp_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
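+ *
+ * Before each allocation the hardware RX head pointer is sampled; if it
+ * already points beyond the requested ring size (which can happen after
+ * SR-IOV use), the ring is flagged RNP_RING_FLAG_DELAY_SETUP_RX_LEN and
+ * temporarily sized to head + 1, with the requested size saved in
+ * reset_count so the ring can later be re-initialised to its real
+ * length.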
+ * + * Return 0 on success, negative on failure + **/ +static int rnp_setup_all_rx_resources(struct rnp_adapter *adapter) +{ + int i, err = 0; + u32 head; + + for (i = 0; i < adapter->num_rx_queues; i++) { + BUG_ON(adapter->rx_ring[i] == NULL); + + /* should check count and head */ + /* in sriov condition may head large than count */ + head = ring_rd32(adapter->rx_ring[i], + RNP_DMA_REG_RX_DESC_BUF_HEAD); + if (unlikely(head >= adapter->rx_ring[i]->count)) { + dbg("[%s] Ring %d head large than count", + adapter->netdev->name, + adapter->rx_ring[i]->rnp_queue_idx); + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_DELAY_SETUP_RX_LEN; + adapter->rx_ring[i]->reset_count = + adapter->rx_ring[i]->count; + adapter->rx_ring[i]->count = head + 1; + } + err = rnp_setup_rx_resources(adapter->rx_ring[i], adapter); + if (!err) + continue; + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + rnp_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * rnp_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void rnp_free_tx_resources(struct rnp_ring *tx_ring) +{ + BUG_ON(tx_ring == NULL); + + rnp_clean_tx_ring(tx_ring); + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * rnp_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void rnp_free_all_tx_resources(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < (adapter->num_tx_queues); i++) + rnp_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * rnp_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void rnp_free_rx_resources(struct rnp_ring *rx_ring) +{ + BUG_ON(rx_ring == NULL); + + rnp_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * rnp_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void rnp_free_all_rx_resources(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < (adapter->num_rx_queues); i++) + if (adapter->rx_ring[i]->desc) + rnp_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * rnp_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int rnp_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < hw->min_length) || (max_frame > hw->max_length)) + return -EINVAL; + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + if (netdev->mtu == new_mtu) + return 0; + + /* 
must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + + rnp_msg_post_status(adapter, PF_SET_MTU); + + return 0; +} + +/** + * rnp_tx_maxrate - callback to set the maximum per-queue bitrate + * @netdev: network interface device structure + * @queue_index: Tx queue to set + * @maxrate: desired maximum transmit bitrate Mbps + **/ +__maybe_unused static int rnp_tx_maxrate(struct net_device *netdev, + int queue_index, u32 maxrate) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *tx_ring = adapter->tx_ring[queue_index]; + u64 real_rate = 0; + + adapter->max_rate[queue_index] = maxrate; + rnp_dbg("%s: queue:%d maxrate:%d\n", __func__, queue_index, maxrate); + if (!maxrate) + return rnp_setup_tx_maxrate(tx_ring, 0, + adapter->hw.usecstocount * 1000000); + /* we need turn it to bytes/s */ + real_rate = ((u64)maxrate * 1024 * 1024) / 8; + rnp_setup_tx_maxrate(tx_ring, real_rate, + adapter->hw.usecstocount * 1000000); + + return 0; +} + +/** + * rnp_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +int rnp_open(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int err; + + DPRINTK(IFUP, INFO, "ifup\n"); + + /* disallow open during test */ + if (test_bit(__RNP_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = rnp_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = rnp_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + rnp_configure(adapter); + + err = rnp_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + if (module_enable_ptp) + rnp_ptp_register(adapter); + + rnp_up_complete(adapter); + + return 0; + +err_set_queues: + rnp_free_irq(adapter); +err_req_irq: + rnp_free_all_rx_resources(adapter); +err_setup_rx: + rnp_free_all_tx_resources(adapter); +err_setup_tx: + hw->ops.set_mbx_ifup(hw, 0); + rnp_reset(adapter); + + return err; +} + +/** + * rnp_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
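+ *
+ * In SR-IOV mode the PF additionally clears its link state and posts
+ * PF_SET_LINK_STATUS over the mailbox, then waits briefly so every VF
+ * has seen link-down before the PF resources are released.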
+ **/ +int rnp_close(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + DPRINTK(IFDOWN, INFO, "ifdown\n"); + + if (module_enable_ptp) + rnp_ptp_unregister(adapter); + + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + + /* if in sriov mode send link down to all vfs */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + adapter->link_up = 0; + adapter->link_up_old = 0; + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS); + /* wait all vf get this status */ + usleep_range(5000, 10000); + } + + return 0; +} + +#ifdef CONFIG_PM +static int rnp_resume(struct pci_dev *pdev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + struct rnp_hw *hw = &adapter->hw; + + printk("call rnp_resume\n"); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pcim_enable_device(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + switch (hw->hw_type) { + case rnp_hw_n10: + case rnp_hw_n400: + case rnp_hw_n20: + case rnp_hw_uv440: + wait_mbx_init_done(hw); +#ifdef FIX_VF_BUG + rnp_wr_reg(adapter->io_addr_bar0 + + (0x7982fc & (pci_resource_len(pdev, 0) - 1)), + 0); +#endif + break; + default: + + break; + } + + rtnl_lock(); + + err = rnp_init_interrupt_scheme(adapter); + if (!err) + err = register_mbx_irq(adapter); + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, false, rnp_driver_suspuse); + + rnp_reset(adapter); + + if (!err && netif_running(netdev)) + err = rnp_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} +#endif + +static int __rnp_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + /* should consider sriov mode ? 
*/ + } + rtnl_unlock(); + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, true, rnp_driver_suspuse); + + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (wufc) { + rnp_set_rx_mode(netdev); + + /* enable the optics for n10 SFP+ fiber as we can WoL */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* turn on all-multi mode if wake on multicast is enabled */ + } + + if (hw->ops.setup_wol) + hw->ops.setup_wol(hw, adapter->wol); + + pci_wake_from_d3(pdev, !!wufc); + *enable_wake = !!wufc; + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int rnp_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + printk("call rnp_suspend\n"); + + retval = __rnp_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +__maybe_unused static void rnp_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __rnp_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * rnp_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ +void rnp_update_stats(struct rnp_adapter *adapter) +{ + struct net_device_stats *net_stats = &adapter->netdev->stats; + struct rnp_hw *hw = &adapter->hw; + struct rnp_hw_stats *hw_stats = &adapter->hw_stats; + int i; + struct rnp_ring *ring; + u64 hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; + + net_stats->tx_packets = 0; + net_stats->tx_bytes = 0; + net_stats->rx_packets = 0; + net_stats->rx_bytes = 0; + net_stats->rx_dropped = 0; + net_stats->rx_errors = 0; + hw_stats->vlan_strip_cnt = 0; + hw_stats->vlan_add_cnt = 0; + + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + for (i = 0; i < adapter->num_q_vectors; i++) { + rnp_for_each_ring(ring, adapter->q_vector[i]->rx) { + hw_csum_rx_error += ring->rx_stats.csum_err; + hw_csum_rx_good += ring->rx_stats.csum_good; + hw_stats->vlan_strip_cnt += ring->rx_stats.vlan_remove; + net_stats->rx_packets += ring->stats.packets; + net_stats->rx_bytes += ring->stats.bytes; + } + rnp_for_each_ring(ring, adapter->q_vector[i]->tx) { + hw_stats->vlan_add_cnt += ring->tx_stats.vlan_add; + net_stats->tx_packets += ring->stats.packets; + net_stats->tx_bytes += ring->stats.bytes; + } + } + net_stats->rx_errors += hw_csum_rx_error; + + hw->ops.update_hw_status(hw, hw_stats, net_stats); + + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; + net_stats->rx_errors = hw_csum_rx_error; +} + +/** + * rnp_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
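+ *
+ * A TX queue counts as stalled when descriptors are outstanding but
+ * next_to_clean has not moved for more than two consecutive samples;
+ * its NAPI is then kicked and tx_irq_miss incremented. RX rings are
+ * handled the same way: a stuck clean pointer with a completed
+ * descriptor (DD set, non-zero length) re-schedules NAPI, while a
+ * zero-length completion escalates to RNP_FLAG2_RESET_REQUESTED.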
+ */ +static void rnp_check_hang_subtask(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *tx_ring; + u64 tx_next_to_clean_old; + u64 tx_next_to_clean; + u64 tx_next_to_use; + struct rnp_ring *rx_ring; + u64 rx_next_to_clean_old; + u64 rx_next_to_clean; + union rnp_rx_desc *rx_desc; + + /* If we're down or resetting, just bail */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + set_bit(__RNP_SERVICE_CHECK, &adapter->state); + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_ring = adapter->tx_ring[i]; + /* get the last next_to_clean */ + tx_next_to_clean_old = tx_ring->tx_stats.tx_next_to_clean; + tx_next_to_clean = tx_ring->next_to_clean; + tx_next_to_use = tx_ring->next_to_use; + + /* if we have tx desc to clean */ + if (tx_next_to_use != tx_next_to_clean) { + if (tx_next_to_clean == tx_next_to_clean_old) { + tx_ring->tx_stats.tx_equal_count++; + if (tx_ring->tx_stats.tx_equal_count > 2) { + /* maybe not so good */ + struct rnp_q_vector *q_vector = + tx_ring->q_vector; + + /* stats */ + if (q_vector->rx.ring || + q_vector->tx.ring) + napi_schedule_irqoff( + &q_vector->napi); + + tx_ring->tx_stats.tx_irq_miss++; + tx_ring->tx_stats.tx_equal_count = 0; + } + } else { + tx_ring->tx_stats.tx_equal_count = 0; + } + /* update */ + /* record this next_to_clean */ + tx_ring->tx_stats.tx_next_to_clean = tx_next_to_clean; + } else { + /* clean record to -1 */ + tx_ring->tx_stats.tx_next_to_clean = -1; + } + } + + /* check if we lost rx irq */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + /* get the last next_to_clean */ + rx_next_to_clean_old = rx_ring->rx_stats.rx_next_to_clean; + /* get the now clean */ + rx_next_to_clean = rx_ring->next_to_clean; + + if (rx_next_to_clean == rx_next_to_clean_old) { + rx_ring->rx_stats.rx_equal_count++; + + if ((rx_ring->rx_stats.rx_equal_count > 2) && + (rx_ring->rx_stats.rx_equal_count < 5)) { + rx_desc = RNP_RX_DESC(rx_ring, + rx_ring->next_to_clean); + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_DD)) { + int size; + struct rnp_q_vector *q_vector = + rx_ring->q_vector; + + size = le16_to_cpu(rx_desc->wb.len); + if (size) { + rx_ring->rx_stats.rx_irq_miss++; + if (q_vector->rx.ring || + q_vector->tx.ring) + napi_schedule_irqoff( + &q_vector->napi); + } else { + printk("set RNP_FLAG2_RESET_REQUESTED since size is 0\n"); + adapter->flags2 |= + RNP_FLAG2_RESET_REQUESTED; + } + } + } + if (rx_ring->rx_stats.rx_equal_count > 1000) + rx_ring->rx_stats.rx_equal_count = 0; + } else { + rx_ring->rx_stats.rx_equal_count = 0; + } + rx_ring->rx_stats.rx_next_to_clean = rx_next_to_clean; + } + + clear_bit(__RNP_SERVICE_CHECK, &adapter->state); +} + +static void update_ring_delay(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + adapter->rx_usecs * hw->usecstocount); + ring = adapter->tx_ring[i]; + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER, + adapter->tx_usecs * hw->usecstocount); + } +} + +/** + * rnp_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + * @link_speed: pointer to a u32 to store the link_speed + **/ +static void 
rnp_watchdog_update_link(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + bool duplex = adapter->duplex_old; + bool flow_rx = true, flow_tx = true; + + if (!(adapter->flags & RNP_FLAG_NEED_LINK_UPDATE)) + return; + + if (hw->ops.check_link) { + hw->ops.check_link(hw, &link_speed, &link_up, &duplex, false); + } else { + /* always assume link is up, if no check link function */ + link_speed = RNP_LINK_SPEED_10GB_FULL; + link_up = true; + } + + if (link_up || time_after(jiffies, (adapter->link_check_timeout + + RNP_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE; + } + adapter->link_up = link_up; + adapter->link_speed = link_speed; + adapter->duplex_old = duplex; + + if (hw->ops.get_pause_mode) + hw->ops.get_pause_mode(hw); + switch (hw->fc.current_mode) { + case rnp_fc_none: + flow_rx = false; + flow_tx = false; + break; + case rnp_fc_tx_pause: + flow_rx = false; + flow_tx = true; + + break; + case rnp_fc_rx_pause: + flow_rx = true; + flow_tx = false; + break; + + case rnp_fc_full: + flow_rx = true; + flow_tx = true; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + } + /* if we detect changed link setup new */ + if (adapter->link_up) { + if (hw->ops.set_mac_speed) + hw->ops.set_mac_speed(hw, true, link_speed, duplex); + /* we should also update pause mode */ + if (hw->ops.set_pause_mode) + hw->ops.set_pause_mode(hw); + + e_info(drv, "NIC Link is Up %s, %s Duplex, Flow Control: %s\n", + (link_speed == RNP_LINK_SPEED_40GB_FULL ? + "40 Gbps" : + (link_speed == RNP_LINK_SPEED_25GB_FULL ? + "25 Gbps" : + (link_speed == RNP_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == RNP_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == RNP_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == RNP_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed")))))), + ((duplex) ? "Full" : "Half"), + ((flow_rx && flow_tx) ? + "RX/TX" : + (flow_rx ? "RX" : (flow_tx ? 
"TX" : "None")))); + /* we should update rx irq delay and tx irq delay */ + if (link_speed == RNP_LINK_SPEED_10GB_FULL) { + adapter->rx_usecs = adapter->rx_usecs_usr_set; + adapter->tx_usecs = adapter->tx_usecs_usr_set; + } else { + adapter->rx_usecs = adapter->rx_usecs_usr_set * 6; + adapter->tx_usecs = adapter->tx_usecs_usr_set * 2; + } + update_ring_delay(adapter); + } else { + if (hw->ops.set_mac_speed) + hw->ops.set_mac_speed(hw, false, 0, false); + } +} + +/** + * rnp_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void rnp_watchdog_link_is_up(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP; + switch (hw->mac.type) { + default: + break; + } + + netif_carrier_on(netdev); + + netif_tx_wake_all_queues(netdev); + + hw->ops.set_mac_rx(hw, true); +} + +/** + * rnp_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void rnp_watchdog_link_is_down(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + /* poll for SFP+ cable when link is down */ + if (rnp_is_sfp(hw)) + adapter->flags2 |= RNP_FLAG2_SEARCH_FOR_SFP; + + e_info(drv, "NIC Link is Down\n"); + + netif_carrier_off(netdev); + + netif_tx_stop_all_queues(netdev); + + hw->ops.set_mac_rx(hw, false); +} + +static void rnp_update_link_to_vf(struct rnp_adapter *adapter) +{ + /* maybe confict with vf */ + if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE)) + return; + + if ((adapter->link_up_old != adapter->link_up) || + (adapter->link_speed_old != adapter->link_speed)) { + /* if change send mbx to all vf */ + if (!test_bit(__RNP_IN_IRQ, &adapter->state)) { + if (0 == + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS)) { + /* maybe delay if we are in other irq? 
*/ + adapter->link_up_old = adapter->link_up; + adapter->link_speed_old = adapter->link_speed; + } + } + } +} +/** + * rnp_watchdog_subtask - check and bring link up + * @adapter: pointer to the device adapter structure + **/ +static void rnp_watchdog_subtask(struct rnp_adapter *adapter) +{ + /* if interface is down do nothing */ + /* should do link status if in sriov */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + rnp_watchdog_update_link(adapter); + + if (adapter->link_up) + rnp_watchdog_link_is_up(adapter); + else + rnp_watchdog_link_is_down(adapter); + + rnp_update_link_to_vf(adapter); + + rnp_update_stats(adapter); +} + +/** + * rnp_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +void rnp_service_timer(struct timer_list *t) +{ + struct rnp_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + bool ready = true; + + /* poll faster when waiting for link */ + if (adapter->flags & RNP_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ; + /* Reset the timer */ + if (!test_bit(__RNP_REMOVE, &adapter->state)) + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + if (ready) + rnp_service_event_schedule(adapter); +} + +static void rnp_reset_pf_subtask(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 err; + + if (!(adapter->flags2 & RNP_FLAG2_RESET_PF)) + return; + + rtnl_lock(); + netif_device_detach(netdev); + if (netif_running(netdev)) { + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + } + rtnl_unlock(); + + adapter->link_up = 0; + adapter->link_up_old = 0; + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS); + /* wait all vf get this status */ + usleep_range(500, 1000); + + rnp_reset(adapter); + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); + + rtnl_lock(); + err = rnp_init_interrupt_scheme(adapter); + + register_mbx_irq(adapter); + + if (!err && netif_running(netdev)) + err = rnp_open(netdev); + + rtnl_unlock(); + rnp_msg_post_status(adapter, PF_SET_RESET); + netif_device_attach(netdev); + adapter->flags2 &= (~RNP_FLAG2_RESET_PF); +} + +static void rnp_reset_subtask(struct rnp_adapter *adapter) +{ + if (!(adapter->flags2 & RNP_FLAG2_RESET_REQUESTED)) + return; + + adapter->flags2 &= ~RNP_FLAG2_RESET_REQUESTED; + + /* If we're already down or resetting, just bail */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + rtnl_lock(); + rnp_reinit_locked(adapter); + rtnl_unlock(); +} + +static void rnp_rx_len_reset_subtask(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *rx_ring; + + for (i = 0; i < adapter->num_tx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + if (unlikely(rx_ring->ring_flags & + RNP_RING_FLAG_DO_RESET_RX_LEN)) { + dbg("[%s] Rx-ring %d count reset\n", + adapter->netdev->name, rx_ring->rnp_queue_idx); + if (!rnp_rx_ring_reinit(adapter, rx_ring)) { + rx_ring->ring_flags &= + (~RNP_RING_FLAG_DO_RESET_RX_LEN); + } + } + } +} + +static void rnp_auto_itr_moderation(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *rx_ring; + u64 period = (u64)(jiffies - adapter->last_moder_jiffies); + + if (!adapter->adaptive_rx_coal || + period < adapter->sample_interval * HZ) { + return; + } + + 
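+ /*
+ * Estimate each ring's receive rate from the packet delta since the
+ * last sample:
+ *
+ *	rate = (packets_now - packets_last) * HZ / period	(pkts/s)
+ *
+ * A ring that received traffic but stays under roughly 20000 pkts/s
+ * is tagged RNP_RING_LOWER_ITR so its interrupt delay can be relaxed;
+ * busier rings keep the configured moderation and an idle ring keeps
+ * its previous setting. For example, 50000 new packets over a
+ * ten-second sample period is 5000 pkts/s, which lowers the ITR for
+ * that ring.
+ */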
adapter->last_moder_jiffies = jiffies; + + /* it is time to check moderation */ + for (i = 0; i < adapter->num_rx_queues; i++) { + u64 x, y, rate; + u64 rx_packets, packets, rx_pkt_diff; + + rx_ring = adapter->rx_ring[i]; + rx_packets = READ_ONCE(rx_ring->stats.packets); + rx_pkt_diff = rx_packets - + adapter->last_moder_packets[rx_ring->queue_index]; + packets = rx_pkt_diff; + + x = packets * HZ; + y = do_div(x, period); + rate = x; + + + if (rate == 0) { + + } else if (rate < 20000) { + + rx_ring->ring_flags |= RNP_RING_LOWER_ITR; + } else { + + rx_ring->ring_flags &= (~RNP_RING_LOWER_ITR); + } + + /* write back new count */ + adapter->last_moder_packets[rx_ring->queue_index] = rx_packets; + } +} + +/** + * rnp_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +void rnp_service_task(struct work_struct *work) +{ + struct rnp_adapter *adapter = + container_of(work, struct rnp_adapter, service_task); + + rnp_reset_subtask(adapter); + rnp_reset_pf_subtask(adapter); + rnp_watchdog_subtask(adapter); + rnp_rx_len_reset_subtask(adapter); + rnp_auto_itr_moderation(adapter); + rnp_check_hang_subtask(adapter); + rnp_service_event_complete(adapter); +} + +static int rnp_tso(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 *mac_ip_len, u8 *hdr_len, u32 *tx_flags) +{ + struct sk_buff *skb = first->skb; + struct net_device *netdev = tx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(netdev); + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + u8 *inner_mac; + u16 gso_segs, gso_size; + u16 gso_need_pad; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + inner_mac = skb->data; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->tot_len = 0; + ip.v4->check = 0x0000; + } else { + ip.v6->payload_len = 0; + } + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + } + /* we should alayws do this */ + inner_mac = skb_inner_mac_header(skb); + first->tunnel_hdr_len = (inner_mac - skb->data); + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { + *tx_flags |= RNP_TXD_TUNNEL_VXLAN; + l4.udp->check = 0; + tx_dbg("set outer l4.udp to 0\n"); + } else { + *tx_flags |= RNP_TXD_TUNNEL_NVGRE; + } + + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } + + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->tot_len = 0; + ip.v4->check = 0x0000; + + } else { + ip.v6->payload_len = 0; + /* set ipv6 type */ + *tx_flags |= RNP_TXD_FLAG_IPv6; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + paylen = skb->len - l4_offset; + + if (skb->csum_offset == offsetof(struct tcphdr, check)) { + *tx_flags |= RNP_TXD_L4_TYPE_TCP; + /* compute length of segmentation header */ + *hdr_len = 
(l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + l4.tcp->psh = 0; + } else { + *tx_flags |= RNP_TXD_L4_TYPE_UDP; + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + *mac_ip_len = (l4.hdr - ip.hdr) | ((ip.hdr - inner_mac) << 9); + + /* compute header lengths */ + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* if we close padding check gso confition */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + gso_need_pad = (first->skb->len - *hdr_len) % gso_size; + if (gso_need_pad) { + if ((gso_need_pad + *hdr_len) <= 60) { + gso_need_pad = 60 - (gso_need_pad + *hdr_len); + first->gso_need_padding = !!gso_need_pad; + } + } + } + + /* update gso size and bytecount with header size */ + /* to fix tx status */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + if (skb->csum_offset == offsetof(struct tcphdr, check)) { + first->mss_len_vf_num |= + (gso_size | ((l4.tcp->doff * 4) << 24)); + } else { + first->mss_len_vf_num |= (gso_size | ((8) << 24)); + } + + *tx_flags |= RNP_TXD_FLAG_TSO | RNP_TXD_IP_CSUM | RNP_TXD_L4_CSUM; + + first->ctx_flag = true; + return 1; +} + +static int rnp_tx_csum(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 *mac_ip_len, u32 *tx_flags) +{ + struct sk_buff *skb = first->skb; + u8 l4_proto = 0; + u8 ip_len = 0; + u8 mac_len = 0; + u8 *inner_mac = skb->data; + u8 *exthdr; + __be16 frag_off; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + inner_mac = skb->data; + + /* outer protocol */ + if (skb->encapsulation) { + /* define outer network header type */ + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* define outer transport */ + switch (l4_proto) { + case IPPROTO_UDP: + l4.udp->check = 0; + *tx_flags |= RNP_TXD_TUNNEL_VXLAN; + break; + case IPPROTO_GRE: + *tx_flags |= RNP_TXD_TUNNEL_NVGRE; + /* There was a long-standing issue in GRE where GSO + * was not setting the outer transport header unless + * a GRE checksum was requested. This was fixed in + * the 4.6 version of the kernel. In the 4.7 kernel + * support for GRE over IPv6 was added to GSO. So we + * can assume this workaround for all IPv4 headers + * without impacting later versions of the GRE. 
+ */ + if (ip.v4->version == 4) + l4.hdr = ip.hdr + (ip.v4->ihl * 4); + break; + default: + skb_checksum_help(skb); + return -1; + } + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + inner_mac = skb_inner_mac_header(skb); + first->tunnel_hdr_len = inner_mac - skb->data; + first->ctx_flag = true; + tx_dbg("tunnel length is %d\n", first->tunnel_hdr_len); + } + + mac_len = (ip.hdr - inner_mac); // mac length + *mac_ip_len = (ip.hdr - inner_mac) << 9; + tx_dbg("inner checksum needed %d", skb_checksum_start_offset(skb)); + tx_dbg("skb->encapsulation %d\n", skb->encapsulation); + ip_len = (l4.hdr - ip.hdr); + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + *tx_flags |= RNP_TXD_FLAG_IPv6; + } + /* Enable L4 checksum offloads */ + switch (l4_proto) { + case IPPROTO_TCP: + *tx_flags |= RNP_TXD_L4_TYPE_TCP | RNP_TXD_L4_CSUM; + break; + case IPPROTO_SCTP: + tx_dbg("sctp checksum packet\n"); + *tx_flags |= RNP_TXD_L4_TYPE_SCTP | RNP_TXD_L4_CSUM; + break; + case IPPROTO_UDP: + *tx_flags |= RNP_TXD_L4_TYPE_UDP | RNP_TXD_L4_CSUM; + break; + default: + skb_checksum_help(skb); + return 0; + } + + /* should consider stags mode */ + if ((tx_ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT) && + (first->ctx_flag)) { + /* if not support tunnel */ + *tx_flags &= (~RNP_TXD_TUNNEL_MASK); + if (!(first->priv_tags)) { + first->ctx_flag = false; + mac_len += first->tunnel_hdr_len; + first->tunnel_hdr_len = 0; + } + } + *mac_ip_len = (mac_len << 9) | ip_len; + + return 0; +} + +static int __rnp_maybe_stop_tx(struct rnp_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(rnp_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int rnp_maybe_stop_tx(struct rnp_ring *tx_ring, u16 size) +{ + if (likely(rnp_desc_unused(tx_ring) >= size)) + return 0; + return __rnp_maybe_stop_tx(tx_ring, size); +} + +static int rnp_tx_map(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 mac_ip_len, u32 tx_flags) +{ + struct sk_buff *skb = first->skb; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u16 i = tx_ring->next_to_use; + u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56)); + + tx_desc = RNP_TX_DESC(tx_ring, i); + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + /* 1st desc */ + tx_desc->pkt_addr = cpu_to_le64(dma | fun_id); + + while (unlikely(size > RNP_MAX_DATA_PER_TXD)) { + tx_desc->vlan_cmd_bsz = build_ctob( + tx_flags, mac_ip_len, RNP_MAX_DATA_PER_TXD); + /* ==== desc== */ + buf_dump_line("tx0 ", __LINE__, tx_desc, + sizeof(*tx_desc)); + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = RNP_TX_DESC(tx_ring, 0); + i = 0; + } + dma += RNP_MAX_DATA_PER_TXD; + size -= RNP_MAX_DATA_PER_TXD; + + tx_desc->pkt_addr = cpu_to_le64(dma | fun_id); + } + + buf_dump_line("tx1 ", __LINE__, tx_desc, sizeof(*tx_desc)); + if (likely(!data_len)) + break; + tx_desc->vlan_cmd_bsz = build_ctob(tx_flags, mac_ip_len, size); + buf_dump_line("tx2 ", __LINE__, tx_desc, sizeof(*tx_desc)); + + /* ==== frag== */ + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = RNP_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + tx_desc->vlan_cmd_bsz = build_ctob( + tx_flags | RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS, mac_ip_len, size); + buf_dump_line("tx3 ", __LINE__, tx_desc, sizeof(*tx_desc)); + + /* set the timestamp */ + first->time_stamp = jiffies; + + tx_ring->tx_stats.send_bytes += first->bytecount; +#ifdef NO_BQL_TEST +#else + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); +#endif + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ + wmb(); + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + buf_dump_line("tx4 ", __LINE__, tx_desc, sizeof(*tx_desc)); + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + /* need this */ + rnp_maybe_stop_tx(tx_ring, DESC_NEEDED); + skb_tx_timestamp(skb); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + tx_ring->tx_stats.send_bytes_to_hw += first->bytecount; + tx_ring->tx_stats.send_bytes_to_hw += + tx_ring->tx_stats.todo_update; + tx_ring->tx_stats.todo_update = 0; + rnp_wr_reg(tx_ring->tail, i); + } else { + tx_ring->tx_stats.todo_update += first->bytecount; + } + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + rnp_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + dev_kfree_skb_any(first->skb); + first->skb = NULL; + tx_ring->next_to_use = i; + + return -1; +} + +static void rnp_force_src_mac(struct sk_buff *skb, struct net_device *netdev) +{ + u8 *data = skb->data; + bool ret = false; + struct netdev_hw_addr *ha; + /* force all src mac to myself */ + if (is_multicast_ether_addr(data)) { + if (0 == memcmp(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len)) { + ret = true; + goto DONE; + } + netdev_for_each_uc_addr(ha, netdev) { + if (0 == memcmp(data + netdev->addr_len, ha->addr, + netdev->addr_len)) { + ret = true; + goto DONE; + } + } + /* if not src mac, force to src mac */ + if (!ret) + memcpy(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len); + } +DONE: + return; +} + +netdev_tx_t rnp_xmit_frame_ring(struct sk_buff *skb, + struct rnp_adapter *adapter, + struct rnp_ring *tx_ring, bool tx_padding) +{ + struct rnp_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + int ignore_vlan = 0; + /* default len should not 0 (hw request) */ + u32 mac_ip_len = 20; + + tx_dbg("=== begin ====\n"); + tx_dbg("rnp skb:%p, skb->len:%d headlen:%d, data_len:%d\n", skb, + skb->len, skb_headlen(skb), skb->data_len); + tx_dbg("next_to_clean %d, next_to_use %d\n", tx_ring->next_to_clean, + tx_ring->next_to_use); + /* + * need: 1 descriptor per page * PAGE_SIZE/RNP_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/RNP_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag_temp)); + tx_dbg(" rnp #%d frag: size:%d\n", f, skb_frag_size(frag_temp)); + } + + if (rnp_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (!(tx_ring->ring_flags & RNP_RING_VEB_MULTI_FIX))) + rnp_force_src_mac(skb, tx_ring->netdev); + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + first->priv_tags = 0; + + first->mss_len_vf_num = 0; + first->inner_vlan_tunnel_len = 0; + + 
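+ /*
+ * In SR-IOV mode every frame is preceded by a context descriptor that
+ * carries the VF number (folded into mss_len_vf_num below), so
+ * ctx_flag is forced on here; otherwise it is only set when TSO,
+ * tunnelling, stags or TX padding actually need a context descriptor.
+ */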
first->ctx_flag = (adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true : + false; + + /* if we have a HW VLAN tag being added default to the HW one */ + /* RNP_TXD_VLAN_VALID is used for veb */ + /* setup padding flag */ + + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + first->ctx_flag = true; + /* should consider sctp */ + first->gso_need_padding = tx_padding; + } + + /* RNP_FLAG2_VLAN_STAGS_ENABLED and + * tx-stags-offload not support together + */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) { + /* always add a stags for any packets out */ + if (tx_ring->ring_flags & RNP_RING_OUTER_VLAN_FIX) { + /* set outer_vlan to ctx */ + first->inner_vlan_tunnel_len |= (adapter->stags_vid); + first->priv_tags = 1; + first->ctx_flag = true; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= RNP_TXD_VLAN_VALID | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; + tx_flags |= skb_vlan_tag_get(skb); + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer( + skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI); + tx_flags |= RNP_TXD_VLAN_VALID; + } + + } else { + /* sriov mode not support this */ + tx_flags |= adapter->stags_vid; + tx_flags |= RNP_TXD_VLAN_CTRL_INSERT_VLAN; + if (skb_vlan_tag_present(skb)) { + tx_flags |= RNP_TXD_VLAN_VALID; + first->inner_vlan_tunnel_len |= + (skb_vlan_tag_get(skb) << 8); + first->ctx_flag = true; + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer( + skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= RNP_TXD_VLAN_VALID; + } + } + } else { + /* normal mode*/ + if (skb_vlan_tag_present(skb)) { + if (skb->vlan_proto != htons(ETH_P_8021Q)) { + /* veb only use ctags */ + tx_flags |= skb_vlan_tag_get(skb); + tx_flags |= RNP_TXD_SVLAN_TYPE | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; + } else { + tx_flags |= skb_vlan_tag_get(skb); + tx_flags |= RNP_TXD_VLAN_VALID | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; + } + tx_ring->tx_stats.vlan_add++; + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + /* veb only use ctags */ + } else if ((protocol == htons(ETH_P_8021Q))) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), + &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI); + tx_flags |= RNP_TXD_VLAN_VALID; + ignore_vlan = 1; + } + } + protocol = vlan_get_protocol(skb); + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED && adapter->ptp_tx_en) { + if (!test_and_set_bit_lock(__RNP_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= RNP_TXD_FLAG_PTP; + adapter->ptp_tx_skb = skb_get(skb); + adapter->tx_hwtstamp_start = jiffies; + schedule_work(&adapter->tx_hwtstamp_work); + } else { + printk("ptp_tx_skb miss\n"); + } + } + /* record initial flags and protocol */ + tso = rnp_tso(tx_ring, first, &mac_ip_len, &hdr_len, &tx_flags); + if (tso < 0) + goto out_drop; + else if (!tso) + rnp_tx_csum(tx_ring, first, &mac_ip_len, &tx_flags); + /* check sriov mode */ + /* in this mode pf send msg should with vf_num */ + if 
(unlikely(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + first->ctx_flag = true; + first->mss_len_vf_num |= (adapter->vf_num_for_pf << 16); + } + + /* add control desc */ + rnp_maybe_tx_ctxtdesc(tx_ring, first, ignore_vlan); + /* add the ATR filter if ATR is on */ + if (rnp_tx_map(tx_ring, first, mac_ip_len, tx_flags)) { + goto cleanup_tx_tstamp; + } + tx_dbg("=== end ====\n\n\n\n"); + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; +cleanup_tx_tstamp: + if (unlikely(tx_flags & RNP_TXD_FLAG_PTP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->tx_hwtstamp_work); + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static bool check_sctp_no_padding(struct sk_buff *skb) +{ + bool no_padding = false; + u8 l4_proto = 0; + u8 *exthdr; + __be16 frag_off; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + } + /* sctp set no_padding to true */ + switch (l4_proto) { + case IPPROTO_SCTP: + no_padding = true; + break; + default: + + break; + } + return no_padding; +} + +static netdev_tx_t rnp_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *tx_ring; + bool tx_padding = false; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
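+ * With RNP_PRIV_FLAG_TX_PADDING the frame is padded to 60 bytes in
+ * software unless it carries SCTP, which must not be padded here and
+ * is only flagged via tx_padding for the ring code to handle; without
+ * the flag, frames are simply padded to 33 bytes.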
+ */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + if (skb->len < 60) { + if (!check_sctp_no_padding(skb)) { + if (skb_put_padto(skb, 60)) + return NETDEV_TX_OK; + } else { + /* if sctp smaller than 60, never padding */ + tx_padding = true; + } + } + } else { + if (skb_put_padto(skb, 33)) + return NETDEV_TX_OK; + } + tx_ring = adapter->tx_ring[skb->queue_mapping]; + + return rnp_xmit_frame_ring(skb, adapter, tx_ring, tx_padding); +} + +/** + * rnp_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int rnp_set_mac(struct net_device *netdev, void *p) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + dbg("[%s] call set mac\n", netdev->name); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + /* reset veb table */ + rnp_configure_virtualization(adapter); + return 0; +} + +static int rnp_mdio_read(struct net_device *netdev, int prtad, int devad, + u32 addr, u32 *phy_value) +{ + int rc = -EIO; + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u16 value; + + rc = hw->ops.phy_read_reg(hw, addr, 0, &value); + *phy_value = value; + + return rc; +} + +static int rnp_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + return hw->ops.phy_write_reg(hw, addr, 0, value); +} + +static int rnp_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data; + int prtad, devad, ret; + u32 phy_value; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + ret = rnp_mdio_read(netdev, prtad, devad, mii->reg_num, + &phy_value); + if (ret < 0) + return ret; + mii->val_out = phy_value; + return 0; + } else { + return rnp_mdio_write(netdev, prtad, devad, mii->reg_num, + mii->val_in); + } +} + +static int rnp_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* ptp 1588 used this */ + switch (cmd) { + case SIOCGHWTSTAMP: + if (module_enable_ptp) + return rnp_ptp_get_ts_config(adapter, req); + break; + case SIOCSHWTSTAMP: + if (module_enable_ptp) + return rnp_ptp_set_ts_config(adapter, req); + break; + case SIOCGMIIPHY: + return 0; + break; + case SIOCGMIIREG: + /* n400 use this */ + /* fall through */ + case SIOCSMIIREG: + return rnp_mii_ioctl(netdev, req, cmd); + break; + } + return -EINVAL; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void rnp_netpoll(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__RNP_DOWN, &adapter->state)) + return; + + adapter->flags |= RNP_FLAG_IN_NETPOLL; + for (i = 0; i < adapter->num_q_vectors; i++) + rnp_msix_clean_rings(0, adapter->q_vector[i]); + adapter->flags &= ~RNP_FLAG_IN_NETPOLL; +} + +#endif + +static void rnp_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct rnp_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by rnp_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + +} + +/** + * rnp_setup_tc - configure net_device for multiple traffic classes + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int rnp_setup_tc(struct net_device *dev, u8 tc) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + if (hw->hw_type != rnp_hw_n10) + return -EINVAL; + /* if now we are in force mode, never need force, if not force it */ + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + hw->ops.set_mac_rx(hw, false); + if (hw->ops.driver_status) + hw->ops.driver_status(hw, true, + rnp_driver_force_control_mac); + } + + /* Hardware supports up to 8 traffic classes */ + if ((tc > RNP_MAX_TCS_NUM) || (tc == 1)) + return -EINVAL; + /* we canot support tc with sriov mode */ + if ((tc) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return -EINVAL; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
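+ * The whole sequence therefore runs under __RNP_RESETTING: close the
+ * interface, drop the fdir filters and the interrupt scheme, apply the
+ * new TC mapping, re-register interrupts and the mailbox IRQ, mark the
+ * RSS table for rebuild and reopen the interface.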
+ */ + while (test_and_set_bit(__RNP_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(dev)) + rnp_close(dev); + + rnp_fdir_filter_exit(adapter); + adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC); + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); + adapter->num_tc = tc; + + if (tc) { + netdev_set_num_tc(dev, tc); + adapter->flags |= RNP_FLAG_DCB_ENABLED; + } else { + netdev_reset_tc(dev); + adapter->flags &= ~RNP_FLAG_DCB_ENABLED; + } + + rnp_init_interrupt_scheme(adapter); + + register_mbx_irq(adapter); + /* rss table must reset */ + adapter->rss_tbl_setup_flag = 0; + + if (netif_running(dev)) + ret = rnp_open(dev); + + /* if we not set force now */ + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + hw->ops.set_mac_rx(hw, false); + if (hw->ops.driver_status) + hw->ops.driver_status(hw, false, + rnp_driver_force_control_mac); + } + + clear_bit(__RNP_RESETTING, &adapter->state); + return ret; +} + +#ifdef CONFIG_PCI_IOV +void rnp_sriov_reinit(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + rnp_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); + usleep_range(10000, 20000); +} +#endif + +static int rnp_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) +{ + /* 1. check weather filter rule is ingress root */ + struct rnp_adapter *adapter = netdev_priv(dev); + u32 loc = cls->knode.handle & 0xfffff; + u32 uhtid = TC_U32_USERHTID(cls->knode.handle); + int ret; + + if ((uhtid != 0x800)) + return -EINVAL; + + spin_lock(&adapter->fdir_perfect_lock); + ret = rnp_update_ethtool_fdir_entry(adapter, NULL, loc); + spin_unlock(&adapter->fdir_perfect_lock); + + return ret; +} + +#ifdef CONFIG_NET_CLS_ACT +static int rnp_action_parse(struct tcf_exts *exts, u64 *action, u8 *queue) +{ + const struct tc_action *a; + int j; + + if (!tcf_exts_has_actions(exts)) + return -EINVAL; + + tcf_exts_for_each_action(j, a, exts) { + /* Drop action */ + if (is_tcf_gact_shot(a)) { + *action = RNP_FDIR_DROP_QUEUE; + *queue = RNP_FDIR_DROP_QUEUE; + return 0; + } + /* Redirect to a VF or a offloaded macvlan */ + if (is_tcf_mirred_egress_redirect(a)) { + + struct net_device *dev = tcf_mirred_dev(a); + + if (!dev) + return -EINVAL; + } + + return -EINVAL; + } + + return 0; +} + +#else +static int rnp_action_parse(struct tcf_exts *exts, u64 *action, u8 *queue) +{ + return -EINVAL; +} +#endif + +static int rnp_clsu32_build_input(struct tc_cls_u32_offload *cls, + struct rnp_fdir_filter *input, + const struct rnp_match_parser *parsers) +{ + int i = 0, j = 0, err = -1; + __be32 val, mask, off; + bool found; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + off = cls->knode.sel->keys[i].off; + val = cls->knode.sel->keys[i].val; + mask = cls->knode.sel->keys[i].mask; + dbg("cls-key[%d] off %d val %d mask %d\n ", i, off, val, mask); + found = false; + for (j = 0; parsers[j].val; j++) { + /* according the off select parser */ + if (off == parsers[j].off) { + found = true; + err = parsers[j].val(input, val, mask); + if (err) + return err; + + break; + } + } + /* if the rule can't parse that we don't support the rule */ + if (!found) + return -EINVAL; + } + + return 0; +} + +static int rnp_config_knode(struct net_device *dev, __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + /*1. check ethernet hw-feature U32 can offload */ + /*2. check U32 protocol We just support IPV4 offloading For now*/ + /*3. check if this cls is a cls of root u32 or cls of class u32*/ + /*4. 
check if this cls has been added.
+ * An entry may exist with its match value and mask not yet
+ * filled in, in which case it can be reused; an entry whose
+ * value and mask were already added does not need to be
+ * added again.
+ */
+	u32 uhtid, link_uhtid;
+	int ret;
+	struct rnp_adapter *adapter = netdev_priv(dev);
+	u8 queue;
+	struct rnp_fdir_filter *input;
+	u32 loc = cls->knode.handle & 0xfffff;
+
+	if (protocol != htons(ETH_P_IP))
+		return -EOPNOTSUPP;
+
+	uhtid = TC_U32_USERHTID(cls->knode.handle);
+	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
+
+	netdev_info(dev, "uhtid %d link_uhtid %d protocol 0x%04x\n", uhtid,
+		    link_uhtid, ntohs(protocol));
+	/* For now only the root ingress table is handled.
+	 * TODO: support more cases.
+	 */
+	if (uhtid != 0x800)
+		return -EINVAL;
+
+	input = kzalloc(sizeof(*input), GFP_KERNEL);
+	if (!input)
+		return -ENOMEM;
+	/* be careful: input must be freed on every error path */
+	ret = rnp_clsu32_build_input(cls, input, rnp_ipv4_parser);
+	if (ret) {
+		netdev_warn(dev, "this rule is not supported\n");
+		goto out;
+	}
+	ret = rnp_action_parse(cls->knode.exts, &input->action, &queue);
+	if (ret)
+		goto out;
+
+	dbg("tc filter rule sw_location %d\n", loc);
+
+	/* XXX: hw_idx only ever increments here; deleted rules do not
+	 * appear to reclaim their hardware slot
+	 */
+	input->hw_idx = adapter->tuple_5_count++;
+	input->sw_idx = loc;
+	spin_lock(&adapter->fdir_perfect_lock);
+	rnp_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+	spin_unlock(&adapter->fdir_perfect_lock);
+
+	return 0;
+out:
+	kfree(input);
+	return -EOPNOTSUPP;
+}
+
+static int rnp_setup_tc_cls_u32(struct net_device *dev,
+				struct tc_cls_u32_offload *cls_u32)
+{
+	__be16 proto = cls_u32->common.protocol;
+
+	dbg("cls_u32->command is %d\n", cls_u32->command);
+	switch (cls_u32->command) {
+	case TC_CLSU32_NEW_KNODE:
+	case TC_CLSU32_REPLACE_KNODE:
+		return rnp_config_knode(dev, proto, cls_u32);
+	case TC_CLSU32_DELETE_KNODE:
+		return rnp_delete_knode(dev, cls_u32);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int rnp_setup_tc_block_ingress_cb(enum tc_setup_type type,
+					 void *type_data, void *cb_priv)
+{
+	struct net_device *dev = cb_priv;
+	struct rnp_adapter *adapter = netdev_priv(dev);
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		netdev_err(
+			adapter->netdev,
+			"Failed to setup tc on port %d. Link Down? 
0x%.2lx\n", + adapter->port, adapter->state); + return -EINVAL; + } + if (!tc_cls_can_offload_and_chain0(dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSU32: + return rnp_setup_tc_cls_u32(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(rnp_block_cb_list); + +static int rnp_setup_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return rnp_setup_tc(dev, mqprio->num_tc); +} + +static int __rnp_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + switch (type) { + case TC_SETUP_BLOCK: { + struct flow_block_offload *f = + (struct flow_block_offload *)type_data; + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return flow_block_cb_setup_simple( + type_data, &rnp_block_cb_list, + rnp_setup_tc_block_ingress_cb, adapter, adapter, + true); + else + return -EOPNOTSUPP; + } + case TC_SETUP_CLSU32: + return rnp_setup_tc_cls_u32(netdev, type_data); + case TC_SETUP_QDISC_MQPRIO: + return rnp_setup_mqprio(netdev, type_data); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +void rnp_do_reset(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + else + rnp_reset(adapter); +} + +static netdev_features_t rnp_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* close rx csum when rx fcs on */ + if (!(adapter->flags2 & RNP_FLAG2_CHKSM_FIX)) { + if (features & NETIF_F_RXFCS) + features &= (~NETIF_F_RXCSUM); + } + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & RNP_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; + if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) { + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; + } + + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) { + if (!(features & NETIF_F_HW_VLAN_STAG_FILTER)) + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) { + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + } + + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (!(features & NETIF_F_HW_VLAN_STAG_RX)) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) { + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + } + + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (!(features & NETIF_F_HW_VLAN_STAG_TX)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + } + + return features; +} + +static int rnp_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; + bool need_reset = false; + struct rnp_hw *hw = &adapter->hw; + + netdev->features = features; + + /* if changed ntuple should close all */ + if (changed & NETIF_F_NTUPLE) { + if (!(features & NETIF_F_NTUPLE)) { + rnp_fdir_filter_exit(adapter); + } + } + + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & 
RNP_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= RNP_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~RNP_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if SR-IOV is enabled */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= RNP_FLAG_FDIR_HASH_CAPABLE; + break; + } + + /* vlan filter changed */ + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER)) + hw->ops.set_vlan_filter_en(hw, true); + else + hw->ops.set_vlan_filter_en(hw, false); + rnp_msg_post_status(adapter, PF_VLAN_FILTER_STATUS); + } + + /* rss hash changed */ + if (changed & (NETIF_F_RXHASH)) { + bool iov_en = (adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true : + false; + + if (netdev->features & (NETIF_F_RXHASH)) + hw->ops.set_rx_hash(hw, true, iov_en); + else + hw->ops.set_rx_hash(hw, false, iov_en); + } + + /* rx fcs changed */ + /* in this mode rx l4/sctp checksum will get error */ + if (changed & NETIF_F_RXFCS) { + + if (features & NETIF_F_RXFCS) { + adapter->priv_flags |= RNP_PRIV_FLAG_RX_FCS; + hw->ops.set_fcs_mode(hw, true); + /* if in rx fcs mode ,hw rxcsum may error, + * close rxcusm + */ + } else { + adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_FCS); + hw->ops.set_fcs_mode(hw, false); + } + rnp_msg_post_status(adapter, PF_FCS_STATUS); + } + + if (changed & NETIF_F_RXALL) + need_reset = true; + + if (features & NETIF_F_RXALL) + adapter->priv_flags |= RNP_PRIV_FLAG_RX_ALL; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_ALL); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); + + if (need_reset) + rnp_do_reset(netdev); + + return 0; +} + +static int rnp_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + __always_unused u16 flags, + struct netlink_ext_ack __always_unused *ext) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags2 &= ~RNP_FLAG2_BRIDGE_MODE_VEB; + wr32(hw, RNP_DMA_CONFIG, + rd32(hw, RNP_DMA_CONFIG) | DMA_VEB_BYPASS); + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + wr32(hw, RNP_DMA_CONFIG, + rd32(hw, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + + } else + return -EINVAL; + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + } + + return 0; +} + +static int rnp_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, int nlflags) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return 0; + + if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB) + mode = BRIDGE_MODE_VEB; + else + mode = BRIDGE_MODE_VEPA; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +} + +#define RNP_MAX_TUNNEL_HDR_LEN 80 +#define RNP_MAX_MAC_HDR_LEN 127 +#define RNP_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t rnp_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > RNP_MAX_MAC_HDR_LEN)) + return features & + ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > RNP_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_TSO | NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void rnp_clear_udp_tunnel_port(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + if (!(adapter->flags & (RNP_FLAG_VXLAN_OFFLOAD_CAPABLE))) + return; + + adapter->vxlan_port = 0; + hw->ops.set_vxlan_port(hw, adapter->vxlan_port); +} + +/** + * rnp_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +__maybe_unused static void rnp_add_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + __be16 port = ti->port; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & RNP_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "VXLAN port %d set, not adding port %d\n", + ntohs(adapter->vxlan_port), ntohs(port)); + return; + } + + adapter->vxlan_port = port; + break; + default: + return; + } + hw->ops.set_vxlan_port(hw, ntohs(adapter->vxlan_port)); +} + +/** + * rnp_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +__maybe_unused static void rnp_del_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & RNP_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + break; + default: + return; + } + + rnp_clear_udp_tunnel_port(adapter); + adapter->flags2 |= RNP_FLAG2_UDP_TUN_REREG_NEEDED; +} + +const struct net_device_ops 
rnp10_netdev_ops = { + .ndo_open = rnp_open, + .ndo_stop = rnp_close, + .ndo_start_xmit = rnp_xmit_frame, + .ndo_set_rx_mode = rnp_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_eth_ioctl = rnp_ioctl, + .ndo_change_mtu = rnp_change_mtu, + .ndo_get_stats64 = rnp_get_stats64, + .ndo_tx_timeout = rnp_tx_timeout, + .ndo_set_tx_maxrate = rnp_tx_maxrate, + .ndo_set_mac_address = rnp_set_mac, + .ndo_vlan_rx_add_vid = rnp_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = rnp_vlan_rx_kill_vid, + .ndo_set_vf_mac = rnp_ndo_set_vf_mac, + .ndo_set_vf_vlan = rnp_ndo_set_vf_vlan, + .ndo_set_vf_rate = rnp_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = rnp_ndo_set_vf_spoofchk, + .ndo_set_vf_link_state = rnp_ndo_set_vf_link_state, + .ndo_set_vf_trust = rnp_ndo_set_vf_trust, + .ndo_get_vf_config = rnp_ndo_get_vf_config, + .ndo_setup_tc = __rnp_setup_tc, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rnp_netpoll, +#endif + .ndo_bridge_setlink = rnp_ndo_bridge_setlink, + .ndo_bridge_getlink = rnp_ndo_bridge_getlink, + .ndo_features_check = rnp_features_check, + .ndo_set_features = rnp_set_features, + .ndo_fix_features = rnp_fix_features, +}; + +static void rnp_assign_netdev_ops(struct net_device *dev) +{ + /* different hw can assign difference fun */ + dev->netdev_ops = &rnp10_netdev_ops; + rnp_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * rnp_wol_supported - Check whether device supports WoL + * @hw: hw specific details + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int rnp_wol_supported(struct rnp_adapter *adapter, u16 device_id, + u16 subdevice_id) +{ + int is_wol_supported = 0; + + struct rnp_hw *hw = &adapter->hw; + + if (hw->wol_supported) + is_wol_supported = 1; + return is_wol_supported; +} + +static inline unsigned long rnp_tso_features(struct rnp_hw *hw) +{ + unsigned long features = 0; + + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + features |= NETIF_F_TSO; + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + features |= NETIF_F_TSO6; + features |= NETIF_F_GSO_PARTIAL; + if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) + features |= RNP_GSO_PARTIAL_FEATURES; + + return features; +} + +static void remove_mbx_irq(struct rnp_adapter *adapter) +{ + /* mbx */ + if (adapter->num_other_vectors) { + /* only msix use indepented intr */ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + adapter->hw.mbx.ops.configure( + &adapter->hw, adapter->msix_entries[0].entry, + false); + free_irq(adapter->msix_entries[0].vector, adapter); + + adapter->hw.mbx.other_irq_enabled = false; + } + } +} + +static int register_mbx_irq(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err = 0; + + /* for mbx:vector0 */ + if (adapter->num_other_vectors) { + /* only do this in msix mode */ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + err = request_irq(adapter->msix_entries[0].vector, + rnp_msix_other, 0, netdev->name, + adapter); + if (err) { + e_err(probe, + "request_irq for msix_other failed: %d\n", + err); + goto err_mbx; + } + hw->mbx.ops.configure( + hw, adapter->msix_entries[0].entry, true); + adapter->hw.mbx.other_irq_enabled = true; + } + } + +err_mbx: + return err; +} + +static int rnp_rm_adpater(struct rnp_adapter *adapter) +{ + struct net_device *netdev; + struct rnp_hw *hw = &adapter->hw; + + netdev = adapter->netdev; + pr_info("= remove adapter:%s =\n", netdev->name); + + 
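/* debugfs entries, the service timer and deferred work are all
+ * quiesced below before unregister_netdev() releases the netdev.
+ */
+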
rnp_dbg_adapter_exit(adapter); + + netif_carrier_off(netdev); + + set_bit(__RNP_DOWN, &adapter->state); + set_bit(__RNP_REMOVE, &adapter->state); + if (module_enable_ptp) { + while (test_bit(__RNP_PTP_TX_IN_PROGRESS, &adapter->state)) { + usleep_range(10000, 20000); + } + cancel_work_sync(&adapter->tx_hwtstamp_work); + } + cancel_work_sync(&adapter->service_task); + + del_timer_sync(&adapter->service_timer); + rnp_sysfs_exit(adapter); + rnp_fdir_filter_exit(adapter); + adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC); + + if (adapter->rpu_inited) { + rnp_rpu_mpe_stop(adapter); + adapter->rpu_inited = 0; + } + + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + adapter->netdev = NULL; + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, false, rnp_driver_insmod); + + remove_mbx_irq(adapter); + + rnp_clear_interrupt_scheme(adapter); + + if (hw->ncsi_en) { + rnp_mbx_probe_stat_set(hw, MBX_REMOVE); + } + + if (adapter->io_addr) + iounmap(adapter->io_addr); + + if (adapter->io_addr_bar0) + iounmap(adapter->io_addr_bar0); + + free_netdev(netdev); + + pr_info("remove complete\n"); + + return 0; +} + +static void rnp_fix_dma_tx_status(struct rnp_adapter *adapter) +{ + int i; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + for (i = 0; i < dma->max_tx_queues; i++) + dma_ring_wr32(dma, RING_OFFSET(i) + RNP_DMA_TX_START, + 1); + } +} + +static u8 rnp10_pfnum(u8 __iomem *hw_addr_bar0, struct pci_dev *pdev) +{ + /* n10 read this from bar0 */ + u16 vf_num = -1; + u32 pfvfnum_reg; +#define PF_NUM_REG_N10 (0x75f000) + pfvfnum_reg = (PF_NUM_REG_N10 & (pci_resource_len(pdev, 0) - 1)); + vf_num = readl(hw_addr_bar0 + pfvfnum_reg); +#define VF_NUM_MASK_TEMP (0x400) +#define VF_NUM_OFF (4) + return ((vf_num & VF_NUM_MASK_TEMP) >> VF_NUM_OFF); +} + +static int rnp_can_rpu_start(struct rnp_adapter *adapter) +{ + if (adapter->hw.rpu_addr == NULL) + return 0; + if ((adapter->pdev->device & 0xff00) == 0x1c00) { + return 1; + } + if (adapter->hw.rpu_availble) { + return 1; + } + return 0; +} + +static int rnp_add_adpater(struct pci_dev *pdev, struct rnp_info *ii, + struct rnp_adapter **padapter) +{ + int i, err = 0; + struct rnp_adapter *adapter = NULL; + struct net_device *netdev; + struct rnp_hw *hw; + u8 __iomem *hw_addr = NULL; + u8 __iomem *hw_addr_bar0 = NULL; + + u32 dma_version = 0; + u32 nic_version = 0; + u32 queues = ii->total_queue_pair_cnts; + static int bd_number; + + pr_info("==== add adapter queues:%d ====", queues); + netdev = alloc_etherdev_mq(sizeof(struct rnp_adapter), queues); + if (!netdev) + return -ENOMEM; + + if (!fix_eth_name) + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + + memset((char *)adapter, 0x00, sizeof(struct rnp_adapter)); + adapter->netdev = netdev; + adapter->pdev = pdev; + + adapter->max_ring_pair_counts = queues; + if (padapter) + *padapter = adapter; + + adapter->bd_number = bd_number++; + adapter->port = 0; + snprintf(adapter->name, sizeof(netdev->name), "%s%d%d", rnp_driver_name, + 1, adapter->bd_number); + pci_set_drvdata(pdev, adapter); + + hw = &adapter->hw; + hw->back = adapter; + /* first setup hw type */ + hw->rss_type = ii->rss_type; + hw->hw_type = ii->hw_type; + switch (hw->hw_type) { + case rnp_hw_n10: + case rnp_hw_n20: + case rnp_hw_n400: + case rnp_hw_uv440: + hw_addr_bar0 = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw_addr_bar0) { + dev_err(&pdev->dev, "pcim_iomap bar%d 
failed!\n", 0); + return -EIO; + } +#ifdef FIX_VF_BUG + rnp_wr_reg(hw_addr_bar0 + + (0x7982fc & (pci_resource_len(pdev, 0) - 1)), + 0); +#endif + + /* n10 use bar4 */ +#define RNP_NIC_BAR_N10 4 + hw_addr = ioremap(pci_resource_start(pdev, RNP_NIC_BAR_N10), + pci_resource_len(pdev, RNP_NIC_BAR_N10)); + if (!hw_addr) { + dev_err(&pdev->dev, "pcim_iomap bar%d failed!\n", + RNP_NIC_BAR_N10); + return -EIO; + } + pr_info("[bar%d]:%p %llx len=%d MB\n", RNP_NIC_BAR_N10, hw_addr, + (unsigned long long)pci_resource_start(pdev, + RNP_NIC_BAR_N10), + (int)pci_resource_len(pdev, RNP_NIC_BAR_N10) / 1024 / + 1024); + /* get dma version */ + dma_version = rnp_rd_reg(hw_addr); + + if (rnp10_pfnum(hw_addr_bar0, pdev)) + hw->pfvfnum = PF_NUM(1); + else + hw->pfvfnum = PF_NUM(0); + +#ifdef FIX_VF_BUG + if (hw->pfvfnum) + hw->hw_addr = hw_addr + 0x100000; + else + hw->hw_addr = hw_addr; +#else + hw->hw_addr = hw_addr; +#endif + /* setup msix base */ +#ifdef FIX_VF_BUG + if (hw->pfvfnum) + hw->ring_msix_base = hw->hw_addr + 0xa4000 + 0x200; + else + hw->ring_msix_base = hw->hw_addr + 0xa4000; +#else + hw->ring_msix_base = hw->hw_addr + 0xa4000; +#endif + nic_version = rd32(hw, RNP_TOP_NIC_VERSION); + adapter->irq_mode = irq_mode_msix; + adapter->flags |= RNP_FLAG_MSIX_CAPABLE; + + break; + default: +#ifdef FIX_VF_BUG + hw_addr_bar0 = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); +#endif + hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + goto err_free_net; + break; + } + + /* setup FT_PADDING */ + { +#ifdef FT_PADDING + u32 data; + + data = rnp_rd_reg(hw->hw_addr + RNP_DMA_CONFIG); + SET_BIT(8, data); + rnp_wr_reg(hw->hw_addr + RNP_DMA_CONFIG, data); + adapter->priv_flags |= RNP_PRIV_FLAG_FT_PADDING; +#endif + } + + /* assign to adapter */ + adapter->io_addr = hw_addr; + adapter->io_addr_bar0 = hw_addr_bar0; + if (pci_resource_len(pdev, 0) == (8 * 1024 * 1024)) { + hw->rpu_addr = hw_addr_bar0; + } + + hw->pdev = pdev; + hw->dma_version = dma_version; + adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV +#ifdef MSG_PROBE_ENABLE + | NETIF_MSG_PROBE +#endif +#ifdef MSG_IFUP_ENABLE + | NETIF_MSG_IFUP +#endif +#ifdef MSG_IFDOWN_ENABLE + | NETIF_MSG_IFDOWN +#endif + ); + + /* we have other irq */ + adapter->num_other_vectors = 1; + /* get software info */ + ii->get_invariants(hw); + + spin_lock_init(&adapter->link_stat_lock); + + if (adapter->num_other_vectors) { + /* Mailbox */ + rnp_init_mbx_params_pf(hw); + memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + if (dma_version >= 0x20210111) { + rnp_mbx_link_event_enable(hw, 0); + if ((hw->hw_type == rnp_hw_n10) || + (hw->hw_type == rnp_hw_n400)) + rnp_mbx_force_speed(hw, 0); + if (rnp_mbx_get_capability(hw, ii)) { + dev_err(&pdev->dev, + "rnp_mbx_get_capability failed!\n"); + err = -EIO; + goto err_free_net; + } + + /* should check eco */ +#ifdef VF_PROMISC_SUPPORT + if (!hw->eco) { + dev_err(&pdev->dev, + "only v2 chips support vf promisc!\n"); + err = -EIO; + goto err_free_net; + + } +#endif + adapter->portid_of_card = hw->port_id[0]; + if (hw->eco) { + hw->eth.num_rar_entries -= 1; + hw->mac.num_rar_entries -= 1; + hw->num_rar_entries -= 1; + } + + adapter->portid_of_card = hw->pfvfnum ? 
1 : 0; + adapter->wol = hw->wol; + } + } + if (hw->ncsi_en) { + hw->eth.num_rar_entries -= hw->ncsi_rar_entries; + hw->mac.num_rar_entries -= hw->ncsi_rar_entries; + hw->num_rar_entries -= hw->ncsi_rar_entries; + } + + if (hw->force_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + hw->default_rx_queue = 0; + pr_info("%s %s: dma version:0x%x, nic version:0x%x, pfvfnum:0x%x\n", + adapter->name, pci_name(pdev), hw->dma_version, nic_version, + hw->pfvfnum); + + /* Setup hw api */ + hw->mac.type = ii->mac; + /* EEPROM */ + if (ii->eeprom_ops) + memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + + hw->phy.sfp_type = rnp_sfp_type_unknown; + + hw->ops.setup_ethtool(netdev); + rnp_assign_netdev_ops(netdev); + rnp_check_options(adapter); + /* setup the private structure */ + /* this private is used only once + */ + err = rnp_sw_init(adapter); + if (err) + goto err_sw_init; + + err = hw->ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; + if (err) { + e_dev_err("HW Init failed: %d\n", err); + goto err_sw_init; + } + if (hw->ops.driver_status) + hw->ops.driver_status(hw, true, rnp_driver_insmod); + if (hw->ops.driver_status) { + hw->ops.driver_status(hw, !!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE), + rnp_driver_force_control_mac); + } + +#ifdef CONFIG_PCI_IOV + if (adapter->num_other_vectors) { + rnp_enable_sriov(adapter); + pci_sriov_set_totalvfs(pdev, hw->max_vfs - 1); + } +#endif + + /* MTU range: 68 - 9710 */ + netdev->min_mtu = hw->min_length; + netdev->max_mtu = hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN); + + if (hw->feature_flags & RNP_NET_FEATURE_SG) + netdev->features |= NETIF_F_SG; + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; + if (hw->feature_flags & RNP_NET_FEATURE_RX_HASH) + netdev->features |= NETIF_F_RXHASH; + if (hw->feature_flags & RNP_NET_FEATURE_RX_CHECKSUM) + netdev->features |= NETIF_F_RXCSUM; + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + netdev->features |= NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; + + if (hw->feature_flags & RNP_NET_FEATURE_USO) + netdev->features |= NETIF_F_GSO_UDP_L4; + + netdev->features |= NETIF_F_HIGHDMA; + + if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) { + netdev->gso_partial_features = RNP_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + RNP_GSO_PARTIAL_FEATURES; + } + + netdev->hw_features |= netdev->features; + + if (hw->ncsi_en) { + hw->feature_flags &= ~RNP_NET_FEATURE_VLAN_OFFLOAD; + } + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + } + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX; + } + netdev->hw_features |= NETIF_F_RXALL; + if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) + netdev->hw_features |= NETIF_F_NTUPLE; + if (hw->feature_flags & RNP_NET_FEATURE_RX_FCS) + netdev->hw_features |= NETIF_F_RXFCS; + if (hw->feature_flags & RNP_NET_FEATURE_HW_TC) + netdev->hw_features |= NETIF_F_HW_TC; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->mpls_features |= 
NETIF_F_HW_CSUM; + + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) { + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + } + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + netdev->features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX; + } + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + if (adapter->flags2 & RNP_FLAG2_RSC_CAPABLE) + netdev->hw_features |= NETIF_F_LRO; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + +#if IS_ENABLED(CONFIG_DCB) + rnp_dcb_init(netdev, adapter); +#endif + + if (adapter->flags2 & RNP_FLAG2_RSC_ENABLED) + netdev->features |= NETIF_F_LRO; + + eth_hw_addr_set(netdev, hw->mac.perm_addr); + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); + pr_info("dev mac:%pM \n", netdev->dev_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); + + timer_setup(&adapter->service_timer, rnp_service_timer, 0); + + if (module_enable_ptp) { + /* setup ptp_addr according to mac type */ + switch (adapter->hw.mac.mac_type) { + case mac_dwc_xlg: + adapter->ptp_addr = adapter->hw.mac.mac_addr + 0xd00; + adapter->gmac4 = 1; + break; + case mac_dwc_g: + adapter->ptp_addr = adapter->hw.mac.mac_addr + 0x700; + adapter->gmac4 = 0; + break; + } + adapter->flags2 |= RNP_FLAG2_PTP_ENABLED; + if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED) { + adapter->tx_timeout_factor = 10; + INIT_WORK(&adapter->tx_hwtstamp_work, + rnp_tx_hwtstamp_work); + } + } + + INIT_WORK(&adapter->service_task, rnp_service_task); + clear_bit(__RNP_SERVICE_SCHED, &adapter->state); + + if (fix_eth_name) + strncpy(netdev->name, adapter->name, sizeof(netdev->name) - 1); + else { + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + } + + err = rnp_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + err = register_mbx_irq(adapter); + if (err) + goto err_register; + +#ifdef CONFIG_PCI_IOV + rnp_enable_sriov_true(adapter); +#endif + + /* WOL not supported for all devices */ + { + struct ethtool_wolinfo wol; + + if (rnp_wol_exclusion(adapter, &wol) || + !device_can_wakeup(&adapter->pdev->dev)) + adapter->wol = 0; + } + /* reset the hardware with the new settings */ + err = hw->ops.start_hw(hw); + rnp_fix_dma_tx_status(adapter); + + if (!fix_eth_name) + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); + err = register_netdev(netdev); + if (err) { + e_dev_err("register_netdev failed!\n"); + goto err_register; + } + + /* power down the optics for n10 SFP+ fiber */ + if (hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", + adapter->num_vfs); + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(pdev, (i | 0x10000000)); + } + + if (rnp_mbx_lldp_status_get(hw) == 1) { + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + } + + if (rnp_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); + + rnp_dbg_adapter_init(adapter); + /* only pf0 download mpe */ + if (rnp_is_pf0(&adapter->hw) && 
rnp_can_rpu_start(adapter)) { + rnp_rpu_mpe_start(adapter); + } + + if (hw->ncsi_en) { + hw->ops.set_mac_rx(hw, true); + rnp_mbx_probe_stat_set(hw, MBX_PROBE); + } + + return 0; +err_register: + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); +err_sw_init: + rnp_disable_sriov(adapter); + adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP; +err_free_net: + free_netdev(netdev); + return err; +} + +/** + * rnp_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in rnp_pci_tbl + * + * Returns 0 on success, negative on failure + * + * rnp_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int rnp_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rnp_adapter *adapter; + struct rnp_info *ii = rnp_info_tbl[id->driver_data]; + int err; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. + */ + if (pdev->is_virtfn) { + WARN(1, "%s (%hx:%hx) should not be a VF!\n", pci_name(pdev), + pdev->vendor, pdev->device); + return -EINVAL; + } + /* not support bus reset*/ + pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(56)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56))) { + enable_hi_dma = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + enable_hi_dma = 0; + } + + err = pci_request_mem_regions(pdev, rnp_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + pci_set_master(pdev); + pci_save_state(pdev); + + err = rnp_add_adpater(pdev, ii, &adapter); + if (err) + goto err_regions; + + return 0; +err_regions: + pci_release_mem_regions(pdev); +err_dma: +err_pci_reg: + return err; +} + +/** + * rnp_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * rnp_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void rnp_remove(struct pci_dev *pdev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + +#ifdef CONFIG_PCI_IOV + /* + * Only disable SR-IOV on unload if the user specified the now + * deprecated max_vfs module parameter. 
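+ * This driver, however, calls rnp_disable_sriov() unconditionally
+ * here so that no VFs outlive the PF driver.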
+ */ + rnp_disable_sriov(adapter); +#endif + rnp_rm_adpater(adapter); + + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver rnp_driver = { + .name = rnp_driver_name, + .id_table = rnp_pci_tbl, + .probe = rnp_probe, + .remove = rnp_remove, +#ifdef CONFIG_PM + .suspend = rnp_suspend, + .resume = rnp_resume, +#endif + .shutdown = rnp_shutdown, + .sriov_configure = rnp_pci_sriov_configure, +}; + +static int __init rnp_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", rnp_driver_string, rnp_driver_version); + pr_info("%s \n", rnp_copyright); + rnp_wq = create_singlethread_workqueue(rnp_driver_name); + + if (!rnp_wq) { + pr_err("%s: Failed to create workqueue\n", rnp_driver_name); + return -ENOMEM; + } + + rnp_dbg_init(); + + ret = pci_register_driver(&rnp_driver); + if (ret) { + destroy_workqueue(rnp_wq); + rnp_dbg_exit(); + return ret; + } + + return 0; +} +module_init(rnp_init_module); + +static void __exit rnp_exit_module(void) +{ + pci_unregister_driver(&rnp_driver); + + destroy_workqueue(rnp_wq); + + rnp_dbg_exit(); + + rcu_barrier(); /* Wait for completion of call_rcu()'s */ +} + +module_exit(rnp_exit_module); diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx.c b/drivers/net/ethernet/mucse/rnp/rnp_mbx.c new file mode 100644 index 000000000000..e4399ad1f73a --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include "rnp.h" +#include "rnp_type.h" +#include "rnp_common.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" + +#define VF2PF_MBOX_VEC(mbx, vf) (mbx->vf2pf_mbox_vec_base + 4 * (vf)) +#define CPU2PF_MBOX_VEC(mbx) (mbx->cpu2pf_mbox_vec) +/* == PF <--> VF mailbox ==== */ +#define SHARE_MEM_BYTES 64 +#define PF_VF_SHM(mbx, vf) \ + (mbx->pf_vf_shm_base + \ + mbx->mbx_mem_size * vf) +/* for PF1 rtl will remap 6000 to 0xb000 */ +#define PF2VF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 0) +#define VF2PF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 4) +#define PF_VF_SHM_DATA(mbx, vf) (PF_VF_SHM(mbx, vf) + 8) +#define PF2VF_MBOX_CTRL(mbx, vf) (mbx->pf2vf_mbox_ctrl_base + 4 * vf) +#define PF_VF_MBOX_MASK_LO(mbx) (mbx->pf_vf_mbox_mask_lo) +#define PF_VF_MBOX_MASK_HI(mbx) (mbx->pf_vf_mbox_mask_hi) + +/* === CPU <--> PF === */ +#define CPU_PF_SHM(mbx) (mbx->cpu_pf_shm_base) +#define CPU2PF_COUNTER(mbx) (CPU_PF_SHM(mbx) + 0) +#define PF2CPU_COUNTER(mbx) (CPU_PF_SHM(mbx) + 4) +#define CPU_PF_SHM_DATA(mbx) (CPU_PF_SHM(mbx) + 8) +#define PF2CPU_MBOX_CTRL(mbx) (mbx->pf2cpu_mbox_ctrl) +#define CPU_PF_MBOX_MASK(mbx) (mbx->cpu_pf_mbox_mask) +#define MBOX_CTRL_REQ (1 << 0) /* WO */ +#define MBOX_CTRL_PF_HOLD_SHM (1 << 3) /* VF:RO, PF:WR */ +#define MBOX_IRQ_EN 0 +#define MBOX_IRQ_DISABLE 1 +#define mbx_prd32(hw, reg) prnp_rd_reg((hw)->hw_addr + (reg)) +#define mbx_rd32(hw, reg) rnp_rd_reg((hw)->hw_addr + (reg)) +#define mbx_pwr32(hw, reg, val) p_rnp_wr_reg((hw)->hw_addr + (reg), (val)) +#define mbx_wr32(hw, reg, val) rnp_wr_reg((hw)->hw_addr + (reg), (val)) + +/** + * rnp_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox/vfnum to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 rnp_read_mbx(struct rnp_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + /* limit read to size of mailbox */ + if 
(size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * rnp_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 rnp_write_mbx(struct rnp_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = RNP_ERR_MBX; + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +static inline u16 rnp_mbx_get_req(struct rnp_hw *hw, int reg) +{ + mb(); + return ioread32(hw->hw_addr + reg) & 0xffff; +} + +static inline u16 rnp_mbx_get_ack(struct rnp_hw *hw, int reg) +{ + mb(); + return (mbx_rd32(hw, reg) >> 16); +} + +static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + u16 req; + int reg; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 v; + + reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) : + PF2VF_COUNTER(mbx, mbx_id); + v = mbx_rd32(hw, reg); + + req = (v & 0xffff); + req++; + v &= ~(0x0000ffff); + v |= req; + mb(); + mbx_wr32(hw, reg, v); + + /* update stats */ + hw->mbx.stats.msgs_tx++; +} + +static inline void rnp_mbx_inc_pf_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + u16 ack; + struct rnp_mbx_info *mbx = &hw->mbx; + int reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) : + PF2VF_COUNTER(mbx, mbx_id); + u32 v = mbx_rd32(hw, reg); + + ack = (v >> 16) & 0xffff; + ack++; + v &= ~(0xffff0000); + v |= (ack << 16); + mb(); + mbx_wr32(hw, reg, v); + + /* update stats */ + hw->mbx.stats.msgs_rx++; +} + +/** + * rnp_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 rnp_check_for_msg(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * rnp_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 rnp_check_for_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * rnp_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 rnp_poll_for_msg(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + +out: + return countdown ? 
0 : -ETIME; +} + +/** + * rnp_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgment + **/ +static s32 rnp_poll_for_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) { + printk("mbx poll for ack ack timeout\n"); + break; + } + udelay(mbx->usec_delay); + } + +out: + return countdown ? 0 : RNP_ERR_MBX; +} + +/** + * rnp_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 rnp_read_posted_mbx(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = rnp_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * rnp_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 rnp_write_posted_mbx(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg and hold buffer lock */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = rnp_poll_for_ack(hw, mbx_id); + +out: + return ret_val; +} + +/** + * rnp_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + s32 ret_val = RNP_ERR_MBX; + u16 hw_req_count = 0; + struct rnp_mbx_info *mbx = &hw->mbx; + + if (mbx_id == MBX_CM3CPU) { + hw_req_count = rnp_mbx_get_req(hw, CPU2PF_COUNTER(mbx)); + if (mbx->mbx_feature & MBX_FEATURE_NO_ZERO) { + if ((hw_req_count != 0) && + (hw_req_count != hw->mbx.cpu_req)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + } else { + if (hw_req_count != hw->mbx.cpu_req) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } + } else { + if (rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)) != + hw->mbx.vf_req[mbx_id]) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } + + return ret_val; +} + +/** + * rnp_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 rnp_check_for_ack_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + s32 ret_val = RNP_ERR_MBX; + struct rnp_mbx_info *mbx = &hw->mbx; + + if (mbx_id 
== MBX_CM3CPU) { + if (rnp_mbx_get_ack(hw, CPU2PF_COUNTER(mbx)) != + hw->mbx.cpu_ack) { + ret_val = 0; + hw->mbx.stats.acks++; + } + } else { + if (rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)) != + hw->mbx.vf_ack[mbx_id]) { + ret_val = 0; + hw->mbx.stats.acks++; + } + } + + return ret_val; +} + +/** + * rnp_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: the VF index or CPU + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + int try_cnt = 5000; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + + while (try_cnt-- > 0) { + /* Take ownership of the buffer */ + mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM); + wmb(); + /* reserve mailbox for cm3 use */ + if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM) + return 0; + udelay(100); + } + + rnp_err("%s: failed to get:%d lock \n", __func__, mbx_id); + return EPERM; +} + +/** + * rnp_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + s32 ret_val = 0; + u16 i; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 DATA_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) : + PF_VF_SHM_DATA(mbx, mbx_id); + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + + if (size > RNP_VFMAILBOX_SIZE) { + printk("%s: size:%d should <%d\n", __func__, size, + RNP_VFMAILBOX_SIZE); + return -EINVAL; + } + + /* lock the mailbox to prevent pf/vf/cpu race condition */ + ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) { + printk("%s: get mbx:%d wlock failed. ret:%d. req:0x%08x-0x%08x\n", + __func__, mbx_id, ret_val, msg[0], msg[1]); + goto out_no_write; + } + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) { + mbx_wr32(hw, DATA_REG + i * 4, msg[i]); + rnp_logd(LOG_MBX_OUT, " w-mbx:0x%x <= 0x%x\n", + DATA_REG + i * 4, msg[i]); + } + + /* flush msg and acks as we are overwriting the message buffer */ + if (mbx_id == MBX_CM3CPU) { + hw->mbx.cpu_ack = rnp_mbx_get_ack(hw, CPU2PF_COUNTER(mbx)); + } else { + hw->mbx.vf_ack[mbx_id] = + rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)); + } + rnp_mbx_inc_pf_req(hw, mbx_id); + + /* Interrupt VF/CM3 to tell it a message + * has been sent and release buffer + */ + if (mbx->mbx_feature & MBX_FEATURE_WRITE_DELAY) + udelay(300); + mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ); + +out_no_write: + /* sometimes happen */ + + return ret_val; +} + +/** + * rnp_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF/CPU request so no polling for message is needed. + **/ +static s32 rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + s32 ret_val = -EIO; + u32 i; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 BUF_REG = (mbx_id == MBX_CM3CPU) ? 
CPU_PF_SHM_DATA(mbx) : + PF_VF_SHM_DATA(mbx, mbx_id); + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + if (size > RNP_VFMAILBOX_SIZE) { + printk("%s: size:%d should <%d\n", __func__, size, + RNP_VFMAILBOX_SIZE); + return -EINVAL; + } + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) + goto out_no_read; + + mb(); + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) { + msg[i] = mbx_rd32(hw, BUF_REG + 4 * i); + rnp_logd(LOG_MBX_IN, " r-mbx:0x%x => 0x%x\n", BUF_REG + 4 * i, + msg[i]); + } + mbx_wr32(hw, BUF_REG, 0); + + /* update req. used by rnpvf_check_for_msg_vf */ + if (mbx_id == MBX_CM3CPU) { + hw->mbx.cpu_req = rnp_mbx_get_req(hw, CPU2PF_COUNTER(mbx)); + } else { + hw->mbx.vf_req[mbx_id] = + rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)); + } + /* this ack maybe too earier? */ + /* Acknowledge receipt and release mailbox, then we're done */ + rnp_mbx_inc_pf_ack(hw, mbx_id); + + /* free ownership of the buffer */ + mbx_wr32(hw, CTRL_REG, 0); + +out_no_read: + + return ret_val; +} + +static void rnp_mbx_reset(struct rnp_hw *hw) +{ + int idx, v; + struct rnp_mbx_info *mbx = &hw->mbx; + + for (idx = 0; idx < hw->max_vfs; idx++) { + v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); + hw->mbx.vf_req[idx] = v & 0xffff; + hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff; + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + } + + v = mbx_rd32(hw, CPU2PF_COUNTER(mbx)); + hw->mbx.cpu_req = v & 0xffff; + hw->mbx.cpu_ack = (v >> 16) & 0xffff; + + printk("now mbx.cpu_req %d mbx.cpu_ack %d\n", hw->mbx.cpu_req, + hw->mbx.cpu_ack); + /* release pf->cm3 buffer lock */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + + if (PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0); /* allow vf to vectors */ + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + + /* allow CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0); +} + +static int rnp_mbx_configure_pf(struct rnp_hw *hw, int nr_vec, bool enable) +{ + int idx = 0; + u32 v; + struct rnp_mbx_info *mbx = &hw->mbx; + + if (enable) { + for (idx = 0; idx < hw->max_vfs; idx++) { + v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); + hw->mbx.vf_req[idx] = v & 0xffff; + hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff; + + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + } + /* reset pf->cm3 status */ + v = mbx_rd32(hw, CPU2PF_COUNTER(mbx)); + hw->mbx.cpu_req = v & 0xffff; + hw->mbx.cpu_ack = (v >> 16) & 0xffff; + /* release pf->cm3 buffer lock */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + /* allow VF to PF MBX IRQ */ + for (idx = 0; idx < hw->max_vfs; idx++) { + mbx_wr32(hw, VF2PF_MBOX_VEC(mbx, idx), + nr_vec); + /* vf to pf req interrupt */ + } + + if (PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0); + /* allow vf to vectors */ + + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + + /* bind cm3cpu mbx to irq */ + wr32(hw, CPU2PF_MBOX_VEC(mbx), + nr_vec); + /* cm3 and VF63 share #63 irq */ + /* allow CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0); + + rnp_dbg("[%s] mbx-vector:%d\n", __func__, nr_vec); + + } else { + if (PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0xffffffff); + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), + 0xffffffff); + + /* disable CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffffffff); + + /* reset vf->pf status/ctrl */ + for (idx = 0; idx < hw->max_vfs; idx++) + mbx_wr32(hw, 
PF2VF_MBOX_CTRL(mbx, idx), 0); + /* reset pf->cm3 ctrl */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + /* used to sync link status */ + wr32(hw, RNP_DMA_DUMY, 0); + } + return 0; +} + +unsigned int rnp_mbx_change_timeout(struct rnp_hw *hw, int timeout_ms) +{ + unsigned int old_timeout = hw->mbx.timeout; + + hw->mbx.timeout = timeout_ms * 1000 / hw->mbx.usec_delay; + + return old_timeout; +} + +/** + * rnp_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 rnp_init_mbx_params_pf(struct rnp_hw *hw) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + + mbx->usec_delay = 100; + /* wait 5s */ + mbx->timeout = (4 * 1000 * 1000) / mbx->usec_delay; + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + mbx->size = RNP_VFMAILBOX_SIZE; + mutex_init(&mbx->lock); + rnp_mbx_reset(hw); + + return 0; +} + +struct rnp_mbx_operations mbx_ops_generic = { + .init_params = rnp_init_mbx_params_pf, + .read = rnp_read_mbx_pf, + .write = rnp_write_mbx_pf, + .read_posted = rnp_read_posted_mbx, + .write_posted = rnp_write_posted_mbx, + .check_for_msg = rnp_check_for_msg_pf, + .check_for_ack = rnp_check_for_ack_pf, + .configure = rnp_mbx_configure_pf, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx.h b/drivers/net/ethernet/mucse/rnp/rnp_mbx.h new file mode 100644 index 000000000000..692f5b75ce89 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_MBX_H_ +#define _RNP_MBX_H_ + +#include "rnp_type.h" + +#define RNP_VFMAILBOX_SIZE 14 /* 16 32 bit words - 64 bytes */ +#define RNP_ERR_MBX -100 +#define RNP_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with */ +/* this are the ACK */ +#define RNP_VT_MSGTYPE_NACK 0x40000000 +/* Messages below or'd with + * this are the NACK + */ +#define RNP_VT_MSGTYPE_CTS 0x20000000 +/* Indicates that VF is still + *clear to send requests + */ +#define RNP_VT_MSGINFO_SHIFT 14 +/* bits 23:16 are used for exra info for certain messages */ +#define RNP_VT_MSGINFO_MASK (0x7F << RNP_VT_MSGINFO_SHIFT) +/* VLAN pool filtering masks */ +#define RNP_VLVF_VIEN 0x80000000 /* filter is valid */ +#define RNP_VLVF_ENTRIES 64 +#define RNP_VLVF_VLANID_MASK 0x00000FFF +/* + * mailbox msg_data + * + * + * + */ +#define RNP_VNUM_OFFSET (21) +#define RNP_VF_MASK (0x7f << 21) +#define RNP_MAIL_CMD_MASK 0x3fff +/* mailbox API, legacy requests */ +#define RNP_VF_RESET 0x01 /* VF requests reset */ +#define RNP_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define RNP_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define RNP_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define RNP_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define RNP_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define RNP_VF_GET_MACADDR 0x07 /* get vf macaddr */ +#define RNP_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define RNP_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define RNP_VF_SET_VLAN_STRIP 0x0a /* VF requests PF to set VLAN STRIP */ +#define RNP_VF_REG_RD 0x0b /* vf read reg */ +#define RNP_VF_GET_MTU 0x0c /* vf get pf ethtool setup */ +#define RNP_VF_SET_MTU 0x0d /* vf get pf ethtool setup */ +#define RNP_VF_GET_FW 0x0e /* vf get firmware version 
*/ +#define RNP_VF_GET_LINK 0x10 /* get link status */ +#define RNP_VF_RESET_PF 0x11 +#define RNP_VF_GET_DMA_FRAG 0x12 +#define RNP_VF_SET_PROMISCE 0x16 +#define RNP_PF_SET_FCS 0x10 /* PF set fcs status */ +#define RNP_PF_SET_PAUSE 0x11 /* PF set pause status */ +#define RNP_PF_SET_FT_PADDING 0x12 /* PF set ft padding status */ +#define RNP_PF_SET_VLAN_FILTER 0x13 /* PF set ntuple status */ +#define RNP_PF_SET_VLAN 0x14 /* PF set ntuple status */ +#define RNP_PF_SET_LINK 0x15 /* PF set ntuple status */ +#define RNP_PF_SET_MTU 0x16 /* PF set ntuple status */ +#define RNP_PF_SET_RESET 0x17 /* PF set ntuple status */ +#define RNP_PF_LINK_UP (1 << 31) +#define RNP_PF_REMOVE 0x0f +/* GET_QUEUES return data indices within the mailbox */ +#define RNP_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define RNP_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define RNP_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define RNP_VF_DEF_QUEUE 4 /* Default queue offset */ +#define RNP_VF_QUEUE_START 5 /* Default queue offset */ +#define RNP_VF_QUEUE_DEPTH 6 /* ring depth */ + +/* length of permanent address message returned from PF */ +#define RNP_VF_PERMADDR_MSG_LEN 11 +/* word in permanent address message with the current multicast type */ +#define RNP_VF_MC_TYPE_WORD 3 +#define RNP_VF_DMA_VERSION_WORD 4 +#define RNP_VF_VLAN_WORD 5 +#define RNP_VF_PHY_TYPE_WORD 6 +#define RNP_VF_FW_VERSION_WORD 7 +#define RNP_VF_LINK_STATUS_WORD 8 +#define RNP_VF_AXI_MHZ 9 +#define PF_FEATRURE_VLAN_FILTER BIT(0) +#define PF_NCSI_EN BIT(1) +#define RNP_VF_FEATURE 10 + +#define RNP_PF_CONTROL_PRING_MSG 0x0100 /* PF control message */ + +#define RNP_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define RNP_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +enum MBX_ID { + MBX_VF0 = 0, + MBX_VF1, + MBX_VF2, + MBX_VF3, + MBX_VF4, + MBX_VF5, + MBX_VF6, + MBX_VF7, + MBX_VF8, + MBX_VF9, + MBX_VF10, + MBX_VF11, + MBX_VF12, + MBX_VF13, + MBX_VF14, + MBX_VF15, + MBX_VF16, + MBX_VF17, + MBX_VF18, + MBX_VF19, + MBX_VF20, + MBX_VF21, + MBX_VF22, + MBX_VF23, + MBX_VF24, + MBX_VF25, + MBX_VF26, + MBX_VF27, + MBX_VF28, + MBX_VF29, + MBX_VF30, + MBX_VF31, + MBX_VF32, + MBX_VF33, + MBX_VF34, + MBX_VF35, + MBX_VF36, + MBX_VF37, + MBX_VF38, + MBX_VF39, + MBX_VF40, + MBX_VF41, + MBX_VF42, + MBX_VF43, + MBX_VF44, + MBX_VF45, + MBX_VF46, + MBX_VF47, + MBX_VF48, + MBX_VF49, + MBX_VF50, + MBX_VF51, + MBX_VF52, + MBX_VF53, + MBX_VF54, + MBX_VF55, + MBX_VF56, + MBX_VF57, + MBX_VF58, + MBX_VF59, + MBX_VF60, + MBX_VF61, + MBX_VF62, + //... 
+ MBX_VF63, + MBX_CM3CPU, + MBX_FW = MBX_CM3CPU, + MBX_VFCNT +}; + +enum PF_STATUS { + PF_FCS_STATUS, + PF_PAUSE_STATUS, + PF_FT_PADDING_STATUS, + PF_VLAN_FILTER_STATUS, + PF_SET_VLAN_STATUS, + PF_SET_LINK_STATUS, + PF_SET_MTU, + PF_SET_RESET, +}; + +s32 rnp_read_mbx(struct rnp_hw *, u32 *, u16, enum MBX_ID); +s32 rnp_write_mbx(struct rnp_hw *, u32 *, u16, enum MBX_ID); +s32 rnp_check_for_msg(struct rnp_hw *, enum MBX_ID); +s32 rnp_check_for_ack(struct rnp_hw *, enum MBX_ID); +s32 rnp_check_for_rst(struct rnp_hw *, enum MBX_ID); +s32 rnp_init_mbx_params_pf(struct rnp_hw *); +extern struct rnp_mbx_operations mbx_ops_generic; +#define MBX_IFDOWN (0) +#define MBX_IFUP (1) +#define MBX_PROBE (2) +#define MBX_REMOVE (3) +void rnp_mbx_probe_stat_set(struct rnp_hw *hw, int stat); +int rnp_fw_get_macaddr(struct rnp_hw *hw, int pfvfnum, u8 *mac_addr, int lane); +int rnp_mbx_fw_reset_phy(struct rnp_hw *hw); +unsigned int rnp_mbx_change_timeout(struct rnp_hw *hw, int timeout_ms); +struct rnp_info; +int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info); +int rnp_mbx_link_event_enable(struct rnp_hw *hw, int enable); +int rnp_mbx_get_link_stat(struct rnp_hw *hw); +int rnp_mbx_ifup_down(struct rnp_hw *hw, int up); +int rnp_mbx_led_set(struct rnp_hw *hw, int value); +int rnp_mbx_get_dump(struct rnp_hw *hw, int flags, u8 *data_out, int buflen); +int rnp_mbx_set_dump(struct rnp_hw *hw, int flag); +int rnp_mbx_sfp_write(struct rnp_hw *hw, int sfp_addr, int reg, short v); +int rnp_mbx_sfp_module_eeprom_info(struct rnp_hw *hw, int sfp_addr, int reg, + int data_len, u8 *buf); +int rnp_mbx_get_temp(struct rnp_hw *hw, int *voltage); +int rnp_mbx_phy_link_set(struct rnp_hw *hw, int adv, int autoneg, int speed, + int duplex, int tp_mdix_ctrl); +int rnp_mbx_phy_pause_set(struct rnp_hw *hw, int pause_mode); +int rnp_mbx_phy_write(struct rnp_hw *hw, u32 reg, u32 val); +int rnp_mbx_phy_read(struct rnp_hw *hw, u32 reg, u32 *val); + +int rnp_maintain_req(struct rnp_hw *hw, int cmd, int arg0, int req_data_bytes, + int reply_bytes, dma_addr_t dma_phy_addr); +int rnp_mbx_get_lane_stat(struct rnp_hw *hw); +int rnp_mbx_wol_set(struct rnp_hw *hw, u32 mode); +int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status); +int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status); +int rnp_mbx_ifforce_control_mac(struct rnp_hw *hw, int status); +int wait_mbx_init_done(struct rnp_hw *hw); +int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, + int value2, int value3); +void rnp_link_stat_mark(struct rnp_hw *hw, int up); +int rnp_mbx_reg_writev(struct rnp_hw *hw, int fw_reg, int value[4], int bytes); +int rnp_mbx_reg_write(struct rnp_hw *hw, int fw_reg, int value); +int rnp_mbx_fw_reg_read(struct rnp_hw *hw, int fw_reg); +int rnp_mbx_force_speed(struct rnp_hw *hw, int speed); + +#define cm3_reg_write32(hw, cm3_rpu_reg, v) \ + rnp_mbx_reg_write((hw), (cm3_rpu_reg), (v)) + +#define cm3_reg_read32(hw, cm3_rpu_reg) rnp_mbx_fw_reg_read((hw), (cm3_rpu_reg)) + +int rnp_mbx_lldp_status_get(struct rnp_hw *hw); +int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable); +int rnp_mbx_ddr_csl_enable(struct rnp_hw *hw, int enable, dma_addr_t dma_phy, + int bytes); +#endif /* _RNP_MBX_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c new file mode 100644 index 000000000000..dcd07c615fcc --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c @@ -0,0 +1,1495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" + +#define RNP_FW_MAILBOX_SIZE RNP_VFMAILBOX_SIZE + +static bool is_cookie_valid(struct rnp_hw *hw, void *cookie) +{ + unsigned char* begin = (unsigned char*)(&hw->mbx.cookie_pool.cookies[0]); + unsigned char* end = (unsigned char*)(&hw->mbx.cookie_pool.cookies[MAX_COOKIES_ITEMS]); + if(((unsigned char*)cookie)>=begin && ((unsigned char*)cookie)< end){ + return true; + } + return false; +} + +static struct mbx_req_cookie *mbx_cookie_zalloc(struct rnp_hw *hw, int priv_len) +{ + struct mbx_req_cookie *cookie = NULL; + int loop_cnt = MAX_COOKIES_ITEMS, i; + bool find = false; + + u64 now_jiffies = get_jiffies_64(); + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed,priv_len:%d\n", __func__, priv_len); + return NULL; + } + i = hw->mbx.cookie_pool.next_idx; + while(loop_cnt--){ + cookie = &(hw->mbx.cookie_pool.cookies[i]); + if(cookie->stat == COOKIE_FREE || + /* force free cookie if cookie not freed after 120 seconds */ + time_after64(now_jiffies,cookie->alloced_jiffies + (2 * 60) * HZ)){ + find = true; + cookie->alloced_jiffies = get_jiffies_64(); + cookie->stat = COOKIE_ALLOCED; + hw->mbx.cookie_pool.next_idx = (i+1)%MAX_COOKIES_ITEMS; + break; + } + i = (i+1)%MAX_COOKIES_ITEMS; + } + mutex_unlock(&hw->mbx.lock); + + if (!find) { + rnp_err("[%s] no free cookies availble\n", __func__); + return NULL; + } + + cookie->timeout_jiffes = 30 * HZ; + cookie->priv_len = priv_len; + + return cookie; +} + +static void mbx_free_cookie(struct mbx_req_cookie *cookie, bool force_free) +{ + if (!cookie) + return; + + if (force_free) { + cookie->stat = COOKIE_FREE; + } else { + cookie->stat = COOKIE_FREE_WAIT_TIMEOUT; + } +} + +static int rnp_mbx_write_posted_locked(struct rnp_hw *hw, struct mbx_fw_cmd_req *req) +{ + int err = 0; + int retry = 3; + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed opcode:0x%x\n", __func__, + req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + +try_again: + retry--; + if (retry < 0) { + mutex_unlock(&hw->mbx.lock); + rnp_err("%s: write_posted failed! err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + return -EIO; + } + + err = hw->mbx.ops.write_posted( + hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + goto try_again; + } + mutex_unlock(&hw->mbx.lock); + + return err; +} + +static void rnp_link_stat_mark_reset(struct rnp_hw *hw) +{ + wr32(hw, RNP_DMA_DUMY, 0xa5a40000); +} + +static void rnp_link_stat_mark_disable(struct rnp_hw *hw) +{ + wr32(hw, RNP_DMA_DUMY, 0); +} + +static int rnp_mbx_fw_post_req(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, + struct mbx_req_cookie *cookie) +{ + int err = 0; + struct rnp_adapter *adpt = hw->back; + + cookie->errcode = 0; + cookie->done = 0; + init_waitqueue_head(&cookie->wait); + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] wait mbx lock timeout pfvf:0x%x opcode:0x%x\n", + __func__, hw->pfvfnum, req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + + err = rnp_write_mbx(hw, (u32 *)req, + (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + rnp_err("rnp_write_mbx failed! 
err:%d opcode:0x%x\n", err, + req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + + if (cookie->timeout_jiffes != 0) { + int retry_cnt = 4; +retry: + err = wait_event_interruptible_timeout(cookie->wait, + cookie->done == 1, + cookie->timeout_jiffes); + + if (err == -ERESTARTSYS && retry_cnt) { + retry_cnt--; + goto retry; + } + if (err == 0) { + rnp_err("[%s] %s failed! pfvfnum:0x%x hw:%p timeout err:%d opcode:%x\n", + adpt->name, __func__, hw->pfvfnum, hw, err, + req->opcode); + err = -ETIME; + } else if (err > 0) { + err = 0; + } + } else { + wait_event_interruptible(cookie->wait, cookie->done == 1); + } + + mutex_unlock(&hw->mbx.lock); + + if (cookie->errcode) { + err = cookie->errcode; + } + + return err; +} + +static int rnp_fw_send_cmd_wait(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, + struct mbx_fw_cmd_reply *reply) +{ + int err; + int retry_cnt = 3; + + if (!hw || !req || !reply || !hw->mbx.ops.read_posted) { + printk("error: hw:%p req:%p reply:%p\n", hw, req, reply); + return -EINVAL; + } + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed opcode:0x%x\n", __func__, + req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + err = hw->mbx.ops.write_posted( + hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + rnp_err("%s: write_posted failed! err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + +retry: + retry_cnt--; + if (retry_cnt < 0) { + rnp_err("retry timeout opcode:0x%x\n", req->opcode); + return -EIO; + } + err = hw->mbx.ops.read_posted(hw, (u32 *)reply, sizeof(*reply) / 4, + MBX_FW); + if (err) { + rnp_err("%s: read_posted failed! 
err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + if (reply->opcode != req->opcode) + goto retry; + + mutex_unlock(&hw->mbx.lock); + + if (reply->error_code) { + rnp_err("%s: reply err:0x%x req:0x%x\n", __func__, + reply->error_code, req->opcode); + return -reply->error_code; + } + return 0; +} + +int wait_mbx_init_done(struct rnp_hw *hw) +{ + int count = 10000; + u32 v = rd32(hw, RNP_TOP_NIC_DUMMY); + + while (count) { + v = rd32(hw, RNP_TOP_NIC_DUMMY); + if (((v & 0xFF000000) == 0xa5000000) && (v & 0x80)) + break; + + usleep_range(500, 1000); + printk("waiting fw up\n"); + count--; + } + printk("fw init ok %x\n", v); + + return 0; +} + +int rnp_mbx_get_lane_stat(struct rnp_hw *hw) +{ + int err = 0; + struct mbx_fw_cmd_req req; + struct rnp_adapter *adpt = hw->back; + struct lane_stat_data *st; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + cookie = mbx_cookie_zalloc(hw,sizeof(struct lane_stat_data)); + if (!cookie) { + rnp_err("%s: no memory\n", __func__); + return -ENOMEM; + } + st = (struct lane_stat_data *)cookie->priv; + + build_get_lane_status_req(&req, hw->nr_lane, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + rnp_err("%s: error:%d\n", __func__, err); + goto quit; + } + } else { + memset(&reply, 0, sizeof(reply)); + + build_get_lane_status_req(&req, hw->nr_lane, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: 1 error:%d\n", __func__, err); + goto quit; + } + st = (struct lane_stat_data *)&(reply.data); + } + + hw->phy_type = st->phy_type; + hw->speed = adpt->speed = st->speed; + if ((st->is_sgmii) || (hw->phy_type == PHY_TYPE_10G_TP)) { + adpt->phy_addr = st->phy_addr; + } else { + adpt->sfp.fault = st->sfp.fault; + adpt->sfp.los = st->sfp.los; + adpt->sfp.mod_abs = st->sfp.mod_abs; + adpt->sfp.tx_dis = st->sfp.tx_dis; + } + adpt->si.main = st->si_main; + adpt->si.pre = st->si_pre; + adpt->si.post = st->si_post; + adpt->si.tx_boost = st->si_tx_boost; + adpt->link_traing = st->link_traing; + adpt->fec = st->fec; + hw->is_sgmii = st->is_sgmii; + hw->pci_gen = st->pci_gen; + hw->pci_lanes = st->pci_lanes; + adpt->speed = st->speed; + adpt->hw.link = st->linkup; + hw->is_backplane = st->is_backplane; + hw->supported_link = st->supported_link; + hw->advertised_link = st->advertised_link; + hw->tp_mdx = st->tp_mdx; + + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + if (hw->fw_version >= 0x00050000) { + hw->sfp_connector = st->sfp_connector; + hw->duplex = st->duplex; + adpt->an = st->autoneg; + } else { + hw->sfp_connector = 0xff; + hw->duplex = 1; + adpt->an = st->an; + } + if (hw->fw_version <= 0x00050000) { + hw->supported_link |= RNP_LINK_SPEED_10GB_FULL | + RNP_LINK_SPEED_1GB_FULL; + } + } + + rnp_logd( + LOG_MBX_LINK_STAT, + "%s:pma_type:0x%x phy_type:0x%x,linkup:%d duplex:%d auton:%d " + "fec:%d an:%d lt:%d is_sgmii:%d supported_link:0x%x, backplane:%d " + "speed:%d sfp_connector:0x%x\n", + adpt->name, st->pma_type, st->phy_type, st->linkup, st->duplex, + st->autoneg, st->fec, st->an, st->link_traing, st->is_sgmii, + hw->supported_link, hw->is_backplane, st->speed, + st->sfp_connector); +quit: + if (cookie) + mbx_free_cookie(cookie, err ? 
false : true); + + return err; +} + +int rnp_mbx_get_link_stat(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_get_link_status_req(&req, hw->nr_lane, &req); + return rnp_fw_send_cmd_wait(hw, &req, &reply); +} + +int rnp_mbx_fw_reset_phy(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int ret; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw,0); + + if (!cookie) { + return -ENOMEM; + } + + build_reset_phy_req(&req, cookie); + + ret = rnp_mbx_fw_post_req(hw, &req, cookie); + mbx_free_cookie(cookie,ret?false:true); + return ret; + } else { + build_reset_phy_req(&req, &req); + return rnp_fw_send_cmd_wait(hw, &req, &reply); + } +} + +int rnp_maintain_req(struct rnp_hw *hw, int cmd, int arg0, int req_data_bytes, + int reply_bytes, dma_addr_t dma_phy_addr) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + u64 address = dma_phy_addr; + + cookie = mbx_cookie_zalloc(hw,0); + if (!cookie) { + return -ENOMEM; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + cookie->timeout_jiffes = 60 * HZ; + + build_maintain_req(&req, cookie, cmd, arg0, req_data_bytes, reply_bytes, + address & 0xffffffff, (address >> 32) & 0xffffffff); + + if (hw->mbx.other_irq_enabled) { + cookie->timeout_jiffes = 400 * HZ; + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + int old_mbx_timeout = hw->mbx.timeout; + hw->mbx.timeout = + (400 * 1000 * 1000) / hw->mbx.usec_delay; + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + hw->mbx.timeout = old_mbx_timeout; + } + + if (cookie) + mbx_free_cookie(cookie,err?false:true); + + return (err) ? -EIO : 0; +} + +int rnp_fw_get_macaddr(struct rnp_hw *hw, int pfvfnum, u8 *mac_addr, + int nr_lane) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + rnp_dbg("%s: pfvfnum:0x%x nr_lane:%d\n", __func__, pfvfnum, nr_lane); + + if (!mac_addr) { + rnp_err("%s: mac_addr is null\n", __func__); + return -EINVAL; + } + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw,sizeof(reply.mac_addr)); + struct mac_addr *mac = (struct mac_addr *)cookie->priv; + + if (!cookie) { + return -ENOMEM; + } + + build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return err; + } + hw->pcode = mac->pcode; + + if ((1 << nr_lane) & mac->lanes) { + memcpy(mac_addr, mac->addrs[nr_lane].mac, 6); + } + + mbx_free_cookie(cookie,true); + return 0; + } else { + build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: failed. 
err:%d\n", __func__, err); + return err; + } + + hw->pcode = reply.mac_addr.pcode; + if ((1 << nr_lane) & reply.mac_addr.lanes) { + memcpy(mac_addr, reply.mac_addr.addrs[nr_lane].mac, 6); + return 0; + } + } + + return -ENODATA; +} + +static int rnp_mbx_sfp_read(struct rnp_hw *hw, int sfp_i2c_addr, int reg, + int cnt, u8 *out_buf) +{ + struct mbx_fw_cmd_req req; + int err = -EIO; + int nr_lane = hw->nr_lane; + + if ((cnt > MBX_SFP_READ_MAX_CNT) || !out_buf) { + rnp_err("%s: cnt:%d should <= %d out_buf:%p\n", __func__, cnt, + MBX_SFP_READ_MAX_CNT, out_buf); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw,cnt); + if (!cookie) { + return -ENOMEM; + } + build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt, + cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return err; + } else { + memcpy(out_buf, cookie->priv, cnt); + err = 0; + mbx_free_cookie(cookie,true); + } + } else { + struct mbx_fw_cmd_reply reply; + + memset(&reply, 0, sizeof(reply)); + build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt, + &reply); + + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err == 0) { + memcpy(out_buf, reply.sfp_read.value, cnt); + } + } + + return err; +} + +int rnp_mbx_sfp_module_eeprom_info(struct rnp_hw *hw, int sfp_addr, int reg, + int data_len, u8 *buf) +{ + int left = data_len; + int cnt, err; + + do { + cnt = (left > MBX_SFP_READ_MAX_CNT) ? MBX_SFP_READ_MAX_CNT : + left; + err = rnp_mbx_sfp_read(hw, sfp_addr, reg, cnt, buf); + if (err) { + rnp_err("%s: error:%d\n", __func__, err); + return err; + } + reg += cnt; + buf += cnt; + left -= cnt; + } while (left > 0); + + return 0; +} + +int rnp_mbx_sfp_write(struct rnp_hw *hw, int sfp_addr, int reg, short v) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + memset(&req, 0, sizeof(req)); + + build_mbx_sfp_write(&req, nr_lane, sfp_addr, reg, v); + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_mbx_fw_reg_read(struct rnp_hw *hw, int fw_reg) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int err, ret = 0xffffffff; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->fw_version < 0x00050200) { + return -EOPNOTSUPP; + } + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw,sizeof(reply.r_reg)); + + build_readreg_req(&req, fw_reg, cookie); + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return ret; + } + ret = ((int *)(cookie->priv))[0]; + mbx_free_cookie(cookie,true); + } else { + build_readreg_req(&req, fw_reg, &reply); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: failed. 
err:%d\n", __func__, err); + return err; + } else { + ret = reply.r_reg.value[0]; + } + } + return ret; +} + +int rnp_mbx_reg_write(struct rnp_hw *hw, int fw_reg, int value) +{ + struct mbx_fw_cmd_req req; + int err; + memset(&req, 0, sizeof(req)); + + if (hw->fw_version < 0x00050200) { + return -EOPNOTSUPP; + } + + build_writereg_req(&req, NULL, fw_reg, 4, &value); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_reg_writev(struct rnp_hw *hw, int fw_reg, int value[4], int bytes) +{ + struct mbx_fw_cmd_req req; + int err; + memset(&req, 0, sizeof(req)); + + build_writereg_req(&req, NULL, fw_reg, bytes, value); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_wol_set(struct rnp_hw *hw, u32 mode) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + memset(&req, 0, sizeof(req)); + + build_mbx_wol_set(&req, nr_lane, mode); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_set_dump(struct rnp_hw *hw, int flag) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + build_set_dump(&req, hw->nr_lane, flag); + + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_mbx_force_speed(struct rnp_hw *hw, int speed) +{ + int cmd = 0x01150000; + + if (hw->force_10g_1g_speed_ablity == 0) + return -EINVAL; + + hw->saved_force_link_speed = speed; + if (speed == RNP_LINK_SPEED_10GB_FULL) { + cmd = 0x01150002; + hw->force_speed_stat = FORCE_SPEED_STAT_10G; + } else if (speed == RNP_LINK_SPEED_1GB_FULL) { + cmd = 0x01150001; + hw->force_speed_stat = FORCE_SPEED_STAT_1G; + } else { + cmd = 0x01150000; + hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED; + } + return rnp_mbx_set_dump(hw, cmd); +} + +int rnp_mbx_get_dump(struct rnp_hw *hw, int flags, u8 *data_out, int bytes) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + struct get_dump_reply *get_dump; + + void *dma_buf = NULL; + dma_addr_t dma_phy = 0; + u64 address; + + cookie = mbx_cookie_zalloc(hw,sizeof(*get_dump)); + if (!cookie) { + return -ENOMEM; + } + get_dump = (struct get_dump_reply *)cookie->priv; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (bytes > sizeof(get_dump->data)) { + dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, + GFP_ATOMIC); + if (!dma_buf) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, + bytes); + err = -ENOMEM; + goto quit; + } + } + address = dma_phy; + build_get_dump_req(&req, cookie, hw->nr_lane, address & 0xffffffff, + (address >> 32) & 0xffffffff, bytes); + + if (hw->mbx.other_irq_enabled) { + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + get_dump = &reply.get_dump; + } + +quit: + if (err == 0) { + hw->dump.version = get_dump->version; + hw->dump.flag = get_dump->flags; + hw->dump.len = get_dump->bytes; + } + if (err == 0 && data_out) { + if (dma_buf) { + memcpy(data_out, dma_buf, bytes); + } else { + memcpy(data_out, get_dump->data, bytes); + } + } + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + + if (cookie) + mbx_free_cookie(cookie, err ? false : true); + return err ? 
-err : 0; +} + +int rnp_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, int bytes) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + void *dma_buf = NULL; + dma_addr_t dma_phy; + + cookie = mbx_cookie_zalloc(hw,0); + if (!cookie) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, 0); + return -ENOMEM; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC); + if (!dma_buf) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, bytes); + err = -ENOMEM; + goto quit; + } + + memcpy(dma_buf, fw_bin, bytes); + + build_fw_update_req(&req, cookie, partition, dma_phy & 0xffffffff, + (dma_phy >> 32) & 0xffffffff, bytes); + if (hw->mbx.other_irq_enabled) { + cookie->timeout_jiffes = 400 * HZ; + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + int old_mbx_timeout = hw->mbx.timeout; + hw->mbx.timeout = + (400 * 1000 * 1000) / hw->mbx.usec_delay; + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + hw->mbx.timeout = old_mbx_timeout; + } + +quit: + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + if (cookie) + mbx_free_cookie(cookie, err ? false : true); + printk("%s: %s (errcode:%d)\n", __func__, err ? " failed" : " success", + err); + return (err) ? -EIO : 0; +} + +int rnp_mbx_link_event_enable(struct rnp_hw *hw, int enable) +{ + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (enable) { + int v = rd32(hw, RNP_DMA_DUMY); + v &= 0x0000ffff; + v |= 0xa5a40000; + wr32(hw, RNP_DMA_DUMY, v); + } else { + wr32(hw, RNP_DMA_DUMY, 0); + } + + build_link_set_event_mask(&req, BIT(EVT_LINK_UP), + (enable & 1) << EVT_LINK_UP, &req); + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_fw_get_capability(struct rnp_hw *hw, struct phy_abilities *abil) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_phy_abalities_req(&req, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + + if (err == 0) + memcpy(abil, &reply.phy_abilities, sizeof(*abil)); + + return err; +} + +static int to_mac_type(struct phy_abilities *ability) +{ + int lanes = hweight_long(ability->lane_mask); + if ((ability->phy_type == PHY_TYPE_40G_BASE_KR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_LR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_CR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_SR4)) { + if (lanes == 1) { + return rnp_mac_n10g_x8_40G; + } else { + return rnp_mac_n10g_x8_10G; + } + } else if ((ability->phy_type == PHY_TYPE_10G_BASE_KR) || + (ability->phy_type == PHY_TYPE_10G_BASE_LR) || + (ability->phy_type == PHY_TYPE_10G_BASE_ER) || + (ability->phy_type == PHY_TYPE_10G_BASE_SR)) { + if (lanes == 1) { + return rnp_mac_n10g_x2_10G; + } else if (lanes == 2) { + return rnp_mac_n10g_x4_10G; + } else { + return rnp_mac_n10g_x8_10G; + } + } else if (ability->phy_type == PHY_TYPE_1G_BASE_KX) { + return rnp_mac_n10l_x8_1G; + } else if (ability->phy_type == PHY_TYPE_SGMII) { + return rnp_mac_n10l_x8_1G; + } + return rnp_mac_unknown; +} + +int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, + int value2, int value3) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + 
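/* value0..value3 are opaque here; firmware interprets them according to 'fun' (one of the LANE_FUN_* codes in rnp_mbx_fw.h) */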
build_set_lane_fun(&req, hw->nr_lane, fun, value0, value1, value2, + value3); + + return rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifinsmod(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + return err; +} + +int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifsuspuse(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + + return err; +} + +int rnp_mbx_ifforce_control_mac(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifforce(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + + return err; +} + +int rnp_mbx_ifup_down(struct rnp_hw *hw, int up) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifup_down(&req, hw->nr_lane, up); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d up:%d\n", __func__, + hw->nr_lane, up); + + /* force firmware report link-status */ + if (up) + rnp_link_stat_mark_reset(hw); + + return err; +} + +int rnp_mbx_led_set(struct rnp_hw *hw, int value) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_led_set(&req, hw->nr_lane, value, &reply); + + return rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info) +{ + int err; + struct phy_abilities ablity; + int try_cnt = 3; + + memset(&ablity, 0, sizeof(ablity)); + rnp_link_stat_mark_disable(hw); + + while (try_cnt--) { + err = rnp_fw_get_capability(hw, &ablity); + if (err == 0 && info) { + hw->lane_mask = ablity.lane_mask & 0xf; + info->mac = to_mac_type(&ablity); + info->adapter_cnt = hweight_long(hw->lane_mask); + hw->mode = ablity.nic_mode; + hw->pfvfnum = ablity.pfnum; + hw->speed = ablity.speed; + hw->nr_lane = 0; // PF1 + hw->fw_version = ablity.fw_version; + hw->mac_type = info->mac; + hw->phy_type = ablity.phy_type; + hw->axi_mhz = ablity.axi_mhz; + hw->port_ids = ablity.port_ids; + hw->bd_uid = ablity.bd_uid; + hw->phy_id = ablity.phy_id; + hw->wol = ablity.wol_status; + 
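/* cache the extended-ability bits reported by firmware; the feature checks below (force-link, NCSI, RPU, LLDP) use these cached copies */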
hw->eco = ablity.e.v2; + hw->force_link_supported = + ablity.e.force_link_supported; + + if (ablity.e.force_link_supported && + (ablity.e.force_down_en & 0x1)) { + hw->force_status = 1; + } + + if ((hw->fw_version >= 0x00050201) && + (ablity.speed == SPEED_10000)) { + hw->force_speed_stat = + FORCE_SPEED_STAT_DISABLED; + hw->force_10g_1g_speed_ablity = 1; + } + if (ablity.ext_ablity != 0xffffffff && ablity.e.valid) { + hw->ncsi_en = (ablity.e.ncsi_en == 1); + hw->ncsi_rar_entries = 1; + hw->rpu_en = ablity.e.rpu_en; + if (hw->rpu_en) { + ablity.e.rpu_availble = 1; + } + hw->rpu_availble = ablity.e.rpu_availble; + hw->fw_lldp_ablity = ablity.e.fw_lldp_ablity; + } else { + hw->ncsi_rar_entries = 0; + } + + if (hw->force_link_supported == 0) { + hw->force_status = hw->ncsi_en ? 0 : 1; + } + + pr_info("%s: nic-mode:%d mac:%d adpt_cnt:%d lane_mask:0x%x, phy_type: " + "0x%x, " + "pfvfnum:0x%x, fw-version:0x%08x\n, axi:%d Mhz," + "port_id:%d bd_uid:0x%08x 0x%x ex-ablity:0x%x fs:%d speed:%d " + "ncsi_en:%u %d wol=0x%x rpu:%d-%d v2:%d force-status:%d,%d\n", + __func__, hw->mode, info->mac, + info->adapter_cnt, hw->lane_mask, hw->phy_type, + hw->pfvfnum, ablity.fw_version, ablity.axi_mhz, + ablity.port_id[0], hw->bd_uid, ablity.phy_id, + ablity.ext_ablity, + hw->force_10g_1g_speed_ablity, ablity.speed, + hw->ncsi_en, hw->ncsi_rar_entries, hw->wol, + hw->rpu_en, hw->rpu_availble, hw->eco, + hw->force_status, hw->force_link_supported); + if (hw->phy_type == PHY_TYPE_10G_TP) { + hw->supported_link = RNP_LINK_SPEED_10GB_FULL | + RNP_LINK_SPEED_1GB_FULL | + RNP_LINK_SPEED_1GB_HALF; + hw->phy.autoneg_advertised = hw->supported_link; + hw->autoneg = 1; + } + if (info->adapter_cnt != 0) + return 0; + } + } + + dev_err(&hw->pdev->dev, "%s: error!\n", __func__); + return -EIO; +} + +int rnp_mbx_get_temp(struct rnp_hw *hw, int *voltage) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + struct get_temp *temp; + int temp_v = 0; + + cookie = mbx_cookie_zalloc(hw,sizeof(*temp)); + if (!cookie) { + return -ENOMEM; + } + temp = (struct get_temp *)cookie->priv; + + memset(&req, 0, sizeof(req)); + + build_get_temp(&req, cookie); + + if (hw->mbx.other_irq_enabled) { + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + memset(&reply, 0, sizeof(reply)); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + temp = &reply.get_temp; + } + + if (voltage) + *voltage = temp->volatage; + temp_v = temp->temp; + + if (cookie) { + mbx_free_cookie(cookie,err?false:true); + } + return temp_v; +} + +enum speed_enum { + speed_10, + speed_100, + speed_1000, + speed_10000, + speed_25000, + speed_40000, + +}; + +void rnp_link_stat_mark(struct rnp_hw *hw, int up) +{ + u32 v; + + v = rd32(hw, RNP_DMA_DUMY); + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + v &= ~(0xffff0000); + v |= 0xa5a40000; + if (up) { + v |= BIT(0); + } else { + v &= ~BIT(0); + } + } + wr32(hw, RNP_DMA_DUMY, v); +} + +void rnp_mbx_probe_stat_set(struct rnp_hw *hw, int stat) +{ +#define RNP10_DMA_DUMMY_PROBE_STAT_BIT (4) + u32 v; + + v = rd32(hw, RNP_DMA_DUMY); + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + v &= ~(0xffff0000); + v |= 0xa5a40000; + + if (stat == MBX_PROBE) { + v |= BIT(RNP10_DMA_DUMMY_PROBE_STAT_BIT); + } else if (stat == MBX_REMOVE) { + v = 0xFFA5A6A7; + } else { + v &= ~BIT(RNP10_DMA_DUMMY_PROBE_STAT_BIT); + } + } + wr32(hw, RNP_DMA_DUMY, v); +} + +static inline int rnp_mbx_fw_req_handler(struct rnp_adapter *adapter, + struct 
mbx_fw_cmd_req *req) +{ + struct rnp_hw *hw = &adapter->hw; + + switch (req->opcode) { + case LINK_STATUS_EVENT: + rnp_logd( + LOG_LINK_EVENT, + "[LINK_STATUS_EVENT:0x%x] %s:link changed: changed_lane:0x%x, " + "status:0x%x, speed:%d, duplex:%d\n", + req->opcode, adapter->name, + req->link_stat.changed_lanes, + req->link_stat.lane_status, req->link_stat.st[0].speed, + req->link_stat.st[0].duplex); + + if (req->link_stat.lane_status) { + adapter->hw.link = 1; + } else { + adapter->hw.link = 0; + } + if (req->link_stat.st[0].lldp_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + + if (req->link_stat.port_st_magic == SPEED_VALID_MAGIC) { + hw->speed = req->link_stat.st[0].speed; + hw->duplex = req->link_stat.st[0].duplex; + + switch (hw->speed) { + case 10: + adapter->speed = RNP_LINK_SPEED_10_FULL; + break; + case 100: + adapter->speed = RNP_LINK_SPEED_100_FULL; + break; + case 1000: + adapter->speed = RNP_LINK_SPEED_1GB_FULL; + break; + case 10000: + adapter->speed = RNP_LINK_SPEED_10GB_FULL; + break; + case 25000: + adapter->speed = RNP_LINK_SPEED_25GB_FULL; + break; + case 40000: + adapter->speed = RNP_LINK_SPEED_40GB_FULL; + break; + } + } + if (req->link_stat.lane_status) { + rnp_link_stat_mark(hw, 1); + } else { + rnp_link_stat_mark(hw, 0); + } + + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + break; + } + rnp_service_event_schedule(adapter); + + return 0; +} + +static inline int rnp_mbx_fw_reply_handler(struct rnp_adapter *adapter, + struct mbx_fw_cmd_reply *reply) +{ + struct mbx_req_cookie *cookie; + + cookie = reply->cookie; + if (!cookie || is_cookie_valid(&adapter->hw,cookie)== false + || cookie->stat != COOKIE_ALLOCED) { + return -EIO; + } + + if (cookie->priv_len > 0) { + memcpy(cookie->priv, reply->data, cookie->priv_len); + } + + cookie->done = 1; + + if (reply->flags & FLAGS_ERR) { + cookie->errcode = reply->error_code; + } else { + cookie->errcode = 0; + } + + if(cookie->stat == COOKIE_ALLOCED){ + wake_up_interruptible(&cookie->wait); + } + /* not really free cookie, mark as free-able */ + mbx_free_cookie(cookie, false); + + return 0; +} + +static inline int rnp_rcv_msg_from_fw(struct rnp_adapter *adapter) +{ + u32 msgbuf[RNP_FW_MAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + s32 retval; + + retval = rnp_read_mbx(hw, msgbuf, RNP_FW_MAILBOX_SIZE, MBX_FW); + if (retval) { + printk("Error receiving message from FW:%d\n", retval); + return retval; + } + + rnp_logd(LOG_MBX_MSG_IN, + "msg from fw: msg[0]=0x%08x_0x%08x_0x%08x_0x%08x\n", msgbuf[0], + msgbuf[1], msgbuf[2], msgbuf[3]); + + /* this is a message we already processed, do nothing */ + if (((unsigned short *)msgbuf)[0] & FLAGS_DD) { + return rnp_mbx_fw_reply_handler( + adapter, (struct mbx_fw_cmd_reply *)msgbuf); + } else { + return rnp_mbx_fw_req_handler(adapter, + (struct mbx_fw_cmd_req *)msgbuf); + } +} + +static void rnp_rcv_ack_from_fw(struct rnp_adapter *adapter) +{ + /* do-nothing */ +} + +int rnp_fw_msg_handler(struct rnp_adapter *adapter) +{ + /* == check fw-req */ + if (!rnp_check_for_msg(&adapter->hw, MBX_FW)) + rnp_rcv_msg_from_fw(adapter); + + /* process any acks */ + if (!rnp_check_for_ack(&adapter->hw, MBX_FW)) + rnp_rcv_ack_from_fw(adapter); + + return 0; +} + +int rnp_mbx_phy_write(struct rnp_hw *hw, u32 reg, u32 val) +{ + struct mbx_fw_cmd_req req; + char nr_lane = hw->nr_lane; + memset(&req, 0, sizeof(req)); + + build_set_phy_reg(&req, NULL, PHY_EXTERNAL_PHY_MDIO, nr_lane, reg, val, + 0); + + return 
rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_phy_read(struct rnp_hw *hw, u32 reg, u32 *val) +{ + struct mbx_fw_cmd_req req; + int err = -EIO; + char nr_lane = hw->nr_lane; + int times = 0; +retry: + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw,4); + if (!cookie) { + return -ENOMEM; + } + build_get_phy_reg(&req, cookie, PHY_EXTERNAL_PHY_MDIO, nr_lane, + reg); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return err; + } else { + memcpy(val, cookie->priv, 4); + err = 0; + } + mbx_free_cookie(cookie,true); + } else { + struct mbx_fw_cmd_reply reply; + memset(&reply, 0, sizeof(reply)); + build_get_phy_reg(&req, &reply, PHY_EXTERNAL_PHY_MDIO, nr_lane, + reg); + + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err == 0) { + *val = reply.r_reg.value[0]; + } + } + if ((*(val) == 0xffff) && (times <= 5)) { + printk("%x warning mbx_phy_read 0xffff, addr %x\n", times, reg); + times++; + goto retry; + } + return err; +} + +int rnp_mbx_phy_link_set(struct rnp_hw *hw, int adv, int autoneg, int speed, + int duplex, int mdix_ctrl) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + + printk("%s:lane:%d adv:0x%x\n", __func__, hw->nr_lane, adv); + printk("%s:autoneg %x, speed %x, duplex %x\n", __func__, autoneg, speed, + duplex); + + build_phy_link_set(&req, adv, hw->nr_lane, autoneg, speed, duplex, + mdix_ctrl); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + return err; +} + +int rnp_mbx_phy_pause_set(struct rnp_hw *hw, int pause_mode) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + + printk("%s:lane:%d pause:0x%x\n", __func__, hw->nr_lane, pause_mode); + + build_phy_pause_set(&req, pause_mode, hw->nr_lane); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + return err; +} + +int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + if (!hw->fw_lldp_ablity) { + rnp_warn("lldp set not supported\n"); + return -EOPNOTSUPP; + } + + memset(&req, 0, sizeof(req)); + + build_lldp_ctrl_set(&req, nr_lane, enable); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_lldp_status_get(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int err, ret = 0; + + if (!hw->fw_lldp_ablity) { + rnp_warn("fw lldp not supported\n"); + return -EOPNOTSUPP; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw,sizeof(reply.lldp)); + + if (!cookie) { + return -ENOMEM; + } + build_lldp_ctrl_get(&req, hw->nr_lane, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return ret; + } + ret = ((int *)(cookie->priv))[0]; + mbx_free_cookie(cookie,true); + } else { + build_lldp_ctrl_get(&req, hw->nr_lane, &reply); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: 1 error:%d\n", __func__, err); + return -EIO; + } + ret = reply.lldp.enable_stat; + } + return ret; +} + +int rnp_mbx_ddr_csl_enable(struct rnp_hw *hw, int 
enable, + dma_addr_t dma_phy, + int bytes) +{ + struct mbx_fw_cmd_req req; + memset(&req, 0, sizeof(req)); + + build_ddr_csl(&req, NULL, enable, dma_phy, bytes); + + if (hw->mbx.other_irq_enabled) { + return rnp_mbx_write_posted_locked(hw, &req); + } else { + struct mbx_fw_cmd_reply reply; + memset(&reply, 0, sizeof(reply)); + return rnp_fw_send_cmd_wait(hw, &req, &reply); + } +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h new file mode 100644 index 000000000000..183e84d060d1 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h @@ -0,0 +1,1135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef MBX_FW_CMD_H +#define MBX_FW_CMD_H + +#include +#include +#include + +#ifndef _PACKED_ALIGN4 +#define _PACKED_ALIGN4 __attribute__((packed, aligned(4))) +#endif + +enum GENERIC_CMD { + /* generat */ + GET_VERSION = 0x0001, + READ_REG = 0xFF03, + WRITE_REG = 0xFF04, + MODIFY_REG = 0xFF07, + /* virtualization */ + IFUP_DOWN = 0x0800, + SEND_TO_PF = 0x0801, + SEND_TO_VF = 0x0802, + DRIVER_INSMOD = 0x0803, + SYSTEM_SUSPUSE = 0x0804, + FORCE_LINK_ON_CLOSE = 0x0805, + /* link configuration admin commands */ + GET_PHY_ABALITY = 0x0601, + GET_MAC_ADDRESS = 0x0602, + RESET_PHY = 0x0603, + LED_SET = 0x0604, + GET_LINK_STATUS = 0x0607, + LINK_STATUS_EVENT = 0x0608, + SET_LANE_FUN = 0x0609, + GET_LANE_STATUS = 0x0610, + SFP_SPEED_CHANGED_EVENT = 0x0611, + SET_EVENT_MASK = 0x0613, + SET_LOOPBACK_MODE = 0x0618, + SET_PHY_REG = 0x0628, + GET_PHY_REG = 0x0629, + PHY_LINK_SET = 0x0630, + GET_PHY_STATISTICS = 0x0631, + PHY_PAUSE_SET = 0x0632, + /*sfp-module*/ + SFP_MODULE_READ = 0x0900, + SFP_MODULE_WRITE = 0x0901, + /* fw update */ + FW_UPDATE = 0x0700, + FW_MAINTAIN = 0x0701, + WOL_EN = 0x0910, + GET_DUMP = 0x0a00, + SET_DUMP = 0x0a10, + GET_TEMP = 0x0a11, + SET_WOL = 0x0a12, + LLDP_TX_CTL = 0x0a13, + SET_DDR_CSL = 0xFF11, +}; + +enum link_event_mask { + EVT_LINK_UP = 1, + EVT_NO_MEDIA = 2, + EVT_LINK_FAULT = 3, + EVT_PHY_TEMP_ALARM = 4, + EVT_EXCESSIVE_ERRORS = 5, + EVT_SIGNAL_DETECT = 6, + EVT_AUTO_NEGOTIATION_DONE = 7, + EVT_MODULE_QUALIFICATION_FAILED = 8, + EVT_PORT_TX_SUSPEND = 9, +}; + +enum pma_type { + PHY_TYPE_NONE = 0, + PHY_TYPE_1G_BASE_KX, + PHY_TYPE_SGMII, + PHY_TYPE_10G_BASE_KR, + PHY_TYPE_25G_BASE_KR, + PHY_TYPE_40G_BASE_KR4, + PHY_TYPE_10G_BASE_SR, + PHY_TYPE_40G_BASE_SR4, + PHY_TYPE_40G_BASE_CR4, + PHY_TYPE_40G_BASE_LR4, + PHY_TYPE_10G_BASE_LR, + PHY_TYPE_10G_BASE_ER, + PHY_TYPE_10G_TP +}; + +#define PHY_C45 (BIT(30)) +#define PHY_MMD(i) (i << 16) +#define PHY_MMD_PMAPMD PHY_MMD(1) +#define PHY_MMD_AN PHY_MMD(7) +#define PHY_MMD_VEND2 PHY_MMD(31) +#define PHY_826x_MDIX (PHY_C45 | PHY_MMD_VEND2 | 0xa430) +#define PHY_826x_SPEED (PHY_C45 | PHY_MMD_PMAPMD | 0) +#define PHY_826x_DUPLEX (PHY_C45 | PHY_MMD_VEND2 | 0xa44) +#define PHY_826x_AN (PHY_C45 | PHY_MMD_AN | 0) +#define PHY_826x_ADV (PHY_C45 | PHY_MMD_AN | 16) +#define PHY_826x_GBASE_ADV (PHY_C45 | PHY_MMD_AN | 0x20) +#define PHY_826x_GBASE_ADV_2 (PHY_C45 | PHY_MMD_VEND2 | 0xa412) +struct phy_abilities { + unsigned char link_stat; + unsigned char lane_mask; + int speed; + short phy_type; + short nic_mode; + short pfnum; + unsigned int fw_version; + unsigned int axi_mhz; + union { + unsigned char port_id[4]; + unsigned int port_ids; + }; + unsigned int bd_uid; + int phy_id; + int wol_status; + + union { + unsigned int ext_ablity; + struct { + unsigned int valid : 1; /* 0 */ + unsigned int wol_en : 1; /* 1 */ + unsigned int 
pci_preset_runtime_en : 1; /* 2 */ + unsigned int smbus_en : 1; /* 3 */ + unsigned int ncsi_en : 1; /* 4 */ + unsigned int rpu_en : 1; /* 5 */ + unsigned int v2 : 1; /* 6 */ + unsigned int pxe_en : 1; /* 7 */ + unsigned int mctp_en : 1; /* 8 */ + unsigned int yt8614 : 1; /* 9 */ + unsigned int pci_ext_reset : 1; /* 10 */ + unsigned int rpu_availble : 1; /* 11 */ + unsigned int fw_lldp_ablity : 1; /* 12 */ + unsigned int lldp_enabled : 1; /* 13 */ + unsigned int only_1g : 1; /* 14 */ + unsigned int force_down_en : 4; /* 15-18 */ + unsigned int force_link_supported : 1; /* 19 */ + unsigned int ports_is_sgmii_valid : 1; /* [20] */ + unsigned int lane0_is_sgmii : 1; /* [21] */ + unsigned int lane1_is_sgmii : 1; /* [22] */ + unsigned int lane2_is_sgmii : 1; /* [23] */ + unsigned int lane3_is_sgmii : 1; /* [24] */ + } e; + }; + +} _PACKED_ALIGN4; + +enum LOOPBACK_LEVEL { + LOOPBACK_DISABLE = 0, + LOOPBACK_MAC = 1, + LOOPBACK_PCS = 5, + LOOPBACK_EXTERNAL = 6, +}; +enum LOOPBACK_TYPE { + /* Tx->Rx */ + LOOPBACK_TYPE_LOCAL = 0x0, +}; + +enum LOOPBACK_FORCE_SPEED { + LOOPBACK_FORCE_SPEED_NONE = 0x0, + LOOPBACK_FORCE_SPEED_1GBS = 0x1, + LOOPBACK_FORCE_SPEED_10GBS = 0x2, + LOOPBACK_FORCE_SPEED_40_25GBS = 0x3, +}; + +enum PHY_INTERFACE { + PHY_INTERNAL_PHY = 0, + PHY_EXTERNAL_PHY_MDIO = 1, +}; + +/* Table 3-54. Get link status response (opcode: 0x0607) */ +struct link_stat_data { + char phy_type; + unsigned char speed; +#define LNK_STAT_SPEED_UNKNOWN 0 +#define LNK_STAT_SPEED_10 1 +#define LNK_STAT_SPEED_100 2 +#define LNK_STAT_SPEED_1000 3 +#define LNK_STAT_SPEED_10000 4 +#define LNK_STAT_SPEED_25000 5 +#define LNK_STAT_SPEED_40000 6 + /* 2 */ + char link_stat : 1; +#define LINK_UP 1 +#define LINK_DOWN 0 + char link_fault : 4; +#define LINK_LINK_FAULT BIT(0) +#define LINK_TX_FAULT BIT(1) +#define LINK_RX_FAULT BIT(2) +#define LINK_REMOTE_FAULT BIT(3) + char extern_link_stat : 1; + char media_available : 1; + char rev1 : 1; + /* 3:ignore */ + char an_completed : 1; + char lp_an_ablity : 1; + char parallel_detection_fault : 1; + char fec_enabled : 1; + char low_power_state : 1; + char link_pause_status : 2; + char qualified_odule : 1; + /* 4 */ + char phy_temp_alarm : 1; + char excessive_link_errors : 1; + char port_tx_suspended : 2; + char force_40G_enabled : 1; + char external_25G_phy_err_code : 3; +#define EXTERNAL_25G_PHY_NOT_PRESENT 1 +#define EXTERNAL_25G_PHY_NVM_CRC_ERR 2 +#define EXTERNAL_25G_PHY_MDIO_ACCESS_FAILED 6 +#define EXTERNAL_25G_PHY_INIT_SUCCED 7 + /* 5 */ + char loopback_enabled_status : 4; +#define LOOPBACK_DISABLE 0x0 +#define LOOPBACK_MAC 0x1 +#define LOOPBACK_SERDES 0x2 +#define LOOPBACK_PHY_INTERNAL 0x3 +#define LOOPBACK_PHY_EXTERNAL 0x4 + char loopback_type_status : 1; +#define LOCAL_LOOPBACK 0 /* tx->rx */ +#define FAR_END_LOOPBACK 0 /* rx->Tx */ + char rev3 : 1; + char external_dev_power_ability : 2; + /* 6-7 */ + short max_frame_sz; + /* 8 */ + char _25gb_kr_fec_enabled : 1; + char _25gb_rs_fec_enabled : 1; + char crc_enabled : 1; + char rev4 : 5; + /* 9 */ + int link_type; /* same as Phy type */ + char link_type_ext; +} _PACKED_ALIGN4; + +struct port_stat { + u8 phyid; + u8 duplex : 1; + u8 autoneg : 1; + u8 fec : 1; + u8 rev : 1; + u8 link_traing : 1; + u8 is_sgmii : 1; + u8 lldp_status : 1; + u32 speed; +} __attribute__((packed)); + +struct lane_stat_data { + u8 nr_lane; + u8 pci_gen : 4; + u8 pci_lanes : 4; + u8 pma_type; + u8 phy_type; + u16 linkup : 1; + u16 duplex : 1; + u16 autoneg : 1; + u16 fec : 1; + u16 an : 1; + u16 link_traing : 1; + u16 media_available : 1; + 
u16 is_sgmii : 1; // + u16 link_fault : 4; +#define LINK_LINK_FAULT BIT(0) +#define LINK_TX_FAULT BIT(1) +#define LINK_RX_FAULT BIT(2) +#define LINK_REMOTE_FAULT BIT(3) + u16 is_backplane : 1; + u16 tp_mdx : 2; + union { + u8 phy_addr; + struct { + u8 mod_abs : 1; + u8 fault : 1; + u8 tx_dis : 1; + u8 los : 1; + } sfp; + }; + u8 sfp_connector; + u32 speed; + u32 si_main; + u32 si_pre; + u32 si_post; + u32 si_tx_boost; + u32 supported_link; + u32 phy_id; + u32 advertised_link; +} __attribute__((packed)); + +struct yt_phy_statistics { + u32 pkg_ib_valid; /* rx crc good and length 64-1518 */ + u32 pkg_ib_os_good; /* rx crc good and length >1518 */ + u32 pkg_ib_us_good; /* rx crc good and length <64 */ + u16 pkg_ib_err; /* rx crc wrong and length 64-1518 */ + u16 pkg_ib_os_bad; /* rx crc wrong and length >1518 */ + u16 pkg_ib_frag; /* rx crc wrong and length <64 */ + u16 pkg_ib_nosfd; /* rx sfd missed */ + u32 pkg_ob_valid; /* tx crc good and length 64-1518 */ + u32 pkg_ob_os_good; /* tx crc good and length >1518 */ + u32 pkg_ob_us_good; /* tx crc good and length <64 */ + u16 pkg_ob_err; /* tx crc wrong and length 64-1518 */ + u16 pkg_ob_os_bad; /* tx crc wrong and length >1518 */ + u16 pkg_ob_frag; /* tx crc wrong and length <64 */ + u16 pkg_ob_nosfd; /* tx sfd missed */ +} __attribute__((packed)); + +struct phy_statistics { + union { + struct yt_phy_statistics yt; + }; +} __attribute__((packed)); +/* == flags == */ +#define FLAGS_DD BIT(0) /* driver clear 0, FW must set 1 */ +#define FLAGS_CMP BIT(1) /* driver clear 0, FW mucst set */ +#define FLAGS_ERR BIT(2) +/* driver clear 0, FW must set only if it reporting an error */ +#define FLAGS_LB BIT(9) +#define FLAGS_RD BIT(10) /* set if additional buffer has command parameters */ +#define FLAGS_BUF BIT(12) /* set 1 on indirect command */ +#define FLAGS_SI BIT(13) /* not irq when command complete */ +#define FLAGS_EI BIT(14) /* interrupt on error */ +#define FLAGS_FE BIT(15) /* flush erro */ + +#ifndef SHM_DATA_MAX_BYTES +#define SHM_DATA_MAX_BYTES (64 - 2 * 4) +#endif + +#define MBX_REQ_HDR_LEN 24 +#define MBX_REPLYHDR_LEN 16 +#define MBX_REQ_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REQ_HDR_LEN) +#define MBX_REPLY_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REPLYHDR_LEN) + +/* TODO req is little endian. 
bigendian should be conserened */ + +struct mbx_fw_cmd_req { + unsigned short flags; /* 0-1 */ + unsigned short opcode; /* 2-3 enum LINK_ADM_CMD */ + unsigned short datalen; /* 4-5 */ + unsigned short ret_value; /* 6-7 */ + union { + struct { + unsigned int cookie_lo; /* 8-11 */ + unsigned int cookie_hi; /* 12-15 */ + }; + void *cookie; + }; + unsigned int reply_lo; /* 16-19 5dw */ + unsigned int reply_hi; /* 20-23 */ + /* === data === 7dw [24-64] */ + union { + char data[0]; + + struct { + unsigned int addr; + unsigned int bytes; + } r_reg; + + struct { + unsigned int addr; + unsigned int bytes; + unsigned int data[4]; + } w_reg; + + struct { + unsigned int lanes; + } ptp; + + struct { + int lane; + int up; + } ifup; + + struct { + int nr_lane; +#define LLDP_TX_ALL_LANES 0xFF + int op; +#define LLDP_TX_SET 0x0 +#define LLDP_TX_GET 0x1 + int enable; + } lldp_tx; + + struct { + int lane; + int status; + } ifinsmod; + + struct { + int lane; + int status; + } ifsuspuse; + + struct { + int nr_lane; + int status; + } ifforce; + + struct { + int nr_lane; + } get_lane_st; + + struct { + int nr_lane; + int func; +#define LANE_FUN_AN 0 +#define LANE_FUN_LINK_TRAING 1 +#define LANE_FUN_FEC 2 +#define LANE_FUN_SI 3 +#define LANE_FUN_SFP_TX_DISABLE 4 +#define LANE_FUN_PCI_LANE 5 +#define LANE_FUN_PRBS 6 +#define LANE_FUN_SPEED_CHANGE 7 + + int value0; + int value1; + int value2; + int value3; + } set_lane_fun; + + struct { + int flag; + int nr_lane; + } set_dump; + + struct { + int lane; + int enable; + } wol; + + struct { + unsigned int bytes; + unsigned int nr_lane; + unsigned int bin_phy_lo; + unsigned int bin_phy_hi; + } get_dump; + + struct { + unsigned int nr_lane; + int value; +#define LED_IDENTIFY_INACTIVE 0 +#define LED_IDENTIFY_ACTIVE 1 +#define LED_IDENTIFY_ON 2 +#define LED_IDENTIFY_OFF 3 + } led_set; + + struct { + unsigned int addr; + unsigned int data; + unsigned int mask; + } modify_reg; + + struct { + unsigned int adv_speed_mask; + unsigned int autoneg; + unsigned int speed; + unsigned int duplex; + int nr_lane; + unsigned int tp_mdix_ctrl; + } phy_link_set; + + struct { + unsigned int pause_mode; + int nr_lane; + } phy_pause_set; + + struct { + unsigned int nr_lane; + unsigned int sfp_adr; + unsigned int reg; + unsigned int cnt; + } sfp_read; + + struct { + unsigned int nr_lane; + unsigned int sfp_adr; + unsigned int reg; + unsigned int val; + } sfp_write; + + struct { + unsigned int nr_lane; /* 0-3 */ + } get_linkstat; + struct { + unsigned short changed_lanes; + unsigned short lane_status; + unsigned int port_st_magic; +#define SPEED_VALID_MAGIC 0xa4a6a8a9 + struct port_stat st[4]; + } link_stat; + + struct { + unsigned short enable_stat; + unsigned short event_mask; + } stat_event_mask; + + struct { /* set loopback */ + unsigned char loopback_level; + unsigned char loopback_type; + unsigned char loopback_force_speed; + + char loopback_force_speed_enable : 1; + } loopback; + + struct { + int cmd; + int arg0; + int req_bytes; + int reply_bytes; + int ddr_lo; + int ddr_hi; + } maintain; + + struct { /* set phy register */ + char phy_interface; + union { + char page_num; + char external_phy_addr; + }; + int phy_reg_addr; + int phy_w_data; + int reg_addr; + int w_data; + /* 1 = ignore page_num, use last QSFP */ + char recall_qsfp_page : 1; + /* page value */ + /* 0 = use page_num for QSFP */ + char nr_lane; + } set_phy_reg; + + struct { + int enable; + int ddr_phy_hi; + int ddr_phy_lo; + int bytes; + } ddr_csl; + + struct { + } get_phy_ablity; + + struct { + int lane_mask; + int 
pfvf_num; + } get_mac_addr; + + struct { + char phy_interface; + union { + char page_num; + char external_phy_addr; + }; + int phy_reg_addr; + char nr_lane; + } get_phy_reg; + + struct { + unsigned int nr_lane; + } phy_statistics; + + struct { + char paration; + unsigned int bytes; + unsigned int bin_phy_lo; + unsigned int bin_phy_hi; + } fw_update; + }; +} _PACKED_ALIGN4; + +/* firmware -> driver */ +struct mbx_fw_cmd_reply { + unsigned short flags; + /* fw must set: DD, CMP, Error(if error), copy value */ + /* from command: LB,RD,VFC,BUF,SI,EI,FE */ + unsigned short opcode; /* 2-3: copy from req */ + unsigned short error_code; /* 4-5: 0 if no error */ + unsigned short datalen; + /* 6-7: */ + union { + struct { + unsigned int cookie_lo; /* 8-11: */ + unsigned int cookie_hi; /* 12-15: */ + }; + void *cookie; + }; + /* ===== data ==== [16-64] */ + union { + char data[0]; + + struct version { + unsigned int major; + unsigned int sub; + unsigned int modify; + } version; + + struct { + unsigned int value[4]; + } r_reg; + + struct { + unsigned int new_value; + } modify_reg; + + struct get_temp { + int temp; + int volatage; + } get_temp; + + struct lldp_stat { + int enable_stat; + } lldp; + + struct { +#define MBX_SFP_READ_MAX_CNT 32 + char value[MBX_SFP_READ_MAX_CNT]; + } sfp_read; + + struct mac_addr { + int lanes; + struct _addr { + /* for macaddr:01:02:03:04:05:06 + * mac-hi=0x01020304 mac-lo=0x05060000 + */ + unsigned char mac[8]; + } addrs[4]; + u32 pcode; + } mac_addr; + + struct get_dump_reply { + int flags; + int version; + int bytes; + int data[4]; + } get_dump; + + struct lane_stat_data lanestat; + struct link_stat_data linkstat; + struct phy_abilities phy_abilities; + struct phy_statistics phy_statistics; + }; +} _PACKED_ALIGN4; + +static inline void build_lldp_ctrl_set(struct mbx_fw_cmd_req *req, int nr_lane, + int enable) +{ + req->flags = 0; + req->opcode = LLDP_TX_CTL; + req->datalen = sizeof(req->lldp_tx); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->lldp_tx.op = LLDP_TX_SET; + req->lldp_tx.nr_lane = nr_lane; + req->lldp_tx.enable = enable; +} + +static inline void build_lldp_ctrl_get(struct mbx_fw_cmd_req *req, int nr_lane, + void *cookie) +{ + req->flags = 0; + req->opcode = LLDP_TX_CTL; + req->datalen = sizeof(req->lldp_tx); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->lldp_tx.op = LLDP_TX_GET; + req->lldp_tx.nr_lane = nr_lane; +} + +static inline void build_maintain_req(struct mbx_fw_cmd_req *req, void *cookie, + int cmd, int arg0, int req_bytes, + int reply_bytes, u32 dma_phy_lo, + u32 dma_phy_hi) +{ + req->flags = 0; + req->opcode = FW_MAINTAIN; + req->datalen = sizeof(req->maintain); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->maintain.cmd = cmd; + req->maintain.arg0 = arg0; + req->maintain.req_bytes = req_bytes; + req->maintain.reply_bytes = reply_bytes; + req->maintain.ddr_lo = dma_phy_lo; + req->maintain.ddr_hi = dma_phy_hi; +} + +static inline void build_fw_update_req(struct mbx_fw_cmd_req *req, void *cookie, + int partition, u32 fw_bin_phy_lo, + u32 fw_bin_phy_hi, int fw_bytes) +{ + req->flags = 0; + req->opcode = FW_UPDATE; + req->datalen = sizeof(req->fw_update); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->fw_update.paration = partition; + req->fw_update.bytes = fw_bytes; + req->fw_update.bin_phy_lo = fw_bin_phy_lo; + req->fw_update.bin_phy_hi = fw_bin_phy_hi; +} + +static inline void build_reset_phy_req(struct mbx_fw_cmd_req *req, void *cookie) +{ + 
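/* like the other build_*() helpers in this header, this only fills in the request; the caller posts it over the fw mailbox and, if a cookie is given, waits for the reply */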
req->flags = 0; + req->opcode = RESET_PHY; + req->datalen = 0; + req->reply_lo = 0; + req->reply_hi = 0; + req->cookie = cookie; +} + +static inline void build_phy_abalities_req(struct mbx_fw_cmd_req *req, + void *cookie) +{ + req->flags = 0; + req->opcode = GET_PHY_ABALITY; + req->datalen = 0; + req->reply_lo = 0; + req->reply_hi = 0; + req->cookie = cookie; +} + +static inline void build_get_macaddress_req(struct mbx_fw_cmd_req *req, + int lane_mask, int pfvfnum, + void *cookie) +{ + req->flags = 0; + req->opcode = GET_MAC_ADDRESS; + req->datalen = sizeof(req->get_mac_addr); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_mac_addr.lane_mask = lane_mask; + req->get_mac_addr.pfvf_num = pfvfnum; +} + +static inline void build_version_req(struct mbx_fw_cmd_req *req, void *cookie) +{ + req->flags = 0; + req->opcode = GET_VERSION; + req->reply_lo = 0; + req->reply_hi = 0; + req->datalen = 0; + req->cookie = cookie; +} + +/* 7.10.11.8 Read egister admin command */ +static inline void build_readreg_req(struct mbx_fw_cmd_req *req, int reg_addr, + void *cookie) +{ + req->flags = 0; + req->opcode = READ_REG; + req->datalen = sizeof(req->r_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->r_reg.addr = reg_addr & ~(3); + req->r_reg.bytes = 4; +} + +static inline void mbx_fw_req_set_reply(struct mbx_fw_cmd_req *req, + dma_addr_t reply) +{ + u64 address = reply; + + req->reply_hi = (address >> 32); + req->reply_lo = (address) & 0xffffffff; +} + +/* 7.10.11.9 Write egister admin command */ +static inline void build_writereg_req(struct mbx_fw_cmd_req *req, void *cookie, + int reg_addr, int bytes, int value[4]) +{ + int i; + + req->flags = 0; + req->opcode = WRITE_REG; + req->datalen = sizeof(req->w_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->w_reg.addr = reg_addr & ~3; + req->w_reg.bytes = bytes; + for (i = 0; i < bytes / 4; i++) + req->w_reg.data[i] = value[i]; +} + +/* 7.10.11.10 modify egister admin command */ +static inline void build_modifyreg_req(struct mbx_fw_cmd_req *req, void *cookie, + int reg_addr, int value, + unsigned int mask) +{ + req->flags = 0; + req->opcode = MODIFY_REG; + req->datalen = sizeof(req->modify_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->modify_reg.addr = reg_addr; + req->modify_reg.data = value; + req->modify_reg.mask = mask; +} + +static inline void build_get_lane_status_req(struct mbx_fw_cmd_req *req, + int nr_lane, void *cookie) +{ + req->flags = 0; + req->opcode = GET_LANE_STATUS; + req->datalen = sizeof(req->get_lane_st); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_lane_st.nr_lane = nr_lane; +} + +static inline void build_get_link_status_req(struct mbx_fw_cmd_req *req, + int nr_lane, void *cookie) +{ + req->flags = 0; + req->opcode = GET_LINK_STATUS; + req->datalen = sizeof(req->get_linkstat); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_linkstat.nr_lane = nr_lane; +} + +static inline void build_get_temp(struct mbx_fw_cmd_req *req, void *cookie) +{ + req->flags = 0; + req->opcode = GET_TEMP; + req->datalen = 0; + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; +} +static inline void build_get_dump_req(struct mbx_fw_cmd_req *req, void *cookie, + int nr_lane, u32 fw_bin_phy_lo, + u32 fw_bin_phy_hi, int bytes) +{ + req->flags = 0; + req->opcode = GET_DUMP; + req->datalen = sizeof(req->get_dump); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + 
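/* dumps larger than the inline reply are written by firmware to the DMA buffer addressed by bin_phy_hi/lo; see rnp_mbx_get_dump() */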
req->get_dump.bytes = bytes; + req->get_dump.nr_lane = nr_lane; + req->get_dump.bin_phy_lo = fw_bin_phy_lo; + req->get_dump.bin_phy_hi = fw_bin_phy_hi; +} + +static inline void build_set_dump(struct mbx_fw_cmd_req *req, int nr_lane, + int flag) +{ + req->flags = 0; + req->opcode = SET_DUMP; + req->datalen = sizeof(req->set_dump); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->set_dump.flag = flag; + req->set_dump.nr_lane = nr_lane; +} + +static inline void build_led_set(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int value, void *cookie) +{ + req->flags = 0; + req->opcode = LED_SET; + req->datalen = sizeof(req->led_set); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->led_set.nr_lane = nr_lane; + req->led_set.value = value; +} + +static inline void build_set_lane_fun(struct mbx_fw_cmd_req *req, int nr_lane, + int fun, int value0, int value1, + int value2, int value3) +{ + req->flags = 0; + req->opcode = SET_LANE_FUN; + req->datalen = sizeof(req->set_lane_fun); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->set_lane_fun.func = fun; + req->set_lane_fun.nr_lane = nr_lane; + req->set_lane_fun.value0 = value0; + req->set_lane_fun.value1 = value1; + req->set_lane_fun.value2 = value2; + req->set_lane_fun.value3 = value3; +} + +static inline void build_set_phy_reg(struct mbx_fw_cmd_req *req, void *cookie, + enum PHY_INTERFACE phy_inf, char nr_lane, + int reg, int w_data, int recall_qsfp_page) +{ + req->flags = 0; + req->opcode = SET_PHY_REG; + req->datalen = sizeof(req->set_phy_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->set_phy_reg.phy_interface = phy_inf; + req->set_phy_reg.nr_lane = nr_lane; + req->set_phy_reg.phy_reg_addr = reg; + req->set_phy_reg.phy_w_data = w_data; + + if (recall_qsfp_page) + req->set_phy_reg.recall_qsfp_page = 1; + else + req->set_phy_reg.recall_qsfp_page = 0; +} + +static inline void build_get_phy_reg(struct mbx_fw_cmd_req *req, void *cookie, + enum PHY_INTERFACE phy_inf, char nr_lane, + int reg) +{ + req->flags = 0; + req->opcode = GET_PHY_REG; + req->datalen = sizeof(req->get_phy_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->get_phy_reg.phy_interface = phy_inf; + + req->get_phy_reg.nr_lane = nr_lane; + req->get_phy_reg.phy_reg_addr = reg; +} + +static inline void build_phy_pause_set(struct mbx_fw_cmd_req *req, + int pause_mode, int nr_lane) +{ + req->flags = 0; + req->opcode = PHY_PAUSE_SET; + req->datalen = sizeof(req->phy_pause_set); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->phy_pause_set.nr_lane = nr_lane; + req->phy_pause_set.pause_mode = pause_mode; +} + +static inline void build_phy_link_set(struct mbx_fw_cmd_req *req, + unsigned int adv, int nr_lane, + unsigned int autoneg, unsigned int speed, + unsigned int duplex, + unsigned int tp_mdix_ctrl) +{ + req->flags = 0; + req->opcode = PHY_LINK_SET; + req->datalen = sizeof(req->phy_link_set); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->phy_link_set.nr_lane = nr_lane; + req->phy_link_set.adv_speed_mask = adv; + req->phy_link_set.autoneg = autoneg; + req->phy_link_set.speed = speed; + req->phy_link_set.duplex = duplex; + req->phy_link_set.tp_mdix_ctrl = tp_mdix_ctrl; +} + +static inline void build_ifup_down(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int up) +{ + req->flags = 0; + req->opcode = IFUP_DOWN; + req->datalen = sizeof(req->ifup); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + 
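/* tell firmware which lane is changing state and whether it is coming up */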
req->ifup.lane = nr_lane; + req->ifup.up = up; +} + +static inline void build_ifinsmod(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = DRIVER_INSMOD; + req->datalen = sizeof(req->ifinsmod); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifinsmod.lane = nr_lane; + req->ifinsmod.status = status; +} + +static inline void build_ifsuspuse(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = SYSTEM_SUSPUSE; + req->datalen = sizeof(req->ifsuspuse); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifinsmod.lane = nr_lane; + req->ifinsmod.status = status; +} + +static inline void build_ifforce(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = FORCE_LINK_ON_CLOSE; + req->datalen = sizeof(req->ifforce); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifforce.nr_lane = nr_lane; + req->ifforce.status = status; +} + +static inline void build_mbx_sfp_read(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int sfp_addr, + int reg, int cnt, void *cookie) +{ + req->flags = 0; + req->opcode = SFP_MODULE_READ; + req->datalen = sizeof(req->sfp_read); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->sfp_read.nr_lane = nr_lane; + req->sfp_read.sfp_adr = sfp_addr; + req->sfp_read.reg = reg; + ; + req->sfp_read.cnt = cnt; +} + +static inline void build_mbx_sfp_write(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int sfp_addr, + int reg, int v) +{ + req->flags = 0; + req->opcode = SFP_MODULE_WRITE; + req->datalen = sizeof(req->sfp_write); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->sfp_write.nr_lane = nr_lane; + req->sfp_write.sfp_adr = sfp_addr; + req->sfp_write.reg = reg; + req->sfp_write.val = v; +} + +static inline void build_mbx_wol_set(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, u32 mode) +{ + req->flags = 0; + req->opcode = SET_WOL; + req->datalen = sizeof(req->sfp_write); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->wol.lane = nr_lane; + req->wol.enable = mode; +} + +/* enum link_event_mask or */ +static inline void build_link_set_event_mask(struct mbx_fw_cmd_req *req, + unsigned short event_mask, + unsigned short enable, + void *cookie) +{ + req->flags = 0; + req->opcode = SET_EVENT_MASK; + req->datalen = sizeof(req->stat_event_mask); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->stat_event_mask.event_mask = event_mask; + req->stat_event_mask.enable_stat = enable; +} + +static inline void +build_link_set_loopback_req(struct mbx_fw_cmd_req *req, void *cookie, + enum LOOPBACK_LEVEL level, + enum LOOPBACK_FORCE_SPEED force_speed) +{ + req->flags = 0; + req->opcode = SET_LOOPBACK_MODE; + req->datalen = sizeof(req->loopback); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->loopback.loopback_level = level; + req->loopback.loopback_type = LOOPBACK_TYPE_LOCAL; + if (force_speed != LOOPBACK_FORCE_SPEED_NONE) { + req->loopback.loopback_force_speed = force_speed; + req->loopback.loopback_force_speed_enable = 1; + } +} + +static inline void build_ddr_csl(struct mbx_fw_cmd_req *req, void *cookie, + bool enable, dma_addr_t dma_phy, int bytes) +{ + req->flags = 0; + req->opcode = SET_DDR_CSL; + req->datalen = sizeof(req->ddr_csl); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->ddr_csl.enable = enable; + + if (enable) { + 
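/* when enabling, pass the buffer length and split the 64-bit DMA address into hi/lo words */ +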
req->ddr_csl.bytes = bytes; + req->ddr_csl.ddr_phy_hi = (dma_phy >> 32); + req->ddr_csl.ddr_phy_lo = dma_phy & 0xffffffff; + } else { + req->ddr_csl.bytes = 0; + } +} + +/* =========== errcode======= */ +enum MBX_ERR { + MBX_OK = 0, + MBX_ERR_NO_PERM, + MBX_ERR_INVAL_OPCODE, + MBX_ERR_INVALID_PARAM, + MBX_ERR_INVALID_ADDR, + MBX_ERR_INVALID_LEN, + MBX_ERR_NODEV, + MBX_ERR_IO, +}; + +int rnp_fw_get_capability(struct rnp_hw *hw, struct phy_abilities *abil); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mpe.c b/drivers/net/ethernet/mucse/rnp/rnp_mpe.c new file mode 100644 index 000000000000..fc95b16e98eb --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mpe.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include + +#include "rnp_common.h" +#include "rnp_mbx.h" +#include "rnp_mpe.h" +#define MPE_FW_BIN "n10c/n10-mpe.bin" +#define MPE_FW_DATA "n10c/n10-mpe-data.bin" +#define MPE_RPU_BIN "n10c/n10-rpu.bin" + +extern unsigned int mpe_src_port; +extern unsigned int mpe_pkt_version; +#define CFG_RPU_OFFSET 0x100000 +/* 4010_0000 broadcast addr */ +#define START_MPE_REG 0x00198700 +/* 4019_8700 start all mpe */ + +/* RV_CORE_STATUS: 4000_6000 */ +#define RV_CORE0_WORING_REG 0x6000 +#define RPU_ID 0x6060 +/* read-only rpu id */ + +/* RPU_REG */ +#define RV_BROADCAST_START_REG (0x106000) +#define RPU_DMA_START_REG (0x110000) +#define RPU_ENDIAN_REG (0x110010) +#define N10_START_REG (0x106000) + +/* MPE0_ICCM: 4020_0000H */ +#define CFG_MPE_ICCM(nr) (0x200000 + (nr) * 0x80000) +#define CFG_MPE_DCCM(nr) (0x220000 + (nr) * 0x80000) + +#define RPU_CM3_BASE 0x40000000 +#define RPU_SDRAM_BASE (0x60000000) +#define SDRAM_DEFAULT_VAL (0x88481c00) + +#define iowrite32_arrary(rpubase, offset, array, size) \ + do { \ + int i; \ + for (i = 0; i < size; i++) { \ + rnp_wr_reg(((char *)(rpubase)) + (offset) + i * 4, \ + (array)[i]); \ + } \ + } while (0) + +static void rnp_reset_mpe_and_rpu(struct rnp_hw *hw) +{ +#define SYSCTL_CRG_CTRL12 0x30007030 +#define RPU_RESET_BIT 9 + + /* reset rpu/mpe/pub */ + cm3_reg_write32(hw, SYSCTL_CRG_CTRL12, BIT(RPU_RESET_BIT + 16) | 0); + smp_mb(); + mdelay(150); + cm3_reg_write32(hw, SYSCTL_CRG_CTRL12, + BIT(RPU_RESET_BIT + 16) | BIT(RPU_RESET_BIT)); + smp_mb(); + mdelay(100); +} + +static void rnp_start_rpu(char *rpu_base, int do_start) +{ + int mpe_start_v = 0xff, rpu_start_v = 0x1; + + if (do_start == 0) { + mpe_start_v = 0; + rpu_start_v = 0; + } + rnp_wr_reg(rpu_base + START_MPE_REG, mpe_start_v); + + /* start all rpu-rv-core */ + rnp_wr_reg(rpu_base + RV_BROADCAST_START_REG, rpu_start_v); + /* start rpu */ + rnp_wr_reg(rpu_base + RPU_DMA_START_REG, rpu_start_v); + + smp_mb(); +} + +/* + @rpu_base: mapped(0x4000_0000) + @mpe_bin : required + @mpe_data: optional + @rpu_bin : optional +*/ +static int +rnp_download_and_start_rpu(struct rnp_hw *hw, char *rpu_base, + const unsigned int *mpe_bin, const int mpe_bin_sz, + const unsigned int *mpe_data, const int mpe_data_sz, + const unsigned int *rpu_bin, const int rpu_sz) +{ + int nr = 0; + + rnp_info("MPE: rpu:%d mpe:%d mpe-data:%d. 
Downloading...\n", rpu_sz, + mpe_bin_sz, mpe_data_sz); + + rnp_reset_mpe_and_rpu(hw); + + /* download rpu firmeware */ + if (rpu_sz) { + iowrite32_arrary(rpu_base, CFG_RPU_OFFSET + 0x4000, rpu_bin, + rpu_sz / 4); + } + + /* download firmware to 4 mpe-core: mpe0,mpe1,mpe2,mpe3 */ + for (nr = 0; nr < 4; nr++) { + iowrite32_arrary(rpu_base, CFG_MPE_ICCM(nr), mpe_bin, + mpe_bin_sz / 4); + if (mpe_data_sz) + iowrite32_arrary(rpu_base, CFG_MPE_DCCM(nr), mpe_data, + mpe_data_sz / 4); + } + smp_mb(); + + /* Enable MPE */ + if (mpe_src_port != 0) { + printk("%s %d\n", __func__, __LINE__); + rnp_wr_reg(rpu_base + 0x100000, mpe_pkt_version); + rnp_wr_reg(rpu_base + 0x100004, mpe_src_port); + } + + /* start mpe */ + rnp_wr_reg(rpu_base + RPU_ENDIAN_REG, 0xf); + smp_mb(); + rnp_start_rpu(rpu_base, 1); + + return 0; +} + +/* + *load fw bin from: /lib/firmware/ directory + */ +static const struct firmware *rnp_load_fw(struct device *dev, + const char *fw_name) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, fw_name, dev); + if (rc != 0) { + // dev_warn( dev, "Faild to requesting firmware file: %s, %d\n", + // fw_name, rc); + return NULL; + } + + return fw; +} + +int rnp_rpu_mpe_start(struct rnp_adapter *adapter) +{ + const struct firmware *mpe_bin = NULL, *mpe_data = NULL, + *rpu_bin = NULL; + struct rnp_hw *hw = &adapter->hw; + int rpu_version, err = 0; + // u32 val = 0; + + rpu_version = cm3_reg_read32(hw, RPU_CM3_BASE + RPU_ID); + dev_info(&adapter->pdev->dev, "rpu_version:0x%x\n", rpu_version); + + if (rpu_version != 0x20201125) { + dev_info(&adapter->pdev->dev, "rpu not enabled!\n"); + return -1; + } + + dev_info(&adapter->pdev->dev, "rpu_addr=%p\n", hw->rpu_addr); + if (hw->rpu_addr == NULL) { + return -EINVAL; + } + + mpe_bin = rnp_load_fw(&adapter->pdev->dev, MPE_FW_BIN); + if (!mpe_bin) { + dev_warn(&adapter->pdev->dev, "can't load mpe fw:%s\n", + MPE_FW_BIN); + goto quit; + } + mpe_data = rnp_load_fw(&adapter->pdev->dev, MPE_FW_DATA); + if (!mpe_data) { + dev_warn(&adapter->pdev->dev, "no %s, ignored\n", MPE_FW_DATA); + } + rpu_bin = rnp_load_fw(&adapter->pdev->dev, MPE_RPU_BIN); + if (!rpu_bin) { + dev_warn(&adapter->pdev->dev, "no %s, ignored\n", MPE_RPU_BIN); + } + + err = rnp_download_and_start_rpu( + hw, hw->rpu_addr, (unsigned int *)mpe_bin->data, mpe_bin->size, + mpe_data ? (unsigned int *)mpe_data->data : NULL, + mpe_data ? mpe_data->size : 0, + rpu_bin ? (unsigned int *)rpu_bin->data : NULL, + rpu_bin ? rpu_bin->size : 0); + if (err != 0) { + dev_warn(&adapter->pdev->dev, "can't start mpe and rpu\n"); + goto quit; + } + + adapter->rpu_inited = 1; + +quit: + if (rpu_bin) { + release_firmware(rpu_bin); + } + if (mpe_data) + release_firmware(mpe_data); + if (mpe_bin) + release_firmware(mpe_bin); + return 0; +} + +void rnp_rpu_mpe_stop(struct rnp_adapter *adapter) +{ + if (adapter->rpu_inited) { + rnp_start_rpu(adapter->hw.rpu_addr, 0); + rnp_reset_mpe_and_rpu(&adapter->hw); + } + + adapter->rpu_inited = 0; +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mpe.h b/drivers/net/ethernet/mucse/rnp/rnp_mpe.h new file mode 100644 index 000000000000..d36fcb2a1b01 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mpe.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef RNP_MPE_H +#define RNP_MPE_H + +#include "rnp.h" + +int rnp_rpu_mpe_start(struct rnp_adapter *adapter); +void rnp_rpu_mpe_stop(struct rnp_adapter *adapter); + +#endif //RNP_MPE_H diff --git a/drivers/net/ethernet/mucse/rnp/rnp_n10.c b/drivers/net/ethernet/mucse/rnp/rnp_n10.c new file mode 100644 index 000000000000..1cc0ba9836f1 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_n10.c @@ -0,0 +1,4813 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include + +#include "rnp.h" +#include "rnp_phy.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" +#include "rnp_pcs.h" +#include "rnp_ethtool.h" +#include "rnp_sriov.h" + +#define RNP_N400_MAX_VF 8 +#define RNP_N400_RSS_TBL_NUM 128 +#define RNP_N400_RSS_TC_TBL_NUM 8 +#define RNP_N400_MAX_TX_QUEUES 8 +#define RNP_N400_MAX_RX_QUEUES 8 +#define RNP_N400_RAR_NCSI_RAR_ENTRIES 0 +#define RNP_N10_MAX_VF 64 +#define RNP_N10_RSS_TBL_NUM 128 +#define RNP_N10_RSS_TC_TBL_NUM 8 +#define RNP_N10_MAX_TX_QUEUES 128 +#define RNP_N10_MAX_RX_QUEUES 128 +#define RNP_N10_RAR_NCSI_RAR_ENTRIES 0 + +#if defined(NIC_VF_FXIED) || defined(VF_PROMISC_SUPPORT) +/* we use the last dmac to support vf promisc */ +#define RNP_N10_RAR_ENTRIES (127 - RNP_N10_RAR_NCSI_RAR_ENTRIES) +#else +#define RNP_N10_RAR_ENTRIES (128 - RNP_N10_RAR_NCSI_RAR_ENTRIES) +#endif + + +#define RNP_N10_MC_TBL_SIZE 128 +#define RNP_N10_VFT_TBL_SIZE 128 +#define RNP_N10_RX_PB_SIZE 512 +#ifndef RNP_N10_MSIX_VECTORS +#define RNP_N10_MSIX_VECTORS 64 +#endif +#define RNP_N400_MSIX_VECTORS 17 + +#define RNP10_MAX_LAYER2_FILTERS 16 +#define RNP10_MAX_TCAM_FILTERS 4096 +#define RNP10_MAX_TUPLE5_FILTERS 128 + + +/* setup queue speed limit to max_rate */ +static void rnp_dma_set_tx_maxrate_n10(struct rnp_dma_info *dma, u16 queue, + u32 max_rate) +{ +} + +/* setup mac with vf_num to veb table */ +static void rnp_dma_set_veb_mac_n10(struct rnp_dma_info *dma, u8 *mac, + u32 vfnum, u32 ring) +{ + u32 maclow, machi, ring_vfnum; + int port; + + maclow = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]; + machi = (mac[0] << 8) | mac[1]; + ring_vfnum = ring | ((0x80 | vfnum) << 8); + for (port = 0; port < 4; port++) { + dma_wr32(dma, RNP10_DMA_PORT_VBE_MAC_LO_TBL(port, vfnum), + maclow); + dma_wr32(dma, RNP10_DMA_PORT_VBE_MAC_HI_TBL(port, vfnum), + machi); + dma_wr32(dma, RNP10_DMA_PORT_VEB_VF_RING_TBL(port, vfnum), + ring_vfnum); + } +} + +/* setup vlan with vf_num to veb table */ +static void rnp_dma_set_veb_vlan_n10(struct rnp_dma_info *dma, u16 vlan, + u32 vfnum) +{ + int port; + + /* each vf can support only one vlan */ + for (port = 0; port < 4; port++) + dma_wr32(dma, RNP10_DMA_PORT_VEB_VID_TBL(port, vfnum), vlan); +} + +static void rnp_dma_clr_veb_all_n10(struct rnp_dma_info *dma) +{ + int port, i; + + for (port = 0; port < 4; port++) { + for (i = 0; i < VEB_TBL_CNTS; i++) { + dma_wr32(dma, RNP_DMA_PORT_VBE_MAC_LO_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VBE_MAC_HI_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VEB_VID_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VEB_VF_RING_TBL(port, i), 0); + } + } +} + +static struct rnp_dma_operations dma_ops_n10 = { + .set_tx_maxrate = &rnp_dma_set_tx_maxrate_n10, + .set_veb_mac = &rnp_dma_set_veb_mac_n10, + .set_veb_vlan = &rnp_dma_set_veb_vlan_n10, + .clr_veb_all = &rnp_dma_clr_veb_all_n10, + +}; + +/** + * rnp_eth_set_rar_n10 - Set Rx address register + * @eth: pointer to eth structure + * @index: Receive address register to write + * @addr: Address to put into receive 
address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * @sriov_flag + * + * Puts an ethernet address into a receive address register. + **/ +static s32 rnp_eth_set_rar_n10(struct rnp_eth_info *eth, u32 index, u8 *addr, + bool enable_addr) +{ + u32 mcstctrl; + u32 rar_low, rar_high = 0; + u32 rar_entries = eth->num_rar_entries; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + /* Make sure we are using a valid rar index range */ + if (index >= (rar_entries + hw->ncsi_rar_entries)) { + rnp_err("RAR index %d is out of range.\n", index); + return RNP_ERR_INVALID_ARGUMENT; + } + + eth_dbg(eth, " RAR[%d] <= %pM. vmdq:%d enable:0x%x\n", index, addr); + + /* + * HW expects these in big endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[5] | ((u32)addr[4] << 8) | ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = eth_rd32(eth, RNP10_ETH_RAR_RH(index)); + rar_high &= ~(0x0000FFFF | RNP10_RAH_AV); + rar_high |= ((u32)addr[1] | ((u32)addr[0] << 8)); + + if (enable_addr) + rar_high |= RNP10_RAH_AV; + + eth_wr32(eth, RNP10_ETH_RAR_RL(index), rar_low); + eth_wr32(eth, RNP10_ETH_RAR_RH(index), rar_high); + + /* open unicast filter */ + /* we now not use unicast */ + /* but we must open this since dest-mac filter | unicast table */ + /* all packets up if close unicast table */ + mcstctrl = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + mcstctrl |= RNP10_MCSTCTRL_UNICASE_TBL_EN; + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, mcstctrl); + + return 0; +} + +/** + * rnp_eth_clear_rar_n10 - Remove Rx address register + * @eth: pointer to eth structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +static s32 rnp_eth_clear_rar_n10(struct rnp_eth_info *eth, u32 index) +{ + u32 rar_high; + u32 rar_entries = eth->num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", index); + return RNP_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. 
+ */ + rar_high = eth_rd32(eth, RNP10_ETH_RAR_RH(index)); + rar_high &= ~(0x0000FFFF | RNP10_RAH_AV); + + eth_wr32(eth, RNP10_ETH_RAR_RL(index), 0); + eth_wr32(eth, RNP10_ETH_RAR_RH(index), rar_high); + /* clear VMDq pool/queue selection for this RAR */ + eth->ops.clear_vmdq(eth, index, RNP_CLEAR_VMDQ_ALL); + + return 0; +} + +/** + * rnp_eth_set_vmdq_n10 - Associate a VMDq pool index with a rx address + * @eth: pointer to eth struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + * only mac->vf + **/ +static s32 rnp_eth_set_vmdq_n10(struct rnp_eth_info *eth, u32 rar, u32 vmdq) +{ + u32 rar_entries = eth->num_rar_entries; + struct rnp_hw *hw = (struct rnp_hw *)&eth->back; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", rar); + return RNP_ERR_INVALID_ARGUMENT; + } + /* n400 should use like this + * ---------- + * vf0 | vf1 | vf2 + * n400 4 | 8 | 12 + * n10 2 | 4 | 6 + * n10(1)0 | 2 | 4 + * not good here + */ + if (hw->hw_type == rnp_hw_n400) + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), vmdq * 2); + else + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), vmdq); + + return 0; +} + +/** + * rnp_eth_clear_vmdq_n10 - Disassociate a VMDq pool index from a rx address + * @eth: pointer to eth struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +static s32 rnp_eth_clear_vmdq_n10(struct rnp_eth_info *eth, u32 rar, u32 vmdq) +{ + u32 rar_entries = eth->num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", rar); + return RNP_ERR_INVALID_ARGUMENT; + } + + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), 0); + + return 0; +} + +static s32 rnp10_mta_vector(struct rnp_eth_info *eth, u8 *mc_addr) +{ + u32 vector = 0; + + switch (eth->mc_filter_type) { + case 0: /* use bits [36:47] of the address */ + vector = ((mc_addr[4] << 8) | (((u16)mc_addr[5]))); + break; + case 1: /* use bits [35:46] of the address */ + vector = ((mc_addr[4] << 7) | (((u16)mc_addr[5]) >> 1)); + break; + case 2: /* use bits [34:45] of the address */ + vector = ((mc_addr[4] << 6) | (((u16)mc_addr[5]) >> 2)); + break; + case 3: /* use bits [32:43] of the address */ + vector = ((mc_addr[4] << 5) | (((u16)mc_addr[5]) >> 3)); + break; + default: /* Invalid mc_filter_type */ + hw_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +static void rnp10_set_mta(struct rnp_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + struct rnp_eth_info *eth = &hw->eth; + + hw->addr_ctrl.mta_in_use++; + + vector = rnp10_mta_vector(eth, mc_addr); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register is determined by the lower 5 bits of + * the value. 
+ */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw_dbg(hw, "\t\t%pM: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", mc_addr, + vector, vector_reg, vector_bit); + eth->mta_shadow[vector_reg] |= (1 << vector_bit); +} + +static void rnp10_set_vf_mta(struct rnp_hw *hw, u16 vector) +{ + u32 vector_bit; + u32 vector_reg; + struct rnp_eth_info *eth = &hw->eth; + + hw->addr_ctrl.mta_in_use++; + + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw_dbg(hw, "\t\t vf M: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", vector, + vector_reg, vector_bit); + eth->mta_shadow[vector_reg] |= (1 << vector_bit); +} + +static u8 *rnp_addr_list_itr(struct rnp_hw __maybe_unused *hw, u8 **mc_addr_ptr) +{ + struct netdev_hw_addr *mc_ptr; + u8 *addr = *mc_addr_ptr; + + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } else + *mc_addr_ptr = NULL; + + return addr; +} + +/** + * rnp_update_mc_addr_list_n10 - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +static s32 rnp_eth_update_mc_addr_list_n10(struct rnp_eth_info *eth, + struct net_device *netdev, + bool sriov_on) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + struct netdev_hw_addr *ha; + u32 i; + u32 v; + int addr_count = 0; + u8 *addr_list = NULL; + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + eth_dbg(eth, " Clearing MTA(multicast table)\n"); + + memset(&eth->mta_shadow, 0, sizeof(eth->mta_shadow)); + + /* Update mta shadow */ + eth_dbg(eth, " Updating MTA..\n"); + + addr_count = netdev_mc_count(netdev); + + ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr, list); + addr_list = ha->addr; + for (i = 0; i < addr_count; i++) { + eth_dbg(eth, " Adding the multicast addresses:\n"); + rnp10_set_mta(hw, rnp_addr_list_itr(hw, &addr_list)); + } + + if (hw->ncsi_en) { + eth->ops.ncsi_set_mc_mta(eth); + } + + if (sriov_on) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + if (!test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo) { + struct vf_data_storage *vfinfo = + &adapter->vfinfo[i]; + int j; + + for (j = 0; + j < vfinfo->num_vf_mc_hashes; j++) + rnp10_set_vf_mta( + hw, + vfinfo->vf_mc_hashes[j]); + } + } + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + + /* Enable mta */ + for (i = 0; i < hw->eth.mcft_size; i++) { + if (hw->addr_ctrl.mta_in_use) + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(i), + eth->mta_shadow[i]); + } + + if (hw->addr_ctrl.mta_in_use > 0) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + if (!(adapter->flags & RNP_FLAG_SWITCH_LOOPBACK_EN)) { + v = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, + v | RNP10_MCSTCTRL_MULTICASE_TBL_EN | + eth->mc_filter_type); + } + } + + eth_dbg(eth, " update MTA Done. 
mta_in_use:%d\n", + hw->addr_ctrl.mta_in_use); + return hw->addr_ctrl.mta_in_use; +} + +/* clean all mc addr */ +static void rnp_eth_clr_mc_addr_n10(struct rnp_eth_info *eth) +{ + int i; + + for (i = 0; i < eth->mcft_size; i++) + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(i), 0); +} + +/** + * rnp_eth_update_rss_key_n10 - Remove Rx address register + * @eth: pointer to eth structure + * @sriov_flag sriov status + * + * update rss key to eth regs + **/ +static void rnp_eth_update_rss_key_n10(struct rnp_eth_info *eth, bool sriov_flag) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + int i; + u8 *key_temp; + int key_len = RNP_RSS_KEY_SIZE; + u8 *key = hw->rss_key; + u32 *value; + + u32 iov_en = (sriov_flag) ? RNP10_IOV_ENABLED : 0; + + key_temp = kmalloc(key_len, GFP_KERNEL); + /* reoder the key */ + for (i = 0; i < key_len; i++) + *(key_temp + key_len - i - 1) = *(key + i); + + value = (u32 *)key_temp; + for (i = 0; i < key_len; i = i + 4) + eth_wr32(eth, RNP10_ETH_RSS_KEY + i, *(value + i / 4)); + + kfree(key_temp); + + /* open rss now */ + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_ENABLE_RSS_ONLY | iov_en); +} + +/** + * rnp_eth_update_rss_table_n10 - Remove Rx address register + * @eth: pointer to eth structure + * + * update rss table to eth regs + **/ +static void rnp_eth_update_rss_table_n10(struct rnp_eth_info *eth) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u32 reta_entries = hw->rss_indir_tbl_num; + u32 tc_entries = hw->rss_tc_tbl_num; + int i; + + for (i = 0; i < tc_entries; i++) + eth_wr32(eth, RNP10_ETH_TC_IPH_OFFSET_TABLE(i), + hw->rss_tc_tbl[i]); + + for (i = 0; i < reta_entries; i++) + eth_wr32(eth, RNP10_ETH_RSS_INDIR_TBL(i), hw->rss_indir_tbl[i]); +} + +/** + * rnp_eth_set_vfta_n10 - Set VLAN filter table + * @eth: pointer to eth structure + * @vlan: VLAN id to write to VLAN filter + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +static s32 rnp_eth_set_vfta_n10(struct rnp_eth_info *eth, u32 vlan, bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + bool vfta_changed = false; + + /* todo in vf mode vlvf regester can be set according to vind*/ + if (vlan > 4095) + return RNP_ERR_PARAM; + + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + vfta = eth_rd32(eth, RNP10_VFTA(regindex)); + + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + if (vfta_changed) + eth_wr32(eth, RNP10_VFTA(regindex), vfta); + return 0; +} + +static void rnp_eth_clr_vfta_n10(struct rnp_eth_info *eth) +{ + u32 offset; + + for (offset = 0; offset < eth->vft_size; offset++) + eth_wr32(eth, RNP10_VFTA(offset), 0); +} + +/** + * rnp_eth_set_vlan_filter_n10 - Set VLAN filter table + * @eth: pointer to eth structure + * @status: on |off + * Turn on/off VLAN filter table. 
+ **/ +static void rnp_eth_set_vlan_filter_n10(struct rnp_eth_info *eth, bool status) +{ +#define ETH_VLAN_FILTER_BIT (30) + u32 value = eth_rd32(eth, RNP10_ETH_VLAN_FILTER_ENABLE); + + /* clear bit first */ + value &= (~(0x01 << ETH_VLAN_FILTER_BIT)); + if (status) + value |= (0x01 << ETH_VLAN_FILTER_BIT); + eth_wr32(eth, RNP10_ETH_VLAN_FILTER_ENABLE, value); +} + +static u16 rnp_layer2_pritologic_n10(u16 hw_id) +{ + return hw_id; +} + +static void rnp_eth_set_layer2_n10(struct rnp_eth_info *eth, + union rnp_atr_input *input, u16 pri_id, u8 queue, + bool prio_flag) +{ + u16 hw_id; + + hw_id = rnp_layer2_pritologic_n10(pri_id); + /* enable layer2 */ + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(hw_id), + (0x1 << 31) | (ntohs(input->layer2_formate.proto))); + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), (0x1 << 31)); + } else { + if (queue == ACTION_TO_MPE) + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), + (0x1 << 29) | (MPE_PORT << 16)); + else + /* setup ring_number */ + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), + (0x1 << 30) | (queue << 20)); + } +} + +static void rnp_eth_clr_layer2_n10(struct rnp_eth_info *eth, u16 pri_id) +{ + u16 hw_id; + + hw_id = rnp_layer2_pritologic_n10(pri_id); + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(hw_id), 0); +} + +static void rnp_eth_clr_all_layer2_n10(struct rnp_eth_info *eth) +{ + int i; +#define RNP10_MAX_LAYER2_FILTERS 16 + for (i = 0; i < RNP10_MAX_LAYER2_FILTERS; i++) + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(i), 0); +} + +static u16 rnp_tuple5_pritologic_n10(u16 hw_id) +{ + return hw_id; +} + +static u16 rnp_tuple5_pritologic_tcam_n10(u16 pri_id) +{ + int i; + int hw_id = 0; + int step = 32; + for (i = 0; i < pri_id; i++) { + hw_id += step; + if (hw_id > RNP10_MAX_TCAM_FILTERS) + hw_id = hw_id - RNP10_MAX_TCAM_FILTERS + 1; + } + + return hw_id; +} + +static void rnp_eth_set_tuple5_n10(struct rnp_eth_info *eth, + union rnp_atr_input *input, u16 pri_id, u8 queue, + bool prio_flag) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + +#define RNP10_SRC_IP_MASK BIT(0) +#define RNP10_DST_IP_MASK BIT(1) +#define RNP10_SRC_PORT_MASK BIT(2) +#define RNP10_DST_PORT_MASK BIT(3) +#define RNP10_L4_PROTO_MASK BIT(4) + + if (hw->fdir_mode != fdir_mode_tcam) { + u32 port = 0; + u8 mask_temp = 0; + u8 l4_proto_type = 0; + u16 hw_id; + + hw_id = rnp_tuple5_pritologic_n10(pri_id); + dbg("try to eable tuple 5 %x\n", hw_id); + if (input->formatted.src_ip[0] != 0) { + eth_wr32(eth, RNP10_ETH_TUPLE5_SAQF(hw_id), + htonl(input->formatted.src_ip[0])); + } else { + mask_temp |= RNP10_SRC_IP_MASK; + } + if (input->formatted.dst_ip[0] != 0) { + eth_wr32(eth, RNP10_ETH_TUPLE5_DAQF(hw_id), + htonl(input->formatted.dst_ip[0])); + } else + mask_temp |= RNP10_DST_IP_MASK; + if (input->formatted.src_port != 0) + port |= (htons(input->formatted.src_port)); + else + mask_temp |= RNP10_SRC_PORT_MASK; + if (input->formatted.dst_port != 0) + port |= (htons(input->formatted.dst_port) << 16); + else + mask_temp |= RNP10_DST_PORT_MASK; + + if (port != 0) + eth_wr32(eth, RNP10_ETH_TUPLE5_SDPQF(hw_id), port); + + switch (input->formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + l4_proto_type = IPPROTO_TCP; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + l4_proto_type = IPPROTO_UDP; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + l4_proto_type = IPPROTO_SCTP; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + l4_proto_type = input->formatted.inner_mac[0]; + break; + default: + l4_proto_type = 0; + } + + if (l4_proto_type == 0) + mask_temp |= 
RNP10_L4_PROTO_MASK; + + /* setup ftqf*/ + /* always set 0x3 */ + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(hw_id), + (1 << 31) | (mask_temp << 25) | (l4_proto_type << 16) | + 0x3); + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + (0x1 << 31)); + } else { + if (queue == ACTION_TO_MPE) + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + (0x1 << 29) | (MPE_PORT << 16)); + else + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + ((0x1 << 30) | (queue << 20))); + } + + } else { + u32 port = 0; + u32 port_mask = 0; + u8 l4_proto_type = 0; + u8 l4_proto_mask = 0xff; + u32 action = 0; + u32 mark = 0; + u16 hw_id; + + hw_id = rnp_tuple5_pritologic_tcam_n10(pri_id); + eth_wr32(eth, RNP10_TCAM_MODE, 2); + dbg("try to eable tcam %d\n", hw_id); + if (input->formatted.src_ip[0] != 0) { + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), + htonl(input->formatted.src_ip[0])); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), + htonl(input->formatted.src_ip_mask[0])); + + dbg("tcam src ip 0%x ---> 0x%x\n", + htonl(input->formatted.src_ip[0]), + RNP10_TCAM_SAQF(hw_id)); + dbg("tcam src ip mask 0%x ---> 0x%x\n", + htonl(input->formatted.src_ip_mask[0]), + RNP10_TCAM_SAQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), 0); + dbg("tcam src ip 0%x ---> 0x%x\n", 0, + RNP10_TCAM_SAQF(hw_id)); + dbg("tcam src ip mask 0%x ---> 0x%x\n", 0, + RNP10_TCAM_SAQF_MASK(hw_id)); + } + if (input->formatted.dst_ip[0] != 0) { + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), + htonl(input->formatted.dst_ip[0])); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), + htonl(input->formatted.dst_ip_mask[0])); + dbg("tcam dst ip 0%x ---> 0x%x\n", + htonl(input->formatted.dst_ip[0]), + RNP10_TCAM_DAQF(hw_id)); + dbg("tcam dst ip mask 0%x ---> 0x%x\n", + htonl(input->formatted.dst_ip_mask[0]), + RNP10_TCAM_DAQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), 0); + dbg("tcam dst ip 0%x ---> 0x%x\n", 0, + RNP10_TCAM_DAQF(hw_id)); + dbg("tcam dst ip mask 0%x ---> 0x%x\n", 0, + RNP10_TCAM_DAQF_MASK(hw_id)); + } + if (input->formatted.src_port != 0) { + port |= (htons(input->formatted.src_port) << 16); + port_mask |= + (htons(input->formatted.src_port_mask) << 16); + + } + if (input->formatted.dst_port != 0) { + port |= (htons(input->formatted.dst_port)); + port_mask |= (htons(input->formatted.dst_port_mask)); + } + + /* setup src & dst port */ + if (port != 0) { + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), port); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), port_mask); + + dbg("tcam port 0%x ---> 0x%x\n", port, + RNP10_TCAM_SDPQF(hw_id)); + dbg("tcam port mask 0%x ---> 0x%x\n", port_mask, + RNP10_TCAM_SDPQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), 0); + dbg("tcam port 0%x ---> 0x%x\n", port, + RNP10_TCAM_SDPQF(hw_id)); + dbg("tcam port mask 0%x ---> 0x%x\n", port_mask, + RNP10_TCAM_SDPQF_MASK(hw_id)); + } + + switch (input->formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + l4_proto_type = IPPROTO_TCP; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + l4_proto_type = IPPROTO_UDP; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + l4_proto_type = IPPROTO_SCTP; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + l4_proto_type = input->formatted.inner_mac[0]; + l4_proto_mask = input->formatted.inner_mac_mask[0]; + break; + default: + l4_proto_type = 0; + l4_proto_mask = 0; + } + + if (l4_proto_type != 0) { + action |= l4_proto_type; + mark 
|= l4_proto_mask; + } else { + } + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + (0x1 << 31) | action); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), mark); + dbg("tcam action 0%x ---> 0x%x\n", (0x1 << 31) | action, + RNP10_TCAM_APQF(hw_id)); + dbg("tcam action mask 0%x ---> 0x%x\n", mark, + RNP10_TCAM_APQF_MASK(hw_id)); + } else { + if (queue == ACTION_TO_MPE) { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + (0x1 << 29) | (MPE_PORT << 24) | + action); + } else { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + ((0x1 << 30) | (queue << 16) | + action)); + } + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), mark); + + dbg("tcam action 0%x ---> 0x%x\n", + (0x1 << 30) | (queue << 16) | action, + RNP10_TCAM_APQF(hw_id)); + dbg("tcam action mask 0%x ---> 0x%x\n", mark, + RNP10_TCAM_APQF_MASK(hw_id)); + } + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_clr_tuple5_n10(struct rnp_eth_info *eth, u16 pri_id) +{ + u16 hw_id; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (hw->fdir_mode != fdir_mode_tcam) { + hw_id = rnp_tuple5_pritologic_n10(pri_id); + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(hw_id), 0); + } else { + hw_id = rnp_tuple5_pritologic_tcam_n10(pri_id); + dbg("disable tcam tuple5 %d\n", hw_id); + /* earase tcam */ + eth_wr32(eth, RNP10_TCAM_MODE, 2); + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_clr_all_tuple5_n10(struct rnp_eth_info *eth) +{ + int i; + + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (hw->fdir_mode != fdir_mode_tcam) { + for (i = 0; i < RNP10_MAX_TUPLE5_FILTERS; i++) + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(i), 0); + eth_wr32(eth, RNP10_ETH_TCAM_EN, 0); + } else { + /* todo earase tcm */ + eth_wr32(eth, RNP10_ETH_TCAM_EN, 1); + eth_wr32(eth, RNP10_TOP_ETH_TCAM_CONFIG_ENABLE, 1); + eth_wr32(eth, RNP10_TCAM_MODE, 2); + /* dont't open tcam cache */ + eth_wr32(eth, RNP10_TCAM_CACHE_ENABLE, 0); + + for (i = 0; i < RNP10_MAX_TCAM_FILTERS; i++) { + eth_wr32(eth, RNP10_TCAM_SDPQF(i), 0); + eth_wr32(eth, RNP10_TCAM_DAQF(i), 0); + eth_wr32(eth, RNP10_TCAM_SAQF(i), 0); + eth_wr32(eth, RNP10_TCAM_APQF(i), 0); + + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(i), 0); + } + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_set_tcp_sync_n10(struct rnp_eth_info *eth, + int queue, bool flag, + bool prio) +{ + if (flag) + eth_wr32(eth, RNP10_ETH_SYNQF, (0x1 << 30) | (queue << 20)); + else + eth_wr32(eth, RNP10_ETH_SYNQF, 0); +} + +static void rnp_eth_set_min_max_packets_n10(struct rnp_eth_info *eth, int min, + int max) +{ + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_MIN_LEN, min); + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_MAX_LEN, max); +} + +static void rnp_eth_set_vlan_strip_n10(struct rnp_eth_info *eth, u16 queue, + bool enable) +{ + u32 reg = RNP10_ETH_VLAN_VME_REG(queue / 32); + u32 offset = queue % 32; + u32 data = eth_rd32(eth, reg); + + if (enable == true) + data |= (1 << offset); + else + data &= ~(1 << offset); + + eth_wr32(eth, reg, data); +} + +static void rnp_eth_set_vxlan_port_n10(struct 
rnp_eth_info *eth, u32 port) +{ + eth_wr32(eth, RNP10_ETH_VXLAN_PORT, port); +} + +static void rnp_eth_set_vxlan_mode_n10(struct rnp_eth_info *eth, bool inner) +{ + if (inner) + eth_wr32(eth, RNP10_ETH_WRAP_FIELD_TYPE, 1); + else + eth_wr32(eth, RNP10_ETH_WRAP_FIELD_TYPE, 0); +} + +static void rnp_eth_set_rx_hash_n10(struct rnp_eth_info *eth, bool status, + bool sriov_flag) +{ + u32 iov_en = (sriov_flag) ? RNP10_IOV_ENABLED : 0; + + if (status) { + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_ENABLE_RSS_ONLY | iov_en); + } else { + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_DISABLE_RSS | iov_en); + } +} + +static s32 rnp_eth_set_fc_mode_n10(struct rnp_eth_info *eth) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + s32 ret_val = 0; + int i; + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & rnp_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + hw_dbg(hw, + "Invalid water mark configuration\n"); + ret_val = RNP_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & rnp_fc_tx_pause)) { + if (hw->fc.high_water[i]) { + eth_wr32(eth, RNP10_ETH_HIGH_WATER(i), + hw->fc.high_water[i]); + } + if (hw->fc.low_water[i]) { + eth_wr32(eth, RNP10_ETH_LOW_WATER(i), + hw->fc.low_water[i]); + } + } + } +out: + return ret_val; +} + +static void rnp_eth_set_vf_vlan_mode_n10(struct rnp_eth_info *eth, u16 vlan, + int vf, bool enable) +{ + struct rnp_hw *hw = (struct rnp_hw *)&eth->back; + u32 value = vlan; + if (enable) + value |= BIT(31); + + eth_wr32(eth, RNP10_VLVF(vf), value); + + /* todo, should consider multiple queue */ + if (hw->hw_type == rnp_hw_n400) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + eth_wr32(eth, RNP10_VLVF_TABLE(vf), (vf + 1) * 2); + else + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf * 2); + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf + 1); + else + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf); + } +} + +static int __get_ncsi_shm_info(struct rnp_hw *hw, + struct ncsi_shm_info *ncsi_shm) +{ + int i; + int *ptr = (int *)ncsi_shm; + int rbytes = round_up(sizeof(*ncsi_shm), 4); + + memset(ncsi_shm, 0, sizeof(*ncsi_shm)); + for (i = 0; i < (rbytes / 4); i++) + ptr[i] = rd32(hw, hw->ncsi_vf_cpu_shm_pf_base + 4 * i); + + return (ncsi_shm->valid & RNP_NCSI_SHM_VALID_MASK) == + RNP_NCSI_SHM_VALID; +} + +static void rnp_ncsi_set_uc_addr_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u8 mac[ETH_ALEN]; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_MC_VALID) { + mac[0] = ncsi_shm.uc.uc_addr_lo & 0xff; + mac[1] = (ncsi_shm.uc.uc_addr_lo >> 8) & 0xff; + mac[2] = (ncsi_shm.uc.uc_addr_lo >> 16) & 0xff; + mac[3] = (ncsi_shm.uc.uc_addr_lo >> 24) & 0xff; + mac[4] = ncsi_shm.uc.uc_addr_hi & 0xff; + mac[5] = (ncsi_shm.uc.uc_addr_hi >> 8) & 0xff; + if (is_valid_ether_addr(mac)) + eth->ops.set_rar(eth, hw->num_rar_entries, mac, + true); + } + } +} + +static void rnp_ncsi_set_mc_mta_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u8 i; + u8 mac[ETH_ALEN]; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_MC_VALID) { + for (i = 0; i < RNP_NCSI_MC_COUNT; i++) { + mac[0] = ncsi_shm.mc[i].mc_addr_lo & 0xff; + mac[1] = 
(ncsi_shm.mc[i].mc_addr_lo >> 8) & + 0xff; + mac[2] = (ncsi_shm.mc[i].mc_addr_lo >> 16) & + 0xff; + mac[3] = (ncsi_shm.mc[i].mc_addr_lo >> 24) & + 0xff; + mac[4] = ncsi_shm.mc[i].mc_addr_hi & 0xff; + mac[5] = (ncsi_shm.mc[i].mc_addr_hi >> 8) & + 0xff; + if (is_multicast_ether_addr(mac) && + !is_zero_ether_addr(mac)) { + rnp10_set_mta(hw, mac); + } + } + } + } +} + +static void rnp_ncsi_set_vfta_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_VLAN_VALID) + hw->ops.set_vlan_filter(hw, ncsi_shm.ncsi_vlan, true, + false); + } +} + +static struct rnp_eth_operations eth_ops_n10 = { + .set_rar = &rnp_eth_set_rar_n10, + .clear_rar = &rnp_eth_clear_rar_n10, + .set_vmdq = &rnp_eth_set_vmdq_n10, + .clear_vmdq = &rnp_eth_clear_vmdq_n10, + .update_mc_addr_list = &rnp_eth_update_mc_addr_list_n10, + .clr_mc_addr = &rnp_eth_clr_mc_addr_n10, + /* store rss info to eth */ + .set_rss_key = &rnp_eth_update_rss_key_n10, + .set_rss_table = &rnp_eth_update_rss_table_n10, + .set_vfta = &rnp_eth_set_vfta_n10, + .clr_vfta = &rnp_eth_clr_vfta_n10, + .set_vlan_filter = &rnp_eth_set_vlan_filter_n10, + /* ncsi */ + .ncsi_set_vfta = &rnp_ncsi_set_vfta_n10, + .ncsi_set_uc_addr = &rnp_ncsi_set_uc_addr_n10, + .ncsi_set_mc_mta = &rnp_ncsi_set_mc_mta_n10, + .set_layer2_remapping = &rnp_eth_set_layer2_n10, + .clr_layer2_remapping = &rnp_eth_clr_layer2_n10, + .clr_all_layer2_remapping = &rnp_eth_clr_all_layer2_n10, + .set_tuple5_remapping = &rnp_eth_set_tuple5_n10, + .clr_tuple5_remapping = &rnp_eth_clr_tuple5_n10, + .clr_all_tuple5_remapping = &rnp_eth_clr_all_tuple5_n10, + .set_tcp_sync_remapping = &rnp_eth_set_tcp_sync_n10, + .set_min_max_packet = &rnp_eth_set_min_max_packets_n10, + .set_vlan_strip = &rnp_eth_set_vlan_strip_n10, + .set_vxlan_port = &rnp_eth_set_vxlan_port_n10, + .set_vxlan_mode = &rnp_eth_set_vxlan_mode_n10, + .set_rx_hash = &rnp_eth_set_rx_hash_n10, + .set_fc_mode = &rnp_eth_set_fc_mode_n10, + .set_vf_vlan_mode = &rnp_eth_set_vf_vlan_mode_n10, +}; + +/** + * rnp_init_hw_n10 - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +static s32 rnp_init_hw_ops_n10(struct rnp_hw *hw) +{ + s32 status = 0; + + /* Reset the hardware */ + status = hw->ops.reset_hw(hw); + + /* Start the HW */ + if (status == 0) + status = hw->ops.start_hw(hw); + + return status; +} + +static s32 rnp_get_permtion_mac_addr_n10(struct rnp_hw *hw, u8 *mac_addr) +{ + if (rnp_fw_get_macaddr(hw, hw->pfvfnum, mac_addr, hw->nr_lane)) { + dbg("generate ramdom macaddress...\n"); + eth_random_addr(mac_addr); + } + + hw->mac.mac_flags |= RNP_FLAGS_INIT_MAC_ADDRESS; + dbg("%s mac:%pM\n", __func__, mac_addr); + + return 0; +} + +static s32 rnp_reset_hw_ops_n10(struct rnp_hw *hw) +{ + int i; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + dma_wr32(dma, RNP_DMA_AXI_EN, 0); + + + /* if not ncsi or hw not support 'control nic_reset', driver control it */ + if (hw->ncsi_en && hw->fw_version >= 0x00060000) { + /* fw will do 
nic-reset. to reduct ncsi bmc ping pkg lose */ + } else { +#define N10_NIC_RESET 0 + wr32(hw, RNP10_TOP_NIC_REST_N, N10_NIC_RESET); + /* + * we need this + */ + wmb(); + wr32(hw, RNP10_TOP_NIC_REST_N, ~N10_NIC_RESET); + } + + rnp_mbx_fw_reset_phy(hw); + /* should set all tx-start to 1 */ + for (i = 0; i < RNP_N10_MAX_TX_QUEUES; i++) + dma_ring_wr32(dma, RING_OFFSET(i) + RNP_DMA_TX_START, 1); + + wr32(hw, RNP10_TOP_ETH_BUG_40G_PATCH, 1); + /* set 2046 --> 0x18070 */ + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, DROP_ALL_THRESH); + + /* tcam not reset */ + eth->ops.clr_all_tuple5_remapping(eth); + + /* Store the permanent mac address */ + if (!(hw->mac.mac_flags & RNP_FLAGS_INIT_MAC_ADDRESS)) { + rnp_get_permtion_mac_addr_n10(hw, hw->mac.perm_addr); + memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); + } + + hw->ops.init_rx_addrs(hw); + + /* open vxlan default */ +#define VXLAN_HW_ENABLE (1) + eth_wr32(eth, RNP10_ETH_TUNNEL_MOD, VXLAN_HW_ENABLE); + for (i = 0; i < dma->max_tx_queues; i++) + rnp_wr_reg(hw->ring_msix_base + RING_VECTOR(i), 0); + + if (hw->phy_type == PHY_TYPE_SGMII) { + u16 pause_bits = 0; + u32 value; + + if (hw->fc.requested_mode == PAUSE_AUTO) { + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } else { + if ((hw->fc.requested_mode & PAUSE_TX) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + pause_bits |= ASYM_PAUSE; + + } else if ((!(hw->fc.requested_mode & PAUSE_TX)) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + } else + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } + rnp_mbx_phy_read(hw, 4, &value); + value &= ~0xC00; + value |= pause_bits; + rnp_mbx_phy_write(hw, 4, value); + } + + return 0; +} + +static s32 rnp_start_hw_ops_n10(struct rnp_hw *hw) +{ + s32 ret_val = 0; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + eth_wr32(eth, RNP10_ETH_BYPASS, 0); + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_RING, 0); + + /* DMA common Registers */ + dma_wr32(dma, RNP_DMA_CONFIG, DMA_VEB_BYPASS); + + /* enable-dma-axi */ + dma_wr32(dma, RNP_DMA_AXI_EN, (RX_AXI_RW_EN | TX_AXI_RW_EN)); + + return ret_val; +} + +/* set n10 min/max packet according to new_mtu + * we support mtu + 14 + 4 * 3 as max packet len*/ +static void rnp_set_mtu_hw_ops_n10(struct rnp_hw *hw, int new_mtu) +{ + struct rnp_eth_info *eth = &hw->eth; + + int min = 60; + int max = new_mtu + ETH_HLEN + ETH_FCS_LEN * 3; + + hw->min_length_current = min; + hw->max_length_current = max; + + eth->ops.set_min_max_packet(eth, min, max); +} + +/* setup n10 vlan filter status */ +static void rnp_set_vlan_filter_en_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_eth_info *eth = &hw->eth; + eth->ops.set_vlan_filter(eth, status); +} + +/* set vlan to n10 vlan filter table & veb */ +/* pf setup call */ +static void rnp_set_vlan_filter_hw_ops_n10(struct rnp_hw *hw, u16 vid, + bool enable, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + u32 vfnum = hw->max_vfs - 1; + + /* setup n10 eth vlan table */ + eth->ops.set_vfta(eth, vid, enable); + + /* setup veb */ + /* only ctags setup veb if in sriov and not stags */ + if (vid && sriov_flag) { + if (enable) { + dma->ops.set_veb_vlan(dma, vid, vfnum); + } else { + dma->ops.set_veb_vlan(dma, 0, vfnum); + } + } +} + +static void rnp_set_vf_vlan_filter_hw_ops_n10(struct rnp_hw *hw, u16 vid, + int vf, bool enable, + bool veb_only) +{ + struct rnp_dma_info *dma = &hw->dma; + + if (!veb_only) { + /* call set vfta without veb 
setup */ + hw->ops.set_vlan_filter(hw, vid, enable, false); + + } else { + if (enable) { + dma->ops.set_veb_vlan(dma, vid, vf); + } else { + dma->ops.set_veb_vlan(dma, 0, vf); + } + } +} + +static void rnp_clr_vlan_veb_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_dma_info *dma = &hw->dma; + u32 vfnum = hw->vfnum; + + dma->ops.set_veb_vlan(dma, 0, vfnum); +} + +/* setup n10 vlan strip status */ +static void rnp_set_vlan_strip_hw_ops_n10(struct rnp_hw *hw, u16 queue, + bool strip) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vlan_strip(eth, queue, strip); +} + +/* update new n10 mac */ +static void rnp_set_mac_hw_ops_n10(struct rnp_hw *hw, u8 *mac, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_mac_info *mac_info = &hw->mac; + /* use this queue index to setup veb */ + /* now pf use queu 0 /1 + * vfnum is the last vfnum */ + int queue = hw->veb_ring; + int vfnum = hw->vfnum; + + eth->ops.set_rar(eth, 0, mac, true); + if (sriov_flag) { + eth->ops.set_vmdq(eth, 0, queue / hw->sriov_ring_limit); + dma->ops.set_veb_mac(dma, mac, vfnum, queue); + } + + mac_info->ops.set_mac(mac_info, mac, 0); +} + +/** + * rnp_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int rnp_write_uc_addr_list_n10(struct rnp_hw *hw, + struct net_device *netdev, + bool sriov_flag) +{ + unsigned int rar_entries = hw->num_rar_entries - 1; + u32 vfnum = hw->vfnum; + struct rnp_eth_info *eth = &hw->eth; + int count = 0; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + vfnum = 0; + /* In SR-IOV mode significantly less RAR entries are available */ + if (sriov_flag) + rar_entries = hw->max_pf_macvlans - 1; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + hw_dbg(hw, "%s: rar_entries:%d, uc_count:%d\n", __func__, + hw->num_rar_entries, netdev_uc_count(netdev)); + + /* return error if we do not support writing to RAR table */ + if (!eth->ops.set_rar) + return -ENOMEM; + + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + /* that's ok */ + eth->ops.set_rar(eth, rar_entries, ha->addr, + RNP10_RAH_AV); + if (sriov_flag) + eth->ops.set_vmdq(eth, rar_entries, vfnum); + + rar_entries--; + + count++; + } + } + /* write the addresses in reverse order to avoid write combining */ + + hw_dbg(hw, "%s: Clearing RAR[1 - %d]\n", __func__, rar_entries); + for (; rar_entries > 0; rar_entries--) + eth->ops.clear_rar(eth, rar_entries); + + if (hw->ncsi_en) + eth->ops.ncsi_set_uc_addr(eth); + + return count; +} + +void check_vf_promisc(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int i; + + hw->vf_promisc_mode = 0; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].promisc_mode) { + hw->vf_promisc_mode = 1; + hw->vf_promisc_num = i; + break; + } + } +} + +static void rnp_set_rx_mode_hw_ops_n10(struct rnp_hw *hw, + struct net_device *netdev, + bool sriov_flag) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 fctrl; + netdev_features_t features = netdev->features; + int count; + struct rnp_eth_info *eth = &hw->eth; + + hw_dbg(hw, "%s\n", __func__); + + /* broadcast always bypass */ + fctrl = 
eth_rd32(eth, RNP10_ETH_DMAC_FCTRL) | RNP10_FCTRL_BPE; + /* clear the bits we are changing the status of */ + fctrl &= ~(RNP10_FCTRL_UPE | RNP10_FCTRL_MPE); + /* promisc mode */ +#ifdef VF_PROMISC_SUPPORT + check_vf_promisc(adapter); + if ((netdev->flags & IFF_PROMISC) || (!hw->vf_promisc_mode)) { +#else + if (netdev->flags & IFF_PROMISC) { +#endif + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (RNP10_FCTRL_UPE | RNP10_FCTRL_MPE); + /* disable hardware filter vlans in promisc mode */ + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } else { + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= RNP10_FCTRL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + /* we always update vf multicast info */ + count = eth->ops.update_mc_addr_list(eth, netdev, true); + if (count < 0) { + fctrl |= RNP10_FCTRL_MPE; + } else if (count) { + + } + } + hw->addr_ctrl.user_set_promisc = false; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (rnp_write_uc_addr_list_n10(hw, netdev, sriov_flag) < 0) { + fctrl |= RNP10_FCTRL_UPE; + } + + eth_wr32(eth, RNP10_ETH_DMAC_FCTRL, fctrl); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + eth->ops.set_vlan_filter(eth, true); + else + eth->ops.set_vlan_filter(eth, false); + + if ((hw->addr_ctrl.user_set_promisc == true) || + (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR)) { + /* set pkt_len_err and hdr_len_err default to 1 */ + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + } else { + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, INNER_L4_BIT); + } + + hw->ops.set_mtu(hw, netdev->mtu); +} + +/* setup an rar with vfnum */ +static void rnp_set_rar_with_vf_hw_ops_n10(struct rnp_hw *hw, u8 *mac, int idx, + u32 vfnum, bool enable) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rar(eth, idx, mac, enable); + eth->ops.set_vmdq(eth, idx, vfnum); +} + +static void rnp_clr_rar_hw_ops_n10(struct rnp_hw *hw, int idx) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clear_rar(eth, idx); +} + +static void rnp_clr_rar_all_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + unsigned int rar_entries = hw->num_rar_entries - 1; + int i; + + for (i = 0; i < rar_entries; i++) + eth->ops.clear_rar(eth, rar_entries); +} + +static void rnp_set_fcs_mode_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_mac_info *mac = &hw->mac; + + mac->ops.set_mac_fcs(mac, status); +} + +static void rnp_set_vxlan_port_hw_ops_n10(struct rnp_hw *hw, u32 port) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vxlan_port(eth, port); +} + +static void rnp_set_vxlan_mode_hw_ops_n10(struct rnp_hw *hw, bool inner) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vxlan_mode(eth, inner); +} + +static void rnp_set_mac_rx_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_eth_info *eth = &hw->eth; + + if (status) + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, + RECEIVE_ALL_THRESH); + else + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, + DROP_ALL_THRESH); + + mac->ops.set_mac_rx(mac, status); +} + +static void rnp_set_sriov_status_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + u32 v; + + if (status) { + 
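/* enable SR-IOV: stop bypassing the internal VEB switch and set the IOV enable bit */ +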
dma_wr32(dma, RNP_DMA_CONFIG, + dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + v = eth_rd32(eth, RNP10_MRQC_IOV_EN); + v |= RNP10_IOV_ENABLED; + eth_wr32(eth, RNP10_MRQC_IOV_EN, v); + } else { + v = eth_rd32(eth, RNP10_MRQC_IOV_EN); + v &= ~(RNP10_IOV_ENABLED); + eth_wr32(eth, RNP10_MRQC_IOV_EN, v); + dma->ops.clr_veb_all(dma); + } + +#if defined(NIC_VF_FXIED) || defined(VF_PROMISC_SUPPORT) + /* we setup default to pf */ + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(127), hw->default_vf_num); + /* if pf or vf in promisc mode set promisc to that vf*/ + if (hw->vf_promisc_mode) { + int fix_vf_num; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + fix_vf_num = (hw->vf_promisc_num + 1) * hw->sriov_ring_limit / 2; + else + fix_vf_num = (hw->vf_promisc_num) * hw->sriov_ring_limit / 2; + + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(127), fix_vf_num); + } +#endif +} + +static void rnp_set_sriov_vf_mc_hw_ops_n10(struct rnp_hw *hw, u16 mc_addr) +{ + struct rnp_eth_info *eth = &hw->eth; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + vector_reg = (mc_addr >> 5) & 0x7F; + vector_bit = mc_addr & 0x1F; + mta_reg = eth_rd32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(vector_reg)); + mta_reg |= (1 << vector_bit); + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(vector_reg), mta_reg); +} + +static void rnp_update_sriov_info_hw_ops_n10(struct rnp_hw *hw) +{ +} + +static void rnp_set_pause_mode_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_eth_info *eth = &hw->eth; + + mac->ops.set_fc_mode(mac); + eth->ops.set_fc_mode(eth); +} + +static void rnp_get_pause_mode_hw_ops_n10(struct rnp_hw *hw) +{ + u32 value_r5; + + if (hw->phy_type != PHY_TYPE_SGMII) { + if ((hw->fc.requested_mode & PAUSE_TX) && + (hw->fc.requested_mode & PAUSE_RX)) { + hw->fc.current_mode = rnp_fc_full; + } else if (hw->fc.requested_mode & PAUSE_TX) { + hw->fc.current_mode = rnp_fc_tx_pause; + } else if (hw->fc.requested_mode & PAUSE_RX) { + hw->fc.current_mode = rnp_fc_rx_pause; + } else { + hw->fc.current_mode = rnp_fc_none; + } + return; + } + + /* we get pause mode from phy reg */ + rnp_mbx_phy_read(hw, 5, &value_r5); + if (!hw->link) { + /* if link is not up ,fc is null */ + hw->fc.current_mode = rnp_fc_none; + } else { + if (hw->fc.requested_mode == PAUSE_AUTO) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_full; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else if ((hw->fc.requested_mode & PAUSE_TX) && + (hw->fc.requested_mode & PAUSE_RX)) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_full; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else if (hw->fc.requested_mode & PAUSE_TX) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_tx_pause; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_none; + else + hw->fc.current_mode = rnp_fc_none; + + } else if (hw->fc.requested_mode & PAUSE_RX) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else { + hw->fc.current_mode = rnp_fc_none; + } + } +} + +static void rnp_update_hw_info_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + u32 data; + /* 1 enable eth filter */ + 
eth_wr32(eth, RNP10_HOST_FILTER_EN, 1); + /* 2 open redir en */ + eth_wr32(eth, RNP10_REDIR_EN, 1); + + /* 3 open sctp checksum and other checksum */ + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + eth_wr32(eth, RNP10_ETH_SCTP_CHECKSUM_EN, 1); + + /* 4 mark muticaset as broadcast */ + dma_wr32(dma, RNP_VEB_MAC_MASK_LO, 0xffffffff); + dma_wr32(dma, RNP_VEB_MAC_MASK_HI, 0xfeff); + /* 5 setup dma split */ + + data = dma_rd32(dma, RNP_DMA_CONFIG); + data &= (0x00000ffff); +#ifdef FT_PADDING +#define PADDING_BIT 8 + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + SET_BIT(PADDING_BIT, data); +#endif + /* in this mode we fixed dm split */ + /* if PAGE_SIZE */ +#define RX_MAX_DWORD (96) + data |= (((hw->dma_split_size) >> 4) << 16); + dma_wr32(dma, RNP_DMA_CONFIG, data); + /* 6 open vxlan inner match? */ + /* 7 setuptcp sync remmapping */ + /* n10 not support prio */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) { + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + true, false); + } else { + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + false, false); + } +} + +static void rnp_update_hw_rx_drop_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + struct rnp_ring *ring; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + if (adapter->rx_drop_status & BIT(i)) { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, + adapter->drop_time); + } else { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0); + } + } +} + +static void rnp_set_rx_hash_hw_ops_n10(struct rnp_hw *hw, bool status, + bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rx_hash(eth, status, sriov_flag); +} + +/* setup mac to rar 0 + * clean vmdq + * clean mc addr */ +static s32 rnp_init_rx_addrs_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + u32 i; + u32 rar_entries = eth->num_rar_entries; + u32 v; + + hw_dbg(hw, "init_rx_addrs:rar_entries:%d, mac.addr:%pM\n", rar_entries, + hw->mac.addr); + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (!is_valid_ether_addr(hw->mac.addr)) { + /* Get the MAC address from the RAR0 for later reference */ + memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); + hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); + } else { + /* Setup the receive address. */ + hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); + hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); + eth->ops.set_rar(eth, 0, hw->mac.addr, true); + + /* clear VMDq pool/queue selection for RAR 0 */ + eth->ops.clear_vmdq(eth, 0, RNP_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. 
*/ + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + eth->ops.clear_rar(eth, i); + } + if (hw->ncsi_en) + eth->ops.ncsi_set_uc_addr(eth); + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + v = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + v &= (~0x3); + v |= eth->mc_filter_type; + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, v); + + hw_dbg(hw, " Clearing MTA\n"); + eth->ops.clr_mc_addr(eth); + if (hw->ncsi_en) { + eth->ops.ncsi_set_mc_mta(eth); + eth->ops.ncsi_set_vfta(eth); + } + + return 0; +} + +static void rnp_clr_vfta_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_vfta(eth); +} + +static void rnp_set_txvlan_mode_hw_ops_n10(struct rnp_hw *hw, bool cvlan) +{ + struct rnp_mac_info *mac = &hw->mac; + if (cvlan) { + mac_wr32(mac, RNP10_MAC_TX_VLAN_TAG, 0x4000000); + mac_wr32(mac, RNP10_MAC_TX_VLAN_MODE, 0x100000); + mac_wr32(mac, RNP10_MAC_INNER_VLAN_INCL, 0x100000); + } else { + mac_wr32(mac, RNP10_MAC_TX_VLAN_TAG, 0xc600000); + mac_wr32(mac, RNP10_MAC_TX_VLAN_MODE, 0x180000); + mac_wr32(mac, RNP10_MAC_INNER_VLAN_INCL, 0x100000); + } +} + +static void rnp_set_rss_key_hw_ops_n10(struct rnp_hw *hw, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int key_len = RNP_RSS_KEY_SIZE; + + memcpy(hw->rss_key, adapter->rss_key, key_len); + + eth->ops.set_rss_key(eth, sriov_flag); +} + +static void rnp_set_rss_table_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rss_table(eth); +} + +static void rnp_set_mbx_link_event_hw_ops_n10(struct rnp_hw *hw, int enable) +{ + rnp_mbx_link_event_enable(hw, enable); +} + +static void rnp_set_mbx_ifup_hw_ops_n10(struct rnp_hw *hw, int enable) +{ + rnp_mbx_ifup_down(hw, enable); + + if (hw->phy_type == PHY_TYPE_10G_TP) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + /* first call reset an */ + if (enable) { + hw->ops.setup_link(hw, hw->phy.autoneg_advertised, + hw->autoneg, adapter->speed, + hw->duplex); + } + } +} + +/** + * rnp_check_mac_link_n10 - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 rnp_check_mac_link_hw_ops_n10(struct rnp_hw *hw, rnp_link_speed *speed, + bool *link_up, bool *duplex, + bool link_up_wait_to_complete) +{ + + if (hw->speed == 10) { + *speed = RNP_LINK_SPEED_10_FULL; + } else if (hw->speed == 100) { + *speed = RNP_LINK_SPEED_100_FULL; + } else if (hw->speed == 1000) { + *speed = RNP_LINK_SPEED_1GB_FULL; + } else if (hw->speed == 10000) { + *speed = RNP_LINK_SPEED_10GB_FULL; + } else if (hw->speed == 25000) { + *speed = RNP_LINK_SPEED_25GB_FULL; + } else if (hw->speed == 40000) { + *speed = RNP_LINK_SPEED_40GB_FULL; + } else { + *speed = RNP_LINK_SPEED_UNKNOWN; + } + + *link_up = hw->link; + *duplex = 1; + + return 0; +} + +static s32 rnp_setup_mac_link_hw_ops_n10(struct rnp_hw *hw, u32 adv, u32 autoneg, + u32 speed, u32 duplex) +{ + struct rnp_adapter *adpt = hw->back; + u32 value = 0; + u32 value_r4 = 0; + u32 value_r9 = 0; + + rnp_logd(LOG_PHY, + "%s setup phy: phy_addr=%d speed=%d duplex=%d autoneg=%d " + "is_backplane=%d is_sgmii=%d\n", + __func__, adpt->phy_addr, speed, duplex, autoneg, + hw->is_backplane, hw->is_sgmii); + + if (hw->is_backplane) { + /* 
Backplane type, support AN, unsupport set speed */ + return rnp_set_lane_fun(hw, LANE_FUN_AN, autoneg, 0, 0, 0); + } + + /* TODO: Not support fiber */ + if ((!hw->is_sgmii) && (hw->phy_type != PHY_TYPE_10G_TP)) { + if (hw->force_10g_1g_speed_ablity) { + return rnp_mbx_force_speed(hw, speed); + } else { + return 0; + } + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + rnp_mbx_phy_read(hw, PHY_826x_MDIX, &value); + + value &= ~(BIT(8) | BIT(9)); + /* Options: 0: Auto (default) 1: MDI mode 2: MDI-X mode */ + switch (hw->phy.mdix) { + case 1: + value |= BIT(8)|BIT(9); + break; + case 2: + value |= BIT(9); + break; + case 0: + default: + break; + } + rnp_mbx_phy_write(hw, PHY_826x_MDIX, value); + + if (!autoneg) { + rnp_mbx_phy_read(hw, PHY_826x_SPEED, &value); + value &= (~(BIT(13) | BIT(6) | BIT(5) | BIT(4) | + BIT(3) | BIT(2))); + + switch (speed) { + case RNP_LINK_SPEED_10GB_FULL: + value |= BIT(13) | BIT(6); + break; + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_1GB_HALF: + value |= BIT(6); + ; + break; + case RNP_LINK_SPEED_100_FULL: + case RNP_LINK_SPEED_100_HALF: + value |= BIT(13); + break; + case RNP_LINK_SPEED_10_FULL: + case RNP_LINK_SPEED_10_HALF: + value = 0; + break; + default: + hw_dbg(hw, "unknown speed = 0x%x.\n", speed); + break; + } + rnp_mbx_phy_write(hw, PHY_826x_SPEED, value); + rnp_mbx_phy_read(hw, PHY_826x_DUPLEX, &value); + value &= (~BIT(8)); + if (duplex) + value |= BIT(8); + rnp_mbx_phy_write(hw, PHY_826x_DUPLEX, value); + rnp_mbx_phy_read(hw, PHY_826x_AN, &value); + value &= (~BIT(12)); + rnp_mbx_phy_write(hw, PHY_826x_AN, value); + } else { + rnp_mbx_phy_read(hw, PHY_826x_ADV, &value); + + value &= (~(BIT(5) | BIT(6) | BIT(7) | BIT(8) | + BIT(10) | BIT(11))); + + if (adv & RNP_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_100_FULL; + value |= BIT(8); + } + if (adv & RNP_LINK_SPEED_100_HALF) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_100_FULL; + value |= BIT(7); + } + + value |= BIT(10) | BIT(11); + /* BIT10 fc BIT11 asyfc */ + rnp_mbx_phy_write(hw, PHY_826x_ADV, value); + + rnp_mbx_phy_read(hw, PHY_826x_GBASE_ADV, &value); + value &= (~(BIT(7) | BIT(8) | BIT(12))); + + /* bit 7 2.5G bit 8 5G */ + if (adv & RNP_LINK_SPEED_10GB_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_10GB_FULL; + value |= BIT(12); + } + rnp_mbx_phy_write(hw, PHY_826x_GBASE_ADV, value); + rnp_mbx_phy_read(hw, PHY_826x_GBASE_ADV_2, &value); + value &= 0x00ff; + if (adv & RNP_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_1GB_FULL; + value |= BIT(9); + } + if (adv & RNP_LINK_SPEED_1GB_HALF) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_1GB_HALF; + value |= BIT(8); + } + rnp_mbx_phy_write(hw, PHY_826x_GBASE_ADV_2, value); + rnp_mbx_phy_read(hw, PHY_826x_AN, &value); + value |= BIT(12) | BIT(9); + rnp_mbx_phy_write(hw, PHY_826x_AN, value); + } + + return 0; + } + + /* Set MDI/MDIX mode */ + rnp_mbx_phy_read(hw, RNP_YT8531_PHY_SPEC_CTRL, &value); + value &= ~RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK; + /* Options: 0: Auto (default) 1: MDI mode 2: MDI-X mode */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + value |= RNP_YT8531_PHY_SPEC_CTRL_FORCE_MDIX; + break; + case 0: + default: + value |= RNP_YT8531_PHY_SPEC_CTRL_AUTO_MDI_MDIX; + break; + } + rnp_mbx_phy_write(hw, RNP_YT8531_PHY_SPEC_CTRL, value); + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. 
+ */ + hw->phy.autoneg_advertised = speed; + + if (!autoneg) { + switch (speed) { + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_1GB_HALF: + value = RNP_MDI_PHY_SPEED_SELECT1; + speed = RNP_LINK_SPEED_1GB_FULL; + goto out; + break; + case RNP_LINK_SPEED_100_FULL: + case RNP_LINK_SPEED_100_HALF: + value = RNP_MDI_PHY_SPEED_SELECT0; + break; + case RNP_LINK_SPEED_10_FULL: + case RNP_LINK_SPEED_10_HALF: + value = 0; + break; + default: + value = RNP_MDI_PHY_SPEED_SELECT0 | + RNP_MDI_PHY_SPEED_SELECT1; + hw_dbg(hw, "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + if (duplex) + value |= RNP_MDI_PHY_DUPLEX; + value |= 0x8000; + rnp_mbx_phy_write(hw, 0x0, value); + goto skip_an; + } + + /* start_an */ + value_r4 = 0x1E0; + value_r9 = 0x300; + /* disable 100/10base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x4, &value); + value &= ~value_r4; + rnp_mbx_phy_write(hw, 0x4, value); + + /* disable 1000base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x9, &value); + value &= ~value_r9; + rnp_mbx_phy_write(hw, 0x9, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (adv & RNP_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (adv & RNP_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (adv & RNP_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + if (adv & RNP_LINK_SPEED_1GB_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_1GB_HALF; + value_r9 |= 0x100; + } + if (adv & RNP_LINK_SPEED_100_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_100_HALF; + value_r4 |= 0x80; + } + if (adv & RNP_LINK_SPEED_10_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_10_HALF; + value_r4 |= 0x20; + } + + /* enable 1000base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x9, &value); + value |= value_r9; + rnp_mbx_phy_write(hw, 0x9, value); + + /* enable 100/10base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x4, &value); + value |= value_r4; + rnp_mbx_phy_write(hw, 0x4, value); + + /* software reset to make the above configuration take effect*/ + rnp_mbx_phy_read(hw, 0x0, &value); + value |= 0x9200; + rnp_mbx_phy_write(hw, 0x0, value); +skip_an: + /* power on in UTP mode */ + rnp_mbx_phy_read(hw, 0x0, &value); + value &= ~0x800; + rnp_mbx_phy_write(hw, 0x0, value); + +out: + return 0; +} + +static void rnp_clean_link_hw_ops_n10(struct rnp_hw *hw) +{ + hw->link = 0; +} + +static void rnp_set_layer2_hw_ops_n10(struct rnp_hw *hw, + union rnp_atr_input *input, u16 pri_id, + u8 queue, bool prio_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_layer2_remapping(eth, input, pri_id, queue, prio_flag); +} + +static void rnp_clr_layer2_hw_ops_n10(struct rnp_hw *hw, u16 pri_id) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_layer2_remapping(eth, pri_id); +} + +static void rnp_clr_all_layer2_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_all_layer2_remapping(eth); +} + +static void rnp_clr_all_tuple5_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_all_tuple5_remapping(eth); +} + +static void rnp_set_tcp_sync_hw_ops_n10(struct rnp_hw *hw, int queue, bool flag, + bool prio) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_tcp_sync_remapping(eth, queue, flag, prio); +} + +static void rnp_update_msix_count_hw_ops_n10(struct rnp_hw *hw, int msix_count) +{ + int 
msix_count_new; + struct rnp_mac_info *mac = &hw->mac; + + msix_count_new = clamp_t(int, msix_count, 2, RNP_N10_MSIX_VECTORS); + + mac->max_msix_vectors = msix_count_new; + hw->max_msix_vectors = msix_count_new; +} + +static void rnp_set_tuple5_hw_ops_n10(struct rnp_hw *hw, + union rnp_atr_input *input, u16 pri_id, + u8 queue, bool prio_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_tuple5_remapping(eth, input, pri_id, queue, prio_flag); +} + +static void rnp_clr_tuple5_hw_ops_n10(struct rnp_hw *hw, u16 pri_id) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_tuple5_remapping(eth, pri_id); +} + +static void rnp_update_hw_status_hw_ops_n10(struct rnp_hw *hw, + struct rnp_hw_stats *hw_stats, + struct net_device_stats *net_stats) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mac_info *mac = &hw->mac; + int port; + + hw_stats->dma_to_dma = + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_0) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_1) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_2) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_3); + + hw_stats->dma_to_switch = dma_rd32(dma, RNP_DMA_STATS_DMA_TO_SWITCH); + hw_stats->mac_to_dma = dma_rd32(dma, RNP_DMA_STATS_MAC_TO_DMA); + + net_stats->rx_crc_errors = 0; + net_stats->rx_errors = 0; + + for (port = 0; port < 4; port++) { + /* we use Hardware stats? */ + net_stats->rx_crc_errors += + eth_rd32(eth, RNP10_RXTRANS_CRC_ERR_PKTS(port)); + net_stats->rx_errors += + eth_rd32(eth, RNP10_RXTRANS_WDT_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_CODE_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_CRC_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_SLEN_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_GLEN_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_IPH_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_LEN_ERR_PKTS(port)); + } + hw_stats->invalid_dropped_packets = + eth_rd32(eth, RNP10_ETH_INVALID_DROP_PKTS); + hw_stats->rx_capabity_lost = + eth_rd32(eth, RNP10_RXTRANS_DROP(0)) + + eth_rd32(eth, RNP10_RXTRANS_CUT_ERR_PKTS(0)); + hw_stats->filter_dropped_packets = + eth_rd32(eth, RNP10_ETH_FILTER_DROP_PKTS); + hw_stats->host_l2_match_drop = + eth_rd32(eth, RNP10_ETH_HOST_L2_DROP_PKTS); + hw_stats->redir_input_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_INPUT_MATCH_DROP_PKTS); + hw_stats->redir_etype_match_drop = + eth_rd32(eth, RNP10_ETH_ETYPE_DROP_PKTS); + hw_stats->redir_tcp_syn_match_drop = + eth_rd32(eth, RNP10_ETH_TCP_SYN_DROP_PKTS); + hw_stats->redir_tuple5_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_TUPLE5_DROP_PKTS); + hw_stats->redir_tcam_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_TCAM_DROP_PKTS); + hw_stats->bmc_dropped_packets = + eth_rd32(eth, RNP10_ETH_DECAP_BMC_DROP_NUM); + hw_stats->switch_dropped_packets = + eth_rd32(eth, RNP10_ETH_DECAP_SWITCH_DROP_NUM); + hw_stats->mac_rx_broadcast = + mac_rd32(mac, RNP10_MAC_STATS_BROADCAST_LOW); + hw_stats->mac_rx_broadcast += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_BROADCAST_HIGH) << 32); + hw_stats->mac_rx_multicast = + mac_rd32(mac, RNP10_MAC_STATS_MULTICAST_LOW); + hw_stats->mac_rx_multicast += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_MULTICAST_HIGH) << 32); + hw_stats->mac_rx_pause_count = + mac_rd32(mac, RNP10_MAC_STATS_RX_PAUSE_COUNT_LOW); + hw_stats->mac_rx_pause_count += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_RX_PAUSE_COUNT_HIGH) << 32); + hw_stats->mac_tx_pause_count = + mac_rd32(mac, RNP10_MAC_STATS_TX_PAUSE_COUNT_LOW); + hw_stats->mac_tx_pause_count += + ((u64)mac_rd32(mac, 
RNP10_MAC_STATS_TX_PAUSE_COUNT_HIGH) << 32); +} + +enum n10_priv_bits { + n10_mac_loopback = 0, + n10_switch_loopback = 1, + n10_veb_enable = 4, + n10_padding_enable = 8, + n10_padding_debug_enable = 0x10, +}; + +static const char rnp10_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define RNP10_MAC_LOOPBACK BIT(0) +#define RNP10_SWITCH_LOOPBACK BIT(1) +#define RNP10_VEB_ENABLE BIT(2) +#define RNP10_FT_PADDING BIT(3) +#define RNP10_PADDING_DEBUG BIT(4) +#define RNP10_PTP_FEATURE BIT(5) +#define RNP10_SIMULATE_DOWN BIT(6) +#define RNP10_VXLAN_INNER_MATCH BIT(7) +#define RNP10_STAG_ENABLE BIT(8) +#define RNP10_REC_HDR_LEN_ERR BIT(9) +#define RNP10_SRIOV_VLAN_MODE BIT(10) +#define RNP10_REMAP_MODE BIT(11) +#define RNP10_LLDP_EN_STAT BIT(12) +#define RNP10_FORCE_CLOSE BIT(13) + "mac_loopback", + "switch_loopback", + "veb_enable", + "pcie_patch", + "padding_debug", + "ptp_performance_debug", + "simulate_link_down", + "vxlan_inner_match", + "stag_enable", + "mask_len_err", + "sriov_vlan_mode", + "remap_mode1", + "lldp_en", + "link_down_on_close", +}; + +#define RNP10_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnp10_priv_flags_strings) + +const struct rnp_stats rnp10_gstrings_net_stats[] = { + RNP_NETDEV_STAT(rx_packets), + RNP_NETDEV_STAT(tx_packets), + RNP_NETDEV_STAT(rx_bytes), + RNP_NETDEV_STAT(tx_bytes), + RNP_NETDEV_STAT(rx_errors), + RNP_NETDEV_STAT(tx_errors), + RNP_NETDEV_STAT(rx_dropped), + RNP_NETDEV_STAT(tx_dropped), + RNP_NETDEV_STAT(multicast), + RNP_NETDEV_STAT(collisions), + RNP_NETDEV_STAT(rx_over_errors), + RNP_NETDEV_STAT(rx_crc_errors), + RNP_NETDEV_STAT(rx_frame_errors), + RNP_NETDEV_STAT(rx_fifo_errors), + RNP_NETDEV_STAT(rx_missed_errors), + RNP_NETDEV_STAT(tx_aborted_errors), + RNP_NETDEV_STAT(tx_carrier_errors), + RNP_NETDEV_STAT(tx_fifo_errors), + RNP_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define RNP10_GLOBAL_STATS_LEN ARRAY_SIZE(rnp10_gstrings_net_stats) + +static struct rnp_stats rnp10_hwstrings_stats[] = { + RNP_HW_STAT("dma_to_mac", hw_stats.dma_to_dma), + RNP_HW_STAT("dma_to_switch", hw_stats.dma_to_switch), + RNP_HW_STAT("eth_to_dma", hw_stats.mac_to_dma), + RNP_HW_STAT("vlan_add_cnt", hw_stats.vlan_add_cnt), + RNP_HW_STAT("vlan_strip_cnt", hw_stats.vlan_strip_cnt), + RNP_HW_STAT("invalid_dropped_packets", + hw_stats.invalid_dropped_packets), + RNP_HW_STAT("rx_capabity_drop", hw_stats.rx_capabity_lost), + RNP_HW_STAT("filter_dropped_packets", hw_stats.filter_dropped_packets), + RNP_HW_STAT("host_l2_match_drop", hw_stats.host_l2_match_drop), + RNP_HW_STAT("redir_input_match_drop", hw_stats.redir_input_match_drop), + RNP_HW_STAT("redir_etype_match_drop", hw_stats.redir_etype_match_drop), + RNP_HW_STAT("redir_tcp_syn_match_drop", + hw_stats.redir_tcp_syn_match_drop), + RNP_HW_STAT("redir_tuple5_match_drop", + hw_stats.redir_tuple5_match_drop), + RNP_HW_STAT("redir_tcam_match_drop", hw_stats.redir_tcam_match_drop), + RNP_HW_STAT("bmc_dropped_packets", hw_stats.bmc_dropped_packets), + RNP_HW_STAT("switch_dropped_packets", hw_stats.switch_dropped_packets), + RNP_HW_STAT("rx_csum_offload_errors", hw_csum_rx_error), + RNP_HW_STAT("rx_csum_offload_good", hw_csum_rx_good), + RNP_HW_STAT("rx_broadcast_count", hw_stats.mac_rx_broadcast), + RNP_HW_STAT("rx_multicast_count", hw_stats.mac_rx_multicast), + RNP_HW_STAT("mac_rx_pause_count", hw_stats.mac_rx_pause_count), + RNP_HW_STAT("mac_tx_pause_count", hw_stats.mac_tx_pause_count), +}; + +#define RNP10_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnp10_hwstrings_stats) + +#define RNP10_STATS_LEN \ + (RNP10_GLOBAL_STATS_LEN + RNP10_HWSTRINGS_STATS_LEN + \ 
+ RNP_QUEUE_STATS_LEN) + +static const char rnp10_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define RNP10_TEST_LEN (sizeof(rnp10_gstrings_test) / ETH_GSTRING_LEN) + +static int rnp10_get_regs_len(struct net_device *netdev) +{ +#define RNP10_REGS_LEN 1 + return RNP10_REGS_LEN * sizeof(u32); +} + +#define ADVERTISED_MASK_10G \ + (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | \ + SUPPORTED_10000baseKR_Full) + +#define SUPPORTED_MASK_40G \ + (SUPPORTED_40000baseKR4_Full | SUPPORTED_40000baseCR4_Full | \ + SUPPORTED_40000baseSR4_Full | SUPPORTED_40000baseLR4_Full) + +#define ADVERTISED_MASK_40G \ + (SUPPORTED_40000baseKR4_Full | SUPPORTED_40000baseCR4_Full | \ + SUPPORTED_40000baseSR4_Full | SUPPORTED_40000baseLR4_Full) + +#define SUPPORTED_10000baseT 0 + +static int rnp_set_autoneg_adv_from_hw(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks) +{ + u32 value_r0 = 0, value_r4 = 0, value_r9 = 0; + u32 value_r20, value_r412; + + /* Read autoneg state from phy */ + if (hw->phy_type == PHY_TYPE_SGMII) { + rnp_mbx_phy_read(hw, 0x0, &value_r0); + /* Not support AN, return directly */ + if (!(value_r0 & BIT(12))) + return 0; + + rnp_mbx_phy_read(hw, 0x4, &value_r4); + rnp_mbx_phy_read(hw, 0x9, &value_r9); + if (value_r4 & 0x100) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + if (value_r4 & 0x80) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Half); + if (value_r4 & 0x40) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Full); + if (value_r4 & 0x20) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Half); + if (value_r9 & 0x200) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + if (value_r9 & 0x100) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Half); + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD(7) | 0x0), &value_r0); + + if (!(value_r0 & BIT(12))) + return 0; + + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD(7) | 0x20), &value_r20); + + if (value_r20 & BIT(12)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD_VEND2 | 0xa412), + &value_r412); + + if (value_r412 & BIT(8)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + if (value_r412 & BIT(9)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } + + return 0; +} + +/** + * rnp_phy_type_to_ethtool - convert the phy_types to ethtool link modes + * @adapter: adapter struct with hw->phy_type + * @ks: ethtool link ksettings struct to fill out + * + **/ +static void rnp_phy_type_to_ethtool(struct rnp_adapter *adapter, + struct ethtool_link_ksettings *ks) +{ + struct rnp_hw *hw = &adapter->hw; + u32 supported_link = hw->supported_link; + u8 phy_type = hw->phy_type; + + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + + if (phy_type == PHY_TYPE_NONE) { + if (supported_link & RNP_LINK_SPEED_10GB_FULL) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + 
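+			/* the LR and ER 10G fiber variants are reported as well */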
ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); + } + + if (((supported_link & RNP_LINK_SPEED_10GB_FULL) || + (supported_link & RNP_LINK_SPEED_1GB_FULL))) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + } + } + if (phy_type == PHY_TYPE_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Half); + + rnp_set_autoneg_adv_from_hw(hw, ks); + } + + if (phy_type == PHY_TYPE_10G_TP) { + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + rnp_set_autoneg_adv_from_hw(hw, ks); + } + + if (rnp_fw_is_old_ethtool(hw) && + (supported_link & RNP_LINK_SPEED_40GB_FULL)) { + supported_link |= RNP_SFP_MODE_40G_CR4 | RNP_SFP_MODE_40G_SR4 | + PHY_TYPE_40G_BASE_LR4; + } + + if (supported_link & RNP_SFP_MODE_40G_CR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + } + if (supported_link & RNP_SFP_MODE_40G_SR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + } + if (supported_link & RNP_SFP_MODE_40G_LR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + } + + /* add 25G support here */ + if (supported_link & RNP_SFP_25G_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + } + if (supported_link & RNP_SFP_25G_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (supported_link & RNP_SFP_25G_CR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + if (hw->is_backplane) { + if (phy_type == PHY_TYPE_40G_BASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 10000baseKR_Full); + } + } + + if (supported_link & RNP_SFP_MODE_1G_LX || + supported_link & RNP_SFP_MODE_1G_SX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + } + } + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + if 
(hw->is_backplane) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 1000baseKX_Full); + } + + if ((supported_link & RNP_SFP_MODE_1G_T) || + (supported_link & RNP_LINK_SPEED_1GB_FULL)) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 1000baseT_Full); + } + } + /* need to add new 10G PHY types */ + if (phy_type == PHY_TYPE_10G_BASE_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_ER) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } + if (hw->force_speed_stat == FORCE_SPEED_STAT_10G) { + ethtool_link_ksettings_del_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_del_link_mode(ks, advertising, + 1000baseT_Full); + + ethtool_link_ksettings_del_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_del_link_mode(ks, advertising, + 1000baseX_Full); + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } + } +} +/** + * rnp_get_settings_link_up - Get Link settings for when link is up + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + **/ +static void rnp_get_settings_link_up(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct ethtool_link_ksettings cap_ksettings; + + /* Initialize supported and advertised settings based on phy settings */ + switch (hw->phy_type) { + case PHY_TYPE_40G_BASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + break; + + case PHY_TYPE_40G_BASE_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + break; + case PHY_TYPE_40G_BASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + break; + case PHY_TYPE_10G_BASE_SR: + case PHY_TYPE_10G_BASE_LR: + case PHY_TYPE_10G_BASE_ER: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + 
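+		/* advertise the same 10G fiber modes reported as supported */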
ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw->speed == SPEED_10000) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + break; + case PHY_TYPE_1G_BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + if (!!hw->is_backplane) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + } + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + + case PHY_TYPE_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Half); + break; + + case PHY_TYPE_40G_BASE_KR4: + case PHY_TYPE_10G_BASE_KR: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, 
advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + break; + case PHY_TYPE_10G_TP: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + + default: + /* if we got here and link is up something bad is afoot + */ + netdev_info(netdev, + "WARNING: Link is up but PHY type 0x%x is not " + "recognized, or incorrect cable is in use\n", + hw->phy_type); + } + + /* Now that we've worked out everything that could be supported by the + * current PHY type, get what is supported by the NVM and intersect + * them to get what is truly supported + */ + memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); + rnp_phy_type_to_ethtool(adapter, &cap_ksettings); + ethtool_intersect_link_masks(ks, &cap_ksettings); + + /* Set speed and duplex */ + ks->base.speed = adapter->speed; + ks->base.duplex = hw->duplex; +} + +/** + * rnp_get_settings_link_down - Get the Link settings when link is down + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + **/ +static void rnp_get_settings_link_down(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* link is down and the driver needs to fall back on + * supported phy types to figure out what info to display + */ + rnp_phy_type_to_ethtool(adapter, ks); + + /* With no link speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; + + if ((hw->phy_type == PHY_TYPE_SGMII) || + (hw->phy_type == PHY_TYPE_10G_TP)) { + ks->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + ks->base.eth_tp_mdix_ctrl = hw->tp_mdix_ctrl; + } +} + +/** + * rnp_set_autoneg_state_from_hw - Set the autoneg state from hardware + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * + * Set the autoneg state from hardware, like PHY + **/ +static int rnp_set_autoneg_state_from_hw(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks) +{ + int ret; + struct rnp_adapter *adapter = hw->back; + + ks->base.autoneg = (adapter->an ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + /* Read autoneg state from phy */ + if (hw->phy_type == PHY_TYPE_SGMII) { + u32 value_r0 = 0; + ret = rnp_mbx_phy_read(hw, 0x0, &value_r0); + if (ret) + return -1; + + ks->base.autoneg = (value_r0 & BIT(12)) ? AUTONEG_ENABLE : + AUTONEG_DISABLE; + } + if (hw->phy_type == PHY_TYPE_10G_TP) { + u32 value_r0 = 0; + + rnp_mbx_phy_read(hw, PHY_826x_AN, &value_r0); + + ks->base.autoneg = (value_r0 & BIT(12)) ? 
AUTONEG_ENABLE : + AUTONEG_DISABLE; + if (value_r0) + adapter->an = 1; + } + + return 0; +} + +static int rnp_get_phy_mdix_from_hw(struct rnp_hw *hw) +{ + int ret; + int rmmd_reg = 0; + u32 value_r17 = 0; + + if (hw->phy_type == PHY_TYPE_SGMII) { + ret = rnp_mbx_phy_read(hw, 0x11, &value_r17); + if (ret) + return -1; + hw->phy.is_mdix = !!(value_r17 & 0x0040); + } + if (hw->phy_type == PHY_TYPE_10G_TP) { + rmmd_reg = (1 << 30) | (0x1f << 16) | (0xa430 & 0xffff); + ret = rnp_mbx_phy_read(hw, rmmd_reg, &value_r17); + if (ret) + return -1; + hw->phy.is_mdix = !!(value_r17 & 0x0200); + } + + return 0; +} + +__maybe_unused static bool fiber_unsupport(u32 supported_link, u8 phy_type) +{ + if ((phy_type == PHY_TYPE_10G_BASE_KR) || + (phy_type == PHY_TYPE_10G_BASE_SR) || + (phy_type == PHY_TYPE_10G_BASE_LR) || + (phy_type == PHY_TYPE_10G_BASE_ER)) { + if (!(supported_link & RNP_LINK_SPEED_10GB_FULL)) + return true; + } + + if ((phy_type == PHY_TYPE_40G_BASE_KR4) || + (phy_type == PHY_TYPE_40G_BASE_SR4) || + (phy_type == PHY_TYPE_40G_BASE_CR4) || + (phy_type == PHY_TYPE_40G_BASE_LR4)) { + if (!(supported_link & + (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL))) + return true; + } + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + if (!(supported_link & RNP_LINK_SPEED_1GB_FULL)) + return true; + } + + return false; +} + +static int rnp10_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + bool link_up; + int err; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + /* update hw from firmware */ + err = rnp_mbx_get_lane_stat(hw); + if (err /*|| fiber_unsupport(hw->supported_link, hw->phy_type)*/) { + /* + when force 1G speed and plugin in 10G-AOC, should not return + -1 + */ + return -1; + } + + /* update hw->phy.media_type by hw->phy_type */ + switch (hw->phy_type) { + case PHY_TYPE_NONE: + hw->phy.media_type = rnp_media_type_unknown; + break; + case PHY_TYPE_1G_BASE_KX: + if (hw->is_backplane) { + hw->phy.media_type = rnp_media_type_backplane; + } else if (hw->is_sgmii) { + hw->phy.media_type = rnp_media_type_copper; + } else { + if ((hw->supported_link & RNP_LINK_SPEED_1GB_FULL) || + (hw->supported_link & RNP_SFP_MODE_1G_LX)) { + hw->phy.media_type = rnp_media_type_fiber; + } else { + hw->phy.media_type = rnp_media_type_unknown; + } + } + break; + case PHY_TYPE_SGMII: + case PHY_TYPE_10G_TP: + hw->phy.media_type = rnp_media_type_copper; + ks->base.phy_address = adapter->phy_addr; + break; + case PHY_TYPE_10G_BASE_KR: + case PHY_TYPE_25G_BASE_KR: + case PHY_TYPE_40G_BASE_KR4: + hw->phy.media_type = rnp_media_type_backplane; + break; + case PHY_TYPE_10G_BASE_SR: + case PHY_TYPE_40G_BASE_SR4: + case PHY_TYPE_40G_BASE_CR4: + case PHY_TYPE_40G_BASE_LR4: + case PHY_TYPE_10G_BASE_LR: + case PHY_TYPE_10G_BASE_ER: + hw->phy.media_type = rnp_media_type_fiber; + break; + default: + hw->phy.media_type = rnp_media_type_unknown; + break; + } + + if (hw->supported_link & RNP_SFP_CONNECTOR_DAC) { + hw->phy.media_type = rnp_media_type_da; + } + + if ((hw->supported_link & RNP_SFP_TO_SGMII) || + (hw->supported_link & RNP_SFP_MODE_1G_T)) { + hw->phy.media_type = rnp_media_type_copper; + } + + /* Check Whether there is media on port */ + if (hw->phy.media_type == rnp_media_type_fiber) { + /* If adapter->sfp.mod_abs is 0, there is no media on port. 
*/ + if (!adapter->sfp.mod_abs) { + hw->phy.media_type = rnp_media_type_unknown; + hw->phy_type = PHY_TYPE_NONE; + } + } + + /* Now set the settings that don't rely on link being up/down */ + /* Set autoneg settings */ + rnp_set_autoneg_state_from_hw(hw, ks); + + link_up = hw->link; + if (link_up) + rnp_get_settings_link_up(hw, ks, netdev); + else + rnp_get_settings_link_down(hw, ks, netdev); + + /* Set media type settings */ + switch (hw->phy.media_type) { + case rnp_media_type_backplane: + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Backplane); + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ks->base.port = PORT_NONE; + break; + case rnp_media_type_copper: + ethtool_link_ksettings_add_link_mode(ks, supported, TP); + ethtool_link_ksettings_add_link_mode(ks, advertising, TP); + if (PHY_TYPE_SGMII == hw->phy_type) + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + if (AUTONEG_ENABLE == ks->base.autoneg) + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + else + ethtool_link_ksettings_del_link_mode(ks, advertising, + Autoneg); + ks->base.port = PORT_TP; + break; + case rnp_media_type_da: + case rnp_media_type_cx4: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_DA; + break; + case rnp_media_type_fiber: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_FIBRE; + break; + case rnp_media_type_unknown: + default: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ks->base.port = PORT_OTHER; + break; + } + + if (hw->force_speed_stat != FORCE_SPEED_STAT_DISABLED) { + ethtool_link_ksettings_del_link_mode(ks, advertising, Autoneg); + } + + /* Set flow control settings */ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause); + + /* should get pause from hw if 10G-TP */ + switch (hw->fc.requested_mode) { + case rnp_fc_full: + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + break; + case rnp_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + break; + case rnp_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + break; + default: + ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, advertising, + Asym_Pause); + break; + } + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy_type == PHY_TYPE_SGMII) || + (hw->phy_type == PHY_TYPE_10G_TP)) { + if (rnp_get_phy_mdix_from_hw(hw)) { + ks->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + } else { + ks->base.eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : + ETH_TP_MDI; + } + } else { + ks->base.eth_tp_mdix = hw->tp_mdx; + } + + if (hw->phy.mdix == AUTO_ALL_MODES) + ks->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ks->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + rnp_logd(LOG_ETHTOOL, + "%s %s set link: speed=%d port=%d duplex=%d autoneg=%d " + "phy_address=%d, media_type=%d hw->phy_type:%d\n", + __func__, netdev->name, ks->base.speed, ks->base.port, + ks->base.duplex, ks->base.autoneg, ks->base.phy_address, + hw->phy.media_type, hw->phy_type); + return 0; +} + +static int rnp10_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct ethtool_link_ksettings safe_ks; + struct ethtool_link_ksettings copy_ks; + bool autoneg_changed = false, duplex_changed = false; + int timeout = 50; + int err = 0; + u8 autoneg; + u32 advertising_link_speed, speed = 0; + + /* copy the ksettings to copy_ks to avoid modifying the origin */ + memcpy(©_ks, ks, sizeof(struct ethtool_link_ksettings)); + + /* save autoneg out of ksettings */ + + autoneg = copy_ks.base.autoneg; + rnp_logd(LOG_ETHTOOL, + "%s %s set link: speed=%d port=%d duplex=%d autoneg=%d " + "phy_address=%d\n", + __func__, netdev->name, copy_ks.base.speed, copy_ks.base.port, + copy_ks.base.duplex, copy_ks.base.autoneg, + copy_ks.base.phy_address); + + /* get our own copy of the bits to check against */ + memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; + + if (rnp10_get_link_ksettings(netdev, &safe_ks)) { + /* return err */ + return 0; + } + /* Get link modes supported by hardware and check against modes + * requested by user. Return an error if unsupported mode was set. + */ + /* if autoneg is off, this is not error ? 
*/ + if (!bitmap_subset(copy_ks.link_modes.advertising, + safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + return -EINVAL; + } + /* set autoneg back to what it currently is */ + copy_ks.base.autoneg = safe_ks.base.autoneg; + + memset(&advertising_link_speed, 0, sizeof(u32)); + + /* Check autoneg */ + if (autoneg == AUTONEG_ENABLE) { + /* If autoneg was not already enabled */ + if (!(adapter->an)) { + /* If autoneg is not supported, return error */ + if (!ethtool_link_ksettings_test_link_mode( + &safe_ks, supported, Autoneg)) { + netdev_info( + netdev, + "Autoneg not supported on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + autoneg_changed = true; + } + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10baseT_Full)) + advertising_link_speed |= RNP_LINK_SPEED_10_FULL; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Full)) + advertising_link_speed |= RNP_LINK_SPEED_100_FULL; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + advertising_link_speed |= RNP_LINK_SPEED_1GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_10_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_100_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_1GB_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKX4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseLR_Full)) + advertising_link_speed |= RNP_LINK_SPEED_10GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseKR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseLR4_Full)) + advertising_link_speed |= RNP_LINK_SPEED_40GB_FULL; + + if (advertising_link_speed) { + hw->phy.autoneg_advertised = advertising_link_speed; + } else { + if ((hw->force_speed_stat == + FORCE_SPEED_STAT_DISABLED)) { + netdev_info(netdev, + "advertising_link_speed is 0\n"); + err = -EINVAL; + goto done; + } + } + + hw->advertised_link = advertising_link_speed; + if (hw->is_sgmii && hw->autoneg == false) + autoneg_changed = true; + hw->autoneg = true; + } else { + /* If autoneg is currently enabled */ + if (adapter->an) { + /* If autoneg is supported 10GBASE_T is the only PHY + * that can disable it, so otherwise return error + */ + if (ethtool_link_ksettings_test_link_mode( + &safe_ks, supported, Autoneg) && + hw->phy.media_type != rnp_media_type_copper) { + netdev_info( + netdev, + "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + autoneg_changed = true; + } + /* if 10G -TP, not support close an */ + if (hw->phy_type == 
PHY_TYPE_10G_TP) { + netdev_info(netdev, + "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + + /* Only allow one speed at a time when autoneg is AUTONEG_DISABLE. */ + switch (ks->base.speed) { + case SPEED_10: + speed = RNP_LINK_SPEED_10_FULL; + break; + case SPEED_100: + speed = RNP_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + speed = RNP_LINK_SPEED_1GB_FULL; + break; + case SPEED_10000: + speed = RNP_LINK_SPEED_10GB_FULL; + break; + default: + netdev_info(netdev, "unsupported speed\n"); + err = -EINVAL; + goto done; + } + + hw->autoneg = false; + } + + hw->phy.autoneg_advertised = RNP_LINK_SPEED_UNKNOWN; + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. + */ + + if (hw->is_sgmii) { + hw->duplex = ks->base.duplex; + duplex_changed = true; + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + hw->duplex = ks->base.duplex; + duplex_changed = true; + } + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (copy_ks.base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (copy_ks.base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = copy_ks.base.eth_tp_mdix_ctrl; + } + + hw->mac.autotry_restart = true; + /* set speed */ + err = hw->ops.setup_link(hw, advertising_link_speed, hw->autoneg, speed, + hw->duplex); + if (err) + e_info(probe, "setup link failed with code %d\n", err); + + clear_bit(__RNP_IN_SFP_INIT, &adapter->state); +done: + return err; +} + +static void rnp10_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + strncpy(drvinfo->driver, rnp_driver_name, sizeof(drvinfo->driver)); + snprintf(drvinfo->version, sizeof(drvinfo->version), "%s", + rnp_driver_version); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d.%d", ((unsigned char *)&(hw->fw_version))[3], + ((unsigned char *)&(hw->fw_version))[2], + ((unsigned char *)&(hw->fw_version))[1], + ((unsigned char *)&(hw->fw_version))[0]); + + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = RNP10_STATS_LEN; + drvinfo->testinfo_len = RNP10_TEST_LEN; + drvinfo->regdump_len = rnp10_get_regs_len(netdev); + drvinfo->n_priv_flags = RNP10_PRIV_FLAGS_STR_LEN; +} + +static void rnp10_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u32 *regs_buff = p; + int i; + + memset(p, 0, RNP10_REGS_LEN * sizeof(u32)); + + for (i = 0; i < RNP10_REGS_LEN; i++) + regs_buff[i] = rd32(hw, i * 4); +} + +static int rnp_nway_reset(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + netdev_info(netdev, "NIC Link is Down\n"); + rnp_down(adapter); + msleep(10); + rnp_up(adapter); + return 0; +} + +/** + * rnpm_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. 
This + * function check the device id to see if the associated phy supports + * autoneg flow control. + **/ +static bool rnp_device_supports_autoneg_fc(struct rnp_hw *hw) +{ + bool supported = false; + + switch (hw->phy.media_type) { + case rnp_media_type_fiber: + break; + case rnp_media_type_backplane: + break; + case rnp_media_type_copper: + /* only some copper devices support flow control autoneg */ + supported = true; + break; + default: + break; + } + + return supported; +} + +static void rnp10_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + /* we don't support autoneg */ + if (rnp_device_supports_autoneg_fc(hw) && !hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + if (hw->fc.current_mode == rnp_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == rnp_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == rnp_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int rnp10_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_fc_info fc = hw->fc; + + /* we not support change in dcb mode */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + return -EINVAL; + + /* we not support autoneg mode */ + if ((pause->autoneg == AUTONEG_ENABLE) && + !rnp_device_supports_autoneg_fc(hw)) + return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + fc.requested_mode &= (~(PAUSE_TX | PAUSE_RX)); + if (pause->autoneg) { + fc.requested_mode |= PAUSE_AUTO; + } else { + if (pause->tx_pause) + fc.requested_mode |= PAUSE_TX; + if (pause->rx_pause) + fc.requested_mode |= PAUSE_RX; + } + + if (hw->phy_type == PHY_TYPE_SGMII) { + u16 pause_bits = 0; + u32 value; + u32 value_r0; + + if (hw->fc.requested_mode == PAUSE_AUTO) { + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } else { + if ((hw->fc.requested_mode & PAUSE_TX) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + pause_bits |= ASYM_PAUSE; + + } else if ((!(hw->fc.requested_mode & PAUSE_TX)) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + } else + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } + rnp_mbx_phy_read(hw, 4, &value); + value &= ~0xC00; + value |= pause_bits; + rnp_mbx_phy_write(hw, 4, value); + + if (hw->autoneg) { + rnp_mbx_phy_read(hw, 0, &value_r0); + value_r0 |= BIT(9); + rnp_mbx_phy_write(hw, 0, value_r0); + } + } + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct rnp_fc_info))) { + /* to tell all vf new pause status */ + hw->fc = fc; + rnp_msg_post_status(adapter, PF_PAUSE_STATUS); + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + else + rnp_reset(adapter); + } + + return 0; +} + +static void rnp10_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + struct rnp_ring *ring; + u32 dma_ch; + + switch (stringset) { + /* maybe we don't support test? 
*/ + case ETH_SS_TEST: + for (i = 0; i < RNP10_TEST_LEN; i++) { + memcpy(data, rnp10_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + case ETH_SS_STATS: + for (i = 0; i < RNP10_GLOBAL_STATS_LEN; i++) { + memcpy(p, rnp10_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < RNP10_HWSTRINGS_STATS_LEN; i++) { + memcpy(p, rnp10_hwstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < RNP_NUM_TX_QUEUES; i++) { + /* ==== tx ======== */ + ring = adapter->tx_ring[i]; + dma_ch = ring->rnp_queue_idx; + sprintf(p, "---\n queue%u_tx_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_restart", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_busy", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_done_old", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_desc", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_poll_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_irq_more", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_hw_head", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_hw_tail", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_sw_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_sw_next_to_use", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_bytes_to_hw", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_todo_update", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_done_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_added_vlan_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_irq_miss", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_equal_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_times", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_count", i); + p += ETH_GSTRING_LEN; + + /* ==== rx ======== */ + ring = adapter->rx_ring[i]; + dma_ch = ring->rnp_queue_idx; + sprintf(p, "queue%u_rx_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_driver_drop_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rsc", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rsc_flush", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_non_eop_descs", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_page_failed", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_buff_failed", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_page", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_csum_offload_errs", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_csum_offload_good", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_poll_again_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rm_vlan_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_hw_head", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_hw_tail", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_sw_next_to_use", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_sw_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_irq_miss", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_equal_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_clean_times", i); + 
p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_clean_count", i); + p += ETH_GSTRING_LEN; + } + + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, rnp10_priv_flags_strings, + RNP10_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int rnp10_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + /* now we don't support test */ + case ETH_SS_TEST: + return RNP10_TEST_LEN; + case ETH_SS_STATS: + return RNP10_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return RNP10_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +static u32 rnp10_get_priv_flags(struct net_device *netdev) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) + priv_flags |= RNP10_MAC_LOOPBACK; + if (adapter->priv_flags & RNP_PRIV_FLAG_SWITCH_LOOPBACK) + priv_flags |= RNP10_SWITCH_LOOPBACK; + if (adapter->priv_flags & RNP_PRIV_FLAG_VEB_ENABLE) + priv_flags |= RNP10_VEB_ENABLE; + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + priv_flags |= RNP10_FT_PADDING; + if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG) + priv_flags |= RNP10_PADDING_DEBUG; + if (adapter->priv_flags & RNP_PRIV_FLAG_PTP_DEBUG) + priv_flags |= RNP10_PTP_FEATURE; + if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) + priv_flags |= RNP10_SIMULATE_DOWN; + if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) + priv_flags |= RNP10_VXLAN_INNER_MATCH; + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + priv_flags |= RNP10_STAG_ENABLE; + if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) + priv_flags |= RNP10_REC_HDR_LEN_ERR; + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) + priv_flags |= RNP10_SRIOV_VLAN_MODE; + if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_MODE) + priv_flags |= RNP10_REMAP_MODE; + if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP_EN_STAT) + priv_flags |= RNP10_LLDP_EN_STAT; + if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) + priv_flags |= RNP10_FORCE_CLOSE; + + return priv_flags; +} + +static int rnp10_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + u32 data_old; + u32 data_new; + + data_old = dma_rd32(dma, RNP_DMA_CONFIG); + data_new = data_old; + + if (priv_flags & RNP10_MAC_LOOPBACK) { + SET_BIT(n10_mac_loopback, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_MAC_LOOPBACK; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_MAC_LOOPBACK); + CLR_BIT(n10_mac_loopback, data_new); + } + + if (priv_flags & RNP10_LLDP_EN_STAT) { + if (rnp_mbx_lldp_port_enable(hw, true) == 0) { + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + } else { + rnp_err("%s: set lldp enable faild!\n", + adapter->netdev->name); + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + } + } else if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP_EN_STAT) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + rnp_mbx_lldp_port_enable(hw, false); + } + + if (priv_flags & RNP10_SWITCH_LOOPBACK) { + SET_BIT(n10_switch_loopback, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_SWITCH_LOOPBACK; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SWITCH_LOOPBACK) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_SWITCH_LOOPBACK); + CLR_BIT(n10_switch_loopback, data_new); + } + + if (priv_flags & RNP10_VEB_ENABLE) { + SET_BIT(n10_veb_enable, 
data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_VEB_ENABLE; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_VEB_ENABLE) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_VEB_ENABLE); + CLR_BIT(n10_veb_enable, data_new); + } + + if (priv_flags & RNP10_FT_PADDING) { + SET_BIT(n10_padding_enable, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_FT_PADDING; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_FT_PADDING); + CLR_BIT(n10_padding_enable, data_new); + } + + if (priv_flags & RNP10_PADDING_DEBUG) + adapter->priv_flags |= RNP_PRIV_FLAG_PADDING_DEBUG; + else if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG) + adapter->priv_flags &= (~RNP_PRIV_FLAG_PADDING_DEBUG); + + if (priv_flags & RNP10_PTP_FEATURE) { + adapter->priv_flags |= RNP_PRIV_FLAG_PTP_DEBUG; + adapter->flags2 |= ~RNP_FLAG2_PTP_ENABLED; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_PTP_DEBUG) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_PTP_DEBUG); + adapter->flags2 &= (~RNP_FLAG2_PTP_ENABLED); + } + + if (priv_flags & RNP10_SIMULATE_DOWN) { + adapter->priv_flags |= RNP_PRIV_FLAG_SIMUATE_DOWN; + /* set check link again */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_SIMUATE_DOWN); + /* set check link again */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + } + + if (priv_flags & RNP10_VXLAN_INNER_MATCH) { + adapter->priv_flags |= RNP_PRIV_FLAG_VXLAN_INNER_MATCH; + hw->ops.set_vxlan_mode(hw, true); + } else if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_VXLAN_INNER_MATCH); + hw->ops.set_vxlan_mode(hw, false); + } + + if (priv_flags & RNP10_STAG_ENABLE) + adapter->flags2 |= RNP_FLAG2_VLAN_STAGS_ENABLED; + else + adapter->flags2 &= (~RNP_FLAG2_VLAN_STAGS_ENABLED); + + if (priv_flags & RNP10_REC_HDR_LEN_ERR) { + adapter->priv_flags |= RNP_PRIV_FLAG_REC_HDR_LEN_ERR; + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + + } else if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_REC_HDR_LEN_ERR); + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, INNER_L4_BIT); + } + + if (priv_flags & RNP10_REMAP_MODE) + adapter->priv_flags |= RNP_PRIV_FLAG_REMAP_MODE; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_REMAP_MODE); + + if (priv_flags & RNP10_SRIOV_VLAN_MODE) { + int i; + + adapter->priv_flags |= RNP_PRIV_FLAG_SRIOV_VLAN_MODE; + if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE)) + goto skip_setup_vf_vlan; + /* should setup vlvf table */ + for (i = 0; i < adapter->num_vfs; i++) { + if (hw->ops.set_vf_vlan_mode) { + if (adapter->vfinfo[i].vf_vlan) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[i].vf_vlan, + i, true); + + if (adapter->vfinfo[i].pf_vlan) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[i].pf_vlan, + i, true); + } + } + + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + int i; + adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE); + /* should clean vlvf table */ + for (i = 0; i < hw->max_vfs; i++) { + if (hw->ops.set_vf_vlan_mode) + hw->ops.set_vf_vlan_mode(hw, 0, i, false); + } + } + + if (hw->force_link_supported) { + if (priv_flags & RNP10_FORCE_CLOSE) { + if (!(adapter->priv_flags & + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + adapter->priv_flags |= + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + if (hw->ops.driver_status) { + hw->ops.driver_status( + hw, true, + rnp_driver_force_control_mac); + } + } + } else { + 
if (adapter->priv_flags & + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) { + adapter->priv_flags &= + (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + if (hw->ops.driver_status) { + hw->ops.driver_status( + hw, false, + rnp_driver_force_control_mac); + } + } + } + } else { + if (priv_flags & RNP10_FORCE_CLOSE) + rnp_err("%s: firmware not support set `link_down_on_close` private flag\n", + adapter->netdev->name); + } + +skip_setup_vf_vlan: + + dbg("data new is %x\n", data_new); + if (data_old != data_new) + dma_wr32(dma, RNP_DMA_CONFIG, data_new); + /* if ft_padding changed */ + if (CHK_BIT(n10_padding_enable, data_old) != + CHK_BIT(n10_padding_enable, data_new)) { + rnp_msg_post_status(adapter, PF_FT_PADDING_STATUS); + } + + return 0; +} + +static void rnp10_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; + struct rnp_ring *ring; + int i, j; + char *p = NULL; + + rnp_update_stats(adapter); + + for (i = 0; i < RNP10_GLOBAL_STATS_LEN; i++) { + p = (char *)net_stats + rnp10_gstrings_net_stats[i].stat_offset; + data[i] = (rnp10_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? + *(u64 *)p : + *(u32 *)p; + } + for (j = 0; j < RNP10_HWSTRINGS_STATS_LEN; j++, i++) { + p = (char *)adapter + rnp10_hwstrings_stats[j].stat_offset; + data[i] = + (rnp10_hwstrings_stats[j].sizeof_stat == sizeof(u64)) ? + *(u64 *)p : + *(u32 *)p; + } + + BUG_ON(RNP_NUM_TX_QUEUES != RNP_NUM_RX_QUEUES); + + for (j = 0; j < RNP_NUM_TX_QUEUES; j++) { + int idx; + /* tx-ring */ + ring = adapter->tx_ring[j]; + if (!ring) { + /* tx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + /* rx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + continue; + } + idx = ring->rnp_queue_idx; + + data[i++] = ring->stats.packets; + data[i++] = ring->stats.bytes; + data[i++] = ring->tx_stats.restart_queue; + data[i++] = ring->tx_stats.tx_busy; + data[i++] = ring->tx_stats.tx_done_old; + data[i++] = ring->tx_stats.clean_desc; + data[i++] = ring->tx_stats.poll_count; + data[i++] = ring->tx_stats.irq_more_count; + + /* rnp_tx_queue_ring_stat */ + data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + data[i++] = ring->next_to_clean; + data[i++] = ring->next_to_use; + data[i++] = ring->tx_stats.send_bytes; + data[i++] = ring->tx_stats.send_bytes_to_hw; + data[i++] = ring->tx_stats.todo_update; + data[i++] = ring->tx_stats.send_done_bytes; + data[i++] = ring->tx_stats.vlan_add; + if (ring->tx_stats.tx_next_to_clean == -1) + data[i++] = ring->count; + else + data[i++] = ring->tx_stats.tx_next_to_clean; + data[i++] = ring->tx_stats.tx_irq_miss; + data[i++] = ring->tx_stats.tx_equal_count; + data[i++] = ring->tx_stats.tx_clean_times; + data[i++] = ring->tx_stats.tx_clean_count; + + /* rx-ring */ + ring = 
adapter->rx_ring[j]; + if (!ring) { + /* rx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + continue; + } + idx = ring->rnp_queue_idx; + data[i++] = ring->stats.packets; + data[i++] = ring->stats.bytes; + + data[i++] = ring->rx_stats.driver_drop_packets; + data[i++] = ring->rx_stats.rsc_count; + data[i++] = ring->rx_stats.rsc_flush; + data[i++] = ring->rx_stats.non_eop_descs; + data[i++] = ring->rx_stats.alloc_rx_page_failed; + data[i++] = ring->rx_stats.alloc_rx_buff_failed; + data[i++] = ring->rx_stats.alloc_rx_page; + data[i++] = ring->rx_stats.csum_err; + data[i++] = ring->rx_stats.csum_good; + data[i++] = ring->rx_stats.poll_again_count; + data[i++] = ring->rx_stats.vlan_remove; + + /* rnp_rx_queue_ring_stat */ + data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_TAIL); + data[i++] = ring->next_to_use; + data[i++] = ring->next_to_clean; + if (ring->rx_stats.rx_next_to_clean == -1) + data[i++] = ring->count; + else + data[i++] = ring->rx_stats.rx_next_to_clean; + data[i++] = ring->rx_stats.rx_irq_miss; + data[i++] = ring->rx_stats.rx_equal_count; + data[i++] = ring->rx_stats.rx_clean_times; + data[i++] = ring->rx_stats.rx_clean_count; + } +} + +/* n10 ethtool_ops ops here */ +static const struct ethtool_ops rnp10_ethtool_ops = { + + .get_link_ksettings = rnp10_get_link_ksettings, + .set_link_ksettings = rnp10_set_link_ksettings, + .get_drvinfo = rnp10_get_drvinfo, + .get_regs_len = rnp10_get_regs_len, + .get_regs = rnp10_get_regs, + .get_wol = rnp_get_wol, + .set_wol = rnp_set_wol, + .nway_reset = rnp_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = rnp_get_ringparam, + .set_ringparam = rnp_set_ringparam, + .get_pauseparam = rnp10_get_pauseparam, + .set_pauseparam = rnp10_set_pauseparam, + .get_msglevel = rnp_get_msglevel, + .set_msglevel = rnp_set_msglevel, + .get_fecparam = rnp_get_fecparam, + .set_fecparam = rnp_set_fecparam, + .self_test = rnp_diag_test, + .get_strings = rnp10_get_strings, + .set_phys_id = rnp_set_phys_id, + .get_sset_count = rnp10_get_sset_count, + .get_priv_flags = rnp10_get_priv_flags, + .set_priv_flags = rnp10_set_priv_flags, + .get_ethtool_stats = rnp10_get_ethtool_stats, + .get_coalesce = rnp_get_coalesce, + .set_coalesce = rnp_set_coalesce, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_rxnfc = rnp_get_rxnfc, + .set_rxnfc = rnp_set_rxnfc, + .get_channels = rnp_get_channels, + .set_channels = rnp_set_channels, + .get_module_info = rnp_get_module_info, + .get_module_eeprom = rnp_get_module_eeprom, + .get_ts_info = rnp_get_ts_info, + .get_rxfh_indir_size = rnp_rss_indir_size, + .get_rxfh_key_size = rnp_get_rxfh_key_size, + .get_rxfh = rnp_get_rxfh, + .set_rxfh = rnp_set_rxfh, + .get_dump_flag = rnp_get_dump_flag, + .get_dump_data = rnp_get_dump_data, + .set_dump = rnp_set_dump, + .flash_device = rnp_flash_device, +}; + +static void rnp_set_ethtool_hw_ops_n10(struct net_device *netdev) +{ + netdev->ethtool_ops = &rnp10_ethtool_ops; +} + +/** + * rnp_get_thermal_sensor_data_hw_ops_n10 - Gathers thermal sensor data + * @hw: pointer to hardware structure + * Returns the thermal 
sensor data structure + **/ +static s32 rnp_get_thermal_sensor_data_hw_ops_n10(struct rnp_hw *hw) +{ + int voltage = 0; + struct rnp_thermal_sensor_data *data = &hw->thermal_sensor_data; + + voltage = voltage; + data->sensor[0].temp = rnp_mbx_get_temp(hw, &voltage); + + return 0; +} + +/** + * rnp_init_thermal_sensor_thresh_hw_ops_n10 - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 rnp_init_thermal_sensor_thresh_hw_ops_n10(struct rnp_hw *hw) +{ + u8 i; + struct rnp_thermal_sensor_data *data = &hw->thermal_sensor_data; + + for (i = 0; i < RNP_MAX_SENSORS; i++) { + data->sensor[i].location = i + 1; + data->sensor[i].caution_thresh = 90; + data->sensor[i].max_op_thresh = 100; + } + + return 0; +} + +static s32 rnp_phy_read_reg_hw_ops_n10(struct rnp_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status = 0; + u32 data = 0; + + status = rnp_mbx_phy_read(hw, reg_addr, &data); + *phy_data = data & 0xffff; + + return status; +} + +static s32 rnp_phy_write_reg_hw_ops_n10(struct rnp_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status = 0; + + status = rnp_mbx_phy_write(hw, reg_addr, (u32)phy_data); + + return status; +} + +static void rnp_set_vf_vlan_mode_hw_ops_n10(struct rnp_hw *hw, u16 vlan, int vf, + bool enable) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) + eth->ops.set_vf_vlan_mode(eth, vlan, vf, enable); +} + +static void rnp_driver_status_hw_ops_n10(struct rnp_hw *hw, bool enable, int mode) +{ + switch (mode) { + case rnp_driver_insmod: + rnp_mbx_ifinsmod(hw, enable); + break; + case rnp_driver_suspuse: + rnp_mbx_ifsuspuse(hw, enable); + break; + case rnp_driver_force_control_mac: + rnp_mbx_ifforce_control_mac(hw, enable); + + break; + } +} + +static struct rnp_hw_operations hw_ops_n10 = { + .init_hw = &rnp_init_hw_ops_n10, + .reset_hw = &rnp_reset_hw_ops_n10, + .start_hw = &rnp_start_hw_ops_n10, + .set_mtu = &rnp_set_mtu_hw_ops_n10, + .set_vlan_filter_en = &rnp_set_vlan_filter_en_hw_ops_n10, + .set_vlan_filter = &rnp_set_vlan_filter_hw_ops_n10, + .set_vf_vlan_filter = &rnp_set_vf_vlan_filter_hw_ops_n10, + .set_vlan_strip = &rnp_set_vlan_strip_hw_ops_n10, + .set_mac = &rnp_set_mac_hw_ops_n10, + .set_rx_mode = &rnp_set_rx_mode_hw_ops_n10, + .set_rar_with_vf = &rnp_set_rar_with_vf_hw_ops_n10, + .clr_rar = &rnp_clr_rar_hw_ops_n10, + .clr_rar_all = &rnp_clr_rar_all_hw_ops_n10, + .clr_vlan_veb = &rnp_clr_vlan_veb_hw_ops_n10, + .set_txvlan_mode = &rnp_set_txvlan_mode_hw_ops_n10, + .set_fcs_mode = &rnp_set_fcs_mode_hw_ops_n10, + .set_vxlan_port = &rnp_set_vxlan_port_hw_ops_n10, + .set_vxlan_mode = &rnp_set_vxlan_mode_hw_ops_n10, + .set_mac_rx = &rnp_set_mac_rx_hw_ops_n10, + .set_rx_hash = &rnp_set_rx_hash_hw_ops_n10, + .set_pause_mode = &rnp_set_pause_mode_hw_ops_n10, + .get_pause_mode = &rnp_get_pause_mode_hw_ops_n10, + .update_hw_info = &rnp_update_hw_info_hw_ops_n10, + .update_rx_drop = &rnp_update_hw_rx_drop_hw_ops_n10, + .update_sriov_info = &rnp_update_sriov_info_hw_ops_n10, + .set_sriov_status = &rnp_set_sriov_status_hw_ops_n10, + .set_sriov_vf_mc = &rnp_set_sriov_vf_mc_hw_ops_n10, + .init_rx_addrs = &rnp_init_rx_addrs_hw_ops_n10, + .clr_vfta = &rnp_clr_vfta_hw_ops_n10, + .set_rss_key = &rnp_set_rss_key_hw_ops_n10, + .set_rss_table = 
&rnp_set_rss_table_hw_ops_n10, + .update_hw_status = &rnp_update_hw_status_hw_ops_n10, + .set_mbx_link_event = &rnp_set_mbx_link_event_hw_ops_n10, + .set_mbx_ifup = &rnp_set_mbx_ifup_hw_ops_n10, + .check_link = &rnp_check_mac_link_hw_ops_n10, + .setup_link = &rnp_setup_mac_link_hw_ops_n10, + .clean_link = &rnp_clean_link_hw_ops_n10, + .set_layer2_remapping = &rnp_set_layer2_hw_ops_n10, + .clr_layer2_remapping = &rnp_clr_layer2_hw_ops_n10, + .clr_all_layer2_remapping = &rnp_clr_all_layer2_hw_ops_n10, + .set_tuple5_remapping = &rnp_set_tuple5_hw_ops_n10, + .clr_tuple5_remapping = &rnp_clr_tuple5_hw_ops_n10, + .clr_all_tuple5_remapping = &rnp_clr_all_tuple5_hw_ops_n10, + .set_tcp_sync_remapping = &rnp_set_tcp_sync_hw_ops_n10, + .update_msix_count = &rnp_update_msix_count_hw_ops_n10, + .get_thermal_sensor_data = &rnp_get_thermal_sensor_data_hw_ops_n10, + .init_thermal_sensor_thresh = + &rnp_init_thermal_sensor_thresh_hw_ops_n10, + .setup_ethtool = &rnp_set_ethtool_hw_ops_n10, + .phy_read_reg = &rnp_phy_read_reg_hw_ops_n10, + .phy_write_reg = &rnp_phy_write_reg_hw_ops_n10, + .set_vf_vlan_mode = &rnp_set_vf_vlan_mode_hw_ops_n10, + .driver_status = &rnp_driver_status_hw_ops_n10, +}; + +static void rnp_mac_set_rx_n10(struct rnp_mac_info *mac, bool status) +{ + struct rnp_hw *hw = (struct rnp_hw *)mac->back; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + + u32 value = 0; + u32 count = 0; + + if (status) { + do { + mac_wr32(mac, RNP10_MAC_RX_CFG, + mac_rd32(mac, RNP10_MAC_RX_CFG) | 0x01); + usleep_range(100, 200); + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx on timeout\n"); + break; + } + } while (!(value & 0x01)); + + if (adapter->flags & RNP_FLAG_SWITCH_LOOPBACK_EN) { + mac_wr32(mac, RNP10_MAC_PKT_FLT, BIT(31) | BIT(0)); + eth_wr32(&hw->eth, RNP10_ETH_DMAC_MCSTCTRL, 0x0); + } else { + do { + mac_wr32(mac, RNP10_MAC_RX_CFG, + mac_rd32(mac, RNP10_MAC_RX_CFG) & + (~0x400)); + usleep_range(100, 200); + value = mac_rd32(mac, RNP_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx off timeout\n"); + break; + } + } while (value & 0x400); + if (hw->ncsi_en) + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x80000001); + else + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x00000001); + } + } else { + do { + mac_wr32(mac, RNP10_MAC_RX_CFG, + mac_rd32(mac, RNP10_MAC_RX_CFG) | 0x400); + usleep_range(100, 200); + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx on timeout\n"); + break; + } + } while (!(value & 0x400)); + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x0); + } +} + +static void rnp_mac_fcs_n10(struct rnp_mac_info *mac, bool status) +{ + u32 value; + +#define FCS_MASK (0x6) + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + if (status) { + value &= (~FCS_MASK); + + } else { + value |= FCS_MASK; + } + + mac_wr32(mac, RNP10_MAC_RX_CFG, value); +} + +/** + * rnp_fc_mode_n10 - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +static s32 rnp_mac_fc_mode_n10(struct rnp_mac_info *mac) +{ + struct rnp_hw *hw = (struct rnp_hw *)mac->back; + s32 ret_val = 0; + u32 reg; + u32 rxctl_reg, txctl_reg[RNP_MAX_TRAFFIC_CLASS]; + int i; + + /* + * Validate the water mark configuration for packet buffer 0. Zero + * water marks indicate that the packet buffer was not configured + * and the watermarks for packet buffer 0 should always be configured. 
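+ * (in this implementation the check below only rejects a zero
+ * hw->fc.pause_time with RNP_ERR_INVALID_LINK_SETTINGS)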
+ */ + if (!hw->fc.pause_time) { + ret_val = RNP_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Disable any previous flow control settings */ + rxctl_reg = mac_rd32(mac, RNP10_MAC_RX_FLOW_CTRL); + rxctl_reg &= (~RNP10_RX_FLOW_ENABLE_MASK); + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + txctl_reg[i] = mac_rd32(mac, RNP10_MAC_Q0_TX_FLOW_CTRL(i)); + txctl_reg[i] &= (~RNP10_TX_FLOW_ENABLE_MASK); + } + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case rnp_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case rnp_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + rxctl_reg |= (RNP10_RX_FLOW_ENABLE_MASK); + break; + case rnp_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) + txctl_reg[i] |= (RNP10_TX_FLOW_ENABLE_MASK); + break; + case rnp_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + rxctl_reg |= (RNP10_RX_FLOW_ENABLE_MASK); + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) + txctl_reg[i] |= (RNP10_TX_FLOW_ENABLE_MASK); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = RNP_ERR_CONFIG; + goto out; + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time; + for (i = 0; i < (RNP_MAX_TRAFFIC_CLASS); i++) + txctl_reg[i] |= (reg << 16); + + /* Set 802.3x based flow control settings. 
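+ * The pause quanta from hw->fc.pause_time were OR'ed into bits 31:16
+ * of each per-TC RNP10_MAC_Q0_TX_FLOW_CTRL value above; the writes
+ * below commit the enable bits and the pause time to hardware.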
*/ + mac_wr32(mac, RNP10_MAC_RX_FLOW_CTRL, rxctl_reg); + for (i = 0; i < (RNP_MAX_TRAFFIC_CLASS); i++) + mac_wr32(mac, RNP10_MAC_Q0_TX_FLOW_CTRL(i), txctl_reg[i]); +out: + return ret_val; +} + +static void rnp_mac_set_mac_n10(struct rnp_mac_info *mac, u8 *addr, int index) +{ + u32 rar_low, rar_high = 0; + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + + rar_high = RNP_RAH_AV | ((u32)addr[4] | (u32)addr[5] << 8); + + mac_wr32(mac, RNP10_MAC_UNICAST_HIGH(index), rar_high); + mac_wr32(mac, RNP10_MAC_UNICAST_LOW(index), rar_low); +} + +static struct rnp_mac_operations mac_ops_n10 = { + .set_mac_rx = &rnp_mac_set_rx_n10, + .set_mac_fcs = &rnp_mac_fcs_n10, + .set_fc_mode = &rnp_mac_fc_mode_n10, + .set_mac = &rnp_mac_set_mac_n10, +}; + +static s32 rnp_get_invariants_n10(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mbx_info *mbx = &hw->mbx; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + + /* setup dma info */ + dma->dma_base_addr = hw->hw_addr; + dma->dma_ring_addr = hw->hw_addr + RNP10_RING_BASE; + dma->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + dma->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + dma->back = hw; + memcpy(&hw->dma.ops, &dma_ops_n10, sizeof(hw->dma.ops)); + + /* setup eth info */ + memcpy(&hw->eth.ops, ð_ops_n10, sizeof(hw->eth.ops)); + + eth->eth_base_addr = hw->hw_addr + RNP10_ETH_BASE; + printk(" eth_base is %p\n", eth->eth_base_addr); + eth->back = hw; + eth->mc_filter_type = 0; + eth->mcft_size = RNP_N10_MC_TBL_SIZE; + eth->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + eth->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + eth->num_rar_entries = RNP_N10_RAR_ENTRIES; + + eth->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + eth->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + + /* setup mac info */ + memcpy(&hw->mac.ops, &mac_ops_n10, sizeof(hw->mac.ops)); + mac->mac_addr = hw->hw_addr + RNP10_MAC_BASE; + mac->back = hw; + mac->mac_type = mac_dwc_xlg; + /* move this to eth todo */ + mac->mc_filter_type = 0; + mac->mcft_size = RNP_N10_MC_TBL_SIZE; + mac->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + mac->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + mac->num_rar_entries = RNP_N10_RAR_ENTRIES; + mac->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + mac->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + mac->max_msix_vectors = RNP_N10_MSIX_VECTORS; + if (!hw->axi_mhz) + hw->usecstocount = 500; + else + hw->usecstocount = hw->axi_mhz; + + /* set up hw feature */ + hw->feature_flags |= + RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM | + RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO | + RNP_NET_FEATURE_TX_UDP_TUNNEL | RNP_NET_FEATURE_VLAN_FILTER | + RNP_NET_FEATURE_VLAN_OFFLOAD | + RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_TCAM | + RNP_NET_FEATURE_RX_HASH | RNP_NET_FEATURE_RX_FCS; + /* maybe supported future*/ + /* setup some fdir resource */ + hw->min_length = RNP_MIN_MTU; + hw->max_length = RNP_MAX_JUMBO_FRAME_SIZE; + hw->max_msix_vectors = RNP_N10_MSIX_VECTORS; + if (hw->eco) + hw->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + hw->num_rar_entries = RNP_N10_RAR_ENTRIES; + hw->fdir_mode = fdir_mode_tuple5; + hw->max_vfs = RNP_N10_MAX_VF; + hw->max_vfs_noari = 3; + hw->sriov_ring_limit = 2; + /* some user only want 1 queue for each vf */ + hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N10; + hw->wol_supported = WAKE_MAGIC; + /* ncsi */ + hw->ncsi_vf_cpu_shm_pf_base = RNP_VF_CPU_SHM_BASE_NR62; + 
hw->ncsi_mc_count = RNP_NCSI_MC_COUNT; + hw->ncsi_vlan_count = RNP_NCSI_VLAN_COUNT; + /* we suppose 1536 */ + hw->dma_split_size = 1536; + if (hw->fdir_mode == fdir_mode_tcam) { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TCAM_FILTERS - 1; + } else { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TUPLE5_FILTERS - 1; + } + + hw->default_rx_queue = 0; + hw->rss_indir_tbl_num = RNP_N10_RSS_TBL_NUM; + hw->rss_tc_tbl_num = RNP_N10_RSS_TC_TBL_NUM; + /* vf use the last vfnum */ + hw->vfnum = RNP_N10_MAX_VF - 1; + hw->feature_flags |= RNP_NET_FEATURE_VF_FIXED; + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->veb_ring = 0; + else + hw->veb_ring = RNP_N10_MAX_RX_QUEUES; + + memcpy(&hw->ops, &hw_ops_n10, sizeof(hw->ops)); + /* PHY */ + /* setup pcs */ + memcpy(&hw->pcs.ops, &pcs_ops_generic, sizeof(hw->pcs.ops)); + mbx->mbx_feature |= MBX_FEATURE_WRITE_DELAY; + mbx->vf2pf_mbox_vec_base = 0xa5100; + mbx->cpu2pf_mbox_vec = 0xa5300; + mbx->pf_vf_shm_base = 0xa6000; + mbx->mbx_mem_size = 64; + mbx->pf2vf_mbox_ctrl_base = 0xa7100; + mbx->pf_vf_mbox_mask_lo = 0xa7200; + mbx->pf_vf_mbox_mask_hi = 0xa7300; + mbx->cpu_pf_shm_base = 0xaa000; + mbx->pf2cpu_mbox_ctrl = 0xaa100; + mbx->cpu_pf_mbox_mask = 0xaa300; + adapter->drop_time = 100; + hw->fc.requested_mode = PAUSE_TX | PAUSE_RX; + hw->fc.pause_time = RNP_DEFAULT_FCPAUSE; + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + hw->fc.high_water[i] = RNP10_DEFAULT_HIGH_WATER; + hw->fc.low_water[i] = RNP10_DEFAULT_LOW_WATER; + } +#ifdef FIX_MAC_PADDIN + adapter->priv_flags |= RNP_PRIV_FLAG_TX_PADDING; + +#endif + return 0; +} + +struct rnp_info rnp_n10_info = { + .one_pf_with_two_dma = false, + .total_queue_pair_cnts = RNP_N10_MAX_TX_QUEUES, + .adapter_cnt = 1, + .rss_type = rnp_rss_n10, + .hw_type = rnp_hw_n10, + .get_invariants = &rnp_get_invariants_n10, + .mac_ops = &mac_ops_n10, + .eeprom_ops = NULL, + .mbx_ops = &mbx_ops_generic, + .pcs_ops = &pcs_ops_generic, +}; + +static s32 rnp_get_invariants_n400(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mbx_info *mbx = &hw->mbx; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + /* setup dma info */ + dma->dma_base_addr = hw->hw_addr; + dma->dma_ring_addr = hw->hw_addr + RNP10_RING_BASE; + dma->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + dma->max_rx_queues = RNP_N400_MAX_RX_QUEUES; + dma->back = hw; + memcpy(&hw->dma.ops, &dma_ops_n10, sizeof(hw->dma.ops)); + + /* setup eth info */ + memcpy(&hw->eth.ops, ð_ops_n10, sizeof(hw->eth.ops)); + eth->eth_base_addr = hw->hw_addr + RNP10_ETH_BASE; + eth->back = hw; + eth->mc_filter_type = 0; + eth->mcft_size = RNP_N10_MC_TBL_SIZE; + eth->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + eth->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + eth->num_rar_entries = RNP_N10_RAR_ENTRIES; + eth->max_rx_queues = RNP_N400_MAX_RX_QUEUES; + eth->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + + /* setup mac info */ + memcpy(&hw->mac.ops, &mac_ops_n10, sizeof(hw->mac.ops)); + mac->mac_addr = hw->hw_addr + RNP10_MAC_BASE; + mac->back = hw; + mac->mac_type = mac_dwc_xlg; + /* move this to eth todo */ + mac->mc_filter_type = 0; + mac->mcft_size = RNP_N10_MC_TBL_SIZE; + mac->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + mac->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + mac->num_rar_entries = RNP_N10_RAR_ENTRIES; + mac->max_rx_queues = RNP_N400_MAX_RX_QUEUES; + 
mac->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + mac->max_msix_vectors = RNP_N400_MSIX_VECTORS; + if (!hw->axi_mhz) + hw->usecstocount = 125; + else + hw->usecstocount = hw->axi_mhz; + /* set up hw feature */ + hw->feature_flags |= + RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM | + RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO | + RNP_NET_FEATURE_TX_UDP_TUNNEL | RNP_NET_FEATURE_VLAN_FILTER | + RNP_NET_FEATURE_VLAN_OFFLOAD | + RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_TCAM | + RNP_NET_FEATURE_RX_HASH | RNP_NET_FEATURE_RX_FCS; + /* setup some fdir resource */ + hw->min_length = RNP_MIN_MTU; + hw->max_length = RNP_MAX_JUMBO_FRAME_SIZE; + hw->max_msix_vectors = RNP_N400_MSIX_VECTORS; + if (hw->eco) + hw->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + hw->num_rar_entries = RNP_N10_RAR_ENTRIES; + hw->fdir_mode = fdir_mode_tuple5; + hw->max_vfs = RNP_N400_MAX_VF; + hw->max_vfs_noari = 3; + /* n400 only use 1 ring for each vf */ + hw->sriov_ring_limit = 1; + hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N10; + /* ncsi */ + hw->ncsi_vf_cpu_shm_pf_base = RNP_VF_CPU_SHM_BASE_NR62; + hw->ncsi_mc_count = RNP_NCSI_MC_COUNT; + hw->ncsi_vlan_count = RNP_NCSI_VLAN_COUNT; + + if (hw->fdir_mode == fdir_mode_tcam) { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TCAM_FILTERS - 1; + } else { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TUPLE5_FILTERS - 1; + } + + hw->default_rx_queue = 0; + hw->rss_indir_tbl_num = RNP_N10_RSS_TBL_NUM; + hw->rss_tc_tbl_num = RNP_N10_RSS_TC_TBL_NUM; + /* vf use the last vfnum */ + hw->vfnum = RNP_N400_MAX_VF - 1; + + /* n400 should fix_vf_bug */ + hw->feature_flags |= RNP_NET_FEATURE_VF_FIXED; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->veb_ring = 0; + hw->default_vf_num = 0; + } else { + hw->veb_ring = RNP_N400_MAX_RX_QUEUES; + hw->default_vf_num = RNP_N10_MAX_VF - 1; + } + + memcpy(&hw->ops, &hw_ops_n10, sizeof(hw->ops)); + /* setup pcs */ + memcpy(&hw->pcs.ops, &pcs_ops_generic, sizeof(hw->pcs.ops)); + mbx->mbx_feature |= MBX_FEATURE_WRITE_DELAY; + mbx->vf2pf_mbox_vec_base = 0xa5100; + mbx->cpu2pf_mbox_vec = 0xa5300; + mbx->pf_vf_shm_base = 0xa6000; + mbx->mbx_mem_size = 64; + mbx->pf2vf_mbox_ctrl_base = 0xa7100; + mbx->pf_vf_mbox_mask_lo = 0xa7200; + mbx->pf_vf_mbox_mask_hi = 0xa7300; + mbx->cpu_pf_shm_base = 0xaa000; + mbx->pf2cpu_mbox_ctrl = 0xaa100; + mbx->cpu_pf_mbox_mask = 0xaa300; + + adapter->drop_time = 100; + /* initialization default pause flow */ + hw->fc.requested_mode |= PAUSE_AUTO; + hw->fc.pause_time = RNP_DEFAULT_FCPAUSE; + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + hw->fc.high_water[i] = RNP10_DEFAULT_HIGH_WATER; + hw->fc.low_water[i] = RNP10_DEFAULT_LOW_WATER; + } + + hw->autoneg = 1; + hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO; + + return 0; +} + +struct rnp_info rnp_n400_info = { + .one_pf_with_two_dma = false, + .total_queue_pair_cnts = RNP_N400_MAX_TX_QUEUES, + .adapter_cnt = 1, + .rss_type = rnp_rss_n10, + .hw_type = rnp_hw_n400, + .get_invariants = &rnp_get_invariants_n400, + .mac_ops = &mac_ops_n10, + .eeprom_ops = NULL, + .mbx_ops = &mbx_ops_generic, + .pcs_ops = &pcs_ops_generic, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_param.c b/drivers/net/ethernet/mucse/rnp/rnp_param.c new file mode 100644 index 000000000000..550974280cb7 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_param.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include + +#include "rnp.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define RNP_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define RNP_PARAM_INIT \ + { \ + [0 ... RNP_MAX_NIC] = OPTION_UNSET \ + } + +#define RNP_PARAM(X, desc) \ + static int X[RNP_MAX_NIC + 1] = RNP_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +RNP_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define RNP_INT_LEGACY 0 +#define RNP_INT_MSI 1 +#define RNP_INT_MSIX 2 + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 for n10 + * Valid Range: 0-7 for n400/n10 + * - 0 Disables SR-IOV + * - 1-x - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +RNP_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* SRIOV_Mode (SRIOV Mode) + * + * Valid Range: 0-1 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 0 + */ +RNP_PARAM(SRIOV_Mode, "Change SRIOV Mode (0=MAC_MODE, 1=VLAN_MODE), " + "default 0"); +#define RNP_SRIOV_MAC_MODE 0 +#define RNP_SRIOV_VLAN_MODE 1 +#endif + +/* pf_msix_counts_set - Limit max msix counts + * + * Valid Range: 2-63 for n10 + * Valid Range: 2-7 for n400/n10 + * + * Default Value: 0 (un-limit) + */ +RNP_PARAM(pf_msix_counts_set, "Number of Max MSIX Count: (default un-limit)"); +#define RNP_INT_MIN 2 + +struct rnp_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct rnp_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int rnp_validate_option(struct net_device *netdev, unsigned int *value, + struct rnp_option *opt) +{ + if (*value == OPTION_UNSET) { + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || + *value == opt->def) { + if (opt->msg) + netdev_info(netdev, "%s set to %d, %s\n", + opt->name, *value, opt->msg); + else + netdev_info(netdev, "%s set to %d\n", opt->name, + *value); + return 0; + } + break; + case list_option: { + int i; + + for (i = 0; i < opt->arg.l.nr; i++) { + const struct rnp_opt_list *ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(netdev, "%s\n", 
ent->str); + return 0; + } + } + } break; + default: + BUG(); + } + + netdev_info(netdev, "Invalid %s specified (%d), %s\n", opt->name, + *value, opt->err); + *value = opt->def; + return -1; +} + +#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) +#define PSTR_LEN 10 + +/** + * rnp_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void rnp_check_options(struct rnp_adapter *adapter) +{ + int bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + + if (bd >= RNP_MAX_NIC) { + netdev_notice(adapter->netdev, + "Warning: no configuration for board #%d\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); + } + + /* try to setup new irq mode */ + { /* Interrupt Mode */ + unsigned int int_mode; + static struct rnp_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = "using default of " __MODULE_STRING( + RNP_INT_MSIX), + .def = RNP_INT_MSIX, + .arg = { .r = { .min = RNP_INT_LEGACY, + .max = RNP_INT_MSIX } } + }; + + if (num_IntMode > bd) { + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = RNP_INT_MSIX; + rnp_validate_option(adapter->netdev, &int_mode, &opt); + switch (int_mode) { + case RNP_INT_MSIX: + if (!(*aflags & RNP_FLAG_MSIX_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI-X setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_msix; + break; + case RNP_INT_MSI: + if (!(*aflags & RNP_FLAG_MSI_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_msi; + + break; + case RNP_INT_LEGACY: + if (!(*aflags & RNP_FLAG_LEGACY_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_legency; + + break; + } + } else { + /* default settings */ + /* msix -> msi -> Legacy */ + if (*aflags & RNP_FLAG_MSIX_CAPABLE) + adapter->irq_mode = irq_mode_msix; + else if (*aflags & RNP_FLAG_MSI_CAPABLE) + adapter->irq_mode = irq_mode_msi; + else + adapter->irq_mode = irq_mode_legency; + } + } + +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + struct rnp_hw *hw = &adapter->hw; + static struct rnp_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_DISABLED } } + }; + + opt.arg.r.max = hw->max_vfs; + if (num_max_vfs > bd) { + unsigned int vfs = max_vfs[bd]; + + if (rnp_validate_option(adapter->netdev, &vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->num_vfs = vfs; + + if (vfs) + *aflags |= RNP_FLAG_SRIOV_ENABLED; + else + *aflags &= ~RNP_FLAG_SRIOV_ENABLED; + } else { + if (opt.def == OPTION_DISABLED) { + adapter->num_vfs = 0; + *aflags &= ~RNP_FLAG_SRIOV_ENABLED; + } else { + adapter->num_vfs = opt.def; + *aflags |= RNP_FLAG_SRIOV_ENABLED; + } + } + } + + { /* Sriov Mode */ + unsigned int sriov_mode; + static struct rnp_option opt = { + .type = range_option, + .name = "SRIOV Mode", + .err = "using default of " __MODULE_STRING( + RNP_SRIOV_MAC_MODE), + .def = RNP_SRIOV_MAC_MODE, + .arg = { .r = { .min = 
RNP_SRIOV_MAC_MODE, + .max = RNP_SRIOV_VLAN_MODE } } + }; + + if (num_SRIOV_Mode > bd) { + sriov_mode = SRIOV_Mode[bd]; + if (sriov_mode == OPTION_UNSET) + sriov_mode = RNP_SRIOV_MAC_MODE; + rnp_validate_option(adapter->netdev, &sriov_mode, &opt); + + if (sriov_mode == RNP_SRIOV_VLAN_MODE) + adapter->priv_flags |= + RNP_PRIV_FLAG_SRIOV_VLAN_MODE; + + } else { + /* default settings */ + adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE); + } + } +#endif /* CONFIG_PCI_IOV */ + + { /* max msix count setup */ + int pf_msix_counts; + struct rnp_hw *hw = &adapter->hw; + static struct rnp_option opt = { + .type = range_option, + .name = "Limit Msix Count", + .err = "using default of Un-limit", + .def = OPTION_DISABLED, + .arg = { .r = { .min = RNP_INT_MIN, + .max = RNP_INT_MIN } } + }; + + opt.arg.r.max = hw->max_msix_vectors; + if (num_pf_msix_counts_set > bd) { + pf_msix_counts = pf_msix_counts_set[bd]; + if (pf_msix_counts == OPTION_DISABLED) + pf_msix_counts = 0; + rnp_validate_option(adapter->netdev, &pf_msix_counts, + &opt); + + if (pf_msix_counts) { + if (hw->ops.update_msix_count) + hw->ops.update_msix_count( + hw, pf_msix_counts); + } + } + } +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_pcs.c b/drivers/net/ethernet/mucse/rnp/rnp_pcs.c new file mode 100644 index 000000000000..e84879c43722 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_pcs.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include "rnp_pcs.h" +#include "rnp_regs.h" +#include "rnp_common.h" + +static u32 rnp_read_pcs(struct rnp_hw *hw, int num, u32 addr) +{ + u32 reg_hi, reg_lo; + u32 value; + + reg_hi = addr >> 8; + reg_lo = (addr & 0xff) << 2; + wr32(hw, RNP_PCS_BASE(num) + (0xff << 2), reg_hi); + value = rd32(hw, RNP_PCS_BASE(num) + reg_lo); + return value; +} + +static void rnp_write_pcs(struct rnp_hw *hw, int num, u32 addr, u32 value) +{ + u32 reg_hi, reg_lo; + + reg_hi = addr >> 8; + reg_lo = (addr & 0xff) << 2; + wr32(hw, RNP_PCS_BASE(num) + (0xff << 2), reg_hi); + wr32(hw, RNP_PCS_BASE(num) + reg_lo, value); +} + +struct rnp_pcs_operations pcs_ops_generic = { + .read = rnp_read_pcs, + .write = rnp_write_pcs, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_pcs.h b/drivers/net/ethernet/mucse/rnp/rnp_pcs.h new file mode 100644 index 000000000000..d79d947cc31d --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_pcs.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_PCS_H_ +#define _RNP_PCS_H_ + +extern struct rnp_pcs_operations pcs_ops_generic; + +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_phy.h b/drivers/net/ethernet/mucse/rnp/rnp_phy.h new file mode 100644 index 000000000000..6c0df9e098f2 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_phy.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef _RNP_PHY_H_ +#define _RNP_PHY_H_ + +#include "rnp_type.h" +#define RNP_I2C_EEPROM_DEV_ADDR 0xA0 +#define RNP_I2C_EEPROM_DEV_ADDR2 0xA2 + +#define RNP_YT8531_PHY_SPEC_CTRL 0x10 +#define RNP_YT8531_PHY_SPEC_CTRL_FORCE_MDIX 0x0020 +#define RNP_YT8531_PHY_SPEC_CTRL_AUTO_MDI_MDIX 0x0060 +#define RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK 0x0060 + +/* EEPROM byte offsets */ +#define SFF_MODULE_ID_OFFSET 0x00 +#define SFF_DIAG_SUPPORT_OFFSET 0x5c +#define SFF_MODULE_ID_SFP 0x3 +#define SFF_MODULE_ID_QSFP 0xc +#define SFF_MODULE_ID_QSFP_PLUS 0xd +#define SFF_MODULE_ID_QSFP28 0x11 + +/* Bitmasks */ +#define RNP_SFF_DA_PASSIVE_CABLE 0x4 +#define RNP_SFF_DA_ACTIVE_CABLE 0x8 +#define RNP_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define RNP_SFF_1GBASESX_CAPABLE 0x1 +#define RNP_SFF_1GBASELX_CAPABLE 0x2 +#define RNP_SFF_1GBASET_CAPABLE 0x8 +#define RNP_SFF_10GBASESR_CAPABLE 0x10 +#define RNP_SFF_10GBASELR_CAPABLE 0x20 +#define RNP_SFF_ADDRESSING_MODE 0x4 +#define RNP_I2C_EEPROM_READ_MASK 0x100 +#define RNP_I2C_EEPROM_STATUS_MASK 0x3 +#define RNP_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define RNP_I2C_EEPROM_STATUS_PASS 0x1 +#define RNP_I2C_EEPROM_STATUS_FAIL 0x2 +#define RNP_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +/* Flow control defines */ +#define RNP_TAF_SYM_PAUSE 0x400 +#define RNP_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros */ +#define RNP_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define RNP_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define RNP_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define RNP_SFF_VENDOR_OUI_TYCO 0x00407600 +#define RNP_SFF_VENDOR_OUI_FTL 0x00906500 +#define RNP_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define RNP_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define RNP_I2C_T_HD_STA 4 +#define RNP_I2C_T_LOW 5 +#define RNP_I2C_T_HIGH 4 +#define RNP_I2C_T_SU_STA 5 +#define RNP_I2C_T_HD_DATA 5 +#define RNP_I2C_T_SU_DATA 1 +#define RNP_I2C_T_RISE 1 +#define RNP_I2C_T_FALL 1 +#define RNP_I2C_T_SU_STO 4 +#define RNP_I2C_T_BUF 5 + +#define RNP_TN_LASI_STATUS_REG 0x9005 +#define RNP_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance code */ +#define RNP_SFF_SFF_8472_UNSUP 0x00 +#endif /* _RNP_PHY_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ptp.c b/drivers/net/ethernet/mucse/rnp/rnp_ptp.c new file mode 100644 index 000000000000..d3b1130605a2 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ptp.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_regs.h" +#include "rnp_ptp.h" + +/* PTP and HW Timer ops */ +static void config_hw_tstamping(void __iomem *ioaddr, u32 data) +{ + writel(data, ioaddr + PTP_TCR); +} + +static void config_sub_second_increment(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc) +{ + u32 value = readl(ioaddr + PTP_TCR); + unsigned long data; + u32 reg_value; + + /* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second + * increment to twice the number of nanoseconds of a clock cycle. + * The calculation of the default_addend value by the caller will set it + * to mid-range = 2^31 when the remainder of this division is zero, + * which will make the accumulator overflow once every 2 ptp_clock + * cycles, adding twice the number of nanoseconds of a clock cycle : + * 2000000000ULL / ptp_clock. 
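+ * In coarse-update mode (TSCFUPDT clear) the increment is simply
+ * 10^9 / ptp_clock ns, and when RNP_PTP_TCR_TSCTRLSSR is not set the
+ * sub-second counter ticks in ~0.465 ns units, hence the
+ * (data * 1000) / 465 rescale below.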
+ */ + if (value & RNP_PTP_TCR_TSCFUPDT) + data = (2000000000ULL / ptp_clock); + else + data = (1000000000ULL / ptp_clock); + + /* 0.465ns accuracy */ + if (!(value & RNP_PTP_TCR_TSCTRLSSR)) + data = (data * 1000) / 465; + + data &= RNP_PTP_SSIR_SSINC_MASK; + + reg_value = data; + if (gmac4) + reg_value <<= RNP_PTP_SSIR_SSINC_SHIFT; + + writel(reg_value, ioaddr + PTP_SSIR); + + if (ssinc) + *ssinc = data; +} + +static int config_addend(void __iomem *ioaddr, u32 addend) +{ + u32 value; + int limit; + + writel(addend, ioaddr + PTP_TAR); + /* issue command to update the addend value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSADDREG; + writel(value, ioaddr + PTP_TCR); + + /* wait for present addend update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSADDREG)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +{ + int limit; + u32 value; + + writel(sec, ioaddr + PTP_STSUR); + writel(nsec, ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSINIT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time initialize to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSINIT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static void get_systime(void __iomem *ioaddr, u64 *systime) +{ + u64 ns; + + /* Get the TSSS value */ + ns = readl(ioaddr + PTP_STNSR); + /* Get the TSS and convert sec time value to nanosecond */ + ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; + + if (systime) + *systime = ns; +} + +static void config_mac_interrupt_enable(void __iomem *ioaddr, bool on) +{ + rnp_wr_reg(ioaddr + RNP_MAC_INTERRUPT_ENABLE, on); +} + +static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub, + int gmac4) +{ + u32 value; + int limit; + + if (add_sub) { + /* If the new sec value needs to be subtracted with + * the system time, then MAC_STSUR reg should be + * programmed with (2^32 – ) + */ + if (gmac4) + sec = -sec; + + value = readl(ioaddr + PTP_TCR); + if (value & RNP_PTP_TCR_TSCTRLSSR) + nsec = (RNP_PTP_DIGITAL_ROLLOVER_MODE - nsec); + else + nsec = (RNP_PTP_BINARY_ROLLOVER_MODE - nsec); + } + + writel(sec, ioaddr + PTP_STSUR); + value = (add_sub << RNP_PTP_STNSUR_ADDSUB_SHIFT) | nsec; + writel(value, ioaddr + PTP_STNSUR); + + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSUPDT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time adjust/update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSUPDT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +const struct rnp_hwtimestamp mac_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .config_mac_irq_enable = config_mac_interrupt_enable, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, +}; + +static int rnp_ptp_adjfreq(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u32 addend; + + if (pf == NULL) { + printk(KERN_DEBUG "adapter_of contail is null\n"); + return 0; + } + + 
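+ /* adjust_by_scaled_ppm() returns pf->default_addend scaled by
+  * (1 + scaled_ppm / (10^6 * 2^16)); the PTP core passes scaled_ppm
+  * as parts per million with a 16-bit binary fractional part.
+  */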
addend = adjust_by_scaled_ppm(pf->default_addend, scaled_ppm); + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->config_addend(pf->ptp_addr, addend); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +static int rnp_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u32 sec, nsec; + u32 quotient, reminder; + int neg_adj = 0; + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + if (delta == 0) + return 0; + + quotient = div_u64_rem(delta, 1000000000ULL, &reminder); + sec = quotient; + nsec = reminder; + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->adjust_systime(pf->ptp_addr, sec, nsec, neg_adj, + pf->gmac4); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +static int rnp_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u64 ns = 0; + + spin_lock_irqsave(&pf->ptp_lock, flags); + + pf->hwts_ops->get_systime(pf->ptp_addr, &ns); + + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int rnp_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->init_systime(pf->ptp_addr, ts->tv_sec, ts->tv_nsec); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +static int rnp_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + /*TODO add support for enable the option 1588 feature PPS Auxiliary */ + return -EOPNOTSUPP; +} + +int rnp_ptp_get_ts_config(struct rnp_adapter *pf, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &pf->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 
-EFAULT : + 0; +} + +static int rnp_ptp_setup_ptp(struct rnp_adapter *pf, u32 value) +{ + u32 sec_inc = 0; + u64 temp = 0; + struct timespec64 now; + + /* For now just use extrnal clock(the kernel-system clock)*/ + /* 1.Mask the Timestamp Trigger interrupt */ + /* 2.enable time stamping */ + /* 2.1 clear all bytes about time ctrl reg*/ + + pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, value); + /* 3.Program the PTPclock frequency */ + /* program Sub Second Increment reg + * we use kernel-system clock + */ + pf->hwts_ops->config_sub_second_increment( + pf->ptp_addr, pf->clk_ptp_rate, pf->gmac4, &sec_inc); + /* 4.If use fine correction approash then, + * Program MAC_Timestamp_Addend register + */ + if (sec_inc == 0) { + printk(KERN_DEBUG "%s:%d the sec_inc is zero this is a bug\n", + __func__, __LINE__); + return -EFAULT; + } + temp = div_u64(1000000000ULL, sec_inc); + /* Store sub second increment and flags for later use */ + pf->sub_second_inc = sec_inc; + pf->systime_flags = value; + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + + if (pf->clk_ptp_rate == 0) { + pf->clk_ptp_rate = 1000; + printk(KERN_DEBUG "%s:%d clk_ptp_rate is zero\n", __func__, + __LINE__); + } + + pf->default_addend = div_u64(temp, pf->clk_ptp_rate); + + pf->hwts_ops->config_addend(pf->ptp_addr, pf->default_addend); + /* 5.Poll wait for the TCR Update Addend Register*/ + /* 6.enabled Fine Update method */ + /* 7.program the second and nanosecond register*/ + /*TODO If we need to enable one-step timestamp */ + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + pf->hwts_ops->init_systime(pf->ptp_addr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} + +int rnp_ptp_set_ts_config(struct rnp_adapter *pf, struct ifreq *ifr) +{ + struct hwtstamp_config config; + u32 ptp_v2 = 0; + u32 tstamp_all = 0; + u32 ptp_over_ipv4_udp = 0; + u32 ptp_over_ipv6_udp = 0; + u32 ptp_over_ethernet = 0; + u32 snap_type_sel = 0; + u32 ts_master_en = 0; + u32 ts_event_en = 0; + u32 value = 0; + s32 ret = -1; + + if (!(pf->flags2 & RNP_FLAG2_PTP_ENABLED)) { + pci_alert(pf->pdev, "No support for HW time stamping\n"); + pf->ptp_tx_en = 0; + pf->ptp_tx_en = 0; + + return -EOPNOTSUPP; + } + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + netdev_info(pf->netdev, + "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* time stamp no incoming packet at all */ + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* 'mac' hardware can support Sync, Pdelay_Req and + * Pdelay_resp by setting bit14 and bits17/16 to 01 + * This leaves Delay_Req timestamps out. 
+ * Enable all events *and* general purpose message + * timestamping + */ + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + /* PTP v1, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + /* PTP v1, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + /* PTP v2, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + + /* take time stamp for all event messages */ + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* PTP v2, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* PTP v2, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + /* PTP v2/802.AS1 any layer, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* PTP v2/802.AS1, any layer, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* PTP v2/802.AS1, any layer, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + /* time stamp any incoming packet */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_all = RNP_PTP_TCR_TSENALL; + break; + + default: + return -ERANGE; + } + + pf->ptp_rx_en = ((config.rx_filter 
== HWTSTAMP_FILTER_NONE) ? 0 : 1); + pf->ptp_tx_en = config.tx_type == HWTSTAMP_TX_ON; + + netdev_info( + pf->netdev, + "ptp config rx filter 0x%.2x tx_type 0x%.2x rx_en[%d] tx_en[%d]\n", + config.rx_filter, config.tx_type, pf->ptp_rx_en, pf->ptp_tx_en); + if (!pf->ptp_rx_en && !pf->ptp_tx_en) + /*rx and tx is not use hardware ts so clear the ptp register */ + pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, 0); + else { + value = (RNP_PTP_TCR_TSENA | RNP_PTP_TCR_TSCFUPDT | + RNP_PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 | + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_master_en | snap_type_sel); + + ret = rnp_ptp_setup_ptp(pf, value); + if (ret < 0) + return ret; + } + pf->ptp_config_value = value; + memcpy(&pf->tstamp_config, &config, sizeof(config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : + 0; +} + +/* structure describing a PTP hardware clock */ +static struct ptp_clock_info rnp_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "rnp ptp", + .max_adj = 50000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + /* will be overwritten in stmmac_ptp_register */ + .n_pins = 0, + /* should be 0 if not set */ + .adjfine = rnp_ptp_adjfreq, + .adjtime = rnp_ptp_adjtime, + .gettime64 = rnp_ptp_gettime, + .settime64 = rnp_ptp_settime, + .enable = rnp_ptp_feature_enable, +}; + +int rnp_ptp_register(struct rnp_adapter *pf) +{ + pf->hwts_ops = &mac_ptp; + + pf->ptp_tx_en = 0; + pf->ptp_rx_en = 0; + + spin_lock_init(&pf->ptp_lock); + pf->flags2 |= RNP_FLAG2_PTP_ENABLED; + pf->ptp_clock_ops = rnp_ptp_clock_ops; + + /* default mac clock rate is 50Mhz */ + pf->clk_ptp_rate = 50000000; + if (pf->pdev == NULL) + printk(KERN_DEBUG "pdev dev is null\n"); + + pf->ptp_clock = ptp_clock_register(&pf->ptp_clock_ops, &pf->pdev->dev); + if (pf->ptp_clock == NULL) + pci_err(pf->pdev, "ptp clock register failed\n"); + + if (IS_ERR(pf->ptp_clock)) { + pci_err(pf->pdev, "ptp_clock_register failed\n"); + pf->ptp_clock = NULL; + } else { + pci_info(pf->pdev, "registered PTP clock\n"); + } + + return 0; +} + +void rnp_ptp_unregister(struct rnp_adapter *pf) +{ + /*1. stop the ptp module*/ + if (pf->ptp_clock) { + ptp_clock_unregister(pf->ptp_clock); + pf->ptp_clock = NULL; + pr_debug("Removed PTP HW clock successfully on %s\n", + "rnp_ptp"); + } +} + +void rnp_tx_hwtstamp_work(struct work_struct *work) +{ + struct rnp_adapter *adapter = + container_of(work, struct rnp_adapter, tx_hwtstamp_work); + void __iomem *ioaddr = adapter->hw.hw_addr; + + /* 1. read port belone timestatmp status reg */ + /* 2. status enabled read nsec and sec reg*/ + /* 3. 
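+	 *    clear the tx-hwts status (RNP_ETH_PTP_TX_CLEAR) and report
+	 *    the timestamp to the stack via skb_tstamp_tx()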
*/ + u64 nanosec = 0, sec = 0; + + if (!adapter->ptp_tx_skb) { + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + return; + } + + if (rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_TSVALUE_STATUS(0)) & 0x01) { + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + u64 txstmp = 0; + /* read and add nsec, sec turn to nsec*/ + + nanosec = rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_LTIMES(0)); + sec = rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_HTIMES(0)); + /* when we read the timestamp finish need to notice the hardware + * that the timestamp need to update via set tx_hwts_clear-reg + * from high to low + */ + rnp_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0), + PTP_GET_TX_HWTS_FINISH); + rnp_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0), + PTP_GET_TX_HWTS_UPDATE); + + txstmp = nanosec & PTP_HWTX_TIME_VALUE_MASK; + txstmp += (sec & PTP_HWTX_TIME_VALUE_MASK) * 1000000000ULL; + + /* Clear the global tx_hwtstamp_skb pointer and force writes + * prior to notifying the stack of a Tx timestamp. + */ + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(txstmp); + adapter->ptp_tx_skb = NULL; + /* force write prior to skb_tstamp_tx + * because the xmit will re used the point to store ptp skb + */ + wmb(); + + skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + } else if (time_after(jiffies, + adapter->tx_hwtstamp_start + + adapter->tx_timeout_factor * HZ)) { + /* this function will mark the skb drop*/ + if (adapter->ptp_tx_skb) + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + netdev_warn(adapter->netdev, "clearing Tx timestamp hang\n"); + } else { + /* reschedule to check later */ + schedule_work(&adapter->tx_hwtstamp_work); + } +} + +void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *adapter, + union rnp_rx_desc *desc, struct sk_buff *skb) +{ + u64 ns = 0; + u64 tsvalueh = 0, tsvaluel = 0; + struct skb_shared_hwtstamps *hwtstamps = NULL; + + if (!skb || !adapter->ptp_rx_en) { + netdev_dbg(adapter->netdev, + "hwstamp skb is null or " + "rx_en iszero %u\n", + adapter->ptp_rx_en); + return; + } + + if (likely(!((desc->wb.cmd) & RNP_RXD_STAT_PTP))) + return; + hwtstamps = skb_hwtstamps(skb); + /* because of rx hwstamp store before the mac head + * skb->head and skb->data is point to same location when call alloc_skb + * so we must move 16 bytes the skb->data to the mac head location + * but for the head point if we need move the skb->head need to be diss + */ + /* low8bytes is null high8bytes is timestamp + * high32bit is seconds low32bits is nanoseconds + */ + skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE, &tsvalueh, + RNP_RX_SEC_SIZE); + skb_copy_from_linear_data_offset(skb, + RNP_RX_TIME_RESERVE + RNP_RX_SEC_SIZE, + &tsvaluel, RNP_RX_NANOSEC_SIZE); + skb_pull(skb, RNP_RX_HWTS_OFFSET); + tsvalueh = ntohl(tsvalueh); + tsvaluel = ntohl(tsvaluel); + + ns = tsvaluel & RNP_RX_NSEC_MASK; + ns += ((tsvalueh & RNP_RX_SEC_MASK) * 1000000000ULL); + + netdev_dbg(adapter->netdev, + "ptp get hardware ts-sec %llu ts-nanosec %llu\n", tsvalueh, + tsvaluel); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +void rnp_ptp_reset(struct rnp_adapter *adapter) +{ + rnp_ptp_setup_ptp(adapter, adapter->ptp_config_value); +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ptp.h b/drivers/net/ethernet/mucse/rnp/rnp_ptp.h new file mode 100644 index 000000000000..a62e8128f0a4 --- /dev/null 
+++ b/drivers/net/ethernet/mucse/rnp/rnp_ptp.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef __RNP_PTP_H__ +#define __RNP_PTP_H__ + +struct rnp_hwtimestamp { + void (*config_hw_tstamping)(void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc); + void (*config_mac_irq_enable)(void __iomem *ioaddr, bool on); + int (*init_systime)(void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend)(void __iomem *ioaddr, u32 addend); + int (*adjust_systime)(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4); + void (*get_systime)(void __iomem *ioaddr, u64 *systime); +}; +/* IEEE 1588 PTP register offsets */ +#define PTP_TCR 0x00 /* Timestamp Control Reg */ +#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ +#define PTP_STSR 0x08 /* System Time – Seconds Regr */ +#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ +#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ +#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ +#define PTP_TAR 0x18 /* Timestamp Addend Reg */ + +#define RNP_PTP_STNSUR_ADDSUB_SHIFT 31 +#define RNP_PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ +#define RNP_PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ + +/* PTP Timestamp control register defines */ +#define RNP_PTP_TCR_TSENA BIT(0) /*Timestamp Enable*/ +#define RNP_PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ +#define RNP_PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ +#define RNP_PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ +#define RNP_PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ +#define RNP_PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ +#define RNP_PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ +#define RNP_PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ +#define RNP_PTP_TCR_TSVER2ENA \ + BIT(10) /* Enable PTP packet Processing for Version 2 Format */ +#define RNP_PTP_TCR_TSIPENA \ + BIT(11) /* Enable Processing of PTP over Ethernet Frames */ +#define RNP_PTP_TCR_TSIPV6ENA \ + BIT(12) /* Enable Processing of PTP Frames Sent over IPv6-UDP */ +#define RNP_PTP_TCR_TSIPV4ENA \ + BIT(13) /* Enable Processing of PTP Frames Sent over IPv4-UDP */ +#define RNP_PTP_TCR_TSEVNTENA \ + BIT(14) /* Enable Timestamp Snapshot for Event Messages */ +#define RNP_PTP_TCR_TSMSTRENA \ + BIT(15) /* Enable Snapshot for Messages Relevant to Master */ +/* Note 802.1 AS Is work Over Ethernet FramesC_Sub_Second_Incremen + * and Normal PTP Is work Oveer UDP + */ + +/* Select PTP packets for Taking Snapshots + * On mac specifically: + * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled. 
+ * or + * Enable SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp, + * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled + */ +#define RNP_PTP_TCR_SNAPTYPSEL_1 BIT(16) +#define RNP_PTP_TCR_TSENMACADDR \ + BIT(18) /* Enable MAC address for PTP Frame Filtering */ +#define RNP_PTP_TCR_ESTI \ + BIT(20) /* External System Time Input Or MAC Internal Clock*/ +#define RNP_PTP_TCR_AV8021ASMEN BIT(28) /* AV802.1 AS Mode Enable*/ +/* Sub Second increament define */ +#define RNP_PTP_SSIR_SSINC_MASK (0xff) /* Sub-second increment value */ +#define RNP_PTP_SSIR_SSINC_SHIFT (16) /* Sub-second increment offset */ + +#define RNP_MAC_TXTSC BIT(15) /* TX timestamp reg is fill complete */ +#define RNP_MAC_TXTSSTSLO GENMASK(30, 0) /*nano second avalid value */ + +#define RNP_RX_SEC_MASK GENMASK(30, 0) +#define RNP_RX_NSEC_MASK GENMASK(30, 0) +#define RNP_RX_TIME_RESERVE (8) +#define RNP_RX_SEC_SIZE (4) +#define RNP_RX_NANOSEC_SIZE (4) +#define RNP_RX_HWTS_OFFSET \ + (RNP_RX_SEC_SIZE + RNP_RX_NANOSEC_SIZE + RNP_RX_TIME_RESERVE) + +#define PTP_HWTX_TIME_VALUE_MASK GENMASK(31, 0) +#define PTP_GET_TX_HWTS_FINISH (1) +#define PTP_GET_TX_HWTS_UPDATE (0) +/* hardware ts can't so fake ts from the software clock */ +#define DEBUG_PTP_HARD_SOFTWAY + +int rnp_ptp_get_ts_config(struct rnp_adapter *pf, struct ifreq *ifr); +int rnp_ptp_set_ts_config(struct rnp_adapter *pf, struct ifreq *ifr); +int rnp_ptp_register(struct rnp_adapter *pf); +void rnp_ptp_unregister(struct rnp_adapter *pf); + +void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *pf, union rnp_rx_desc *desc, + struct sk_buff *skb); +void rnp_tx_hwtstamp_work(struct work_struct *work); +void rnp_ptp_reset(struct rnp_adapter *adapter); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_regs.h b/drivers/net/ethernet/mucse/rnp/rnp_regs.h new file mode 100644 index 000000000000..6fa8fb1ef83a --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_regs.h @@ -0,0 +1,820 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef RNP_REGS_H +#define RNP_REGS_H + +/* BAR4 memory */ +/* ------------------------------------------*/ +/* module | size | start | end */ +/* DMA | 64KB | 0_0000H | 0_FFFFH */ +/* ETH | 64KB | 1_0000H | 1_FFFFH */ +/* REG | 64KB | 3_0000H | 3_FFFFH */ +/* SERDES | 128KB | 4_0000H | 5_FFFFH */ +/* XLMAC | 256KB | 6_0000H | 9_FFFFH */ +/* MSIX | 64KB | A_0000H | A_FFFFH */ +/* SWITCH | 64KB | B_0000H | B_FFFFH */ +/* TCAM | 256KB | C_0000H | F_FFFFH */ +/* ------------------------------------------*/ + +/* ==================== RNP-DMA Global Registers ==================== */ +/* n10 */ +#define RNP10_RING_BASE (0x8000) +/* n20 */ +#define RNP20_RING_BASE (0x8000) +#define RING_OFFSET(queue_idx) (0x100 * (queue_idx)) +#define RNP_DMA_VERSION (0x0000) +#define RNP_DMA_CONFIG (0x0004) +#define RNP_DMA_AXI_READY (0x0014) +#define DMA_MAC_LOOPBACK (1 << 0) +#define DMA_SWITCH_LOOPBACK (1 << 1) +#define DMA_VEB_BYPASS (1 << 4) +#define DMA_AXI_ORDER (1 << 5) +#define DMA_RX_PADDING (1 << 8) +#define DMA_MAP_MODE(n) (n << 12) +#define DMA_RX_FRAGMENT_BYTES(n) (((n) / 16) << 16) +#define RNP_DMA_STATUS (0x0008) +#define RNP_DMA_RX_DATA_PROG_FULL_THRESH (0x00a0) +#define DMA_RING_NUM (0xff << 24) +#define RC_CONTROL_HW (0x01) +#define RC_CONTROL_PHY_DRIVER (0x02) +#define RC_JUMP_STATUS (0x04) +#define RC_PHY_LINK_DONE (0x08) +#define RC_LINK_CHANGE (0x10) +#define RNP_DMA_DUMY (0x000c) +#define RNP_DMA_RX_START (0x10) +#define RNP_DMA_RX_READY (0x14) +#define RNP_DMA_TX_START (0x18) +#define RNP_DMA_TX_READY (0x1c) +#define RNP_DMA_INT_STAT (0x20) +#define RNP_DMA_INT_MASK (0x24) +#define TX_INT_MASK (1 << 1) +#define RX_INT_MASK (1 << 0) +#define RNP_DMA_INT_CLR (0x28) +#define RNP_DMA_INT_TRIG (0x2c) +#define RNP_DMA_AXI_EN (0x0010) +#define RX_AXI_RW_EN (0x03 << 0) +#define TX_AXI_RW_EN (0x03 << 2) +#define RNP_DMA_AXI_STAT (0x0014) +#define RNP_VEB_MAC_MASK_LO (0x0020) +#define RNP_VEB_MAC_MASK_HI (0x0024) +#define RNP_VEB_VLAN_MASK (0x0028) +#define DEBUG_PROBE_NUM 16 +#define RNP_DMA_DEBUG_PROBE_LO_REG(n) (0x0100 + 0x08 * (n)) +#define RNP_DMA_DEBUG_PROBE_HI_REG(n) (0x0100 + 0x08 * (n)) +#define DEBUG_CNT_NUM 76 +#define RNP_DMA_DEBUG_CNT(n) (0x0200 + 0x04 * (n)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_0 (RNP_DMA_DEBUG_CNT(17)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_1 (RNP_DMA_DEBUG_CNT(18)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_2 (RNP_DMA_DEBUG_CNT(19)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_3 (RNP_DMA_DEBUG_CNT(20)) +#define RNP_DMA_STATS_DMA_TO_SWITCH (RNP_DMA_DEBUG_CNT(21)) +#define RNP_DMA_STATS_MAC_TO_DMA (RNP_DMA_DEBUG_CNT(22)) +#define RNP_DMA_STATS_SWITCH_TO_DMA (RNP_DMA_DEBUG_CNT(23)) +#define RNP_PCI_WR_TO_HOST (RNP_DMA_DEBUG_CNT(34)) +/* RX-Queue Registers */ +#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI (0x30) +#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO (0x34) +#define RNP_DMA_REG_RX_DESC_BUF_LEN (0x38) +#define RNP_DMA_REG_RX_DESC_BUF_HEAD (0x3c) +#define RNP_DMA_REG_RX_DESC_BUF_TAIL (0x40) +#define RNP_DMA_REG_RX_DESC_FETCH_CTRL (0x44) +#define RNP_DMA_REG_RX_INT_DELAY_TIMER (0x48) +#define RNP_DMA_REG_RX_INT_DELAY_PKTCNT (0x4c) +#define RNP_DMA_REG_RX_ARB_DEF_LVL (0x50) +#define PCI_DMA_REG_RX_DESC_TIMEOUT_TH (0x54) +#define PCI_DMA_REG_RX_SCATTER_LENGTH (0x58) +/* TX-Queue Registers */ +#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI (0x60) +#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO (0x64) +#define RNP_DMA_REG_TX_DESC_BUF_LEN (0x68) +#define RNP_DMA_REG_TX_DESC_BUF_HEAD (0x6c) +#define RNP_DMA_REG_TX_DESC_BUF_TAIL (0x70) +#define 
RNP_DMA_REG_TX_DESC_FETCH_CTRL (0x74) +#define RNP_DMA_REG_TX_INT_DELAY_TIMER (0x78) +#define RNP_DMA_REG_TX_INT_DELAY_PKTCNT (0x7c) +#define RNP_DMA_REG_TX_ARB_DEF_LVL (0x80) +#define RNP_DMA_REG_TX_FLOW_CTRL_TH (0x84) +#define RNP_DMA_REG_TX_FLOW_CTRL_TM (0x88) +#define RNP_DMA_PKT_FIFO_DATA_PROG_FULL_THRESH (0x0098) +#define VEB_TBL_CNTS 64 +#define RNP_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ + (0x80A0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ + (0x80B0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VEB_VID_TBL(port, vf) (0x80C0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ + (0x80D0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_STATS_MAC_TO_MAC (0x1b0) +#define RNP_DMA_STATS_SWITCH_TO_SWITCH (0x1a4) + +/* ==================== RNP-ETH Global Registers ==================== */ +#define RNP_ETH_BASE (0x10000) +#define RNP10_ETH_BASE (0x10000) +#define RNP10_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP10_RAH_AV 0x80000000 +#define RNP10_ETH_RAR_RL(n) (0xa000 + 0x04 * n) +#define RNP10_ETH_RAR_RH(n) (0xa400 + 0x04 * n) +#define RNP10_ETH_DMAC_FCTRL (0x9110) +#define RNP10_ETH_DMAC_MCSTCTRL (0x9114) +#define RNP10_MCSTCTRL_MULTICASE_TBL_EN (1 << 2) +#define RNP10_MCSTCTRL_UNICASE_TBL_EN (1 << 3) +#define RNP10_VM_DMAC_MPSAR_RING(entry) \ + (0xb400 + (4 * (entry))) +#define RNP10_ETH_MULTICAST_HASH_TABLE(n) (0xac00 + 0x04 * n) +#define RNP10_ETH_LAYER2_ETQF(n) (0x9200 + 0x04 * (n)) +#define RNP10_ETH_LAYER2_ETQS(n) (0x9240 + 0x04 * (n)) + +/* ==================== RNP10-TCAM Global Registers ==================== */ +#define RNP10_TCAM_BASE (0xc0000 - RNP10_ETH_BASE) +#define RNP10_TCAM_SDPQF(n) \ + (RNP10_TCAM_BASE + 0x00 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_DAQF(n) \ + (RNP10_TCAM_BASE + 0x04 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_SAQF(n) \ + (RNP10_TCAM_BASE + 0x08 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_APQF(n) \ + (RNP10_TCAM_BASE + 0x0c + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_ETH_TCAM_EN (0x8024) +#define RNP10_TCAM_SDPQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x20 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_DAQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x24 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_SAQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x28 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_APQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x2c + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_MODE (RNP10_TCAM_BASE + 0x20000) +#define RNP10_TCAM_CACHE_ENABLE (RNP10_TCAM_BASE + 0x20004) +#define RNP10_TCAM_CACHE_ADDR_CLR (RNP10_TCAM_BASE + 0x20008) +#define RNP10_TCAM_CACHE_REQ_CLR (RNP10_TCAM_BASE + 0x2000c) +#define RNP10_TOP_ETH_TCAM_CONFIG_ENABLE (0x30000 - RNP10_ETH_BASE + 0x8050) +#define RNP10_VEB_TBL_CNTS 64 +#define RNP10_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ + (0x80A0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ + (0x80B0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VEB_VID_TBL(port, vf) \ + (0x80C0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ + (0x80D0 + 4 * (port) + 0x100 * (vf)) +/* + * [3:0]: + * 4'b0000:RSS disable + * 4'b0001:RSS only + * 4'b0100:DCB and RSS--8*16 + * 4'b1010:POOLS and RSS--32*4 + * [3] :virtual enable + * [16]:ipv4_hash_tcp_enable + * [17]:ipv4_hash_enable + * [20]:ipv6_hash_enable + * [21]:ipv6_hash_tcp_enable + * [22]:ipv4_hash_udp_enable + * [23]:ipv6_hash_udp_enable + * [24]:ipv4_hash_sctp_enable + * [25]:ipv6_hash_sctp_enable + */ +#define 
RNP10_ETH_RSS_CONTROL (0x92a0) +#define RNP10_IOV_ENABLED (1 << 3) +#define RNP10_ETH_RSS_KEY (0x92d0) +#define RNP10_ETH_TC_IPH_OFFSET_TABLE(n) (0xe800 + 0x04 * (n)) +#define RNP10_ETH_RSS_INDIR_TBL(n) (0xe000 + 0x04 * (n)) +#define RNP10_ETH_VLAN_FILTER_TABLE(n) (0xb000 + 0x04 * (n)) +#define RNP10_VFTA RNP10_ETH_VLAN_FILTER_TABLE +#define RNP10_VLVF(idx) (0xb600 + 4 * (idx)) +#define RNP10_VLVF_TABLE(idx) (0xb700 + 4 * (idx)) +#define RNP10_ETH_TUPLE5_SAQF(n) (0xc000 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_DAQF(n) (0xc400 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_SDPQF(n) (0xc800 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_FTQF(n) (0xcc00 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_POLICY(n) (0xd000 + 0x04 * (n)) +#define RNP10_ETH_VLAN_FILTER_ENABLE (0x9118) +#define RNP10_ETH_DEFAULT_RX_MIN_LEN (0x80f0) +#define RNP10_ETH_DEFAULT_RX_MAX_LEN (0x80f4) +#define RNP10_ETH_VLAN_VME_REG(n) (0x8040 + 0x04 * (n)) +#define RNP10_ETH_VXLAN_PORT (0x8010) +#define RNP10_FCTRL_BPE BIT(10) +#define RNP10_FCTRL_UPE BIT(9) +#define RNP10_FCTRL_MPE BIT(8) +#define RNP10_HOST_FILTER_EN (0x801c) +#define RNP10_REDIR_EN (0x8030) +#define RNP10_ETH_SCTP_CHECKSUM_EN (0x8038) +#define RNP10_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP10_ETH_DISABLE_RSS (0) +#define RNP10_COMM_REG0 0x30000 +#define RNP10_TOP_NIC_CONFIG (RNP10_COMM_REG0 + 0x0004) +#define RNP10_TOP_NIC_REST_N (RNP10_COMM_REG0 + 0x0010) +#define RNP10_TOP_ETH_BUG_40G_PATCH (RNP10_COMM_REG0 + 0x801c) +#define RNP10_TOP_MAC_OUI (RNP10_COMM_REG0 + 0xc004) +#define RNP10_TOP_MAC_SN (RNP10_COMM_REG0 + 0xc008) +#define RNP10_ETH_TUNNEL_MOD (0x8004) +#define INNER_L4_BIT BIT(6) +#define PKT_LEN_ERR (2) +#define HDR_LEN_ERR (1) +#define RNP10_ETH_ERR_MASK_VECTOR (0x8060) +#define RNP10_ETH_BYPASS (0x8000) +#define RNP10_ETH_DEFAULT_RX_RING (0x806c) +#define DROP_ALL_THRESH (2046) +#define RECEIVE_ALL_THRESH (0x270) +#define RNP10_ETH_RX_PROGFULL_THRESH_PORT (0x8070) +#define RNP10_ETH_HIGH_WATER(n) (0x80c0 + n * (0x08)) +#define RNP10_ETH_LOW_WATER(n) (0x80c4 + n * (0x08)) +#define RNP10_ETH_WRAP_FIELD_TYPE (0x805c) +#define RNP10_MRQC_IOV_EN (0x92a0) +#define RNP10_ETH_SYNQF (0x9290) +#define RNP10_ETH_SYNQF_PRIORITY (0x9294) +#define RNP10_RXTRANS_DROP(port) (0x8904 + 0x40 * (port)) +#define RNP10_RXTRANS_WDT_ERR_PKTS(port) (0x8908 + 0x40 * (port)) +#define RNP10_RXTRANS_CODE_ERR_PKTS(port) (0x890c + 0x40 * (port)) +#define RNP10_RXTRANS_CRC_ERR_PKTS(port) (0x8910 + 0x40 * (port)) +#define RNP10_RXTRANS_SLEN_ERR_PKTS(port) (0x8914 + 0x40 * (port)) +#define RNP10_RXTRANS_GLEN_ERR_PKTS(port) (0x8918 + 0x40 * (port)) +#define RNP10_RXTRANS_IPH_ERR_PKTS(port) (0x891c + 0x40 * (port)) +#define RNP10_RXTRANS_CSUM_ERR_PKTS(port) (0x8920 + 0x40 * (port)) +#define RNP10_RXTRANS_LEN_ERR_PKTS(port) (0x8924 + 0x40 * (port)) +#define RNP10_RXTRANS_CUT_ERR_PKTS(port) (0x8928 + 0x40 * (port)) +#define RNP10_ETH_DECAP_PKT_DROP_NUM(port) (0x82e8 + 0x04 * (port)) +#define RNP10_ETH_INVALID_DROP_PKTS RNP10_ETH_DECAP_PKT_DROP_NUM(0) +#define RNP10_ETH_FILTER_DROP_PKTS RNP10_ETH_DECAP_PKT_DROP_NUM(1) +#define RNP10_ETH_RX_DEBUG(n) (0x8400 + 0x04 * (n)) +#define RNP10_ETH_RX_FC_DEBUG0_NUM RNP10_ETH_RX_DEBUG(0) +#define RNP10_ETH_RX_FC_DEBUG1_NUM RNP10_ETH_RX_DEBUG(1) +#define RNP10_ETH_RX_DIS_DEBUG0_NUM RNP10_ETH_RX_DEBUG(2) +#define RNP10_ETH_RX_DIS_DEBUG1_NUM RNP10_ETH_RX_DEBUG(3) +#define RNP10_ETH_HOST_L2_DROP_PKTS RNP10_ETH_RX_DEBUG(4) +#define RNP10_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP10_ETH_RX_DEBUG(5) +#define RNP10_ETH_ETYPE_DROP_PKTS RNP10_ETH_RX_DEBUG(6) +#define 
RNP10_ETH_TCP_SYN_DROP_PKTS RNP10_ETH_RX_DEBUG(7) +#define RNP10_ETH_REDIR_TUPLE5_DROP_PKTS RNP10_ETH_RX_DEBUG(8) +#define RNP10_ETH_REDIR_TCAM_DROP_PKTS RNP10_ETH_RX_DEBUG(9) +#define RNP10_MAC_STATS_BROADCAST_LOW (0x0918) +#define RNP10_MAC_STATS_BROADCAST_HIGH (0x091c) +#define RNP10_MAC_STATS_MULTICAST_LOW (0x0920) +#define RNP10_MAC_STATS_MULTICAST_HIGH (0x0924) +#define RNP10_MAC_STATS_RX_PAUSE_COUNT_LOW (0x0988) +#define RNP10_MAC_STATS_RX_PAUSE_COUNT_HIGH (0x098C) +#define RNP10_MAC_STATS_TX_PAUSE_COUNT_LOW (0x0894) +#define RNP10_MAC_STATS_TX_PAUSE_COUNT_HIGH (0x898) +#define RNP10_ETH_DECAP_BMC_DROP_NUM (0x82f4) +#define RNP10_ETH_DECAP_SWITCH_DROP_NUM (0x82f8) +#define RNP10_VLVF(idx) (0xb600 + 4 * (idx)) +#define WATCHDOG_TIMER_ERROR BIT(0) +#define RUN_FRAME_ERROR BIT(1) +#define GAINT_FRAME_ERROR BIT(2) +#define LATE_COLLISION_ERROR BIT(3) +#define GMII_ERROR BIT(4) +#define DRIBBLING_BIT_ERROR BIT(5) +#define CRC_ERROR BIT(6) +#define LENGTH_ERROR BIT(8) +#define DA_FILTER_ERROR BIT(9) +#define SA_FILTER_ERROR BIT(10) + +/* ================================================================== */ +#define ETH_ERR_SCTP (1 << 4) +#define ETH_ERR_L4 (1 << 3) +#define ETH_ERR_L3 (1 << 2) +#define ETH_ERR_PKT_LEN_ERR (1 << 1) +#define ETH_ERR_HDR_LEN_ERR (1 << 0) +#define ETH_IGNORE_ALL_ERR \ + (ETH_ERR_SCTP | ETH_ERR_L4 | ETH_ERR_L3 | ETH_ERR_PKT_LEN_ERR | \ + ETH_ERR_HDR_LEN_ERR) +#define VM_DMAC_TBL_SZ 128 +#define RNP_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP_ETH_DISABLE_RSS (0) +#define RNP_ETH_TX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x0060 + 0x08 * (n)) +#define RNP_ETH_TX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x0064 + 0x08 * (n)) +#define RNP_ETH_EMAC_DMA_PROFULL_THRESH (RNP_ETH_BASE + 0x0080) +#define RNP_ETH_EMAC_DMA_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0084) +#define RNP_ETH_EMAC_SW_PROFULL_THRESH (RNP_ETH_BASE + 0x0088) +#define RNP_ETH_EMAC_SW_PROEMPTY_THRESH (RNP_ETH_BASE + 0x008c) +#define RNP_ETH_EMAC_BMC_TX_PROFULL_THRESH (RNP_ETH_BASE + 0x0090) +#define RNP_ETH_EMAC_BMC_TX_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0094) +#define RNP_ETH_CNT_PKT_EMAC_TX(n) (RNP_ETH_BASE + 0x00a0 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_PECL_TX(n) (RNP_ETH_BASE + 0x00b0 + 0x04 * (n)) +#define RNP_ETH_STATUS_TX_FLOWCTRL(n) (RNP_ETH_BASE + 0x00c0 + 0x04 * (n)) +#define RNP_ETH_VERSION_FLOWWCTRL (RNP_ETH_BASE + 0x00d0) +#define RNP_ETH_CFG_ETH_MAC (RNP_ETH_BASE + 0x00d4) +#define RNP_ETH_SCA_TX_CS(port) (RNP_ETH_BASE + 0x0100 + 0x08 * (port)) +#define RNP_ETH_SCA_TX_NS(port) (RNP_ETH_BASE + 0x0104 + 0x08 * (port)) +#define RNP_ETH_TXTRANS_CS(port) (RNP_ETH_BASE + 0x0120 + 0x08 * (port)) +#define RNP_ETH_TXTRANS_NS(port) (RNP_ETH_BASE + 0x0124 + 0x08 * (port)) +#define RNP_ETH_1TO4_INST0_IN_PKTS (RNP_ETH_BASE + 0x0200) +#define RNP_ETH_1TO4_INST1_IN_PKTS (RNP_ETH_BASE + 0x0204) +#define RNP_ETH_1TO4_INST2_IN_PKTS (RNP_ETH_BASE + 0x0208) +#define RNP_ETH_1TO4_INST3_IN_PKTS (RNP_ETH_BASE + 0x020c) +#define RNP_ETH_IN_0_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0210 + 0x10 * (port)) +#define RNP_ETH_IN_1_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0214 + 0x10 * (port)) +#define RNP_ETH_IN_2_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0218 + 0x10 * (port)) +#define RNP_ETH_IN_3_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x021c + 0x10 * (port)) +#define RNP_ETH_EMAC_TX_TO_PHY_PKTS(port) (RNP_ETH_BASE + 0x0250 + 4 * (port)) +#define RNP_ETH_TXTRANS_PTP_PKT_NUM(port) (RNP_ETH_BASE + 0x0260 + 4 * (port)) +#define RNP_ETH_TX_DEBUG(n) (RNP_ETH_BASE + 0x0300 + 0x04 * (n)) +#define RNP_ETH_PTP_TX_STATUS(n) (RNP_ETH_BASE + 0x0400) +#define 
RNP_ETH_PTP_TX_HTIMES(n) (RNP_ETH_BASE + 0x0404) +#define RNP_ETH_PTP_TX_LTIMES(n) (RNP_ETH_BASE + 0x0408) +#define RNP_ETH_PTP_TX_TSVALUE_STATUS(n) (RNP_ETH_BASE + 0x040c) +#define RNP_ETH_PTP_TX_CLEAR(n) (RNP_ETH_BASE + 0x0410) +#define RNP_ETH_MAC_SPEED_PORT(n) (RNP_ETH_BASE + 0x0450 + 0x04 * (n)) +#define RNP_ETH_MAC_LOOPBACK_MODE_PORT(n) (RNP_ETH_BASE + 0x0460 + 0x04 * (n)) +#define RNP_ETH_EXCEPT_DROP_PROC (RNP_ETH_BASE + 0x0470) +#define RNP_ETH_IPP (RNP_ETH_BASE + 0x8000) +#define RNP_ETH_BYPASS (RNP_ETH_BASE + 0x8000) +#define RNP_ETH_TUNNEL_MOD (RNP_ETH_BASE + 0x8004) +#define RNP_ETH_LOOPBACK_EN (RNP_ETH_BASE + 0x8008) +#define RNP_FIFO_CTRL_MODE (RNP_ETH_BASE + 0x800c) +#define RNP_ETH_VXLAN_PORT (RNP_ETH_BASE + 0x8010) +#define RNP_ETH_NVGRE_PORT (RNP_ETH_BASE + 0x8014) +#define RNP_ETH_RDMA_PORT (RNP_ETH_BASE + 0x8018) +#define RNP_HOST_FILTER_EN (RNP_ETH_BASE + 0x801c) +#define RNP_MNG_FILTER_EN (RNP_ETH_BASE + 0x8020) +#define RNP_ETH_TCAM_EN (RNP_ETH_BASE + 0x8024) +#define RNP_CONGEST_DROP_EN (RNP_ETH_BASE + 0x8028) +#define RNP_REDIR_EN (RNP_ETH_BASE + 0x8030) +#define RNP_ETH_SCTP_CHECKSUM_EN (RNP_ETH_BASE + 0x8038) +#define RNP_ETH_ARP_FUNC_EN (RNP_ETH_BASE + 0x803c) +#define RNP_ETH_VLAN_VME_REG(n) (RNP_ETH_BASE + 0x8040 + 0x04 * (n)) +#define RNP_ETH_CVLAN_RM_EN (RNP_ETH_BASE + 0x8050) +#define RNP_ETH_VLAN_RM_TYPE (RNP_ETH_BASE + 0x8054) +#define RNP_ETH_WRAP_FIELD_TYPE (RNP_ETH_BASE + 0x805c) +#define RNP_ETH_ERR_MASK_VECTOR (RNP_ETH_BASE + 0x8060) +#define RNP_ETH_DEFAULT_RX_RING (RNP_ETH_BASE + 0x806c) +#define RNP_ETH_RX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x8070 + 0x08 * (n)) +#define RNP_ETH_RX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x8074 + 0x08 * (n)) +#define RNP_ETH_EMAC_GAT_PROGFULL_THRESH (RNP_ETH_BASE + 0x8090) +#define RNP_ETH_EMAC_GAT_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x8094) +#define RNP_ETH_EMAC_PARSE_PROGFULL_THRESH (RNP_ETH_BASE + 0x8098) +#define RNP_ETH_EMAC_PARSE_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x809c) +#define RNP_ETH_FC_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a0) +#define RNP_ETH_FC_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80a4) +#define RNP_ETH_DIS_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a8) +#define RNP_ETH_DIS_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80ac) +#define RNP_ETH_COV_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b0) +#define RNP_ETH_COV_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80b4) +#define RNP_ETH_BMC_RX_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b8) +#define RNP_ETH_BMC_RX_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80bc) +#define RNP_ETH_HIGH_WATER(n) (RNP_ETH_BASE + 0x80c0 + n * (0x08)) +#define RNP_ETH_LOW_WATER(n) (RNP_ETH_BASE + 0x80c4 + n * (0x08)) +#define RNP_ETH_DEFAULT_RX_MIN_LEN (RNP_ETH_BASE + 0x80f0) +#define RNP_ETH_DEFAULT_RX_MAX_LEN (RNP_ETH_BASE + 0x80f4) +#define RNP_ETH_PTP_EVENT_PORT (RNP_ETH_BASE + 0x80f8) +#define RNP_ETH_PTP_GENER_PORT_REG (RNP_ETH_BASE + 0x80fc) +#define RNP_ETH_RX_TRANS_CS_PORT(n) (RNP_ETH_BASE + 0x8100 + 0x08 * (n)) +#define RNP_ETH_RX_TRANS_NS_PORT(n) (RNP_ETH_BASE + 0x8104 + 0x08 * (n)) +#define RNP_ETH_GAT_RX_CS (RNP_ETH_BASE + 0x8120) +#define RNP_ETH_GAT_RX_NS (RNP_ETH_BASE + 0x8124) +#define RNP_ETH_EMAC_PIP_CS (RNP_ETH_BASE + 0x8128) +#define RNP_ETH_EMAC_PIP_NS (RNP_ETH_BASE + 0x812c) +#define RNP_ETH_EMAC_FC_CS (RNP_ETH_BASE + 0x8138) +#define RNP_ETH_EMAC_FC_NS (RNP_ETH_BASE + 0x813c) +#define RNP_ETH_EMAC_DIS_CS (RNP_ETH_BASE + 0x8140) +#define RNP_ETH_EMAC_DIS_NS (RNP_ETH_BASE + 0x8144) +#define RNP_ETH_HOST_L2_FILTER_CS (RNP_ETH_BASE + 0x8150) +#define RNP_ETH_HOST_L2_FILTER_NS (RNP_ETH_BASE + 0x8154) +#define 
RNP_ETH_EMAC_DECAP_CS (RNP_ETH_BASE + 0x8158) +#define RNP_ETH_EMAC_DECAP_NS (RNP_ETH_BASE + 0x815c) +#define RNP_ETH_PFC_CONFIG_PROT(n) (RNP_ETH_BASE + 0x8180 + n * (0x04)) +#define RNP_ETH_RX_PKT_NUM(port) (RNP_ETH_BASE + 0x8220 + 0x04 * (port)) +#define RNP_ETH_RX_DROP_PKT_NUM(port) (RNP_ETH_BASE + 0x8230 + 0x04 * (port)) +#define RNP_ETH_TOTAL_GAT_RX_PKT_NUM (RNP_ETH_BASE + 0x8240) +#define RNP_ETH_PKT_ARP_REQ_NUM (RNP_ETH_BASE + 0x8250) +#define RNP_ETH_PKT_ARP_RESPONSE_NUM (RNP_ETH_BASE + 0x8254) +#define RNP_ETH_ICMP_NUM (RNP_ETH_BASE + 0x8258) +#define RNP_ETH_PKT_UDP_NUM (RNP_ETH_BASE + 0x825c) +#define RNP_ETH_PKT_TCP_NUM (RNP_ETH_BASE + 0x8260) +#define RNP_ETH_PKT_ESP_NUM (RNP_ETH_BASE + 0x8264) +#define RNP_ETH_PKT_GRE_NUM (RNP_ETH_BASE + 0x8268) +#define RNP_ETH_PKT_SCTP_NUM (RNP_ETH_BASE + 0x826c) +#define RNP_ETH_PKT_TCPSYN_NUM (RNP_ETH_BASE + 0x8270) +#define RNP_ETH_PKT_VXLAN_NUM (RNP_ETH_BASE + 0x8274) +#define RNP_ETH_PKT_NVGRE_NUM (RNP_ETH_BASE + 0x8278) +#define RNP_ETH_PKT_FRAGMENT_NUM (RNP_ETH_BASE + 0x827c) +#define RNP_ETH_PKT_LAYER1_VLAN_NUM (RNP_ETH_BASE + 0x8280) +#define RNP_ETH_PKT_LAYER2_VLAN_NUM (RNP_ETH_BASE + 0x8284) +#define RNP_ETH_PKT_IPV4_NUM (RNP_ETH_BASE + 0x8288) +#define RNP_ETH_PKT_IPV6_NUM (RNP_ETH_BASE + 0x828c) +#define RNP_ETH_PKT_INGRESS_NUM (RNP_ETH_BASE + 0x8290) +#define RNP_ETH_PKT_EGRESS_NUM (RNP_ETH_BASE + 0x8294) +#define RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM (RNP_ETH_BASE + 0x8298) +#define RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM (RNP_ETH_BASE + 0x829c) +#define RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a0) +#define RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a4) +#define RNP_ETH_PKT_SCTP_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a8) +#define RNP_ETH_PKT_VLAN_ERR_NUM (RNP_ETH_BASE + 0x82ac) +#define RNP_ETH_PKT_RDMA_NUM (RNP_ETH_BASE + 0x82b0) +#define RNP_ETH_PKT_ARP_AUTO_RESPONSE_NUM (RNP_ETH_BASE + 0x82b4) +#define RNP_ETH_PKT_ICMPV6_NUM (RNP_ETH_BASE + 0x82b8) +#define RNP_ETH_PKT_IPV6_EXTEND_NUM (RNP_ETH_BASE + 0x82bc) +#define RNP_ETH_PKT_802_3_NUM (RNP_ETH_BASE + 0x82c0) +#define RNP_ETH_PKT_EXCEPT_SHORT_NUM (RNP_ETH_BASE + 0x82c4) +#define RNP_ETH_PKT_PTP_NUM (RNP_ETH_BASE + 0x82c8) +#define RNP_ETH_DECAP_PKT_IN_NUM (RNP_ETH_BASE + 0x82d0) +#define RNP_ETH_DECAP_PKT_OUT_NUM (RNP_ETH_BASE + 0x82d4) +#define RNP_ETH_DECAP_DMAC_OUT_NUM (RNP_ETH_BASE + 0x82d8) +#define RNP_ETH_DECAP_BMC_OUT_NUM (RNP_ETH_BASE + 0x82dc) +#define RNP_ETH_DECAP_SW_OUT_NUM (RNP_ETH_BASE + 0x82e0) +#define RNP_ETH_DECAP_MIRROR_OUT_NUM (RNP_ETH_BASE + 0x82e4) +#define RNP_ETH_DECAP_PKT_DROP_NUM(port) (RNP_ETH_BASE + 0x82e8 + 0x04 * (port)) +#define RNP_ETH_INVALID_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(0) +#define RNP_ETH_FILTER_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(1) +#define RNP_ETH_DECAP_DMAC_DROP_NUM (RNP_ETH_BASE + 0x82f0) +#define RNP_ETH_DECAP_BMC_DROP_NUM (RNP_ETH_BASE + 0x82f4) +#define RNP_ETH_DECAP_SWITCH_DROP_NUM (RNP_ETH_BASE + 0x82f8) +#define RNP_ETH_DECAP_RM_VLAN_NUM (RNP_ETH_BASE + 0x82fc) +#define RNP_ETH_RX_FC_PKT_IN_NUM (RNP_ETH_BASE + 0x8300) +#define RNP_ETH_RX_FC_PKT_OUT_NUM (RNP_ETH_BASE + 0x8304) +#define RNP_ETH_RX_FC_PKT_DROP0_NUM (RNP_ETH_BASE + 0x8308) +#define RNP_ETH_RX_FC_PKT_DROP1_NUM (RNP_ETH_BASE + 0x830c) +#define RNP_ETH_RING_FC_STATUS0 (RNP_ETH_BASE + 0x8310) +#define RNP_ETH_RING_FC_STATUS1 (RNP_ETH_BASE + 0x8314) +#define RNP_ETH_RING_FC_STATUS2 (RNP_ETH_BASE + 0x8318) +#define RNP_ETH_RING_FC_STATUS3 (RNP_ETH_BASE + 0x831c) +#define RNP_ETH_RX_DEBUG(n) (RNP_ETH_BASE + 0x8400 + 0x04 * (n)) +#define RNP_ETH_RX_FC_DEBUG0_NUM 
RNP_ETH_RX_DEBUG(0) +#define RNP_ETH_RX_FC_DEBUG1_NUM RNP_ETH_RX_DEBUG(1) +#define RNP_ETH_RX_DIS_DEBUG0_NUM RNP_ETH_RX_DEBUG(2) +#define RNP_ETH_RX_DIS_DEBUG1_NUM RNP_ETH_RX_DEBUG(3) +#define RNP_ETH_HOST_L2_DROP_PKTS RNP_ETH_RX_DEBUG(4) +#define RNP_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP_ETH_RX_DEBUG(5) +#define RNP_ETH_ETYPE_DROP_PKTS RNP_ETH_RX_DEBUG(6) +#define RNP_ETH_TCP_SYN_DROP_PKTS RNP_ETH_RX_DEBUG(7) +#define RNP_ETH_REDIR_TUPLE5_DROP_PKTS RNP_ETH_RX_DEBUG(8) +#define RNP_ETH_REDIR_TCAM_DROP_PKTS RNP_ETH_RX_DEBUG(9) +#define RNP_ETH_VMARK_TC(n) (RNP_ETH_BASE + 0x8500 + 0x04 * (n)) +#define RNP_RING_FC_ENABLE (RNP_ETH_BASE + 0x8520) +#define RNP_SELECT_RING_EN(n) (RNP_ETH_BASE + 0x8524 + (0x4 * n)) +#define RNP_TC_FC_SW_EN (RNP_ETH_BASE + 0x8534) +#define RNP_ETH_LOCAL_DIP(n) (RNP_ETH_BASE + 0x8600 + 0x04 * (n)) +#define RNP_ETH_LOCAL_DMAC_H(n) (RNP_ETH_BASE + 0x8700 + 0x04 * (n)) +#define RNP_ETH_LOCAL_DMAC_L(n) (RNP_ETH_BASE + 0x8800 + 0x04 * (n)) +/* Rx Ring Flow Control */ +#define RNP_RXTRANS_RX_PKTS(port) (RNP_ETH_BASE + 0x8900 + 0x40 * (port)) +#define RNP_RXTRANS_DROP_PKTS(port) (RNP_ETH_BASE + 0x8904 + 0x40 * (port)) +#define RNP_RXTRANS_WDT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8908 + 0x40 * (port)) +#define RNP_RXTRANS_CODE_ERR_PKTS(port) (RNP_ETH_BASE + 0x890c + 0x40 * (port)) +#define RNP_RXTRANS_CRC_ERR_PKTS(port) (RNP_ETH_BASE + 0x8910 + 0x40 * (port)) +#define RNP_RXTRANS_SLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8914 + 0x40 * (port)) +#define RNP_RXTRANS_GLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8918 + 0x40 * (port)) +#define RNP_RXTRANS_IPH_ERR_PKTS(port) (RNP_ETH_BASE + 0x891c + 0x40 * (port)) +#define RNP_RXTRANS_CSUM_ERR_PKTS(port) (RNP_ETH_BASE + 0x8920 + 0x40 * (port)) +#define RNP_RXTRANS_LEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8924 + 0x40 * (port)) +#define RNP_RXTRANS_CUT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8928 + 0x40 * (port)) +#define RNP_RXTRANS_EXCEPT_BYTES(port) (RNP_ETH_BASE + 0x892c + 0x40 * (port)) +#define RNP_RXTRANS_G1600_BYTES_PKTS(port) \ + (RNP_ETH_BASE + 0x8930 + 0x40 * (port)) +#define RNP_RX_RING_MAXRATE(n) (RNP_ETH_BASE + 0x8a00 + (0x4 * n)) +#define RNP_ETH_RX_PROGFULL_RTRN(n) (RNP_ETH_BASE + 0x8c00 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_EMAC_RX(n) (RNP_ETH_BASE + 0x8c10 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_PECL_RX(n) (RNP_ETH_BASE + 0x8c20 + 0x04 * (n)) +#define RNP_ETH_STATUS_RX_FLOWCTRL(n) (RNP_ETH_BASE + 0x8c30 + 0x04 * (n)) +#define RNP_ETH_DMAC_FCTRL (RNP_ETH_BASE + 0x9110) +#define RNP_ETH_DMAC_MCSTCTRL (RNP_ETH_BASE + 0x9114) +#define RNP_MCSTCTRL_MULTICASE_TBL_EN (1 << 2) +#define RNP_MCSTCTRL_UNICASE_TBL_EN (1 << 3) +#define RNP_MCSTCTRL_DMAC_47 0x00 +#define RNP_MCSTCTRL_DMAC_46 0x01 +#define RNP_MCSTCTRL_DMAC_45 0x02 +#define RNP_MCSTCTRL_DMAC_43 0x03 +#define RNP_ETH_VLAN_FILTER_ENABLE (RNP_ETH_BASE + 0x9118) +#define RNP_ETH_INPORT_POLICY_VAL (RNP_ETH_BASE + 0x91d0) +#define RNP_ETH_INPORT_POLICY_REG(n) (RNP_ETH_BASE + 0x91e0 + 0x04 * (n)) +#define ETH_LAYER2_NUM (16) +#define RNP_ETH_LAYER2_ETQF(n) (RNP_ETH_BASE + 0x9200 + 0x04 * (n)) +#define RNP_ETH_LAYER2_ETQS(n) (RNP_ETH_BASE + 0x9240 + 0x04 * (n)) +#define RNP_ETH_LAYER2_ETQS_DEFAULT (RNP_ETH_BASE + 0x9280) +#define RNP_ETH_ETQF_DEFAULT (RNP_ETH_BASE + 0x9284) +#define RNP_ETH_SYNQF (RNP_ETH_BASE + 0x9290) +#define RNP_ETH_SYNQF_PRIORITY (RNP_ETH_BASE + 0x9294) +/* + * [3:0]: + * 4'b0000:RSS disable + * 4'b0001:RSS only + * 4'b0100:DCB and RSS--8*16 + * 4'b1010:POOLS and RSS--32*4 + * [3] :virtual enable + * [16]:ipv4_hash_tcp_enable + * [17]:ipv4_hash_enable + * [20]:ipv6_hash_enable 
+ * [21]:ipv6_hash_tcp_enable + * [22]:ipv4_hash_udp_enable + * [23]:ipv6_hash_udp_enable + * [24]:ipv4_hash_sctp_enable + * [25]:ipv6_hash_sctp_enable + */ +#define RNP_ETH_RSS_CONTROL (RNP_ETH_BASE + 0x92a0) +#define RNP_MRQC_IOV_EN (RNP_ETH_BASE + 0x92a0) +#define RNP_IOV_ENABLED (1 << 3) +#define RNP_ETH_RSS_KEY (RNP_ETH_BASE + 0x92d0) +#define RNP_ETH_RAR_RL(n) (RNP_ETH_BASE + 0xa000 + 0x04 * n) +#define RNP_ETH_RAR_RH(n) (RNP_ETH_BASE + 0xa400 + 0x04 * n) +#define RNP_ETH_UTA(n) (RNP_ETH_BASE + 0xa800 + 0x04 * n) +#define RNP_ETH_MULTICAST_HASH_TABLE(n) (RNP_ETH_BASE + 0xac00 + 0x04 * n) +#define RNP_MTA(n) RNP_ETH_MULTICAST_HASH_TABLE(n) +#define RNP_ETH_VLAN_FILTER_TABLE(n) (RNP_ETH_BASE + 0xb000 + 0x04 * (n)) +#define RNP_VFTA RNP_ETH_VLAN_FILTER_TABLE +#define RNP_FCTRL_MULTICASE_BYPASS (1 << 8) +#define RNP_FCTRL_UNICASE_BYPASS (1 << 9) +#define RNP_FCTRL_BROADCAST_BYPASS (1 << 10) +#define RNP_ETH_ETYPE_TABLE(n) (RNP_ETH_BASE + 0xb300 + 0x04 * (n)) +#define RNP_VM_DMAC_MPSAR_RING(entry) \ + (RNP_ETH_BASE + 0xb400 + (4 * (entry))) +#define RNP_VLVF(idx) (RNP_ETH_BASE + 0xb600 + 4 * (idx)) +#define RNP_VLVFB(idx) (RNP_ETH_BASE + 0xb700 + 4 * (idx)) +#define RNP_VM_TUNNEL_PFVLVF_L(n) (RNP_ETH_BASE + 0xb800 + 0x04 * (n)) +#define RNP_VM_TUNNEL_PFVLVF_H(n) (RNP_ETH_BASE + 0xb900 + 0x04 * (n)) +/* 5 tuple */ +#define ETH_TUPLE5_NUM 128 +#define RNP_ETH_TUPLE5_SAQF(n) (RNP_ETH_BASE + 0xc000 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_DAQF(n) (RNP_ETH_BASE + 0xc400 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_SDPQF(n) (RNP_ETH_BASE + 0xc800 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_FTQF(n) (RNP_ETH_BASE + 0xcc00 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_POLICY(n) (RNP_ETH_BASE + 0xd000 + 0x04 * (n)) +#define RNP_ETH_RSS_INDIR_TBL(p, n) \ + (RNP_ETH_BASE + 0xe000 + 0x04 * (n) + 0x200 * (p)) +#define RNP_ETH_TC_IPH_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe800 + 0x04 * (n)) +#define RNP_ETH_TC_VLAN_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe820 + 0x04 * (n)) +#define RNP_ETH_TC_PORT_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe840 + 0x04 * (n)) +#define RNP_REDIR_RING_MASK (RNP_ETH_BASE + 0xe860) +#define RNP_ETH_RSS_MODE (0x6fe00) +#define RNP_ETH_RSS_INDIR_TBL_UV3P(n) (0x6ff00 + 0x04 * (n)) +/* ================================================================== */ + +/* ==================== RNP-REG Global Registers ==================== */ +#define RNP_COMM_REG0 0x30000 +#define RNP_TOP_NIC_VERSION (RNP_COMM_REG0 + 0x0000) +#define RNP_TOP_NIC_CONFIG (RNP_COMM_REG0 + 0x0004) +#define RNP_TOP_NIC_STAT (RNP_COMM_REG0 + 0x0008) +#define RNP_TOP_NIC_DUMMY (RNP_COMM_REG0 + 0x000c) +#define RNP_TOP_NIC_REST_N (RNP_COMM_REG0 + 0x0010) +#define NIC_RESET 0 +#define RNP_TOP_DMA_MEM_SLP (RNP_COMM_REG0 + 0x4004) +#define RNP_TOP_DMA_MEM_SD (RNP_COMM_REG0 + 0x4008) +#define RNP_TOP_ETH_TIMESTAMP_SEL (RNP_COMM_REG0 + 0x8010) +#define RNP_TOP_ETH_MAC_CLK_SEL (RNP_COMM_REG0 + 0x8014) +#define RNP_TOP_ETH_INF_ETH_STATUS (RNP_COMM_REG0 + 0x8018) +#define RNP_TOP_ETH_BUG_40G_PATCH (RNP_COMM_REG0 + 0x801c) +#define RNP_TOP_ETH_PWR_PORT_NUM (4) +#define RNP_TOP_ETH_PWR_CLAMP_CTRL_PORT(n) (RNP_COMM_REG0 + 0x8020 + 0xc * (n)) +#define RNP_TOP_ETH_PWR_ISOLATE_PORT(n) (RNP_COMM_REG0 + 0x8024 + 0xc * (n)) +#define RNP_TOP_ETH_PWR_DOWN_PORT(n) (RNP_COMM_REG0 + 0x8028 + 0xc * (n)) +#define RNP_TOP_ETH_TCAM_CONFIG_ENABLE (RNP_COMM_REG0 + 0x8050) +#define RNP_TOP_ETH_SLIP (RNP_COMM_REG0 + 0x8060) +#define RNP_TOP_ETH_SHUT_DOWN (RNP_COMM_REG0 + 0x8064) +#define RNP_TOP_ETH_OVS_SLIP (RNP_COMM_REG0 + 0x8068) +#define RNP_TOP_ETH_OVS_SHUT_DOWN (RNP_COMM_REG0 + 
0x806c) +#define RNP_FC_PORT_ENABLE (RNP_COMM_REG0 + 0x9004) +#define RNP_FC_PORT_PRIO_MAP(n) (RNP_COMM_REG0 + 0x9008 + (0x04 * n)) +#define RNP_FC_EN_CONF_AVAILABLE (RNP_COMM_REG0 + 0x9018) +#define RNP_FC_UNCTAGS_MAP_OFFSET (16) +#define RNP_TOP_MAC_OUI (RNP_COMM_REG0 + 0xc004) +#define RNP_TOP_MAC_SN (RNP_COMM_REG0 + 0xc008) +/* ================================================================== */ + +/* ==================== RNP-SERDES Global Registers ================= */ + +#define RNP_SERDES (0x40000) +#define RNP_PCS_OFFSET (0x1000) + +#define RNP_PCS_BASE(i) (RNP_SERDES + RNP_PCS_OFFSET * i) +#define RNP_PCS_1G_OR_10G BIT(13) +#define RNP_PCS_SPPEED_MASK (0x1c) +#define RNP_PCS_SPPEED_10G (0x0) +#define RNP_PCS_SPPEED_40G (0xc) +#define RNP_PCS_LINK_SPEED (0x30000) +#define RNP_PCS_LINKUP BIT(2) +#define RNP_PCS_LINK_STATUS (0x30001) + +/* ================================================================== */ + +/* ==================== RNP-MAC Global Registers ==================== */ +#define RNP10_MAC_BASE (0x60000) +#define RNP_XLMAC (0x60000) +#define RNP10_MAC_TX_CFG (0x0000) +#define RNP10_MAC_RX_CFG (0x0004) +#define RNP_IPC_MASK_XLGMAC BIT(9) +#define RNP_RX_ALL BIT(31) +#define RNP_RX_ALL_MUL BIT(4) +#define RNP10_MAC_PKT_FLT (0x0008) +#define RNP10_MAC_LPI_CTRL (0x00d0) +#define RNP10_MAC_Q0_TX_FLOW_CTRL(i) (0x0070 + 0x04 * (i)) +#define RNP10_MAC_RX_FLOW_CTRL (0x0090) +#define RNP10_TX_FLOW_ENABLE_MASK (0x2) +#define RNP10_RX_FLOW_ENABLE_MASK (0x1) +#define RNP10_MAC_TX_VLAN_TAG (0x0050) +#define RNP10_MAC_TX_VLAN_MODE (0x0060) +#define RNP10_MAC_INNER_VLAN_INCL (0x0064) +#define RNP10_MAC_UNICAST_LOW(i) (0x304 + i * 0x08) +#define RNP10_MAC_UNICAST_HIGH(i) (0x300 + i * 0x08) +#define RNP_MODE_NO_SA_INSER (0x0) +#define RNP_SARC_OFFSET (28) +#define RNP_TWOKPE_MASK BIT(27) +#define RNP_SFTERR_MASK BIT(26) +#define RNP_CST_MASK BIT(25) +#define RNP_TC_MASK BIT(24) +#define RNP_WD_MASK BIT(23) +#define RNP_JD_MASK BIT(22) +#define RNP_BE_MASK BIT(21) +#define RNP_JE_MASK BIT(20) +#define RNP_IFG_96 (0x00) +#define RNP_IFG_OFFSET (17) +#define RNP_DCRS_MASK BIT(16) +#define RNP_PS_MASK BIT(15) +#define RNP_FES_MASK BIT(14) +#define RNP_DO_MASK BIT(13) +#define RNP_LM_MASK BIT(12) +#define RNP_DM_MASK BIT(11) +#define RNP_IPC_MASK BIT(10) +#define RNP_DR_MASK BIT(9) +#define RNP_LUD_MASK BIT(8) +#define RNP_ACS_MASK BIT(7) +#define RNP_BL_MODE (0x00) +#define RNP_BL_OFFSET (5) +#define RNP_DC_MASK BIT(4) +#define RNP_TE_MASK BIT(3) +#define RNP_RE_MASK BIT(2) +#define RNP_PRELEN_MODE (0) +#define GMAC_CONTROL 0x00000000 /* Configuration */ +#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ +#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ +#define GMAC_MII_ADDR 0x00000010 /* MII Address */ +#define GMAC_MII_DATA 0x00000014 /* MII Data */ +#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ +#define GMAC_PMT 0x0000002c + +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +#define GMAC_VTHM_MASK BIT(19) +#define GMAC_ESVL_MASK BIT(18) +#define GMAC_VTIM_MASK BIT(17) +#define GMAC_ETV_MASK BIT(16) +#define GMAC_VLAN_TAG_CTRL 0x0000001c +#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */ +#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ +#define GMAC_CONTROL_FES 0x00004000 /* Speed 
0:10 1:100 */ +#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ +#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ +#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ +#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ +#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */ +#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ +#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ +/* GMAC Frame Filter defines */ +#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */ +#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define GMAC_FRAME_FILTER_VLAN 0x00010000 /* vlan filter open */ +#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ +/* GMII ADDR defines */ +#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ +/* GMAC FLOW CTRL defines */ +#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ +#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ +#define GMAC_MANAGEMENT_RX_UNDERSIZE (0x01a4) +#define RNP_MAC_TX_CFG (RNP_XLMAC + 0x0000) +#define RNP_MAC_RX_CFG (RNP_XLMAC + 0x0004) +#define RNP_MAC_PKT_FLT (RNP_XLMAC + 0x0008) +#define RNP_MAC_LPI_CTRL (RNP_XLMAC + 0x00d0) +#define RNP_MAC_TX_VLAN_TAG (RNP_XLMAC + 0x0050) +#define RNP_MAC_TX_VLAN_MODE (RNP_XLMAC + 0x0060) +#define RNP_MAC_INNER_VLAN_INCL (RNP_XLMAC + 0x0064) +#define RNP_MAC_Q0_TX_FLOW_CTRL(i) (RNP_XLMAC + 0x0070 + 0x04 * (i)) +#define RNP_MAC_RX_FLOW_CTRL (RNP_XLMAC + 0x0090) +#define RNP_MAC_HW_FEATURE (RNP_XLMAC + 0x0120) +/*1588 */ +#define RNP_MAC_TS_CTRL (RNP_XLMAC + 0X0d00) +#define RNP_MAC_SUB_SECOND_INCREMENT (RNP_XLMAC + 0x0d04) +#define RNP_MAC_SYS_TIME_SEC_CFG (RNP_XLMAC + 0x0d08) +#define RNP_MAC_SYS_TIME_NANOSEC_CFG (RNP_XLMAC + 0x0d0c) +#define RNP_MAC_SYS_TIME_SEC_UPDATE (RNP_XLMAC + 0x0d10) +#define RNP_MAC_SYS_TIME_NANOSEC_UPDATE (RNP_XLMAC + 0x0d14) +#define RNP_MAC_TS_ADDEND (RNP_XLMAC + 0x0d18) +#define RNP_MAC_TS_STATS (RNP_XLMAC + 0x0d20) +#define RNP_MAC_INTERRUPT_ENABLE (RNP_XLMAC + 0x00b4) +#define RNP_MAC_STATS_BROADCAST_LOW (RNP_XLMAC + 0x0918) +#define RNP_MAC_STATS_BROADCAST_HIGH (RNP_XLMAC + 0x091c) +#define RNP_MAC_STATS_MULTICAST_LOW (RNP_XLMAC + 0x0920) +#define RNP_MAC_STATS_MULTICAST_HIGH (RNP_XLMAC + 0x0924) +#define RNP_TX_FLOW_ENABLE_MASK (0x2) +#define RNP_RX_FLOW_ENABLE_MASK (0x1) +/* ================================================================== */ + +/* ==================== RNP-MSIX Global Registers ==================== */ +//==== Ring-MSIX Registers (MSI-X_module_design.docs) === +#define RING_VECTOR(n) (0x04 * (n)) + +/* ================================================================== */ + +/* ==================== RNP-SWITCH Global Registers ================= */ +#define RNP_SWITCH_BASE 0xB0000 + +#define RNP_SWITCH_RULE_INGS(port, n) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x1000 + 0x04 * (n)) +#define RNP_SWITCH_RULE_INGS_RPU_NP(port) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x1014) +#define RNP_SWITCH_RULE_INGS_RPU_SWITCH(port) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x1018) +#define RNP_SWITCH_RULE_INGS_SEC(port) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x101c) +#define RNP_SWITCH_RULE_INGS_EXFPGA(port) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x1020) +#define RNP_SWITCH_CNT_EGRESS_PKT(port) (RNP_SWITCH_BASE + 0x10db + 0x04 * (n)) +#define RNP_SWITCH_CNT_INGRESS_PKT(port) (RNP_SWITCH_BASE + 0x10f0 + 0x04 * (n)) +#define RNP_SWITCH_RPUUP_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1108) +#define RNP_SWITCH_RPUDN_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x110c) +#define RNP_SWITCH_MAC0_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1110) +#define RNP_SWITCH_MAC1_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1114) +#define RNP_SWITCH_DMA0_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1118) +#define RNP_SWITCH_DMA1_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x111c) +#define RNP_SWITCH_REG1_INGRESS_STATUS(port) \ + (RNP_SWITCH_BASE + 0x1120 + 0x08 * (port)) +#define RNP_SWITCH_REG2_INGRESS_STATUS(port) \ + (RNP_SWITCH_BASE + 0x1124 + 0x08 * (port)) +#define RNP_SWITCH_REG_STATUS_ROBIN(port) \ + (RNP_SWITCH_BASE + 0x1150 + 0x04 * (port)) +#define RNP_SWITCH_REG_EGRESS_STATUS(port) \ + (RNP_SWITCH_BASE + 0x1168 + 0x04 * (port)) +#define RNP_SWITCH_INFO_FIFO_DMA_TX(n) (RNP_SWITCH_BASE + 0x1198 + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_DMA_RX(n) (RNP_SWITCH_BASE + 0x119c + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_MAC_TX(n) (RNP_SWITCH_BASE + 0x11a8 + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_MAC_RX(n) (RNP_SWITCH_BASE + 0x11ac + 0x08 * 
(n)) +#define RNP_SWITCH_INFO_FIFO_RPUUP_RX(n) (RNP_SWITCH_BASE + 0x11bc + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_RPUDN_RX(n) (RNP_SWITCH_BASE + 0x11c0 + 0x08 * (n)) +#define RNP_SWITCH_EN_SOFT_RESET (RNP_SWITCH_BASE + 0xf000) +#define RNP_SWITCH_SOFT_RESET (RNP_SWITCH_BASE + 0xf004) +#define RNP_SWITCH_CLR_INGS_ERR (RNP_SWITCH_BASE + 0xf008) +#define RNP_SWITCH_ERR_CODE_INGS(port) \ + (RNP_SWITCH_BASE + 0xf010 + 0x04 * (port)) +#define RNP_SWITCH_MEM_SD (RNP_SWITCH_BASE + 0xf028) +#define RNP_SWITCH_MEM_SLP (RNP_SWITCH_BASE + 0xf02c) +#define RNP_SWITCH_EN_INVALID_DPORT_DROP_O (RNP_SWITCH_BASE + 0xf030) + +/* ================================================================== */ + +/* ==================== RNP-TCAM Global Registers ==================== */ +#define RNP_TCAM_BASE (0xc0000) +#define RNP_TCAM_SDPQF(n) \ + (RNP_TCAM_BASE + 0x00 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_DAQF(n) \ + (RNP_TCAM_BASE + 0x04 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_SAQF(n) \ + (RNP_TCAM_BASE + 0x08 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_APQF(n) \ + (RNP_TCAM_BASE + 0x0c + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_SDPQF_MASK(n) \ + (RNP_TCAM_BASE + 0x20 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_DAQF_MASK(n) \ + (RNP_TCAM_BASE + 0x24 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_SAQF_MASK(n) \ + (RNP_TCAM_BASE + 0x28 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_APQF_MASK(n) \ + (RNP_TCAM_BASE + 0x2c + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_MODE (RNP_TCAM_BASE + 0x20000) +#define RNP_TCAM_CACHE_ENABLE (RNP_TCAM_BASE + 0x20004) +#define RNP_TCAM_CACHE_ADDR_CLR (RNP_TCAM_BASE + 0x20008) +#define RNP_TCAM_CACHE_REQ_CLR (RNP_TCAM_BASE + 0x2000c) + +/* ================================================================== */ + +/* ==================== OTHER Global Registers ==================== */ +/* ===== PF-VF Functions ==== */ +#define VF_NUM_REG 0xa3000 +/* 8bit: 7:vf_actiove 6:fun0/fun1 [5:0]:vf_num */ +#define VF_NUM(vfnum, fun) ((1 << 7) | (((fun) & 0x1) << 6) | ((vfnum) & 0x3f)) +#define PF_BIT 6 +#define PF_NUM(fun) (((fun) & 0x1) << 6) +#define IS_VF(vfnum) (((vfnum) & (1 << 7)) ? 1 : 0) + +/* PFC Flow Control*/ +enum NIC_MODE { + MODE_NIC_MODE_2PORT_40G = 0, + MODE_NIC_MODE_2PORT_10G = 1, + MODE_NIC_MODE_4PORT_10G = 2, + MODE_NIC_MODE_8PORT_10G = 3, +}; + +/* ================================================================== */ + +#endif /* RNP_REGS_H */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sriov.c b/drivers/net/ethernet/mucse/rnp/rnp_sriov.c new file mode 100644 index 000000000000..5a7723b2b9b8 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_sriov.c @@ -0,0 +1,1731 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_type.h" +#include "rnp_sriov.h" + +int rnp_msg_post_status_signle(struct rnp_adapter *adapter, + enum PF_STATUS status, int vf); +#ifdef CONFIG_PCI_IOV +static int __rnp_enable_sriov(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int num_vf_macvlans, i, num_vebvlans; + struct vf_macvlans *mv_list; + struct vf_vebvlans *vv_list = NULL; + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* sriov and dcb cannot open together */ + /* reset numtc */ + adapter->flags &= (~RNP_FLAG_DCB_ENABLED); + netdev_reset_tc(adapter->netdev); + + e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= RNP_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + adapter->ring_feature[RING_F_VMDQ].offset = 0; + else + adapter->ring_feature[RING_F_VMDQ].offset = hw->max_vfs - 1; + + num_vf_macvlans = hw->num_rar_entries - + (hw->max_pf_macvlans + 1 + adapter->num_vfs); + num_vebvlans = hw->num_vebvlan_entries; + + adapter->mv_list = mv_list = kcalloc( + num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL); + if (num_vebvlans) + hw->vv_list = vv_list = kcalloc( + num_vebvlans, sizeof(struct vf_vebvlans), GFP_KERNEL); + + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + mv_list->rar_entry = hw->mac.num_rar_entries - + (i + adapter->num_vfs + 1); + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + if (vv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&hw->vf_vas.l); + for (i = 0; i < num_vebvlans; i++) { + vv_list->vid = -1; + vv_list->vid = 0; + vv_list->free = true; + vv_list->veb_entry = i; + list_add(&vv_list->l, &hw->vf_vas.l); + vv_list++; + } + } + + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + hw->ops.set_sriov_status(hw, true); + adapter->vfinfo = kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (adapter->vfinfo) { + /* limit trafffic classes based on VFs enabled */ + /* TODO analyze VF need support pfc or traffic classes */ + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = hw->sriov_ring_limit; + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= + ~(RNP_FLAG2_RSC_CAPABLE | RNP_FLAG2_RSC_ENABLED); + + adapter->flags |= RNP_FLAG_SRIOV_ENABLED; + + /* enable spoof checking for all VFs */ + return 0; + } + + /* open flags at last to avoid null call adapter->vfinfo */ + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + return -ENOMEM; +} + +void rnp_enable_sriov_true(struct rnp_adapter *adapter) +{ + int err = 0; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return; + + adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE; + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + printk("Failed to enable PCI sriov: %d num %d\n", err, + adapter->num_vfs); + printk("We cannot handle this error\n"); + } + + adapter->flags |= RNP_FLAG_VF_INIT_DONE; +} + +/* Note this function is called when the user wants to enable SR-IOV + 
* VFs using the now deprecated module parameter + * never used + */ +void rnp_enable_sriov(struct rnp_adapter *adapter) +{ + int pre_existing_vfs = 0; + struct rnp_hw *hw = &adapter->hw; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) + return; + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + if (!pre_existing_vfs) + dev_warn( + &adapter->pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated " + "- please use the pci sysfs interface.\n"); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn( + &adapter->pdev->dev, + "Virtual Functions already enabled for this device - Please " + "reload all VF drivers to avoid spoofed packet errors\n"); + } else { + int i; + /* + * The n10 supports up to 64 VFs per physical function + * but this implementation limits allocation to 126 so that + * basic networking resources are still available to the + * physical function. If the user requests greater than + * 64 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = + min_t(unsigned int, adapter->num_vfs, hw->max_vfs - 1); + + /* should first alloc memory for sriov */ + if (__rnp_enable_sriov(adapter)) { + e_err(probe, "Failed to alloc memory for sriov\n"); + adapter->num_vfs = 0; + } + + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(adapter->pdev, (i | 0x10000000)); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + } +} + +static bool rnp_vfs_are_assigned(struct rnp_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + unsigned int dev_id = RNP_DEV_ID_N10_PF0_VF_N; + unsigned int vendor_id = PCI_VENDOR_ID_MUCSE; + + switch (adapter->pdev->device) { + case RNP_DEV_ID_N10_PF0: + case RNP_DEV_ID_N10_PF1: + vendor_id = 0x1dab; + if (rnp_is_pf1(&adapter->hw)) + dev_id = RNP_DEV_ID_N10_PF1_VF; + else + dev_id = RNP_DEV_ID_N10_PF0_VF; + break; + case PCI_DEVICE_ID_N10_PF0: + case PCI_DEVICE_ID_N10_PF1: + vendor_id = PCI_VENDOR_ID_MUCSE; + if (rnp_is_pf1(&adapter->hw)) + dev_id = RNP_DEV_ID_N10_PF1_VF_N; + else + dev_id = RNP_DEV_ID_N10_PF0_VF_N; + } + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(vendor_id, dev_id, NULL); + while (vfdev) { + /* if we don't own it we don't care */ + if (vfdev->is_virtfn && vfdev->physfn == pdev) { + /* if it is assigned we cannot release it */ + if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + return true; + } + + vfdev = pci_get_device(vendor_id, dev_id, vfdev); + } + + return false; +} +#endif /* #ifdef CONFIG_PCI_IOV */ + +int rnp_disable_sriov(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int rss; + int time = 0; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return 0; + + adapter->num_vfs = 0; + adapter->flags &= ~RNP_FLAG_SRIOV_ENABLED; + adapter->flags &= ~RNP_FLAG_SRIOV_INIT_DONE; + adapter->flags &= ~RNP_FLAG_VF_INIT_DONE; + adapter->vlan_count = 0; + msleep(100); + + /* only do if not ncsi card */ + if (!hw->ncsi_en) + hw->ops.set_mac_rx(hw, false); + + hw->ops.set_sriov_status(hw, false); + + /* set num VFs to 0 to prevent 
access to vfinfo */ + while (test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + msleep(100); + time++; + + if (time > 100) { + printk("wait flags timeout\n"); + break; + } + } + if (time < 100) + clear_bit(__RNP_USE_VFINFI, &adapter->state); + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + if (hw->vv_list) { + kfree(hw->vv_list); + hw->vv_list = NULL; + } + + if (adapter->mv_list) { + kfree(adapter->mv_list); + adapter->mv_list = NULL; + } + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* if SR-IOV is already disabled then there is nothing to do */ + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (rnp_vfs_are_assigned(adapter)) { + e_dev_warn( + "Unloading driver while VFs are assigned - VFs will not be " + "deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + /* set default pool back to 0 */ + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED; + adapter->ring_feature[RING_F_VMDQ].offset = 0; + + rss = min_t(int, adapter->max_ring_pair_counts, num_online_cpus()); + + rss = min_t(int, rss, + hw->mac.max_msix_vectors - adapter->num_other_vectors); + + adapter->ring_feature[RING_F_RSS].limit = rss; + + /* take a breather then clean up driver data */ + msleep(100); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + return 0; +} + +static bool check_ari_mode(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + + return bus->self && bus->self->ari_enabled; +} + +static int rnp_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct rnp_adapter *adapter = pci_get_drvdata(dev); + struct rnp_hw *hw = &adapter->hw; + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = rnp_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + /* maybe bug, if add 1 vlan, then open sriov */ + if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) { + if (adapter->vlan_count > hw->max_vfs - 1) { + dev_err(&adapter->pdev->dev, + "vlans is too much, delete less than %d vlans\n", + hw->max_vfs - 1); + + err = -EOPNOTSUPP; + goto err_out; + } + + } else if (adapter->vlan_count > 1) { + dev_err(&adapter->pdev->dev, + "only 1 vlan in sriov mode, delete other vlans\n"); + dev_err(&adapter->pdev->dev, "please delete all vlans first\n"); + + err = -EOPNOTSUPP; + goto err_out; + } + + adapter->vlan_count = 0; + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated to 63 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. 
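+ * Worked example: with check_ari_mode() true and the default
+ * sriov_ring_limit of 2, the ceiling enforced below is 128 / 2 - 1 = 63
+ * VFs; a larger per-VF ring budget lowers it accordingly (128 / 4 - 1 =
+ * 31). Without ARI the cap is simply hw->max_vfs_noari.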
+ */ + + if (check_ari_mode(dev)) { + int temp = hw->sriov_ring_limit; + + if (temp == 1) + temp = 2; + + + if (num_vfs > (128 / temp - 1)) { + err = -EPERM; + goto err_out; + } + } else { + if (num_vfs > hw->max_vfs_noari) { + err = -EPERM; + goto err_out; + } + } + + adapter->num_vfs = num_vfs; + err = __rnp_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(dev, (i | 0x10000000)); + /* we should reinit pf first */ + dbg("flags:0x%x\n", adapter->flags); + if (hw->ops.clr_rar_all) + hw->ops.clr_rar_all(hw); + + rnp_sriov_reinit(adapter); + + adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE; + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d num %d\n", err, + num_vfs); + rnp_disable_sriov(adapter); + rnp_sriov_reinit(adapter); + goto err_out; + } + adapter->flags |= RNP_FLAG_VF_INIT_DONE; + +out: + return num_vfs; + +err_out: + return err; +#endif + return 0; +} + +static int rnp_pci_sriov_disable(struct pci_dev *dev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(dev); + int err; + u32 current_flags = adapter->flags; + + err = rnp_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ + if (!err && current_flags != adapter->flags) { + /* rnp_disable_sriov() doesn't clear VMDQ flag */ + adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED; +#ifdef CONFIG_PCI_IOV + rnp_sriov_reinit(adapter); +#endif + } + + return err; +} + +static int rnp_set_vf_multicasts(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + int entries = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct rnp_hw *hw = &adapter->hw; + int i; + + /* only so many hash values supported */ + entries = min(entries, RNP_MAX_VF_MC_ENTRIES); + + /* + * salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* + * VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + /* fixed mode */ + hw->ops.set_sriov_vf_mc(hw, vfinfo->vf_mc_hashes[i]); + } + + return 0; +} + +void rnp_restore_vf_macs(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int vf; + u8 *mac_addr; + int rar_entry; + int fix_vf_num = 0; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + mac_addr = adapter->vfinfo[vf].vf_mac_addresses; + rar_entry = hw->mac.num_rar_entries - (vf + 1); + /* setup to the hw */ + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, + fix_vf_num, true); + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, + true); + else + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, + true); + } + } +} + +void rnp_restore_vf_macvlans(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct list_head *pos; + struct vf_macvlans *entry; + int fix_vf_num = 0; + + hw_dbg(hw, "%s Staring..\n", __func__); + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (!entry->free) { + hw_dbg(hw, " vf:%d 
MACVLAN: RAR[%d] <= %pM\n", + entry->vf, entry->rar_entry, entry->vf_macvlan); + + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (entry->vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (entry->vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + fix_vf_num, true); + + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, + entry->rar_entry, + entry->vf + 1, true); + } else { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, + entry->rar_entry, + entry->vf, true); + } + } + } + } + hw_dbg(hw, "%s Done\n", __func__); +} + +void rnp_restore_vf_multicasts(struct rnp_adapter *adapter) +{ + /* Restore any VF macvlans */ + rnp_restore_vf_macvlans(adapter); +} + +static int rnp_set_vf_vlan(struct rnp_adapter *adapter, int add, int vid, + u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int true_handle = 1; + int i; + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + /* should check other vf */ + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + /* if other vf use this vlan, don't true remove */ + if (!add) { + /* check equal pf_vlan */ + if (vid == adapter->vf_vlan) + true_handle = 0; + if (!test_and_set_bit(__RNP_USE_VFINFI, + &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + /* check if other vf_vlan still valid */ + if ((i != vf) && + (vid == adapter->vfinfo[i].vf_vlan)) + true_handle = 0; + /* check if other pf_vlan still valid */ + if ((i != vf) && + (vid == adapter->vfinfo[i].pf_vlan)) + true_handle = 0; + } + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + } + if (true_handle) + hw->ops.set_vf_vlan_filter(hw, vid, vf, (bool)add, false); + + return 0; +} + +static s32 rnp_set_vf_lpe(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + return 0; +} + +static inline void rnp_vf_reset_event(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + int i; + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + rnp_set_rx_mode(adapter->netdev); + + /* clear this rar_entry */ + hw->ops.clr_rar(hw, rar_entry); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = 0; + for (i = 0; i < RNP_MAX_VF_MC_ENTRIES; i++) + adapter->vfinfo[vf].vf_mc_hashes[i] = 0; + adapter->vfinfo[vf].vf_vlan = 0; + adapter->vfinfo[vf].vlan_count = 0; +} + +static int rnp_set_vf_mac(struct rnp_adapter *adapter, int vf, + unsigned char *mac_addr) +{ + struct rnp_hw *hw = &adapter->hw; + int fix_vf_num = 0; + /* this rar_entry may be cofict with mac vlan with pf */ + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); + + /* setup to the hw */ + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, + fix_vf_num, true); + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, true); + else + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, true); + } + + return 0; +} + +static int rnp_set_vf_macvlan(struct rnp_adapter *adapter, int vf, int index, + unsigned char *mac_addr) +{ + 
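As a side note for reviewers, the VF-to-RAR-slot and VF-to-ring-owner arithmetic used by rnp_set_vf_mac() just above (and repeated in rnp_restore_vf_macs()) can be summarised in a small standalone sketch. The struct and helper names below are illustrative; only the formulas are taken from the patch, and the num_rar_entries value is hypothetical.

	#include <stdio.h>

	struct map_cfg {
		int num_rar_entries;	/* hw->mac.num_rar_entries */
		int sriov_ring_limit;	/* rings handed to each VF */
		int vf_fixed;		/* RNP_NET_FEATURE_VF_FIXED set? */
	};

	/* MAC filter slots are handed out from the top of the RAR table down */
	static int vf_rar_entry(const struct map_cfg *c, int vf)
	{
		return c->num_rar_entries - (vf + 1);
	}

	/* "fix_vf_num" as computed in rnp_set_vf_mac()/rnp_restore_vf_macs() */
	static int vf_fix_num(const struct map_cfg *c, int vf)
	{
		if (c->sriov_ring_limit > 2)
			return (vf + (c->vf_fixed ? 1 : 0)) * c->sriov_ring_limit / 2;
		return c->vf_fixed ? vf + 1 : vf;
	}

	int main(void)
	{
		struct map_cfg c = { 128, 2, 1 };	/* 128 RAR entries is a hypothetical value */
		int vf;

		for (vf = 0; vf < 4; vf++)
			printf("vf %d: rar %d, fix_vf_num %d\n",
			       vf, vf_rar_entry(&c, vf), vf_fix_num(&c, vf));
		return 0;
	}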
struct rnp_hw *hw = &adapter->hw; + struct list_head *pos; + struct vf_macvlans *entry; + int fix_vf_num = 0; + /* index = 0 , only earase */ + /* index = 1 , earase and then set */ + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + hw->ops.clr_rar(hw, entry->rar_entry); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. + */ + if (!entry || !entry->free) + return -ENOSPC; + + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (entry->vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (entry->vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + fix_vf_num, true); + + } else { + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + entry->vf + 1, true); + } else { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + entry->vf, true); + } + } + + return 0; +} + +int rnp_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + eth_zero_addr(vf_mac_addr); + memcpy(vf_mac_addr, adapter->hw.mac.perm_addr, 6); + vf_mac_addr[5] = vf_mac_addr[5] + (0x80 | vfn); + vf_mac_addr[4] = vf_mac_addr[4] + (pdev->devfn); + + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} + +static int rnp_vf_reset_msg(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 msgbuf[RNP_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* reset the filters for the device */ + rnp_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + rnp_set_vf_mac(adapter, vf, vf_mac); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* Enable counting of spoofed packets in the SSVPC register */ + /* reply to reset with ack and vf mac address */ + msgbuf[0] = RNP_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= RNP_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + dev_warn( + &adapter->pdev->dev, + "VF %d has no MAC address assigned, you may have to assign " + "one manually\n", + vf); + } + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[RNP_VF_MC_TYPE_WORD] = 0; + /* setup link status , pause mode, ft padding mode */ + /* pause mode */ + 
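+ /* RNP_VF_MC_TYPE_WORD layout as packed below:
+ *   bits 23:16  current flow-control mode (hw->fc.current_mode)
+ *   bit  8      FT padding enabled (RNP_PRIV_FLAG_FT_PADDING)
+ *   bits 1:0    multicast filter type from RNP_ETH_DMAC_MCSTCTRL
+ */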
msgbuf[RNP_VF_MC_TYPE_WORD] |= (0xff & hw->fc.current_mode) << 16; + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x01 << 8); + else + msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x00 << 8); + /* mc_type */ + msgbuf[RNP_VF_MC_TYPE_WORD] |= rd32(hw, RNP_ETH_DMAC_MCSTCTRL) & 0x03; + msgbuf[RNP_VF_DMA_VERSION_WORD] = rd32(hw, RNP_DMA_VERSION); + msgbuf[RNP_VF_VLAN_WORD] = adapter->vfinfo[vf].pf_vlan; + /* fixme tx fetch to be added here */ + msgbuf[RNP_VF_PHY_TYPE_WORD] = (hw->mac_type << 16) | hw->phy_type; + msgbuf[RNP_VF_FW_VERSION_WORD] = (hw->fw_version); + if (adapter->vfinfo[vf].link_state == rnp_link_state_auto) { + msgbuf[RNP_VF_LINK_STATUS_WORD] = + (adapter->link_up ? RNP_PF_LINK_UP : 0) | + adapter->link_speed; + } else if (adapter->vfinfo[vf].link_state == rnp_link_state_on) { + msgbuf[RNP_VF_LINK_STATUS_WORD] = RNP_PF_LINK_UP | + adapter->link_speed; + } else { + msgbuf[RNP_VF_LINK_STATUS_WORD] = 0; + } + + msgbuf[RNP_VF_AXI_MHZ] = hw->usecstocount; + /* we start from 0 */ + msgbuf[RNP_VF_FEATURE] = 0; + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + msgbuf[RNP_VF_FEATURE] |= PF_FEATRURE_VLAN_FILTER; + if (hw->ncsi_en) + msgbuf[RNP_VF_FEATURE] |= PF_NCSI_EN; + + /* now vf maybe has no irq handler if it is the first reset*/ + rnp_write_mbx(hw, msgbuf, RNP_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int rnp_get_vf_mac_addr(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u8 *mac = ((u8 *)(&msgbuf[1])); + + memcpy(mac, adapter->vfinfo[vf].vf_mac_addresses, 6); + + return 0; +} + +/* vf call setup a new mac */ +static int rnp_set_vf_mac_addr(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ETH_ALEN)) { + e_warn(drv, + "VF %d attempted to override administratively set MAC address\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + rnp_set_vf_mac(adapter, vf, new_mac); + + return 0; +} + +static int rnp_set_vf_vlan_msg(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = ((msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT); + int vid = (msgbuf[1] & RNP_VLVF_VLANID_MASK); + int err; + + if (adapter->vfinfo[vf].pf_vlan) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN " + "configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + /* only allow 1 vlan for each vf */ + if ((add) && (adapter->vfinfo[vf].vlan_count)) { + e_warn(drv, "VF %d attempted to set more than 1 vlan", vf); + e_warn(drv, " vlan now %d, try to set %d\n", + adapter->vfinfo[vf].vf_vlan, vid); + return -1; + } + + /* vlan 0 has no work todo */ + if (!vid) + return 0; + if (add) { + adapter->vfinfo[vf].vlan_count++; + adapter->vfinfo[vf].vf_vlan = vid; + } else if (adapter->vfinfo[vf].vlan_count) { + adapter->vfinfo[vf].vf_vlan = 0; + adapter->vfinfo[vf].vlan_count--; + } + + err = rnp_set_vf_vlan(adapter, add, vid, vf); + + return err; +} + +static int rnp_set_vf_vlan_strip_msg(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int vlan_strip_on = !!(msgbuf[1] >> 31); + int queue_cnt = msgbuf[1] & 0xffff; + int err = 0, i; + + vf_dbg("strip_on:%d queeu_cnt:%d, %d %d\n", vlan_strip_on, queue_cnt, + msgbuf[2], msgbuf[3]); + + for (i = 0; i < queue_cnt; i++) { + if 
(vlan_strip_on) + hw->ops.set_vlan_strip(hw, msgbuf[2 + i], true); + else + hw->ops.set_vlan_strip(hw, msgbuf[2 + i], false); + } + + return err; +} + +static int rnp_set_vf_macvlan_msg(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + } + + err = rnp_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no space for " + "it\n", + vf); + + return err < 0; + + return 0; +} + +static int rnp_negotiate_vf_api(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + adapter->vfinfo[vf].vf_api = 0; + + return 0; +} + +static int rnp_get_vf_reg(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u32 reg = msgbuf[1]; + + msgbuf[1] = rd32(&adapter->hw, reg); + + return 0; +} + +static int rnp_set_vf_mtu(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct net_device *netdev = adapter->netdev; + if (msgbuf[1] > netdev->mtu) { + e_dev_warn( + "vf %d try to change %d mtu to %d (large than pf limit)\n", + vf, netdev->mtu, msgbuf[1]); + return -1; + } else + return 0; +} + + +static int rnp_set_vf_promisc(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int i; + int ret = 0; + struct rnp_hw *hw = &adapter->hw; + + if (msgbuf[1]) { + /* check if other vf in promisc */ + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[vf].promisc_mode) { + printk("vf %d already in promisc\n", vf); + ret = -1; + break; + } + } + /* if no vf in promisc mode */ + adapter->vfinfo[vf].promisc_mode = true; + hw->ops.set_rx_mode(hw, adapter->netdev, true); + hw->ops.set_sriov_status(hw, true); + + } else { + adapter->vfinfo[vf].promisc_mode = false; + hw->ops.set_rx_mode(hw, adapter->netdev, true); + hw->ops.set_sriov_status(hw, true); + } + return ret; +} + +static int rnp_get_vf_mtu(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct net_device *netdev = adapter->netdev; + msgbuf[1] = netdev->mtu; + return 0; +} + +static int rnp_get_vf_fw(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + + msgbuf[1] = hw->fw_version; + + return 0; +} + +static int rnp_get_vf_link(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + if (adapter->vfinfo[vf].link_state == rnp_link_state_auto) { + msgbuf[1] = (adapter->link_up ? 
RNP_PF_LINK_UP : 0) | + adapter->link_speed; + } else if (adapter->vfinfo[vf].link_state == rnp_link_state_on) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + + } else { + msgbuf[1] = 0; + } + return 0; +} + +static int rnp_get_vf_dma_frag(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + /* we fixed 1536 bytes */ + msgbuf[1] = 1536; + return 0; +} + +static int rnp_get_vf_queues(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + + msgbuf[RNP_VF_TX_QUEUES] = hw->sriov_ring_limit; + msgbuf[RNP_VF_RX_QUEUES] = hw->sriov_ring_limit; + msgbuf[RNP_VF_TRANS_VLAN] = adapter->vfinfo[vf].pf_vlan; + msgbuf[RNP_VF_DEF_QUEUE] = 0; + if (hw->hw_type == rnp_hw_n400) { + /* n400, we use + * vf0 use ring4 + * vf1 use ring8 + */ + msgbuf[RNP_VF_QUEUE_START] = vf * 4 + 4; + + } else if ((hw->hw_type == rnp_hw_n10) && (hw->sriov_ring_limit == 1)) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + msgbuf[RNP_VF_QUEUE_START] = vf * 2 + 2; + else + msgbuf[RNP_VF_QUEUE_START] = vf * 2; + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit + + hw->sriov_ring_limit; + else + msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit; + } + msgbuf[RNP_VF_QUEUE_DEPTH] = (adapter->tx_ring_item_count << 16) | + adapter->rx_ring_item_count; + + return 0; +} + +static int rnp_rcv_msg_from_vf(struct rnp_adapter *adapter, u32 vf) +{ + u32 mbx_size = RNP_VFMAILBOX_SIZE; + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + s32 retval; + + vf_dbg("msg from vf:%d\n", vf); + + retval = rnp_read_mbx(hw, msgbuf, mbx_size, vf); + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + vf_dbg("msg[0]=0x%08x\n", msgbuf[0]); + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (RNP_VT_MSGTYPE_ACK | RNP_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + /* clear vf_num */ + msgbuf[0] &= (~RNP_VF_MASK); + + /* this is a vf reset irq */ + if ((msgbuf[0] & RNP_MAIL_CMD_MASK) == RNP_VF_RESET) { + vf_dbg("vf %d up\n", vf); + return rnp_vf_reset_msg(adapter, vf); + } + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. 
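+ * The check below simply NACKs such a request, so the VF driver can
+ * retry once its RNP_VF_RESET handshake has completed and
+ * clear_to_send has been set by rnp_vf_reset_msg().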
+ */ + if (!adapter->vfinfo[vf].clear_to_send) { + vf_dbg("wait vf clear to send\n"); + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + rnp_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & RNP_MAIL_CMD_MASK)) { + case RNP_VF_SET_MAC_ADDR: + retval = rnp_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MULTICAST: + retval = rnp_set_vf_multicasts(adapter, msgbuf, vf); + break; + case RNP_VF_SET_VLAN: + retval = rnp_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case RNP_VF_SET_VLAN_STRIP: + retval = rnp_set_vf_vlan_strip_msg(adapter, msgbuf, vf); + break; + case RNP_VF_SET_LPE: + retval = rnp_set_vf_lpe(adapter, msgbuf, vf); + break; + case RNP_VF_GET_MACADDR: + retval = rnp_get_vf_mac_addr(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MACVLAN: + retval = rnp_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case RNP_VF_API_NEGOTIATE: + retval = rnp_negotiate_vf_api(adapter, msgbuf, vf); + break; + case RNP_VF_GET_QUEUES: + retval = rnp_get_vf_queues(adapter, msgbuf, vf); + break; + case RNP_VF_REG_RD: + retval = rnp_get_vf_reg(adapter, msgbuf, vf); + break; + case RNP_VF_GET_MTU: + retval = rnp_get_vf_mtu(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MTU: + retval = rnp_set_vf_mtu(adapter, msgbuf, vf); + break; + case RNP_VF_GET_FW: + retval = rnp_get_vf_fw(adapter, msgbuf, vf); + break; + case RNP_VF_GET_LINK: + retval = rnp_get_vf_link(adapter, msgbuf, vf); + break; + case RNP_PF_REMOVE: + vf_dbg("vf %d removed\n", vf); + adapter->vfinfo[vf].clear_to_send = false; + retval = 1; + break; + case RNP_VF_RESET_PF: + adapter->flags2 |= RNP_FLAG2_RESET_PF; + retval = 1; + break; + case RNP_VF_GET_DMA_FRAG: + retval = rnp_get_vf_dma_frag(adapter, msgbuf, vf); + + break; + case RNP_VF_SET_PROMISCE: + retval = rnp_set_vf_promisc(adapter, msgbuf, vf); + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = RNP_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + else + msgbuf[0] |= RNP_VT_MSGTYPE_ACK; + + /* write vf_num */ + msgbuf[0] |= (vf << 21); + + msgbuf[0] |= RNP_VT_MSGTYPE_CTS; + + if ((msgbuf[0] & RNP_MAIL_CMD_MASK) != RNP_PF_REMOVE) + rnp_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void rnp_rcv_ack_from_vf(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + u32 msg = RNP_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + rnp_write_mbx(hw, &msg, 1, vf); +} + +void rnp_msg_task(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 vf; + + rnp_fw_msg_handler(adapter); + + if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE)) + return; + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + + /* check flag */ + if (test_and_set_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status)) { + adapter->miss_time++; + e_info(drv, "we missed some irqs %d\n", vf); + continue; + } + + /* process any messages pending */ + if (!rnp_check_for_msg(hw, vf)) + rnp_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!rnp_check_for_ack(hw, vf)) + rnp_rcv_ack_from_vf(adapter, vf); + clear_bit(__VF_MBX_USED, &adapter->vfinfo[vf].status); + } +} + +static int rnp_msg_post_status_signle_link(struct rnp_adapter *adapter, int vf, + int link_state) +{ + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + struct rnp_mbx_info *mbx = &hw->mbx; + msgbuf[0] = RNP_PF_SET_LINK | (vf << 
RNP_VNUM_OFFSET); + + switch (link_state) { + case rnp_link_state_on: + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + break; + case rnp_link_state_off: + msgbuf[1] = 0; + break; + case rnp_link_state_auto: + if (adapter->link_up) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + } else { + msgbuf[1] = 0; + } + break; + } + return mbx->ops.write(hw, msgbuf, 2, vf); +} + +int rnp_msg_post_status_signle(struct rnp_adapter *adapter, + enum PF_STATUS status, int vf) +{ + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + struct rnp_mbx_info *mbx = &hw->mbx; + switch (status) { + case PF_FCS_STATUS: + msgbuf[0] = RNP_PF_SET_FCS | (vf << RNP_VNUM_OFFSET); + if (adapter->netdev->features & NETIF_F_RXFCS) + msgbuf[1] = 1; + else + msgbuf[1] = 0; + break; + case PF_PAUSE_STATUS: + msgbuf[0] = RNP_PF_SET_PAUSE | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = hw->fc.requested_mode; + break; + case PF_FT_PADDING_STATUS: + msgbuf[0] = RNP_PF_SET_FT_PADDING | (vf << RNP_VNUM_OFFSET); + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) { + msgbuf[1] = 1; + } else { + msgbuf[1] = 0; + } + + break; + case PF_VLAN_FILTER_STATUS: + msgbuf[0] = RNP_PF_SET_VLAN_FILTER | (vf << RNP_VNUM_OFFSET); + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + msgbuf[1] = 1; + } else { + msgbuf[1] = 0; + } + + break; + case PF_SET_VLAN_STATUS: + msgbuf[0] = RNP_PF_SET_VLAN | (vf << RNP_VNUM_OFFSET); + + msgbuf[1] = adapter->vfinfo[vf].pf_vlan; + break; + case PF_SET_LINK_STATUS: + if (adapter->vfinfo[vf].link_state != rnp_link_state_auto) + return 0; + /* only update link state if in auto mode */ + msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET); + if (adapter->link_up) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + } else { + msgbuf[1] = 0; + } + break; + case PF_SET_MTU: + msgbuf[0] = RNP_PF_SET_MTU | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = adapter->netdev->mtu; + break; + case PF_SET_RESET: + msgbuf[0] = RNP_PF_SET_RESET | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = 0; + + break; + } + + return mbx->ops.write(hw, msgbuf, 2, vf); +} + +/* try to send mailbox to all active vf */ +int rnp_msg_post_status(struct rnp_adapter *adapter, enum PF_STATUS status) +{ + u32 vf; + int err = 0; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* broadcast */ + for (vf = 0; vf < adapter->num_vfs; vf++) { + if (adapter->vfinfo[vf].clear_to_send) { + if (!test_bit(__RNP_IN_IRQ, &adapter->state)) { + if (test_and_set_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status)) { + adapter->miss_time++; + printk("send \n"); + return -1; + } + err |= rnp_msg_post_status_signle( + adapter, status, vf); + // clear flags + clear_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status); + } + } + } + } + return err; +} + +void rnp_disable_tx_rx(struct rnp_adapter *adapter) +{ +} + +void rnp_ping_all_vfs(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + ping = RNP_PF_CONTROL_PRING_MSG; + /* only send to active vf */ + ping |= RNP_VT_MSGTYPE_CTS; + rnp_write_mbx(hw, &ping, 1, i); + } +} + +int rnp_get_vf_ringnum(struct rnp_hw *hw, int vf, int num) +{ + int fix_vf_num; + + if (hw->sriov_ring_limit >= 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit + num; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit + num; + } + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * 2 + num; + } else { + fix_vf_num = (vf) * 2 + num; + } + + + } + + 
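+ /* Example: with sriov_ring_limit == 2 and RNP_NET_FEATURE_VF_FIXED set,
+ * VF0 owns rings 2-3 and VF1 owns rings 4-5, leaving rings 0-1 to the PF.
+ */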
return fix_vf_num; +} + +int rnp_setup_ring_maxrate(struct rnp_adapter *adapter, int ring, u64 max_rate) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + int samples_1sec = adapter->hw.usecstocount * 1000000; + + dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TM, + samples_1sec); + dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TH, + max_rate); + return 0; +} + +static int rnp_disable_port_vlan(struct rnp_adapter *adapter, int vf) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + + err = rnp_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); + + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[vf].pf_vlan, vf + 1, + false); + else + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[vf].pf_vlan, vf, + false); + } + } + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + /* clear veb */ + hw->ops.set_vf_vlan_filter(hw, 0, vf, false, true); + + return err; +} + +static int rnp_enable_port_vlan(struct rnp_adapter *adapter, int vf, u16 vlan, + u8 qos) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + + err = rnp_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__RNP_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + hw->ops.set_vf_vlan_filter(hw, vlan, vf, true, true); + + /* if in sriov vlan mode should setup pfvlvf table */ + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode(hw, vlan, vf + 1, + true); + else + hw->ops.set_vf_vlan_mode(hw, vlan, vf, true); + } + } +out: + return err; +} + +int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + int err = 0; + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if (vf < 0 || vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 || + qos > 7) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + if (vlan || qos) { + /* + * Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
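+ * The flow below therefore: rejects the request if the VF already
+ * configured its own VLAN, removes any existing port VLAN, programs the
+ * new one, and finally notifies the VF over the mailbox
+ * (PF_SET_VLAN_STATUS).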
+ */ + if (adapter->vfinfo[vf].vf_vlan) { + dev_err(&adapter->pdev->dev, + "vf set vlan before, delete it before add new\n"); + err = -EINVAL; + goto out; + } + if (adapter->vfinfo[vf].pf_vlan) + err = rnp_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = rnp_enable_port_vlan(adapter, vf, vlan, qos); + + } else { + /* if only vf set vlan */ + if ((adapter->vfinfo[vf].pf_vlan == 0) && + (adapter->vfinfo[vf].vf_vlan)) { + dev_err(&adapter->pdev->dev, + "pf cannot delete vm vlan(ip link add)\n"); + err = -EINVAL; + } + /* if not set vlan before, nothing todo */ + if (adapter->vfinfo[vf].pf_vlan == 0) + return 0; + + err = rnp_disable_port_vlan(adapter, vf); + } + /* send mbx to vf */ + rnp_msg_post_status_signle(adapter, PF_SET_VLAN_STATUS, vf); +out: + return err; +} + +#ifdef CONFIG_PCI_IOV +int rnp_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + return 0; +} + +#endif /* CONFIG_PCI_IOV */ + +int rnp_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); + + return 0; +} + +int rnp_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (vf < 0 || vf >= adapter->num_vfs) { + dev_err(&adapter->pdev->dev, + "NDO set VF link - invalid VF identifier %d\n", vf); + ret = -EINVAL; + goto out; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state %d \n", vf, state); + adapter->vfinfo[vf].link_state = rnp_link_state_on; + rnp_msg_post_status_signle_link(adapter, vf, rnp_link_state_on); + break; + case IFLA_VF_LINK_STATE_DISABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state disable\n", vf); + adapter->vfinfo[vf].link_state = rnp_link_state_off; + rnp_msg_post_status_signle_link(adapter, vf, + rnp_link_state_off); + break; + case IFLA_VF_LINK_STATE_AUTO: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state auto\n", vf); + adapter->vfinfo[vf].link_state = rnp_link_state_auto; + rnp_msg_post_status_signle_link(adapter, vf, + rnp_link_state_auto); + break; + default: + dev_info(&adapter->pdev->dev, + "NDO set VF %d - invalid link state %d\n", vf, state); + ret = -EINVAL; + } +out: + return ret; +} + +int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, + int __always_unused min_tx_rate, int max_tx_rate) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + /* limit vf ring rate */ + int ring_max_rate; + int vf_ring; + int link_speed = 0; + u64 real_rate = 0; + int i; + + if (vf >= hw->max_vfs - 1) + return -EINVAL; + + switch (adapter->link_speed) { + case RNP_LINK_SPEED_40GB_FULL: + link_speed = 40000; + break; + case RNP_LINK_SPEED_25GB_FULL: + link_speed = 25000; + break; + case RNP_LINK_SPEED_10GB_FULL: + link_speed = 10000; + break; + case RNP_LINK_SPEED_1GB_FULL: + link_speed = 1000; + break; + case RNP_LINK_SPEED_100_FULL: + link_speed = 100; + break; + } + /* rate limit cannot be less than 10Mbs or greater than link speed 
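For clarity, the per-ring rate computation performed further down can be reproduced in a standalone sketch. The hardware unit of the value written to RNP_DMA_REG_TX_FLOW_CTRL_TH is not documented in this patch, so the sketch only mirrors the arithmetic; the example numbers are hypothetical.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int max_tx_rate = 1000;		/* requested VF rate in Mbps (hypothetical) */
		int sriov_ring_limit = 2;	/* rings owned by one VF */
		int ring_max_rate = max_tx_rate / sriov_ring_limit;
		uint64_t th_value = (uint64_t)ring_max_rate * 1024 * 128;

		/* each of the VF's rings is limited to an equal share */
		printf("ring_max_rate = %d, TX_FLOW_CTRL_TH = %llu\n",
		       ring_max_rate, (unsigned long long)th_value);
		return 0;
	}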
*/ + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) + return -EINVAL; + + adapter->vfinfo[vf].tx_rate = max_tx_rate; + + ring_max_rate = max_tx_rate / hw->sriov_ring_limit; + + if (max_tx_rate && (ring_max_rate == 0)) + return -EINVAL; + + real_rate = (ring_max_rate * 1024 * 128); + + for (i = 0; i < hw->sriov_ring_limit; i++) { + vf_ring = rnp_get_vf_ringnum(hw, vf, i); + rnp_setup_ring_maxrate(adapter, vf_ring, real_rate); + } + return 0; +} + +int rnp_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective."); + if (test_bit(__RNP_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + rnp_set_vf_mac(adapter, vf, mac); + rnp_msg_post_status_signle(adapter, PF_SET_RESET, vf); + + return 0; +} + +int rnp_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; + + if (adapter->vfinfo[vf].pf_vlan) + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + else + ivi->vlan = adapter->vfinfo[vf].vf_vlan; + + ivi->qos = adapter->vfinfo[vf].pf_qos; + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + switch (adapter->vfinfo[vf].link_state) { + case rnp_link_state_on: + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + break; + case rnp_link_state_off: + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + break; + case rnp_link_state_auto: + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + break; + default: + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + } + ivi->trusted = adapter->vfinfo[vf].trusted; + + return 0; +} + +int rnp_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + vf_dbg("\n\n !!!! %s:%d num_vfs:%d\n", __func__, __LINE__, num_vfs); + if (num_vfs == 0) + return rnp_pci_sriov_disable(dev); + else + return rnp_pci_sriov_enable(dev, num_vfs); +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sriov.h b/drivers/net/ethernet/mucse/rnp/rnp_sriov.h new file mode 100644 index 000000000000..37f7adf7d1bd --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_sriov.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef _RNP_SRIOV_H_ +#define _RNP_SRIOV_H_ + +void rnp_restore_vf_multicasts(struct rnp_adapter *adapter); +void rnp_restore_vf_macvlans(struct rnp_adapter *adapter); + +void rnp_restore_vf_macs(struct rnp_adapter *adapter); +void rnp_msg_task(struct rnp_adapter *adapter); +int rnp_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void rnp_disable_tx_rx(struct rnp_adapter *adapter); +void rnp_ping_all_vfs(struct rnp_adapter *adapter); +int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, + int __always_unused min_tx_rate, int max_tx_rate); +int rnp_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +int rnp_msg_post_status(struct rnp_adapter *adapter, enum PF_STATUS status); + +int rnp_setup_ring_maxrate(struct rnp_adapter *adapter, int ring, u64 max_rate); +int rnp_get_vf_ringnum(struct rnp_hw *hw, int vf, int num); +int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, + int __always_unused min_tx_rate, int max_tx_rate); +int rnp_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int rnp_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); +void rnp_check_vf_rate_limit(struct rnp_adapter *adapter); +int rnp_disable_sriov(struct rnp_adapter *adapter); +#ifdef CONFIG_PCI_IOV +void rnp_enable_sriov_true(struct rnp_adapter *adapter); +void rnp_enable_sriov(struct rnp_adapter *adapter); +#endif +int rnp_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto); +int rnp_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state); +#ifdef CONFIG_PCI_IOV +int rnp_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +#endif +int rnp_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +#endif /* _RNP_SRIOV_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c b/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c new file mode 100644 index 000000000000..0af15e7d992d --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c @@ -0,0 +1,2239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_common.h" +#include "rnp_type.h" + +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" + +#define PHY_EXT_REG_FLAG 0x80000000 + +struct maintain_req { + int magic; +#define MAINTAIN_MAGIC 0xa6a7a8a9 + + int cmd; + int arg0; + int req_data_bytes; + int reply_bytes; + char data[0]; +} __attribute__((packed)); + +struct ucfg_mac_sn { + unsigned char macaddr[64]; + unsigned char sn[32]; + int magic; +#define MAC_SN_MAGIC 0x87654321 + char rev[52]; + unsigned char pn[32]; +} __attribute__((packed, aligned(4))); + +static int print_desc(char *buf, void *data, int len) +{ + u8 *ptr = (u8 *)data; + int ret = 0; + int i = 0; + + for (i = 0; i < len; i++) + ret += sprintf(buf + ret, "%02x ", *(ptr + i)); + + return ret; +} + +#ifdef RNP_HWMON +static ssize_t rnp_hwmon_show_location(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + + return snprintf(buf, PAGE_SIZE, "loc%u\n", rnp_attr->sensor->location); +} + +static ssize_t rnp_hwmon_show_name(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "rnp\n"); +} + +static ssize_t rnp_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + unsigned int value; + + /* reset the temp field */ + rnp_attr->hw->ops.get_thermal_sensor_data(rnp_attr->hw); + + value = rnp_attr->sensor->temp; + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%u\n", value); +} + +static ssize_t rnp_hwmon_show_cautionthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + unsigned int value = rnp_attr->sensor->caution_thresh; + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%u\n", value); +} + +static ssize_t rnp_hwmon_show_maxopthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + unsigned int value = rnp_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%u\n", value); +} + +/** + * rnp_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. 
+ * @adapter: pointer to the adapter structure + * @offset: offset in the eeprom sensor data table + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a + * device_attribute This is included in our hwmon_attr struct that contains + * the references to the data structures we need to get the data to display + */ +static int rnp_add_hwmon_attr(struct rnp_adapter *adapter, unsigned int offset, + int type) +{ + unsigned int n_attr; + struct hwmon_attr *rnp_attr; + + n_attr = adapter->rnp_hwmon_buff->n_hwmon; + rnp_attr = &adapter->rnp_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case RNP_HWMON_TYPE_LOC: + rnp_attr->dev_attr.show = rnp_hwmon_show_location; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_label", + offset + 1); + break; + case RNP_HWMON_TYPE_NAME: + rnp_attr->dev_attr.show = rnp_hwmon_show_name; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "name"); + break; + case RNP_HWMON_TYPE_TEMP: + rnp_attr->dev_attr.show = rnp_hwmon_show_temp; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_input", + offset + 1); + break; + case RNP_HWMON_TYPE_CAUTION: + rnp_attr->dev_attr.show = rnp_hwmon_show_cautionthresh; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_max", + offset + 1); + break; + case RNP_HWMON_TYPE_MAX: + rnp_attr->dev_attr.show = rnp_hwmon_show_maxopthresh; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_crit", + offset + 1); + break; + default: + return -EPERM; + } + + /* These always the same regardless of type */ + rnp_attr->sensor = &adapter->hw.thermal_sensor_data.sensor[offset]; + rnp_attr->hw = &adapter->hw; + rnp_attr->dev_attr.store = NULL; + rnp_attr->dev_attr.attr.mode = 0444; + rnp_attr->dev_attr.attr.name = rnp_attr->name; + sysfs_attr_init(&rnp_attr->dev_attr.attr); + adapter->rnp_hwmon_buff->attrs[n_attr] = &rnp_attr->dev_attr.attr; + ++adapter->rnp_hwmon_buff->n_hwmon; + + return 0; +} +#endif /* RNP_HWMON */ + +#define to_net_device(n) container_of(n, struct net_device, dev) +static ssize_t maintain_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int rbytes = count; + + if (adapter->maintain_buf == NULL) + return 0; + + if (off + count > adapter->maintain_buf_len) + rbytes = adapter->maintain_buf_len - off; + + memcpy(buf, adapter->maintain_buf + off, rbytes); + + if ((off + rbytes) >= adapter->maintain_buf_len) { + kfree(adapter->maintain_buf); + adapter->maintain_buf = NULL; + adapter->maintain_buf_len = 0; + } + + return rbytes; +} + +static ssize_t maintain_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct maintain_req *req; + void *dma_buf = NULL; + dma_addr_t dma_phy; + int bytes; + + if (off == 0) { + if (count < sizeof(*req)) { + return -EINVAL; + } + req = (struct maintain_req *)buf; + if (req->magic != MAINTAIN_MAGIC) { + return -EINVAL; + } + bytes = max_t(int, req->req_data_bytes, req->reply_bytes); + bytes += sizeof(*req); + + /* free no readed buf */ + if (adapter->maintain_buf) { + kfree(adapter->maintain_buf); + adapter->maintain_buf = NULL; + 
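For context, a user-space sketch of how this two-stage handler is driven: a struct maintain_req header (plus optional payload) is written to the "maintain" bin attribute, the request is forwarded to firmware once all req_data_bytes have arrived, and the reply is then available via read(). The sysfs path and the cmd/arg0 values below are assumptions, not taken from this patch.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	struct maintain_req {
		int magic;		/* MAINTAIN_MAGIC, 0xa6a7a8a9 */
		int cmd;
		int arg0;
		int req_data_bytes;
		int reply_bytes;
		char data[];
	} __attribute__((packed));

	int main(void)
	{
		/* path is hypothetical; substitute the real interface name */
		int fd = open("/sys/class/net/eth0/maintain", O_RDWR);
		struct maintain_req req = {
			.magic = 0xa6a7a8a9,
			.cmd = 0,		/* hypothetical command id */
			.arg0 = 0,
			.req_data_bytes = 0,	/* no extra payload in this example */
			.reply_bytes = 64,
		};
		char reply[64];

		if (fd < 0)
			return 1;
		if (write(fd, &req, sizeof(req)) != sizeof(req))
			return 1;
		if (read(fd, reply, sizeof(reply)) < 0)
			return 1;
		close(fd);
		return 0;
	}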
adapter->maintain_buf_len = 0; + } + + dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, + GFP_ATOMIC); + if (!dma_buf) { + netdev_err(netdev, "%s: no memory:%d!", __func__, + bytes); + return -ENOMEM; + } + + adapter->maintain_dma_buf = dma_buf; + adapter->maintain_dma_phy = dma_phy; + adapter->maintain_dma_size = bytes; + adapter->maintain_in_bytes = req->req_data_bytes + sizeof(*req); + + memcpy(dma_buf + off, buf, count); + + if (count < adapter->maintain_in_bytes) + return count; + } + + dma_buf = adapter->maintain_dma_buf; + dma_phy = adapter->maintain_dma_phy; + req = (struct maintain_req *)dma_buf; + + memcpy(dma_buf + off, buf, count); + + /* all data got, send req */ + if ((off + count) >= adapter->maintain_in_bytes) { + int reply_bytes = req->reply_bytes; + err = rnp_maintain_req(hw, req->cmd, req->arg0, + req->req_data_bytes, req->reply_bytes, + dma_phy); + if (err != 0) { + goto err_quit; + } + /* copy data for read */ + if (reply_bytes > 0) { + adapter->maintain_buf_len = reply_bytes; + adapter->maintain_buf = + kmalloc(adapter->maintain_buf_len, GFP_KERNEL); + if (!adapter->maintain_buf) { + netdev_err(netdev, + "No Memory for maintain buf:%d\n", + adapter->maintain_buf_len); + err = -ENOMEM; + + goto err_quit; + } + memcpy(adapter->maintain_buf, dma_buf, reply_bytes); + } + + if (dma_buf) { + dma_free_coherent(&hw->pdev->dev, + adapter->maintain_dma_size, dma_buf, + dma_phy); + } + adapter->maintain_dma_buf = NULL; + } + + return count; +err_quit: + if (dma_buf) { + dma_free_coherent(&hw->pdev->dev, adapter->maintain_dma_size, + dma_buf, dma_phy); + adapter->maintain_dma_buf = NULL; + } + return err; +} + +static BIN_ATTR(maintain, (S_IWUSR | S_IRUGO), maintain_read, maintain_write, + 1 * 1024 * 1024); + +static ssize_t show_version_info(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + ret += sprintf(buf + ret, "driver :%s-%x\n", + rnp_driver_version, hw->pcode); + ret += sprintf(buf + ret, "fw :%d.%d.%d.%d 0x%08x\n", ((char *)&(hw->fw_version))[3], + ((char *)&(hw->fw_version))[2], ((char *)&(hw->fw_version))[1], + ((char *)&(hw->fw_version))[0], hw->bd_uid); + + return ret; +} + +static ssize_t show_ring_sriov_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + ret += sprintf(buf + ret, "now sriov ring num is %d\n", hw->sriov_ring_limit); + + return ret; +} + +static ssize_t store_ring_sriov_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 sriov_ring_num; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + printk("should close sriov first\n"); + return -EINVAL; + } + + if (0 != kstrtou32(buf, 0, &sriov_ring_num)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if ((sriov_ring_num != 0) && (sriov_ring_num <= 32)) { + hw->sriov_ring_limit = sriov_ring_num; + } else { + ret = -EINVAL; + } + + return ret; +} + +static ssize_t show_rx_desc_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = 
netdev_priv(netdev); + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + u32 rx_desc_num = adapter->sysfs_rx_desc_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + int ret = 0; + union rnp_rx_desc *desc; + + if (test_bit(__RNP_DOWN, &adapter->state)) { + ret += sprintf(buf + ret, "port not up \n"); + return ret; + } + + desc = RNP_RX_DESC(ring, rx_desc_num); + ret += sprintf(buf + ret, "rx ring %d desc %d:\n", rx_ring_num, + rx_desc_num); + ret += print_desc(buf + ret, desc, sizeof(*desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_rx_desc_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + u32 rx_desc_num = adapter->sysfs_rx_desc_num; + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + + if (0 != kstrtou32(buf, 0, &rx_desc_num)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if (rx_desc_num < ring->count) { + adapter->sysfs_rx_desc_num = rx_desc_num; + } else { + ret = -EINVAL; + } + + return ret; +} + +static ssize_t show_tcp_sync_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) + ret += sprintf( + buf + ret, "tcp sync remap on queue %d prio %s\n", + adapter->tcp_sync_queue, + (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) ? + "NO" : + "OFF"); + else + ret += sprintf(buf + ret, "tcp sync remap off\n"); + + return ret; +} + +static ssize_t store_tcp_sync_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 tcp_sync_queue; + + if (0 != kstrtou32(buf, 0, &tcp_sync_queue)) + return -EINVAL; + + if (tcp_sync_queue < adapter->num_rx_queues) { + adapter->tcp_sync_queue = tcp_sync_queue; + adapter->priv_flags |= RNP_PRIV_FLAG_TCP_SYNC; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) + hw->ops.set_tcp_sync_remapping( + hw, adapter->tcp_sync_queue, true, true); + else + hw->ops.set_tcp_sync_remapping( + hw, adapter->tcp_sync_queue, true, false); + + } else { + adapter->priv_flags &= ~RNP_PRIV_FLAG_TCP_SYNC; + + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + false, false); + } + + return ret; +} + +static ssize_t show_rx_skip_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN) { + ret += sprintf(buf + ret, "rx skip bytes: %d\n", + 16 * (adapter->priv_skip_count + 1)); + } else { + ret += sprintf(buf + ret, "rx skip off\n"); + } + + return ret; +} + +static ssize_t store_rx_skip_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 rx_skip_count; + + if (0 != kstrtou32(buf, 0, &rx_skip_count)) + return -EINVAL; + + if ((rx_skip_count > 0) && (rx_skip_count < 17)) { + adapter->priv_skip_count = rx_skip_count 
- 1; + adapter->priv_flags |= RNP_PRIV_FLAG_RX_SKIP_EN; + hw->ops.set_rx_skip(hw, adapter->priv_skip_count, true); + + } else { + adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_SKIP_EN; + + hw->ops.set_rx_skip(hw, adapter->priv_skip_count, false); + + return -EINVAL; + } + + return ret; +} + +static ssize_t show_rx_drop_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + ret += sprintf(buf + ret, "rx_drop_status %llx\n", + adapter->rx_drop_status); + + return ret; +} + +static ssize_t store_rx_drop_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u64 rx_drop_status; + + if (0 != kstrtou64(buf, 0, &rx_drop_status)) + return -EINVAL; + + adapter->rx_drop_status = rx_drop_status; + + hw->ops.update_rx_drop(hw); + + return ret; +} + +static ssize_t show_outer_vlan_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN) + ret += sprintf(buf + ret, "double vlan on\n"); + else + ret += sprintf(buf + ret, "double vlan off\n"); + + switch (adapter->outer_vlan_type) { + case outer_vlan_type_88a8: + ret += sprintf(buf + ret, "outer vlan 0x88a8\n"); + + break; + case outer_vlan_type_9100: + ret += sprintf(buf + ret, "outer vlan 0x9100\n"); + + break; + case outer_vlan_type_9200: + ret += sprintf(buf + ret, "outer vlan 0x9200\n"); + + break; + default: + ret += sprintf(buf + ret, "outer vlan error\n"); + break; + } + return ret; +} + +static ssize_t store_outer_vlan_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 outer_vlan_type; + + if (0 != kstrtou32(buf, 0, &outer_vlan_type)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if (outer_vlan_type < outer_vlan_type_max) { + adapter->outer_vlan_type = outer_vlan_type; + } else + ret = -EINVAL; + /* should update to hw */ + if (hw->ops.set_outer_vlan_type) + hw->ops.set_outer_vlan_type(hw, outer_vlan_type); + + return ret; +} + +static ssize_t show_tx_stags_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + ret += sprintf(buf + ret, "tx stags on\n"); + else + ret += sprintf(buf + ret, "tx stags off\n"); + + ret += sprintf(buf + ret, "vid 0x%x\n", adapter->stags_vid); + + return ret; +} + +static ssize_t store_tx_stags_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int ret = count; + u16 tx_stags; + + if (0 != kstrtou16(buf, 0, &tx_stags)) + return -EINVAL; + if (tx_stags < VLAN_N_VID) { + adapter->stags_vid = tx_stags; + } else + ret = -EINVAL; + /* should update vlan filter */ + 
eth->ops.set_vfta(eth, adapter->stags_vid, true); + + return ret; +} + +static ssize_t show_tx_desc_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + u32 tx_desc_num = adapter->sysfs_tx_desc_num; + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + int ret = 0; + struct rnp_tx_desc *desc; + + if (test_bit(__RNP_DOWN, &adapter->state)) { + ret += sprintf(buf + ret, "port not up \n"); + return ret; + } + + desc = RNP_TX_DESC(ring, tx_desc_num); + ret += sprintf(buf + ret, "tx ring %d desc %d:\n", tx_ring_num, + tx_desc_num); + ret += print_desc(buf + ret, desc, sizeof(*desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_tx_desc_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + u32 tx_desc_num = adapter->sysfs_tx_desc_num; + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + + if (0 != kstrtou32(buf, 0, &tx_desc_num)) + return -EINVAL; + if (tx_desc_num < ring->count) + adapter->sysfs_tx_desc_num = tx_desc_num; + else + ret = -EINVAL; + + return ret; +} + +static ssize_t show_para_info(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mac_info *mac = &hw->mac; + + ret += sprintf(buf + ret, "nsi_en:%d\n", hw->ncsi_en); + ret += sprintf( + buf + ret, + "eth: \n\tmc_filter_type:%u, mcft_size:%u, vft_size:%u, " + "num_rar_entries:%u,\n" + "\trar_highwater:%u, rx_pb_size:%u, max_tx_queues:%u, " + "max_rx_queues:%u, \n" + "\treg_off:%u, orig_autoc:%u, cached_autoc:%u, orig_autoc2:%u\n", + eth->mc_filter_type, eth->mcft_size, eth->vft_size, + eth->num_rar_entries, eth->rar_highwater, eth->rx_pb_size, + eth->max_tx_queues, eth->max_rx_queues, eth->reg_off, + eth->orig_autoc, eth->cached_autoc, eth->orig_autoc2); + + ret += sprintf( + buf + ret, + "mac:\n\t" + "mc_filter_type:%u mcft_size:%u vft_size:%u num_rar_entries:%u \n" + "\trar_highwater:%u rx_pb_size:%u max_tx_queues:%u max_rx_queues:%u \n" + "\treg_off:%u orig_autoc:%u cached_autoc:%u orig_autoc2:%u " + "orig_link_settings_stored:%u \n" + "\tautotry_restart:%u mac_flags:%u\n", + mac->mc_filter_type, mac->mcft_size, mac->vft_size, + mac->num_rar_entries, mac->rar_highwater, mac->rx_pb_size, + mac->max_tx_queues, mac->max_rx_queues, mac->reg_off, + mac->orig_autoc, mac->cached_autoc, mac->orig_autoc2, + mac->orig_link_settings_stored, mac->autotry_restart, + mac->mac_flags); + + return ret; +} + +static ssize_t show_rx_ring_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + int ret = 0; + union rnp_rx_desc *rx_desc; + + ret += sprintf(buf + ret, "queue %d info:\n", rx_ring_num); + ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use); + ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean); + rx_desc = RNP_RX_DESC(ring, ring->next_to_clean); + 
ret += sprintf(buf + ret, "next_to_clean desc: "); + ret += print_desc(buf + ret, rx_desc, sizeof(*rx_desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_rx_ring_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + + if (0 != kstrtou32(buf, 0, &rx_ring_num)) + return -EINVAL; + if (rx_ring_num < adapter->num_rx_queues) + adapter->sysfs_rx_ring_num = rx_ring_num; + else + ret = -EINVAL; + + return ret; +} + +static ssize_t show_tx_ring_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + int ret = 0; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *eop_desc; + + ret += sprintf(buf + ret, "queue %d info:\n", tx_ring_num); + ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use); + ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean); + + tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; + eop_desc = tx_buffer->next_to_watch; + /* if have watch desc */ + if (eop_desc) { + ret += sprintf(buf + ret, "next_to_watch:\n"); + ret += print_desc(buf + ret, eop_desc, sizeof(*eop_desc)); + ret += sprintf(buf + ret, "\n"); + } else { + ret += sprintf(buf + ret, "no next_to_watch data\n"); + } + + return ret; +} + +static ssize_t store_tx_ring_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + + if (0 != kstrtou32(buf, 0, &tx_ring_num)) + return -EINVAL; + if (tx_ring_num < adapter->num_tx_queues) + adapter->sysfs_tx_ring_num = tx_ring_num; + else + ret = -EINVAL; + + return ret; +} + +static ssize_t show_tx_counter(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 val = 0; + int i, ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + ret += sprintf(buf + ret, "tx counters\n"); + for (i = 0; i < 4; i++) { + ret += sprintf(buf + ret, "ring%d-tx:\n", i); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_LEN); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "len:", + RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_LEN, + val); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_HEAD); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "head:", + RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_HEAD, + val); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_TAIL); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "tail:", + RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_TAIL, + val); + } + + ret += sprintf(buf + ret, "to_1to4_p1:\n"); + + val = rd32(hw, RNP_ETH_1TO4_INST0_IN_PKTS); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "emac_in:", RNP_ETH_1TO4_INST0_IN_PKTS, val); + + val = rd32(hw, RNP_ETH_IN_0_TX_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "emac_send:", RNP_ETH_IN_0_TX_PKT_NUM(0), val); + + ret 
+= sprintf(buf + ret, "to_1to4_p2:\n"); + + val = rd32(hw, RNP_ETH_IN_1_TX_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "sop_pkt:", RNP_ETH_IN_1_TX_PKT_NUM(0), val); + + val = rd32(hw, RNP_ETH_IN_2_TX_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "eop_pkt:", RNP_ETH_IN_2_TX_PKT_NUM(0), val); + + val = rd32(hw, RNP_ETH_IN_3_TX_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "send_terr:", RNP_ETH_IN_3_TX_PKT_NUM(0), val); + + ret += sprintf(buf + ret, "to_tx_trans(phy):\n"); + + val = rd32(hw, RNP_ETH_EMAC_TX_TO_PHY_PKTS(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "in:", RNP_ETH_EMAC_TX_TO_PHY_PKTS(0), val); + + val = rd32(hw, RNP_ETH_TXTRANS_PTP_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "out:", RNP_ETH_TXTRANS_PTP_PKT_NUM(0), val); + + ret += sprintf(buf + ret, "mac:\n"); + + val = rd32(hw, 0x60000); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "mac-tx-cfg:", 0x60000, val); + + val = rd32(hw, 0x1081c); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "mac-tx:", 0x1081c, + val); + + val = rd32(hw, 0x1087c); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "underflow_err:", 0x1087c, val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "port0_txtrans_sop:", RNP_ETH_TX_DEBUG(0), val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(4)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "port0_txtrans_eop:", RNP_ETH_TX_DEBUG(4), val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(13)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "tx_empty:", RNP_ETH_TX_DEBUG(13), val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(14)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", + "tx_prog_full:", RNP_ETH_TX_DEBUG(14), val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(15)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", + "tx_full:", RNP_ETH_TX_DEBUG(15), val); + + return ret; +} + +static DEVICE_ATTR(tx_counter, S_IRUGO | S_IWUSR, show_tx_counter, NULL); + +static ssize_t show_rx_counter(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 val = 0, port = 0; + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + ret += sprintf(buf + ret, "rx counters\n"); + for (port = 0; port < 4; port++) { + ret += sprintf(buf + ret, "emac_rx_trans (port:%d):\n", port); + + val = rd32(hw, RNP_XLMAC + 0x900 + port * 0x10000); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "mac-pkts:", RNP_XLMAC + 0x900 + port * 0x10000, + val); + + val = rd32(hw, RNP_RXTRANS_RX_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "pkts:", RNP_RXTRANS_RX_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_DROP_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "drop:", RNP_RXTRANS_DROP_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_WDT_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "wdt_err:", RNP_RXTRANS_WDT_ERR_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_CODE_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "code_err:", RNP_RXTRANS_CODE_ERR_PKTS(port), + val); + + val = rd32(hw, RNP_RXTRANS_CRC_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "crc_err:", RNP_RXTRANS_CRC_ERR_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_SLEN_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "slen_err:", RNP_RXTRANS_SLEN_ERR_PKTS(port), + val); + + val = 
rd32(hw, RNP_RXTRANS_GLEN_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "glen_err:", RNP_RXTRANS_GLEN_ERR_PKTS(port), + val); + + val = rd32(hw, RNP_RXTRANS_IPH_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "iph_err:", RNP_RXTRANS_IPH_ERR_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_CSUM_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "csum_err:", RNP_RXTRANS_CSUM_ERR_PKTS(port), + val); + + val = rd32(hw, RNP_RXTRANS_LEN_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "len_err:", RNP_RXTRANS_LEN_ERR_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_CUT_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "trans_cut_err:", RNP_RXTRANS_CUT_ERR_PKTS(port), + val); + + val = rd32(hw, RNP_RXTRANS_EXCEPT_BYTES(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "expt_byte_err:", RNP_RXTRANS_EXCEPT_BYTES(port), + val); + + val = rd32(hw, RNP_RXTRANS_G1600_BYTES_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + ">1600Byte:", RNP_RXTRANS_G1600_BYTES_PKTS(port), + val); + } + + ret += sprintf(buf + ret, "gather:\n"); + val = rd32(hw, RNP_ETH_TOTAL_GAT_RX_PKT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "total_in_pkts:", RNP_ETH_TOTAL_GAT_RX_PKT_NUM, val); + + port = 0; + val = rd32(hw, RNP_ETH_RX_PKT_NUM(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "to_nxt_mdodule:", RNP_ETH_RX_PKT_NUM(port), val); + + for (port = 0; port < 4; port++) { + u8 pname[16] = { 0 }; + val = rd32(hw, RNP_ETH_RX_PKT_NUM(port)); + sprintf(pname, "p%d-rx:", port); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", pname, + RNP_ETH_RX_PKT_NUM(port), val); + } + + for (port = 0; port < 4; port++) { + u8 pname[16] = { 0 }; + val = rd32(hw, RNP_ETH_RX_DROP_PKT_NUM(port)); + sprintf(pname, "p%d-drop:", port); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", pname, + RNP_ETH_RX_DROP_PKT_NUM(port), val); + } + + ret += sprintf(buf + ret, "debug:\n"); + val = rd32(hw, RNP_ETH_RX_DEBUG(10)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "data_eop:", RNP_ETH_RX_DEBUG(10), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(11)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "data_descs:", RNP_ETH_RX_DEBUG(11), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(12)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "data_desc_sop:", RNP_ETH_RX_DEBUG(12), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(13)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "data_desc_eop:", RNP_ETH_RX_DEBUG(13), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(14)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "4to1_gather_sop:", RNP_ETH_RX_DEBUG(14), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(15)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "4to1_gather_eop:", RNP_ETH_RX_DEBUG(15), val); + + ret += sprintf(buf + ret, "ip-parse:\n"); + + val = rd32(hw, RNP_ETH_PKT_EGRESS_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "pkg_egree:", RNP_ETH_PKT_EGRESS_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "L3_len_err:", RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "ip_hdr_err:", RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "l3-csum-err:", RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM, val); + + val = rd32(hw, 
RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "l4-csum-err:", RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_SCTP_CHK_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "sctp-err:", RNP_ETH_PKT_SCTP_CHK_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_VLAN_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "vlan-err:", RNP_ETH_PKT_VLAN_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_EXCEPT_SHORT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "except_short_num:", RNP_ETH_PKT_EXCEPT_SHORT_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_PTP_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "ptp:", RNP_ETH_PKT_PTP_NUM, val); + + ret += sprintf(buf + ret, "to-indecap:\n"); + + val = rd32(hw, RNP_ETH_DECAP_PKT_IN_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "*in engin*:", RNP_ETH_DECAP_PKT_IN_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "*out engin*:", RNP_ETH_DECAP_PKT_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_DMAC_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "to-dma/host:", RNP_ETH_DECAP_DMAC_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_BMC_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "to-bmc:", RNP_ETH_DECAP_BMC_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_SW_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "to-switch:", RNP_ETH_DECAP_SW_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_MIRROR_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "bmc+host:", RNP_ETH_DECAP_MIRROR_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(0x0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "err_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(0x0), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(1)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "plicy_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(1), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(2)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "dmac_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(2), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(3)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "bmc_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(3), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(4)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "sw_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(4), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(5)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "rm_vlane_num:", RNP_ETH_DECAP_PKT_DROP_NUM(5), val); + + ret += sprintf(buf + ret, "\npolicy-drop-reason:\n"); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(4)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", "host_l2_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(4), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(5)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "redir_input_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(5), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(6)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "redir_etypt_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(6), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(7)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "redir_tcp_sync_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(7), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(8)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "redir_tuple5_match_drop:", + RNP_ETH_BASE + 
RNP10_ETH_RX_DEBUG(8), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(9)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "recdir_tcam_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(9), val); + + ret += sprintf(buf + ret, "dma-2-host:\n"); + + val = rd32(hw, 0x264); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "fifo equ:", 0x264, + val); + + val = rd32(hw, 0x268); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "fifo deq:", 0x268, + val); + + val = rd32(hw, 0x114); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "unexpt_abtring:", 0x114, val); + + val = rd32(hw, 0x288); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "pci2host:", 0x288, + val); + + for (port = 0; port < 4; port++) { + ret += sprintf(buf + ret, "rx-ring%d:\n", port); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_HEAD); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "head:", + RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_HEAD, + val); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_TAIL); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "tail:", + RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_TAIL, + val); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_LEN); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "len:", + RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_LEN, + val); + } + + /* maybe too large */ + if (ret >= PAGE_SIZE) + ret = PAGE_SIZE; + + return ret; +} + +static DEVICE_ATTR(rx_counter, S_IRUGO | S_IWUSR, show_rx_counter, NULL); + +static ssize_t show_active_vid(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u16 vid; + u16 current_vid = 0; + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u8 vfnum = hw->max_vfs - 1; + /* use last-vf's table entry. the last one */ + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + current_vid = rd32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, + vfnum)); + } + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) { + ret += sprintf(buf + ret, "%u%s ", vid, + (current_vid == vid ? "*" : "")); + } + ret += sprintf(buf + ret, "\n"); + return ret; +} + +static ssize_t store_active_vid(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + u16 vid; + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u8 vfnum = hw->max_vfs - 1; + int port = 0; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return -EIO; + + if (0 != kstrtou16(buf, 0, &vid)) + return -EINVAL; + + if ((vid < 4096) && test_bit(vid, adapter->active_vlans)) { + if (rd32(hw, RNP_DMA_VERSION) >= 0x20201231) { + for (port = 0; port < 4; port++) + wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(port, vfnum), + vid); + } else { + wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, vfnum), + vid); + } + err = 0; + } + + return err ? 
err : count; +} + +static inline int pn_sn_dlen(char *v, int v_len) +{ + int i, len = 0; + for (i = 0; i < v_len; i++) { + if (isascii(v[i])) { + len++; + } else { + break; + } + } + return len; +} + +static int rnp_mbx_get_pn_sn(struct rnp_hw *hw, char pn[33], char sn[33]) +{ + struct maintain_req *req; + void *dma_buf = NULL; + dma_addr_t dma_phy; + struct ucfg_mac_sn *cfg; + + int err = 0, bytes = sizeof(*req) + sizeof(struct ucfg_mac_sn); + + memset(pn, 0, 33); + memset(sn, 0, 33); + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_KERNEL); + if (!dma_buf) { + printk("%s: no memory:%d!", __func__, bytes); + return -ENOMEM; + } + + req = (struct maintain_req *)dma_buf; + memset(dma_buf, 0, bytes); + cfg = (struct ucfg_mac_sn *)(req + 1); + req->magic = MAINTAIN_MAGIC; + req->cmd = 0; + req->arg0 = 3; + req->req_data_bytes = 0; + req->reply_bytes = bytes - sizeof(*req); + + err = rnp_maintain_req(hw, req->cmd, req->arg0, req->req_data_bytes, + req->reply_bytes, dma_phy); + if (err != 0) { + goto err_quit; + } + if (cfg->magic == MAC_SN_MAGIC) { + int sz = pn_sn_dlen(cfg->pn, 32); + if (sz) { + memcpy(pn, cfg->pn, sz); + pn[sz] = 0; + } + sz = pn_sn_dlen(cfg->sn, 32); + if (sz) { + memcpy(sn, cfg->sn, sz); + sn[sz] = 0; + } + } + +err_quit: + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + + return 0; +} + +static ssize_t show_own_vpd(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + char pn[33] = { 0 }, sn[33] = { 0 }; + + rnp_mbx_get_pn_sn(hw, pn, sn); + + ret += sprintf( + buf + ret, "Product Name: %s\n", + "Ethernet Controller N10 Series for 10GbE or 40GbE (Dual-port)"); + ret += sprintf(buf + ret, "[PN] Part number: %s\n", pn); + ret += sprintf(buf + ret, "[SN] Serial number: %s\n", sn); + + return ret; +} +static DEVICE_ATTR(own_vpd, S_IRUGO, show_own_vpd, NULL); + +static ssize_t show_port_idx(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + + ret += sprintf(buf, "%d\n", adapter->portid_of_card); + return ret; +} +static DEVICE_ATTR(port_idx, S_IRUGO | S_IRUSR, show_port_idx, NULL); + +static ssize_t show_debug_linkstat(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + ret += sprintf(buf, "%d %d dumy:0x%x up-flag:%d carry:%d\n", + adapter->link_up, adapter->hw.link, rd32(hw, 0xc), + adapter->flags & RNP_FLAG_NEED_LINK_UPDATE, + netif_carrier_ok(netdev)); + return ret; +} + +static DEVICE_ATTR(debug_linkstat, S_IRUGO | S_IRUSR, show_debug_linkstat, + NULL); + +static ssize_t show_sfp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf( + buf, "mod-abs:%d\ntx-fault:%d\ntx-dis:%d\nrx-los:%d\n", + adapter->sfp.mod_abs, adapter->sfp.fault, + adapter->sfp.tx_dis, adapter->sfp.los); + } + + return ret; +} +static DEVICE_ATTR(sfp, S_IRUGO | S_IRUSR, show_sfp, NULL); + +static ssize_t 
store_pci(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int gen = 3, lanes = 8; + + if (count > 30) + return -EINVAL; + + if (sscanf(buf, "gen%dx%d", &gen, &lanes) != 2) { + printk("Error: invalid input. example: gen3x8\n"); + return -EINVAL; + } + if (gen > 3 || lanes > 8) + return -EINVAL; + + err = rnp_set_lane_fun(hw, LANE_FUN_PCI_LANE, gen, lanes, 0, 0); + + return err ? err : count; +} + +static ssize_t show_pci(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "gen%dx%d\n", hw->pci_gen, hw->pci_lanes); + } + + return ret; +} + +static DEVICE_ATTR(pci, S_IRUGO | S_IWUSR | S_IRUSR, show_pci, store_pci); + +static ssize_t store_sfp_tx_disable(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_SFP_TX_DISABLE, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_sfp_tx_disable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->sfp.tx_dis); + } + + return ret; +} + +static DEVICE_ATTR(sfp_tx_disable, S_IRUGO | S_IWUSR | S_IRUSR, + show_sfp_tx_disable, store_sfp_tx_disable); + +static ssize_t store_link_traing(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_LINK_TRAING, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_link_traing(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->link_traing); + } + + return ret; +} + +static DEVICE_ATTR(link_traing, S_IRUGO | S_IWUSR | S_IRUSR, show_link_traing, + store_link_traing); + +static ssize_t store_fec(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_FEC, !!enable, 0, 0, 0); + + return err ? 
err : count; +} + +static ssize_t show_fec(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->fec); + } + + return ret; +} + +static DEVICE_ATTR(fec, S_IRUGO | S_IWUSR | S_IRUSR, show_fec, store_fec); + +static ssize_t store_pcs(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 reg_hi = 0, reg_lo = 0, pcs_base_regs = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int input_arg_cnt; + u32 pcs_phy_regs[] = { + 0x00040000, 0x00041000, 0x00042000, 0x00043000, + 0x00040000, 0x00041000, 0x00042000, 0x00043000, + }; + + if (count > 64) { + printk("Error: Input size >100: too large\n"); + return -EINVAL; + } + + input_arg_cnt = sscanf(buf, "%u %x %x", &adapter->sysfs_pcs_lane_num, + &adapter->sysfs_bar4_reg_addr, + &adapter->sysfs_bar4_reg_val); + + if (input_arg_cnt != 2 && input_arg_cnt != 3) { + printk("Error: Invalid Input: read lane x reg 0xXXX or write phy x reg " + "0xXXX val 0xXXX\n"); + return -EINVAL; + } + + if (adapter->sysfs_pcs_lane_num > 8) { + printk("Error: Invalid value. should in 0~7\n"); + return -EINVAL; + } + + switch (input_arg_cnt) { + case 2: + reg_hi = adapter->sysfs_bar4_reg_addr >> 8; + reg_lo = (adapter->sysfs_bar4_reg_addr & 0xff) << 2; + pcs_base_regs = pcs_phy_regs[adapter->sysfs_pcs_lane_num]; + wr32(hw, pcs_base_regs + (0xff << 2), reg_hi); + adapter->sysfs_bar4_reg_val = rd32(hw, pcs_base_regs + reg_lo); + break; + case 3: + reg_hi = adapter->sysfs_bar4_reg_addr >> 8; + reg_lo = (adapter->sysfs_bar4_reg_addr & 0xff) << 2; + pcs_base_regs = pcs_phy_regs[adapter->sysfs_pcs_lane_num]; + wr32(hw, pcs_base_regs + (0xff << 2), reg_hi); + wr32(hw, pcs_base_regs + reg_lo, adapter->sysfs_bar4_reg_val); + break; + default: + printk("Error: Invalid value. 
input_arg_cnt=%d\n", + input_arg_cnt); + break; + } + adapter->sysfs_input_arg_cnt = input_arg_cnt; + + return count; +} + +static ssize_t show_pcs(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + + switch (adapter->sysfs_input_arg_cnt) { + case 2: + ret += sprintf(buf, "lane%u pcs: 0x%x => 0x%x\n", + adapter->sysfs_pcs_lane_num, + adapter->sysfs_bar4_reg_addr, + adapter->sysfs_bar4_reg_val); + break; + case 3: + ret += sprintf(buf, "lane%u pcs: 0x%x <= 0x%x\n", + adapter->sysfs_pcs_lane_num, + adapter->sysfs_bar4_reg_addr, + adapter->sysfs_bar4_reg_val); + break; + default: + break; + } + + return ret; +} + +static DEVICE_ATTR(pcs_reg, S_IRUGO | S_IWUSR | S_IRUSR, show_pcs, store_pcs); + +static ssize_t phy_reg_read(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int val = 0; + int err = -EINVAL; + int phy_reg = adapter->sysfs_phy_reg; + + if (hw) { + if (adapter->sysfs_is_phy_ext_reg) { + err = rnp_mbx_phy_read(hw, phy_reg | PHY_EXT_REG_FLAG, + &val); + } else { + err = rnp_mbx_phy_read(hw, phy_reg, &val); + } + } + + if (err) { + return 0; + } else { + return sprintf(buf, "phy %s 0x%04x : 0x%04x\n", + adapter->sysfs_is_phy_ext_reg ? "ext reg" : + "reg", + phy_reg, val & 0xffff); + } +} + +static ssize_t phy_reg_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int i = 0, argc = 0, err = -EINVAL; + char argv[3][16]; + unsigned long val[3] = { 0 }; + int phy_reg = 0; + + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + memset(argv, 0, sizeof(argv)); + argc = sscanf(buf, "%15s %15s %15s", argv[0], argv[1], argv[2]); + + if (argc < 1) { + return -EINVAL; + } + + adapter->sysfs_is_phy_ext_reg = 0; + + if (strcmp(argv[0], "ext") == 0) { + adapter->sysfs_is_phy_ext_reg = 1; + } else { + if (kstrtoul(argv[0], 0, &val[0])) { + return -EINVAL; + } + } + + for (i = 1; i < argc; i++) { + if (kstrtoul(argv[i], 0, &val[i])) { + return -EINVAL; + } + } + + if (argc == 1) { + if (adapter->sysfs_is_phy_ext_reg) { + return -EINVAL; + } else { + /* set phy reg index */ + phy_reg = val[0]; + err = 0; + } + } + + if (argc == 2) { + if (adapter->sysfs_is_phy_ext_reg) { + /* set ext phy reg index */ + phy_reg = val[1]; + err = 0; + } else { + /* write phy reg */ + phy_reg = val[0]; + err = rnp_mbx_phy_write(hw, phy_reg, val[1]); + } + } + + if (argc == 3) { + if (adapter->sysfs_is_phy_ext_reg) { + /* write ext phy reg */ + phy_reg = val[1]; + err = rnp_mbx_phy_write(hw, phy_reg | PHY_EXT_REG_FLAG, + val[2]); + } else { + return -EINVAL; + } + } + + adapter->sysfs_phy_reg = phy_reg; + + return err ? err : count; +} + +static DEVICE_ATTR(phy_reg, 0664, phy_reg_read, phy_reg_write); + +static ssize_t store_prbs(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long prbs = 0; + + if (kstrtol(buf, 10, &prbs)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_PRBS, prbs, 0, 0, 0); + + return err ? 
err : count; +} + +static DEVICE_ATTR(prbs, S_IRUGO | S_IWUSR | S_IRUSR, NULL, store_prbs); + +static ssize_t store_autoneg(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_AN, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_autoneg(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->an); + } + + return ret; +} + +static DEVICE_ATTR(autoneg, S_IRUGO | S_IWUSR | S_IRUSR, show_autoneg, + store_autoneg); + +static ssize_t store_lane_si(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int si_main = -1, si_pre = -1, si_post = -1, si_txboost = -1; + int cnt; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + printk("Error: rnp_mbx_get_lane_stat failed\n"); + return -EIO; + } + if (count > 100) { + printk("Error: Input size >100: too large\n"); + return -EINVAL; + } + + if (hw->supported_link & + (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL)) { + u32 lane0_main, lane0_pre, lane0_post, lane0_boost; + u32 lane1_main, lane1_pre, lane1_post, lane1_boost; + u32 lane2_main, lane2_pre, lane2_post, lane2_boost; + u32 lane3_main, lane3_pre, lane3_post, lane3_boost; + + cnt = sscanf(buf, + "%u %u %u %u,%u %u %u %u,%u %u %u %u,%u %u %u %u", + &lane0_main, &lane0_pre, &lane0_post, &lane0_boost, + &lane1_main, &lane1_pre, &lane1_post, &lane1_boost, + &lane2_main, &lane2_pre, &lane2_post, &lane2_boost, + &lane3_main, &lane3_pre, &lane3_post, + &lane3_boost); + if (cnt != 16) { + printk("Error: Invalid Input.\n" + " ,,,\n" + " laneX_si:
 <main> <pre> <post> <boost>\n\n"
+			       "   ie: 21 0 11 11,22 0 12 12,23 0 13 13,24 0 14 14 \n");
+
+			return -EINVAL;
+		}
+
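+		/*
+		 * Pack the per-lane values into 32-bit words: main/pre/post
+		 * take one byte per lane (lane0 in the lowest byte) and the
+		 * tx boost values take one 4-bit nibble per lane.
+		 */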
+		si_main = ((lane0_main & 0xff) << 0) |
+			  ((lane1_main & 0xff) << 8) |
+			  ((lane2_main & 0xff) << 16) |
+			  ((lane3_main & 0xff) << 24);
+		si_pre = ((lane0_pre & 0xff) << 0) | ((lane1_pre & 0xff) << 8) |
+			 ((lane2_pre & 0xff) << 16) |
+			 ((lane3_pre & 0xff) << 24);
+		si_post = ((lane0_post & 0xff) << 0) |
+			  ((lane1_post & 0xff) << 8) |
+			  ((lane2_post & 0xff) << 16) |
+			  ((lane3_post & 0xff) << 24);
+		si_txboost = ((lane0_boost & 0xf) << 0) |
+			     ((lane1_boost & 0xf) << 4) |
+			     ((lane2_boost & 0xf) << 8) |
+			     ((lane3_boost & 0xf) << 12);
+		printk("%s: main:0x%x pre:0x%x post:0x%x boost:0x%x\n",
+		       adapter->name, si_main, si_pre, si_post, si_txboost);
+	} else {
+		cnt = sscanf(buf, "%u %u %u %u", &si_main, &si_pre, &si_post,
+			     &si_txboost);
+		if (cnt != 4) {
+			printk("Error: Invalid Input: <main> <pre> <post> <boost>\n");
+			return -EINVAL;
+		}
+		if (si_main > 63 || si_pre > 63 || si_post > 63) {
+			printk("Error: Invalid value. should be in 0~63\n");
+			return -EINVAL;
+		}
+		if (si_txboost > 15) {
+			printk("Error: Invalid txboost. should be in 0~15\n");
+			return -EINVAL;
+		}
+	}
+	err = rnp_set_lane_fun(hw, LANE_FUN_SI, si_main, si_pre, si_post,
+			       si_txboost);
+
+	return err ? err : count;
+}
+
+static ssize_t show_lane_si(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	int ret = 0, i;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	struct rnp_hw *hw = &adapter->hw;
+
+	if (rnp_mbx_get_lane_stat(hw) != 0) {
+		ret += sprintf(buf, " IO Error\n");
+	} else {
+		if (hw->supported_link &
+		    (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL)) {
+			ret += sprintf(
+				buf + ret,
+				"main:0x%08x pre:0x%08x post:0x%08x tx_boost:0x%04x\n\n",
+				adapter->si.main, adapter->si.pre,
+				adapter->si.post, adapter->si.tx_boost);
+			for (i = 0; i < 4; i++) {
+				ret += sprintf(
+					buf + ret,
+					" lane%d main:%u pre:%u post:%u tx_boost:%u\n",
+					i, (adapter->si.main >> (i * 8)) & 0xff,
+					(adapter->si.pre >> (i * 8)) & 0xff,
+					(adapter->si.post >> (i * 8)) & 0xff,
+					(adapter->si.tx_boost >> (i * 4)) &
+						0xf);
+			}
+		} else {
+			ret += sprintf(
+				buf + ret,
+				"lane:%d main:%u pre:%u post:%u tx_boost:%u\n",
+				hw->nr_lane, adapter->si.main, adapter->si.pre,
+				adapter->si.post, adapter->si.tx_boost & 0xf);
+		}
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(si, S_IRUGO | S_IWUSR | S_IRUSR, show_lane_si,
+		   store_lane_si);
+
+static ssize_t show_temperature(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	struct rnp_hw *hw = &adapter->hw;
+	int ret = 0, temp = 0, voltage = 0;
+
+	temp = rnp_mbx_get_temp(hw, &voltage);
+
+	ret += sprintf(buf, "temp:%d oC  voltage:%d mV\n", temp, voltage);
+	return ret;
+}
+
+static struct pci_dev *pcie_find_root_port_old(struct pci_dev *dev)
+{
+	while (1) {
+		if (!pci_is_pcie(dev))
+			break;
+		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+			return dev;
+		if (!dev->bus->self)
+			break;
+		dev = dev->bus->self;
+	}
+	return NULL;
+}
+
+static ssize_t show_root_slot_info(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+	struct pci_dev *root_pdev = pcie_find_root_port_old(adapter->pdev);
+
+	if (root_pdev) {
+		ret += sprintf(buf + ret, "%02x:%02x.%x\n",
+			       root_pdev->bus->number,
+			       PCI_SLOT(root_pdev->devfn),
+			       PCI_FUNC(root_pdev->devfn));
+	}
+	return ret;
+}
+
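+/*
+ * Enable/disable the switch loopback path: packets arriving on sport_lane
+ * are redirected to dport_lane through the inport policy register, and the
+ * MAC packet filter is opened (promiscuous + all-multicast) while the
+ * loopback is enabled.
+ */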
+static int do_switch_loopback_set(struct rnp_adapter *adapter, int en,
+				  int sport_lane, int dport_lane)
+{
+	int v;
+	struct rnp_hw *hw = &adapter->hw;
+
+	printk("%s: %s %d -> %d en:%d\n", __func__,
+	       netdev_name(adapter->netdev), sport_lane, dport_lane, en);
+
+	if (en) {
+		adapter->flags |= RNP_FLAG_SWITCH_LOOPBACK_EN;
+	} else {
+		adapter->flags &= ~RNP_FLAG_SWITCH_LOOPBACK_EN;
+	}
+
+	wr32(hw, RNP_ETH_INPORT_POLICY_REG(sport_lane),
+	     BIT(29) | (dport_lane << 16));
+
+	v = rd32(hw, RNP_ETH_INPORT_POLICY_VAL);
+	if (en) {
+		v |= BIT(sport_lane);
+	} else {
+		v &= ~BIT(sport_lane);
+	}
+	wr32(hw, RNP_ETH_INPORT_POLICY_VAL, v);
+
+	v = mac_rd32(&hw->mac, RNP10_MAC_PKT_FLT);
+	if (en) {
+		v |= (RNP_RX_ALL | RNP_RX_ALL_MUL);
+	} else {
+		v &= ~(RNP_RX_ALL | RNP_RX_ALL_MUL);
+	}
+	mac_wr32(&hw->mac, RNP10_MAC_PKT_FLT, v);
+
+	eth_wr32(&hw->eth, RNP10_ETH_DMAC_MCSTCTRL, 0x0);
+
+	return 0;
+}
+
+static ssize_t _switch_loopback(struct rnp_adapter *adapter,
+				const char *peer_eth, int en)
+{
+	struct net_device *peer_netdev = NULL;
+	struct rnp_adapter *peer_adapter = NULL;
+	char name[100];
+
+	strscpy(name, peer_eth, sizeof(name));
+	strim(name);
+
+	printk("%s: nr_lane:%d peer_lane:%s en:%d\n", __func__, 0, peer_eth,
+	       en);
+
+	peer_netdev = dev_get_by_name(&init_net, name);
+	if (!peer_netdev) {
+		printk("cannot find %s\n", name);
+		return -EINVAL;
+	}
+	peer_adapter = netdev_priv(peer_netdev);
+
+	if (PCI_SLOT(peer_adapter->pdev->devfn) !=
+	    PCI_SLOT(adapter->pdev->devfn)) {
+		printk("%s %s not in the same slot\n", netdev_name(adapter->netdev),
+		       netdev_name(peer_adapter->netdev));
+		dev_put(peer_netdev);
+		return -EINVAL;
+	}
+
+	printk("%s: %s(%d)<->%s(%d)\n", __func__, netdev_name(adapter->netdev),
+	       0, netdev_name(peer_adapter->netdev), 0);
+
+	do_switch_loopback_set(adapter, en, 0,
+			       rnp_is_pf1(&peer_adapter->hw) ? 4 : 0);
+	do_switch_loopback_set(peer_adapter, en, 0,
+			       rnp_is_pf1(&adapter->hw) ? 4 : 0);
+
+	if (peer_netdev) {
+		dev_put(peer_netdev);
+	}
+
+	return 0;
+}
+
+static ssize_t store_switch_loopback_on(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct rnp_adapter *adapter = netdev_priv(to_net_device(dev));
+
+	return _switch_loopback(adapter, buf, 1) == 0 ? count : -EINVAL;
+}
+
+static DEVICE_ATTR(switch_loopback_on, 0664, NULL, store_switch_loopback_on);
+
+static ssize_t store_switch_loopback_off(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct rnp_adapter *adapter = netdev_priv(to_net_device(dev));
+
+	return _switch_loopback(adapter, buf, 0) == 0 ? count : -EINVAL;
+}
+static DEVICE_ATTR(switch_loopback_off, 0664, NULL, store_switch_loopback_off);
+static DEVICE_ATTR(root_slot_info, 0644, show_root_slot_info, NULL);
+static DEVICE_ATTR(temperature, S_IRUGO | S_IRUSR, show_temperature, NULL);
+static DEVICE_ATTR(active_vid, 0644, show_active_vid, store_active_vid);
+static DEVICE_ATTR(tx_ring_info, 0644, show_tx_ring_info, store_tx_ring_info);
+static DEVICE_ATTR(rx_ring_info, 0644, show_rx_ring_info, store_rx_ring_info);
+static DEVICE_ATTR(para_info, 0644, show_para_info, NULL);
+static DEVICE_ATTR(tx_desc_info, 0644, show_tx_desc_info, store_tx_desc_info);
+static DEVICE_ATTR(rx_desc_info, 0644, show_rx_desc_info, store_rx_desc_info);
+static DEVICE_ATTR(ring_sriov_info, 0644, show_ring_sriov_info, store_ring_sriov_info);
+static DEVICE_ATTR(rx_drop_info, 0644, show_rx_drop_info, store_rx_drop_info);
+static DEVICE_ATTR(outer_vlan_info, 0644, show_outer_vlan_info,
+		   store_outer_vlan_info);
+static DEVICE_ATTR(tcp_sync_info, 0644, show_tcp_sync_info,
+		   store_tcp_sync_info);
+static DEVICE_ATTR(rx_skip_info, 0644, show_rx_skip_info, store_rx_skip_info);
+static DEVICE_ATTR(tx_stags_info, 0644, show_tx_stags_info,
+		   store_tx_stags_info);
+static DEVICE_ATTR(version_info, 0644, show_version_info, NULL);
+static struct attribute *dev_attrs[] = {
+	&dev_attr_tx_stags_info.attr,
+	&dev_attr_version_info.attr,
+	&dev_attr_root_slot_info.attr,
+	&dev_attr_active_vid.attr,
+	&dev_attr_rx_drop_info.attr,
+	&dev_attr_outer_vlan_info.attr,
+	&dev_attr_tcp_sync_info.attr,
+	&dev_attr_rx_skip_info.attr,
+	&dev_attr_tx_ring_info.attr,
+	&dev_attr_rx_ring_info.attr,
+	&dev_attr_para_info.attr,
+	&dev_attr_tx_desc_info.attr,
+	&dev_attr_rx_desc_info.attr,
+	&dev_attr_ring_sriov_info.attr,
+	&dev_attr_tx_counter.attr,
+	&dev_attr_rx_counter.attr,
+	&dev_attr_own_vpd.attr,
+	&dev_attr_port_idx.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_si.attr,
+	&dev_attr_sfp.attr,
+	&dev_attr_autoneg.attr,
+	&dev_attr_sfp_tx_disable.attr,
+	&dev_attr_fec.attr,
+	&dev_attr_link_traing.attr,
+	&dev_attr_pci.attr,
+	&dev_attr_prbs.attr,
+	&dev_attr_pcs_reg.attr,
+	&dev_attr_phy_reg.attr,
+	&dev_attr_debug_linkstat.attr,
+	&dev_attr_switch_loopback_off.attr,
+	&dev_attr_switch_loopback_on.attr,
+	NULL,
+};
+static struct bin_attribute *dev_bin_attrs[] = {
+	&bin_attr_maintain,
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+	.bin_attrs = dev_bin_attrs,
+};
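+/*
+ * dev_attr_grp bundles the plain and binary sysfs attributes above; it is
+ * registered on the netdev kobject in rnp_sysfs_init() and removed again
+ * in rnp_sysfs_exit().
+ */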
+
+static void rnp_sysfs_del_adapter(struct rnp_adapter __maybe_unused *adapter)
+{
+}
+
+/* called from rnp_main.c */
+void rnp_sysfs_exit(struct rnp_adapter *adapter)
+{
+	rnp_sysfs_del_adapter(adapter);
+	sysfs_remove_group(&adapter->netdev->dev.kobj, &dev_attr_grp);
+}
+
+/* called from rnp_main.c */
+int rnp_sysfs_init(struct rnp_adapter *adapter)
+{
+	int rc = 0;
+	int flag;
+#ifdef RNP_HWMON
+	struct hwmon_buff *rnp_hwmon;
+	struct device *hwmon_dev;
+	unsigned int i;
+#endif /* RNP_HWMON */
+
+	flag = sysfs_create_group(&adapter->netdev->dev.kobj, &dev_attr_grp);
+	if (flag != 0) {
+		dev_err(&adapter->netdev->dev,
+			"sysfs_create_group failed:flag:%d\n", flag);
+		return flag;
+	}
+#ifdef RNP_HWMON
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.ops.init_thermal_sensor_thresh == NULL) {
+		goto no_thermal;
+	}
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	if (adapter->hw.ops.init_thermal_sensor_thresh(&adapter->hw))
+		goto no_thermal;
+
+	rnp_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*rnp_hwmon),
+				 GFP_KERNEL);
+
+	if (!rnp_hwmon) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	adapter->rnp_hwmon_buff = rnp_hwmon;
+
+	for (i = 0; i < RNP_MAX_SENSORS; i++) {
+		/*
+		 * Only create hwmon sysfs entries for sensors that have
+		 * meaningful data.
+		 */
+		if (adapter->hw.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_CAUTION);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_LOC);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_TEMP);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_MAX);
+		if (rc)
+			goto err;
+	}
+
+	rnp_hwmon->groups[0] = &rnp_hwmon->group;
+	rnp_hwmon->group.attrs = rnp_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(
+		&adapter->pdev->dev, "rnp", rnp_hwmon, rnp_hwmon->groups);
+
+	if (IS_ERR(hwmon_dev)) {
+		rc = PTR_ERR(hwmon_dev);
+		goto exit;
+	}
+no_thermal:
+#endif /* RNP_HWMON */
+	goto exit;
+
+err:
+	rnp_sysfs_exit(adapter);
+exit:
+	return rc;
+}
diff --git a/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h b/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h
new file mode 100644
index 000000000000..c40c4c054211
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef __RNP_TC_U32_PARSE_H__
+#define __RNP_TC_U32_PARSE_H__
+#include "rnp.h"
+
+struct rnp_match_parser {
+	int off; /* the skb offset, beginning from the 12-byte mac_type */
+	/* parse the value/mask into the real value */
+	int (*val)(struct rnp_fdir_filter *f, __be32 val, __be32 mask);
+};
+static inline void ip_print(u32 ip, bool src_true)
+{
+	printk(KERN_DEBUG "%s_ip is %d.%d.%d.%d \n", src_true ? "src" : "dst",
+	       ip & 0xff, ip >> 8 & 0xff, ip >> 16 & 0xff, ip >> 24 & 0xff);
+}
+/* Ipv4 Rule Parse */
+static inline int rnp_fill_ipv4_src_ip(struct rnp_fdir_filter *f, __be32 val,
+				       __be32 mask)
+{
+	memcpy(&f->filter.formatted.src_ip[0], &val, sizeof(u32));
+	memcpy(&f->filter.formatted.src_ip_mask[0], &mask, sizeof(u32));
+
+	f->filter.formatted.flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+	f->filter.layer2_formate.proto = htons(ETH_P_IP);
+
+	ip_print(f->filter.formatted.src_ip[0], true);
+	printk(KERN_DEBUG "ip mask is 0x%.2x\n",
+	       f->filter.formatted.src_ip_mask[0]);
+	return 0;
+}
+
+static inline int rnp_fill_ipv4_dst_ip(struct rnp_fdir_filter *f, __be32 val,
+				       __be32 mask)
+{
+	memcpy(&f->filter.formatted.dst_ip[0], &val, sizeof(u32));
+	memcpy(&f->filter.formatted.dst_ip_mask[0], &mask, sizeof(u32));
+
+	f->filter.formatted.flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+	f->filter.layer2_formate.proto = htons(ETH_P_IP);
+
+	ip_print(f->filter.formatted.dst_ip[0], false);
+	printk(KERN_DEBUG "ip mask is 0x%.2x\n",
+	       f->filter.formatted.dst_ip_mask[0]);
+
+	return 0;
+}
+
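+/*
+ * A parser table maps each supported u32 match offset to the callback that
+ * fills the fdir filter; the table is terminated by an entry whose .val
+ * callback is NULL.
+ */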
+static const struct rnp_match_parser rnp_ipv4_parser[] = {
+	{ .off = 12, .val = rnp_fill_ipv4_src_ip },
+	{ .off = 16, .val = rnp_fill_ipv4_dst_ip },
+	{ .val = NULL }
+};
+
+#endif
diff --git a/drivers/net/ethernet/mucse/rnp/rnp_type.h b/drivers/net/ethernet/mucse/rnp/rnp_type.h
new file mode 100644
index 000000000000..db9396be0cb3
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/rnp_type.h
@@ -0,0 +1,1298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNP_TYPE_H_
+#define _RNP_TYPE_H_
+
+#include 
+#include 
+#include 
+
+#if defined(CONFIG_MXGBE_FIX_VF_QUEUE) && !defined(FIX_VF_BUG)
+#define FIX_VF_BUG
+#endif
+#if defined(CONFIG_MXGBE) && !defined(N10)
+#define N10
+#endif
+
+#if defined(CONFIG_MXGBE_FIX_MAC_PADDING) && !defined(FIX_MAC_PADDIN)
+#define FIX_MAC_PADDIN
+#endif
+
+#if defined(CONFIG_MXGBE_OPTM_WITH_LARGE) && !defined(OPTM_WITH_LPAGE)
+#define OPTM_WITH_LPAGE
+#endif
+
+#if defined(CONFIG_MXGBE_MSIX_COUNT)
+#define RNP_N10_MSIX_VECTORS CONFIG_MXGBE_MSIX_COUNT
+#endif
+
+/* On Kylin OS, consider setting OPTM_WITH_LPAGE to reduce memory cost. */
+#if (PAGE_SIZE < 8192)
+/* OPTM_WITH_LPAGE cannot be used when PAGE_SIZE is smaller than 8192 */
+#ifdef OPTM_WITH_LPAGE
+#undef OPTM_WITH_LPAGE
+#endif
+#endif
+
+/* not enabled by default */
+/* #define VF_PROMISC_SUPPORT */
+
+#include "rnp_regs.h"
+
+#if IS_ENABLED(CONFIG_SYSFS)
+#ifndef RNP_SYSFS
+#define RNP_SYSFS
+#endif /* RNP_SYSFS */
+#endif /* CONFIG_SYSFS */
+
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef RNP_HWMON
+#define RNP_HWMON
+#endif /* RNP_HWMON */
+#endif /* CONFIG_HWMON */
+
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_RNP_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+
+/* Device IDs */
+#define PCI_VENDOR_ID_MUCSE 0x8848
+#define PCI_DEVICE_ID_N10_PF0 0x1000
+#define PCI_DEVICE_ID_N10_PF1 0x1001
+
+#define RNP_DEV_ID_N10_PF0 0x7001
+#define RNP_DEV_ID_N10_PF1 0x7002
+
+#define PCI_DEVICE_ID_N10 0x1000
+#define PCI_DEVICE_ID_N10_TP 0x1004
+#define PCI_DEVICE_ID_N10_X1 0x1002
+#define PCI_DEVICE_ID_N10C 0x1C00
+#define PCI_DEVICE_ID_N400 0x1001 /* N400  2-port */
+#define PCI_DEVICE_ID_N400C 0x1C01 /* N400C 2-port */
+#define PCI_DEVICE_ID_N400_X1 0x1003 /* N400  1-port */
+#define PCI_DEVICE_ID_N400C_X1 0x1C03 /* N400C 1-port */
+/* Wake Up Control */
+#define RNP_WUC_PME_EN 0x00000002 /* PME Enable */
+#define RNP_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define RNP_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion  */
+
+/* Wake Up Filter Control */
+#define RNP_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define RNP_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define RNP_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define RNP_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define RNP_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define RNP_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define RNP_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define RNP_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define RNP_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define RNP_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define RNP_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define RNP_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define RNP_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define RNP_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define RNP_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define RNP_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define RNP_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define RNP_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define RNP_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define RNP_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define RNP_WUFC_EXT_FLX_FILTERS 0x00300000
+#define RNP_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define RNP_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define RNP_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
+#define RNP_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+#define RNP_MAX_SENSORS 1
+struct rnp_thermal_diode_data {
+	u8 location;
+	u8 temp;
+	u8 caution_thresh;
+	u8 max_op_thresh;
+};
+
+struct rnp_thermal_sensor_data {
+	struct rnp_thermal_diode_data sensor[RNP_MAX_SENSORS];
+};
+
+/* Proxy Status */
+#define RNP_PROXYS_EX 0x00000004 /* Exact packet received */
+#define RNP_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define RNP_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define RNP_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define RNP_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define RNP_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define RNP_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define RNP_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define RNP_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define RNP_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define RNP_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define RNP_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define RNP_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
+
+#define RNP_WUPL_LENGTH_MASK 0xFFFF
+
+/* max 4 in n10 */
+#define RNP_MAX_TRAFFIC_CLASS 4
+#define TSRN10_TX_DEFAULT_BURST 16
+
+#ifndef TSRN10_RX_DEFAULT_BURST
+#define TSRN10_RX_DEFAULT_BURST 16
+#endif
+
+#ifndef TSRN10_RX_DEFAULT_LINE
+#define TSRN10_RX_DEFAULT_LINE 64
+#endif
+
+#ifndef RNP_PKT_TIMEOUT
+#define RNP_PKT_TIMEOUT 30
+#endif
+
+#ifndef RNP_RX_PKT_POLL_BUDGET
+#define RNP_RX_PKT_POLL_BUDGET 64
+#endif
+
+#ifndef RNP_TX_PKT_POLL_BUDGET
+#define RNP_TX_PKT_POLL_BUDGET 0x30
+#endif
+
+#ifndef RNP_PKT_TIMEOUT_TX
+#define RNP_PKT_TIMEOUT_TX 100
+#endif
+/* VF Device IDs */
+#define RNP_DEV_ID_N10_PF0_VF 0x8001
+#define RNP_DEV_ID_N10_PF1_VF 0x8002
+
+#define RNP_DEV_ID_N10_PF0_VF_N 0x1010
+#define RNP_DEV_ID_N10_PF1_VF_N 0x1011
+
+/* Transmit Descriptor - Advanced */
+struct rnp_tx_desc {
+	union {
+		__le64 pkt_addr; /* Packet buffer address */
+		struct {
+			__le32 adr_lo;
+			__le32 adr_hi;
+		};
+	};
+	union {
+		__le64 vlan_cmd_bsz;
+		struct {
+			__le32 blen_mac_ip_len;
+			__le32 vlan_cmd;
+		};
+	};
+#define RNP_TXD_FLAGS_VLAN_PRIO_MASK 0xe000
+#define RNP_TX_FLAGS_VLAN_PRIO_SHIFT 13
+#define RNP_TX_FLAGS_VLAN_CFI_SHIFT 12
+#define RNP_TXD_VLAN_VALID (0x80000000)
+#define RNP_TXD_SVLAN_TYPE (0x02000000)
+#define RNP_TXD_VLAN_CTRL_NOP (0x00 << 13)
+#define RNP_TXD_VLAN_CTRL_RM_VLAN (0x20000000)
+#define RNP_TXD_VLAN_CTRL_INSERT_VLAN (0x40000000)
+#define RNP_TXD_L4_CSUM (0x10000000) /* udp tcp sctp csum */
+#define RNP_TXD_IP_CSUM (0x8000000)
+#define RNP_TXD_TUNNEL_MASK (0x3000000)
+#define RNP_TXD_TUNNEL_VXLAN (0x1000000)
+#define RNP_TXD_TUNNEL_NVGRE (0x2000000)
+#define RNP_TXD_L4_TYPE_UDP (0xc00000)
+#define RNP_TXD_L4_TYPE_TCP (0x400000)
+#define RNP_TXD_L4_TYPE_SCTP (0x800000)
+#define RNP_TXD_FLAG_IPv4 (0)
+#define RNP_TXD_FLAG_IPv6 (0x200000)
+#define RNP_TXD_FLAG_TSO (0x100000)
+#define RNP_TXD_FLAG_PTP (0x4000000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_CMD_INNER_VLAN (0x08000000)
+#define RNP_TXD_STAT_DD (0x020000)
+#define RNP_TXD_CMD_EOP (0x010000)
+#define RNP_TXD_PAD_CTRL (0x01000000)
+};
+
+struct rnp_tx_ctx_desc {
+	__le32 mss_len_vf_num;
+	__le32 inner_vlan_tunnel_len;
+#define VF_VEB_MARK (1 << 24) /* bit 56 */
+#define VF_VEB_IGNORE_VLAN (1 << 25) /* bit 57 */
+	__le32 resv;
+	__le32 resv_cmd;
+#define RNP_TXD_FLAG_TO_RPU (1 << 15)
+#define RNP_TXD_SMAC_CTRL_NOP (0x00 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR0 (0x02 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR1 (0x06 << 12)
+#define RNP_TXD_CTX_VLAN_CTRL_NOP (0x00 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_RM_VLAN (0x01 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_INSERT_VLAN (0x02 << 10)
+#define RNP_TXD_MTI_CRC_PAD_CTRL (0x01000000)
+#define RNP_TXD_CTX_CTRL_DESC (0x080000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_STAT_DD (0x020000)
+};
+
+/* Receive Descriptor - Advanced */
+union rnp_rx_desc {
+	struct {
+		union {
+			__le64 pkt_addr; /* Packet buffer address */
+			struct {
+				__le32 addr_lo;
+				__le32 addr_hi;
+			};
+		};
+		__le64 resv_cmd;
+#define RNP_RXD_FLAG_RS (0)
+	};
+
+	struct {
+		__le32 rss_hash;
+		__le16 mark;
+		__le16 rev1;
+#define RNP_RX_L3_TYPE_MASK (1 << 15) /* 1 is ipv4 */
+#define VEB_VF_PKG (1 << 0) /* bit 48 */
+#define VEB_VF_IGNORE_VLAN (1 << 1) /* bit 49 */
+#define REV_OUTER_VLAN (1 << 5)
+		__le16 len;
+		__le16 padding_len;
+		__le16 vlan;
+		__le16 cmd;
+#define RNP_RXD_STAT_VLAN_VALID (1 << 15)
+#define RNP_RXD_STAT_STAG (0x01 << 14)
+#define RNP_RXD_STAT_TUNNEL_NVGRE (0x02 << 13)
+#define RNP_RXD_STAT_TUNNEL_VXLAN (0x01 << 13)
+#define RNP_RXD_STAT_TUNNEL_MASK (0x03 << 13)
+#define RNP_RXD_STAT_ERR_MASK (0x1f << 8)
+#define RNP_RXD_STAT_SCTP_MASK (0x04 << 8)
+#define RNP_RXD_STAT_L4_MASK (0x02 << 8)
+#define RNP_RXD_STAT_L4_SCTP (0x02 << 6)
+#define RNP_RXD_STAT_L4_TCP (0x01 << 6)
+#define RNP_RXD_STAT_L4_UDP (0x03 << 6)
+#define RNP_RXD_STAT_IPV6 (1 << 5)
+#define RNP_RXD_STAT_IPV4 (0 << 5)
+#define RNP_RXD_STAT_PTP (1 << 4)
+#define RNP_RXD_STAT_DD (1 << 1)
+#define RNP_RXD_STAT_EOP (1 << 0)
+	} wb;
+} __packed;
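+
+/*
+ * Inferred from the layout: the first (anonymous) view is the format
+ * software writes when posting a receive buffer (pkt_addr), while the wb
+ * view is the write-back format filled in by hardware; RNP_RXD_STAT_DD and
+ * RNP_RXD_STAT_EOP in 'cmd' mark a completed end-of-packet descriptor.
+ */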
+
+/* Host Interface Command Structures */
+struct rnp_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	u8 checksum;
+};
+
+struct rnp_hic_drv_info {
+	struct rnp_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is mult. of dword */
+	u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* Context descriptors */
+struct rnp_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+/* RAH */
+#define RNP_RAH_VIND_MASK 0x003C0000
+#define RNP_RAH_VIND_SHIFT 18
+#define RNP_RAH_AV 0x80000000
+#define RNP_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Autonegotiation advertised speeds */
+typedef u32 rnp_autoneg_advertised;
+/* Link speed */
+typedef u32 rnp_link_speed;
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL BIT(2)
+#define RNP_LINK_SPEED_100_FULL BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF BIT(10)
+#define RNP_LINK_SPEED_100_HALF BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF BIT(12)
+#define RNP_SFP_MODE_10G_LR BIT(13)
+#define RNP_SFP_MODE_10G_SR BIT(14)
+#define RNP_SFP_MODE_10G_LRM BIT(15)
+#define RNP_SFP_MODE_1G_T BIT(16)
+#define RNP_SFP_MODE_1G_KX BIT(17)
+#define RNP_SFP_MODE_1G_SX BIT(18)
+#define RNP_SFP_MODE_1G_LX BIT(19)
+#define RNP_SFP_MODE_40G_SR4 BIT(20)
+#define RNP_SFP_MODE_40G_CR4 BIT(21)
+#define RNP_SFP_MODE_40G_LR4 BIT(22)
+#define RNP_SFP_MODE_1G_CX BIT(23)
+#define RNP_SFP_MODE_10G_BASE_T BIT(24)
+#define RNP_SFP_MODE_FIBER_CHANNEL_SPEED BIT(25)
+#define RNP_SFP_CONNECTOR_DAC BIT(26)
+#define RNP_SFP_TO_SGMII BIT(27)
+#define RNP_SFP_25G_SR BIT(28)
+#define RNP_SFP_25G_KR BIT(29)
+#define RNP_SFP_25G_CR BIT(30)
+#define RNP_LINK_SPEED_10GB_HALF BIT(31)
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+enum rnp_atr_flow_type {
+	RNP_ATR_FLOW_TYPE_IPV4 = 0x0,
+	RNP_ATR_FLOW_TYPE_UDPV4 = 0x1,
+	RNP_ATR_FLOW_TYPE_TCPV4 = 0x2,
+	RNP_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+	RNP_ATR_FLOW_TYPE_IPV6 = 0x4,
+	RNP_ATR_FLOW_TYPE_UDPV6 = 0x5,
+	RNP_ATR_FLOW_TYPE_TCPV6 = 0x6,
+	RNP_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+	RNP_ATR_FLOW_TYPE_ETHER = 0x18,
+	RNP_ATR_FLOW_TYPE_USERDEF = 0x19,
+};
+
+#define RNP_FDIR_DROP_QUEUE (200)
+
+enum {
+	fdir_mode_tcam = 0,
+	fdir_mode_tuple5,
+};
+/* Flow Director ATR input struct. */
+union rnp_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool      - 1 byte
+	 * flow_type    - 1 byte
+	 * vlan_id      - 2 bytes
+	 * src_ip       - 16 bytes
+	 * inner_mac    - 6 bytes
+	 * cloud_mode   - 2 bytes
+	 * tni_vni      - 4 bytes
+	 * dst_ip       - 16 bytes
+	 * src_port     - 2 bytes
+	 * dst_port     - 2 bytes
+	 * flex_bytes   - 2 bytes
+	 * bkt_hash     - 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 dst_ip_mask[4];
+		__be32 src_ip[4];
+		__be32 src_ip_mask[4];
+		u8 inner_mac[6];
+		u8 inner_mac_mask[6];
+		__be16 tunnel_type;
+		__be32 tni_vni;
+		__be16 src_port;
+		__be16 src_port_mask;
+		__be16 dst_port;
+		__be16 dst_port_mask;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	struct {
+		u8 vm_poll;
+		u8 flow_type;
+		u16 vlan_id;
+		__be16 proto;
+		__be16 resv;
+		__be32 nouse[12];
+	} layer2_formate;
+	__be32 dword_stream[14];
+};
+
+/* BitTimes (BT) conversion */
+#define RNP_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
+#define RNP_B2BT(BT) ((BT) * 8)
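+
+/*
+ * RNP_B2BT() converts a byte count to bit times (x8); RNP_BT2KB() converts
+ * bit times back to whole kilobytes, rounding up to the next 8 * 1024 bit
+ * (1 KB) boundary.
+ */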
+
+/* Calculate Delay to respond to PFC */
+#define RNP_PFC_D 672
+
+/* Calculate Cable Delay */
+#define RNP_CABLE_DC 5556 /* Delay Copper */
+#define RNP_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define RNP_PHY_DC 25600 /* Delay 10G BASET */
+#define RNP_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define RNP_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define RNP_ID_X540 (RNP_MAC_DC + RNP_XAUI_DC + RNP_PHY_DC)
+
+/* Calculate Interface Delay 82598, n10 */
+#define RNP_PHY_D 12800
+#define RNP_MAC_D 4096
+#define RNP_XAUI_D (2 * 1024)
+
+/* PHY MDI STANDARD CONFIG */
+#define RNP_MDI_PHY_ID1_OFFSET 2
+#define RNP_MDI_PHY_ID2_OFFSET 3
+#define RNP_MDI_PHY_ID_MASK 0xFFFFFC00U
+#define RNP_MDI_PHY_SPEED_SELECT1 0x0040
+#define RNP_MDI_PHY_DUPLEX 0x0100
+#define RNP_MDI_PHY_RESTART_AN 0x0200
+#define RNP_MDI_PHY_ANE 0x1000
+#define RNP_MDI_PHY_SPEED_SELECT0 0x2000
+#define RNP_MDI_PHY_RESET 0x8000 /* assumed: standard IEEE BMCR reset bit */
+
+#define NGBE_PHY_RST_WAIT_PERIOD 50
+
+#define RNP_ID (RNP_MAC_D + RNP_XAUI_D + RNP_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define RNP_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define RNP_PCI_DELAY 10000
+
+/* Flow Director compressed ATR hash input struct */
+union rnp_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+enum rnp_eeprom_type {
+	rnp_eeprom_uninitialized = 0,
+	rnp_eeprom_spi,
+	rnp_flash,
+	rnp_eeprom_none /* No NVM support */
+};
+
+enum mac_type {
+	mac_dwc_xlg,
+	mac_dwc_g,
+};
+
+enum rnp_mac_type {
+	rnp_mac_unknown = 0,
+	rnp_mac_n10g_x8_40G,
+	rnp_mac_n10g_x2_10G,
+	rnp_mac_n10g_x4_10G,
+	rnp_mac_n10g_x8_10G,
+	rnp_mac_n10l_x8_1G,
+	rnp_num_macs
+};
+
+enum rnp_rss_type {
+	rnp_rss_uv440 = 0,
+	rnp_rss_uv3p,
+	rnp_rss_n10,
+	rnp_rss_n20,
+};
+
+enum rnp_hw_type {
+	rnp_hw_uv440 = 0,
+	rnp_hw_uv3p,
+	rnp_hw_n10,
+	rnp_hw_n20,
+	rnp_hw_n400
+};
+
+enum rnp_eth_type { rnp_eth_n10 = 0 };
+
+enum rnp_phy_type {
+	rnp_phy_unknown = 0,
+	rnp_phy_none,
+	rnp_phy_sfp,
+	rnp_phy_sfp_unsupported,
+	rnp_phy_generic,
+	rnp_phy_sfp_unknown,
+	rnp_phy_sgmii,
+};
+
+enum rnp_sfp_type {
+	rnp_sfp_type_da_cu = 0,
+	rnp_sfp_type_sr = 1,
+	rnp_sfp_type_lr = 2,
+	rnp_sfp_type_da_cu_core0 = 3,
+	rnp_sfp_type_da_cu_core1 = 4,
+	rnp_sfp_type_srlr_core0 = 5,
+	rnp_sfp_type_srlr_core1 = 6,
+	rnp_sfp_type_da_act_lmt_core0 = 7,
+	rnp_sfp_type_da_act_lmt_core1 = 8,
+	rnp_sfp_type_1g_cu_core0 = 9,
+	rnp_sfp_type_1g_cu_core1 = 10,
+	rnp_sfp_type_1g_sx_core0 = 11,
+	rnp_sfp_type_1g_sx_core1 = 12,
+	rnp_sfp_type_1g_lx_core0 = 13,
+	rnp_sfp_type_1g_lx_core1 = 14,
+	rnp_sfp_type_not_present = 0xFFFE,
+	rnp_sfp_type_unknown = 0xFFFF
+};
+
+enum rnp_media_type {
+	rnp_media_type_unknown = 0,
+	rnp_media_type_fiber,
+	rnp_media_type_copper,
+	rnp_media_type_backplane,
+	rnp_media_type_cx4,
+	rnp_media_type_da,
+	rnp_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum rnp_fc_mode {
+	rnp_fc_none = 0,
+	rnp_fc_rx_pause,
+	rnp_fc_tx_pause,
+	rnp_fc_full,
+	rnp_fc_default
+};
+
+#define PAUSE_TX (0x1)
+#define PAUSE_RX (0x2)
+#define PAUSE_AUTO (0x10)
+
+#define ASYM_PAUSE BIT(11)
+#define SYM_PAUSE BIT(10)
+
+struct rnp_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool uc_set_promisc;
+	bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct rnp_bus_info {
+	u16 func;
+	u16 lan_id;
+};
+
+/* Flow control parameters */
+struct rnp_fc_info {
+	u32 high_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+	u32 low_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum rnp_fc_mode current_mode; /* FC mode in effect */
+	u32 requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct rnp_hw_stats {
+	u64 dma_to_dma;
+	u64 dma_to_switch;
+	u64 mac_to_mac;
+	u64 switch_to_switch;
+	u64 mac_to_dma;
+	u64 switch_to_dma;
+	u64 vlan_add_cnt;
+	u64 vlan_strip_cnt;
+	/* === error */
+	u64 invalid_dropped_packets;
+	u64 filter_dropped_packets;
+	/* == drop == */
+	u64 rx_capabity_lost;
+	u64 host_l2_match_drop;
+	u64 redir_input_match_drop;
+	u64 redir_etype_match_drop;
+	u64 redir_tcp_syn_match_drop;
+	u64 redir_tuple5_match_drop;
+	u64 redir_tcam_match_drop;
+
+	u64 bmc_dropped_packets;
+	u64 switch_dropped_packets;
+	/* === rx */
+	u64 dma_to_host;
+	/* === dma-tx == */
+	u64 port0_tx_packets;
+	u64 port1_tx_packets;
+	u64 port2_tx_packets;
+	u64 port3_tx_packets;
+	/* === emac 1to4 tx == */
+	u64 in0_tx_pkts;
+	u64 in1_tx_pkts;
+	u64 in2_tx_pkts;
+	u64 in3_tx_pkts;
+	/* === phy tx == */
+	u64 port0_to_phy_pkts;
+	u64 port1_to_phy_pkts;
+	u64 port2_to_phy_pkts;
+	u64 port3_to_phy_pkts;
+	/* === mac rx === */
+	u64 mac_rx_broadcast;
+	u64 mac_rx_multicast;
+	u64 mac_rx_pause_count;
+	u64 mac_tx_pause_count;
+	u64 tx_broadcast;
+	u64 tx_multicast;
+
+	u64 dma_rx_drop_cnt_0;
+	u64 dma_rx_drop_cnt_1;
+	u64 dma_rx_drop_cnt_2;
+	u64 dma_rx_drop_cnt_3;
+	u64 dma_rx_drop_cnt_4;
+	u64 dma_rx_drop_cnt_5;
+	u64 dma_rx_drop_cnt_6;
+	u64 dma_rx_drop_cnt_7;
+};
+
+/* forward declaration */
+struct rnp_hw;
+struct rnp_eth_info;
+struct rnp_dma_info;
+struct rnp_mac_info;
+
+/* iterator type for walking multicast address lists */
+typedef u8 *(*rnp_mc_addr_itr)(struct rnp_hw *hw, u8 **mc_addr_ptr, u32 *vmdq);
+
+/* Function pointer table */
+struct rnp_eeprom_operations {
+	s32 (*init_params)(struct rnp_hw *hw);
+	s32 (*read)(struct rnp_hw *hw, u16, u16 *);
+	s32 (*read_buffer)(struct rnp_hw *, u16, u16, u16 *);
+	s32 (*write)(struct rnp_hw *, u16, u16);
+	s32 (*write_buffer)(struct rnp_hw *, u16, u16, u16 *);
+	s32 (*validate_checksum)(struct rnp_hw *, u16 *);
+	s32 (*update_checksum)(struct rnp_hw *);
+	u16 (*calc_checksum)(struct rnp_hw *);
+};
+
+/* add nic operations */
+struct rnp_eth_operations {
+	/* RAR, Multicast, VLAN */
+	s32 (*get_mac_addr)(struct rnp_eth_info *, u8 *);
+	s32 (*set_rar)(struct rnp_eth_info *, u32, u8 *, bool);
+	s32 (*clear_rar)(struct rnp_eth_info *, u32);
+	s32 (*set_vmdq)(struct rnp_eth_info *, u32, u32);
+	s32 (*clear_vmdq)(struct rnp_eth_info *, u32, u32);
+	s32 (*update_mc_addr_list)(struct rnp_eth_info *, struct net_device *,
+				   bool);
+	void (*clr_mc_addr)(struct rnp_eth_info *);
+	int (*set_rss_hfunc)(struct rnp_eth_info *, int hfunc);
+	void (*set_rss_key)(struct rnp_eth_info *, bool);
+	void (*set_rss_table)(struct rnp_eth_info *);
+	void (*set_rx_hash)(struct rnp_eth_info *, bool, bool);
+	/* ncsi */
+	void (*ncsi_set_vfta)(struct rnp_eth_info *);
+	void (*ncsi_set_uc_addr)(struct rnp_eth_info *);
+	void (*ncsi_set_mc_mta)(struct rnp_eth_info *);
+	void (*set_layer2_remapping)(struct rnp_eth_info *,
+				     union rnp_atr_input *, u16, u8, bool);
+	void (*clr_layer2_remapping)(struct rnp_eth_info *, u16);
+	void (*clr_all_layer2_remapping)(struct rnp_eth_info *);
+	void (*set_tuple5_remapping)(struct rnp_eth_info *,
+				     union rnp_atr_input *, u16, u8, bool);
+	void (*clr_tuple5_remapping)(struct rnp_eth_info *, u16);
+	void (*clr_all_tuple5_remapping)(struct rnp_eth_info *);
+	void (*set_tcp_sync_remapping)(struct rnp_eth_info *, int, bool, bool);
+	void (*set_rx_skip)(struct rnp_eth_info *, int, bool);
+	void (*set_min_max_packet)(struct rnp_eth_info *, int, int);
+	void (*set_vlan_strip)(struct rnp_eth_info *, u16, bool);
+	s32 (*set_vfta)(struct rnp_eth_info *, u32, bool);
+	void (*clr_vfta)(struct rnp_eth_info *);
+	void (*set_vlan_filter)(struct rnp_eth_info *, bool);
+	void (*set_outer_vlan_type)(struct rnp_eth_info *, int type);
+	void (*set_double_vlan)(struct rnp_eth_info *, bool);
+	void (*set_vxlan_port)(struct rnp_eth_info *, u32);
+	void (*set_vxlan_mode)(struct rnp_eth_info *, bool);
+	s32 (*set_fc_mode)(struct rnp_eth_info *);
+	void (*set_rx)(struct rnp_eth_info *, bool);
+	void (*set_fcs)(struct rnp_eth_info *, bool);
+	void (*set_vf_vlan_mode)(struct rnp_eth_info *, u16, int, bool);
+};
+
+enum {
+	rnp_driver_insmod,
+	rnp_driver_suspuse,
+	rnp_driver_force_control_mac,
+};
+
+struct rnp_hw_operations {
+	s32 (*init_hw)(struct rnp_hw *);
+	s32 (*reset_hw)(struct rnp_hw *);
+	s32 (*start_hw)(struct rnp_hw *);
+	void (*set_mtu)(struct rnp_hw *, int);
+	void (*set_vlan_filter_en)(struct rnp_hw *, bool);
+	void (*set_vlan_filter)(struct rnp_hw *, u16, bool, bool);
+	int (*set_veb_vlan_mask)(struct rnp_hw *, u16, int, bool);
+	void (*set_vf_vlan_filter)(struct rnp_hw *, u16, int, bool, bool);
+	void (*clr_vfta)(struct rnp_hw *);
+	void (*set_vlan_strip)(struct rnp_hw *, u16, bool);
+	void (*set_mac)(struct rnp_hw *, u8 *mac, bool);
+	void (*set_rx_mode)(struct rnp_hw *, struct net_device *netdev, bool);
+	void (*set_rar_with_vf)(struct rnp_hw *hw, u8 *mac, int, u32, bool);
+	void (*clr_rar)(struct rnp_hw *hw, int idx);
+	void (*clr_rar_all)(struct rnp_hw *hw);
+	void (*clr_vlan_veb)(struct rnp_hw *);
+	void (*set_txvlan_mode)(struct rnp_hw *, bool);
+	void (*set_tx_maxrate)(struct rnp_hw *, bool);
+	void (*set_fcs_mode)(struct rnp_hw *, bool);
+	void (*set_vxlan_port)(struct rnp_hw *, u32);
+	void (*set_vxlan_mode)(struct rnp_hw *, bool);
+	void (*set_mac_speed)(struct rnp_hw *, bool, u32, bool);
+	void (*set_mac_rx)(struct rnp_hw *, bool);
+	void (*update_sriov_info)(struct rnp_hw *);
+	void (*set_sriov_status)(struct rnp_hw *, bool);
+	void (*set_sriov_vf_mc)(struct rnp_hw *, u16);
+	void (*set_pause_mode)(struct rnp_hw *);
+	void (*get_pause_mode)(struct rnp_hw *);
+	void (*update_hw_info)(struct rnp_hw *);
+	void (*set_rx_hash)(struct rnp_hw *, bool, bool);
+	int (*set_rss_hfunc)(struct rnp_hw *, u8 hfunc);
+	void (*set_rss_key)(struct rnp_hw *, bool);
+	void (*set_rss_table)(struct rnp_hw *);
+	void (*set_mbx_link_event)(struct rnp_hw *, int);
+	void (*set_mbx_ifup)(struct rnp_hw *, int);
+	s32 (*get_thermal_sensor_data)(struct rnp_hw *);
+	s32 (*init_thermal_sensor_thresh)(struct rnp_hw *hw);
+	void (*disable_tx_laser)(struct rnp_hw *);
+	void (*enable_tx_laser)(struct rnp_hw *);
+	void (*flap_tx_laser)(struct rnp_hw *);
+	s32 (*check_link)(struct rnp_hw *, rnp_link_speed *, bool *, bool *,
+			  bool);
+	s32 (*setup_link)(struct rnp_hw *, rnp_link_speed, u32, u32, u32);
+	void (*clean_link)(struct rnp_hw *);
+	s32 (*get_link_capabilities)(struct rnp_hw *, rnp_link_speed *, bool *);
+	s32 (*init_rx_addrs)(struct rnp_hw *);
+	void (*set_layer2_remapping)(struct rnp_hw *, union rnp_atr_input *,
+				     u16, u8, bool);
+	void (*clr_layer2_remapping)(struct rnp_hw *, u16);
+	void (*clr_all_layer2_remapping)(struct rnp_hw *);
+	void (*set_tuple5_remapping)(struct rnp_hw *, union rnp_atr_input *,
+				     u16, u8, bool);
+	void (*clr_tuple5_remapping)(struct rnp_hw *, u16);
+	void (*clr_all_tuple5_remapping)(struct rnp_hw *);
+	void (*set_tcp_sync_remapping)(struct rnp_hw *, int queue, bool, bool);
+	void (*set_rx_skip)(struct rnp_hw *, int count, bool);
+	void (*set_outer_vlan_type)(struct rnp_hw *, int);
+	void (*update_hw_status)(struct rnp_hw *, struct rnp_hw_stats *,
+				 struct net_device_stats *);
+	void (*update_msix_count)(struct rnp_hw *, int msix_count);
+	void (*update_rx_drop)(struct rnp_hw *);
+	void (*setup_ethtool)(struct net_device *);
+	s32 (*phy_read_reg)(struct rnp_hw *, u32, u32, u16 *);
+	s32 (*phy_write_reg)(struct rnp_hw *, u32, u32, u16);
+	void (*setup_wol)(struct rnp_hw *, u32);
+	void (*set_vf_vlan_mode)(struct rnp_hw *, u16, int, bool);
+	void (*driver_status)(struct rnp_hw *, bool, int);
+};
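+
+/*
+ * Illustrative only: struct rnp_hw embeds this table as hw->ops, so common
+ * code is expected to reach the hardware indirectly, e.g.
+ * hw->ops.set_mtu(hw, new_mtu), with the function pointers filled in by
+ * chip-specific setup code.
+ */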
+
+struct rnp_mac_operations {
+	void (*set_mac_rx)(struct rnp_mac_info *mac, bool);
+	void (*set_mac_speed)(struct rnp_mac_info *, bool, u32, bool);
+	void (*set_mac_fcs)(struct rnp_mac_info *mac, bool);
+	s32 (*set_fc_mode)(struct rnp_mac_info *mac);
+	void (*check_link)(struct rnp_mac_info *, rnp_link_speed *, bool *,
+			   bool);
+	void (*set_mac)(struct rnp_mac_info *, u8 *, int);
+	int (*mdio_write)(struct rnp_mac_info *, int phyreg, int phydata);
+	int (*mdio_read)(struct rnp_mac_info *, int phyreg, int *regvalue);
+	void (*pmt)(struct rnp_mac_info *, u32);
+};
+
+struct rnp_eeprom_info {
+	struct rnp_eeprom_operations ops;
+	enum rnp_eeprom_type type;
+	u32 semaphore_delay;
+	u16 word_size;
+	u16 address_bits;
+	u16 word_page_size;
+};
+
+struct rnp_dma_operations {
+	void (*set_tx_maxrate)(struct rnp_dma_info *dma, u16, u32);
+	void (*set_veb_mac)(struct rnp_dma_info *dma, u8 *, u32, u32);
+	/* only set own vlan */
+	void (*set_veb_vlan)(struct rnp_dma_info *dma, u16, u32);
+	void (*set_veb_vlan_mask)(struct rnp_dma_info *dma, u16, u16, int);
+	void (*clr_veb_all)(struct rnp_dma_info *dma);
+};
+
+struct rnp_dma_info {
+	struct rnp_dma_operations ops;
+	u8 __iomem *dma_base_addr;
+	u8 __iomem *dma_ring_addr;
+	void *back;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 dma_version;
+};
+
+#define RNP_MAX_MTA 128
+struct rnp_eth_info {
+	struct rnp_eth_operations ops;
+	u8 __iomem *eth_base_addr;
+	enum rnp_eth_type eth_type;
+	void *back;
+
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+};
+
+struct rnp_nic_info {
+	u8 __iomem *nic_base_addr;
+};
+
+struct mii_regs {
+	unsigned int addr; /* MII Address */
+	unsigned int data; /* MII Data */
+	unsigned int addr_shift; /* MII address shift */
+	unsigned int reg_shift; /* MII reg shift */
+	unsigned int addr_mask; /* MII address mask */
+	unsigned int reg_mask; /* MII reg mask */
+	unsigned int clk_csr_shift;
+	unsigned int clk_csr_mask;
+};
+
+#define RNP_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+#define RNP_FLAGS_INIT_MAC_ADDRESS 0x02
+struct rnp_mac_info {
+	struct rnp_mac_operations ops;
+	u8 __iomem *mac_addr;
+	void *back;
+	struct mii_regs mii;
+	int phy_addr;
+	int clk_csr;
+	enum rnp_mac_type type;
+	enum mac_type mac_type;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16 wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16 wwpn_prefix;
+	u16 max_msix_vectors;
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+	bool orig_link_settings_stored;
+	bool autotry_restart;
+	u8 mac_flags;
+};
+
+struct rnp_phy_info {
+	struct mdio_if_info mdio;
+	enum rnp_phy_type type;
+	u32 id;
+	u32 phy_addr;
+	bool is_mdix;
+	u8 mdix;
+	enum rnp_sfp_type sfp_type;
+	bool sfp_setup_needed;
+	u32 revision;
+	enum rnp_media_type media_type;
+	bool reset_disable;
+	rnp_autoneg_advertised autoneg_advertised;
+	bool smart_speed_active;
+	bool multispeed_fiber;
+	bool reset_if_overtemp;
+};
+
+#include "rnp_mbx.h"
+
+struct rnp_pcs_operations {
+	u32 (*read)(struct rnp_hw *hw, int num, u32 addr);
+	void (*write)(struct rnp_hw *hw, int num, u32 addr, u32 value);
+};
+
+struct rnp_mbx_operations {
+	s32 (*init_params)(struct rnp_hw *hw);
+	s32 (*read)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*write)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*read_posted)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*write_posted)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*check_for_msg)(struct rnp_hw *, enum MBX_ID);
+	s32 (*check_for_ack)(struct rnp_hw *, enum MBX_ID);
+	s32 (*configure)(struct rnp_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnp_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnp_pcs_info {
+	struct rnp_pcs_operations ops;
+	int pcs_count;
+};
+
+struct mbx_fw_cmd_reply;
+
+typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv);
+
+enum cookie_stat {
+	COOKIE_FREE = 0,
+	COOKIE_FREE_WAIT_TIMEOUT,
+	COOKIE_ALLOCED,
+};
+
+struct mbx_req_cookie {
+	u64 alloced_jiffies;
+	enum cookie_stat stat;
+	cookie_cb cb;
+	int timeout_jiffes;
+	int errcode;
+	wait_queue_head_t wait;
+	int done;
+	int priv_len;
+#define MAX_PRIV_LEN 64
+	char priv[MAX_PRIV_LEN];
+};
+
+struct mbx_req_cookie_pool {
+#define MAX_COOKIES_ITEMS (20 * 400)
+	struct mbx_req_cookie cookies[MAX_COOKIES_ITEMS];
+	int next_idx;
+};
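+
+/*
+ * Inferred usage: each cookie appears to track one in-flight firmware
+ * mailbox request; the requester sleeps on 'wait' until 'done' is set or
+ * 'timeout_jiffes' expires, the reply payload (at most MAX_PRIV_LEN bytes)
+ * lands in priv[], and the pool hands cookies out round-robin via next_idx.
+ */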
+
+struct rnp_mbx_info {
+	struct rnp_mbx_operations ops;
+	struct rnp_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u32 v2p_mailbox;
+	u16 size;
+	u16 vf_req[64];
+	u16 vf_ack[64];
+	u16 cpu_req;
+	u16 cpu_ack;
+	struct mutex lock;
+	bool other_irq_enabled;
+	int mbx_size;
+	int mbx_mem_size;
+#define MBX_FEATURE_NO_ZERO BIT(0)
+#define MBX_FEATURE_WRITE_DELAY BIT(1)
+	u32 mbx_feature;
+	/* cm3 <-> pf mbx */
+	u32 cpu_pf_shm_base;
+	u32 pf2cpu_mbox_ctrl;
+	u32 pf2cpu_mbox_mask;
+	u32 cpu_pf_mbox_mask;
+	u32 cpu2pf_mbox_vec;
+	/* pf <--> vf mbx */
+	u32 pf_vf_shm_base;
+	u32 pf2vf_mbox_ctrl_base;
+	u32 pf_vf_mbox_mask_lo;
+	u32 pf_vf_mbox_mask_hi;
+	u32 pf2vf_mbox_vec_base;
+	u32 vf2pf_mbox_vec_base;
+	u32 cpu_vf_share_ram;
+	int share_size;
+	struct mbx_req_cookie_pool cookie_pool;
+};
+
+struct vf_vebvlans {
+	struct list_head l;
+	bool free;
+	int veb_entry;
+	u16 vid;
+	u16 mask;
+};
+
+#define RNP_MBX_VF_CPU_SHM_PF_BASE (0xA8000)
+#define RNP_NCSI_MC_COUNT (11)
+#define RNP_NCSI_VLAN_COUNT (1)
+
+#define RNP_VF_CPU_SHM_BASE_NR62 (RNP_MBX_VF_CPU_SHM_PF_BASE + 62 * 64)
+struct ncsi_shm_info {
+	u32 valid;
+#define RNP_NCSI_SHM_VALID 0xa5000000
+#define RNP_NCSI_SHM_VALID_MASK 0xff000000
+#define RNP_MC_VALID BIT(0)
+#define RNP_UC_VALID BIT(1)
+#define RNP_VLAN_VALID BIT(2)
+
+	struct {
+		u32 uc_addr_lo;
+		u32 uc_addr_hi;
+	} uc;
+
+	struct {
+		u32 mc_addr_lo;
+		u32 mc_addr_hi;
+	} mc[RNP_NCSI_MC_COUNT];
+	u32 ncsi_vlan;
+};
+
+struct rnp_hw {
+	void *back;
+	u8 __iomem *hw_addr;
+	u8 __iomem *ring_msix_base;
+	u8 __iomem *rpu_addr;
+	u8 pfvfnum;
+	struct pci_dev *pdev;
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	char lane_mask;
+	u16 mac_type;
+	u16 phy_type;
+	int nr_lane;
+	u8 is_backplane : 1;
+	u8 is_sgmii : 1;
+	u8 force_10g_1g_speed_ablity : 1;
+	u8 force_speed_stat : 2;
+#define FORCE_SPEED_STAT_DISABLED 0
+#define FORCE_SPEED_STAT_1G 1
+#define FORCE_SPEED_STAT_10G 2
+	u8 rpu_en : 1;
+	u8 rpu_availble : 1;
+	u8 ncsi_en;
+	u8 ncsi_rar_entries;
+	u16 ncsi_mc_count;
+	u16 ncsi_vlan_count;
+	u32 ncsi_vf_cpu_shm_pf_base;
+	u32 saved_force_link_speed;
+	u32 pcode;
+	u32 supported_link;
+	u32 advertised_link;
+	u32 autoneg;
+	u32 tp_mdx;
+	u32 tp_mdix_ctrl;
+	u32 phy_id;
+	u8 fw_lldp_ablity;
+	u8 link;
+	u8 pci_gen;
+	u8 pci_lanes;
+	u16 max_msix_vectors;
+	int speed;
+	int duplex;
+	u32 dma_version;
+	u32 wol;
+	u32 eco;
+	u32 force_status;
+	u32 force_link_supported;
+	u16 min_length;
+	u16 max_length;
+	u16 min_length_current;
+	u16 max_length_current;
+	/* rss info */
+#define HW_MAX_RETA_ENTRIES 512
+	u8 rss_indir_tbl[HW_MAX_RETA_ENTRIES];
+#define HW_MAX_TC_ENTRIES 8
+	u8 rss_tc_tbl[HW_MAX_TC_ENTRIES];
+	int rss_indir_tbl_num;
+	int rss_tc_tbl_num;
+	u32 rss_tbl_setup_flag;
+#define HW_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+	u8 rss_key[HW_RSS_KEY_SIZE];
+	u32 rss_key_setup_flag;
+	u32 vfnum;
+	int dma_split_size;
+	int num_rar_entries;
+	int max_vfs;
+	int max_vfs_noari;
+	int sriov_ring_limit;
+	int max_pf_macvlans;
+	int num_vebvlan_entries;
+	int fdir_mode;
+	int layer2_count;
+	int tuple5_count;
+	int veb_ring;
+	int default_vf_num;
+	int vf_promisc_mode;
+	int vf_promisc_num;
+	u32 fdir_pballoc;
+	enum rnp_rss_type rss_type;
+	enum rnp_hw_type hw_type;
+	struct rnp_hw_operations ops;
+	struct rnp_nic_info nic;
+	struct rnp_dma_info dma;
+	struct rnp_eth_info eth;
+	struct rnp_mac_info mac;
+	struct rnp_addr_filter_info addr_ctrl;
+	struct rnp_fc_info fc;
+	struct rnp_phy_info phy;
+	struct rnp_eeprom_info eeprom;
+	struct rnp_bus_info bus;
+	struct rnp_mbx_info mbx;
+	struct rnp_pcs_info pcs;
+	bool adapter_stopped;
+	bool force_full_reset;
+	bool mng_fw_enabled;
+	bool wol_enabled;
+	unsigned long wol_supported;
+	int fw_version;
+	u8 sfp_connector;
+	struct vf_vebvlans vf_vas;
+	struct vf_vebvlans *vv_list;
+	u32 axi_mhz;
+	u32 bd_uid;
+	union {
+		u8 port_id[4];
+		u32 port_ids;
+	};
+	int mode;
+	int default_rx_queue;
+	u32 usecstocount;
+#define RNP_NET_FEATURE_SG ((u32)(1 << 0))
+#define RNP_NET_FEATURE_TX_CHECKSUM ((u32)(1 << 1))
+#define RNP_NET_FEATURE_RX_CHECKSUM ((u32)(1 << 2))
+#define RNP_NET_FEATURE_TSO ((u32)(1 << 3))
+#define RNP_NET_FEATURE_TX_UDP_TUNNEL ((u32)(1 << 4))
+#define RNP_NET_FEATURE_VLAN_FILTER ((u32)(1 << 5))
+#define RNP_NET_FEATURE_VLAN_OFFLOAD ((u32)(1 << 6))
+#define RNP_NET_FEATURE_RX_NTUPLE_FILTER ((u32)(1 << 7))
+#define RNP_NET_FEATURE_TCAM ((u32)(1 << 8))
+#define RNP_NET_FEATURE_RX_HASH ((u32)(1 << 9))
+#define RNP_NET_FEATURE_RX_FCS ((u32)(1 << 10))
+#define RNP_NET_FEATURE_HW_TC ((u32)(1 << 11))
+#define RNP_NET_FEATURE_USO ((u32)(1 << 12))
+#define RNP_NET_FEATURE_STAG_FILTER ((u32)(1 << 13))
+#define RNP_NET_FEATURE_STAG_OFFLOAD ((u32)(1 << 14))
+#define RNP_NET_FEATURE_VF_FIXED ((u32)(1 << 15))
+#define RNP_VEB_VLAN_MASK_EN ((u32)(1 << 16))
+
+	u32 feature_flags;
+	struct rnp_thermal_sensor_data thermal_sensor_data;
+
+	struct {
+		int version;
+		int len;
+		int flag;
+	} dump;
+};
+
+struct rnp_info {
+	enum rnp_mac_type mac;
+	enum rnp_rss_type rss_type;
+	enum rnp_hw_type hw_type;
+	s32 (*get_invariants)(struct rnp_hw *);
+	struct rnp_mac_operations *mac_ops;
+	struct rnp_eeprom_operations *eeprom_ops;
+	struct rnp_mbx_operations *mbx_ops;
+	struct rnp_pcs_operations *pcs_ops;
+	bool one_pf_with_two_dma;
+	int reg_off;
+	int adapter_cnt;
+	char lane_mask;
+	int hi_dma;
+	int total_queue_pair_cnts;
+	int dma2_in_1pf;
+	char *hw_addr;
+};
+
+/* Error Codes */
+#define RNP_ERR_EEPROM -1
+#define RNP_ERR_EEPROM_CHECKSUM -2
+#define RNP_ERR_PHY -3
+#define RNP_ERR_CONFIG -4
+#define RNP_ERR_PARAM -5
+#define RNP_ERR_MAC_TYPE -6
+#define RNP_ERR_UNKNOWN_PHY -7
+#define RNP_ERR_LINK_SETUP -8
+#define RNP_ERR_ADAPTER_STOPPED -9
+#define RNP_ERR_INVALID_MAC_ADDR -10
+#define RNP_ERR_DEVICE_NOT_SUPPORTED -11
+#define RNP_ERR_MASTER_REQUESTS_PENDING -12
+#define RNP_ERR_INVALID_LINK_SETTINGS -13
+#define RNP_ERR_AUTONEG_NOT_COMPLETE -14
+#define RNP_ERR_RESET_FAILED -15
+#define RNP_ERR_SWFW_SYNC -16
+#define RNP_ERR_PHY_ADDR_INVALID -17
+#define RNP_ERR_I2C -18
+#define RNP_ERR_SFP_NOT_SUPPORTED -19
+#define RNP_ERR_SFP_NOT_PRESENT -20
+#define RNP_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define RNP_ERR_FDIR_REINIT_FAILED -23
+#define RNP_ERR_EEPROM_VERSION -24
+#define RNP_ERR_NO_SPACE -25
+#define RNP_ERR_OVERTEMP -26
+#define RNP_ERR_FC_NOT_NEGOTIATED -27
+#define RNP_ERR_FC_NOT_SUPPORTED -28
+#define RNP_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define RNP_ERR_PBA_SECTION -31
+#define RNP_ERR_INVALID_ARGUMENT -32
+#define RNP_ERR_HOST_INTERFACE_COMMAND -33
+#define RNP_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define RNP_RAH_AV 0x80000000
+/* eth fix code */
+#define RNP_FCTRL_BPE BIT(10)
+#define RNP_FCTRL_UPE BIT(9)
+#define RNP_FCTRL_MPE BIT(8)
+
+#define RNP_MCSTCTRL_MTA BIT(2)
+#define RNP_MCSTCTRL_UTA BIT(3)
+
+#define RNP_MAX_LAYER2_FILTERS (16)
+#define RNP_MAX_TUPLE5_FILTERS (128)
+#define RNP_MAX_TCAM_FILTERS (4096)
+
+#define RNP_SRC_IP_MASK BIT(0)
+#define RNP_DST_IP_MASK BIT(1)
+#define RNP_SRC_PORT_MASK BIT(2)
+#define RNP_DST_PORT_MASK BIT(3)
+#define RNP_L4_PROTO_MASK BIT(4)
+#endif /* _RNP_TYPE_H_ */
diff --git a/drivers/net/ethernet/mucse/rnp/version.h b/drivers/net/ethernet/mucse/rnp/version.h
new file mode 100644
index 000000000000..ca720dae0c47
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/version.h
@@ -0,0 +1,4 @@
+#ifndef VERSION_H
+#define VERSION_H
+#define GIT_COMMIT " 83aa5f1"
+#endif