diff --git a/boards/qemu/riscv32/Kconfig.defconfig b/boards/qemu/riscv32/Kconfig.defconfig index 8564054e21de5..f4a513a8ce963 100644 --- a/boards/qemu/riscv32/Kconfig.defconfig +++ b/boards/qemu/riscv32/Kconfig.defconfig @@ -3,6 +3,26 @@ if BOARD_QEMU_RISCV32 +if NETWORKING + +choice NET_QEMU_NETWORKING + default NET_QEMU_USER +endchoice + +configdefault USB_HOST_STACK + default y + +configdefault USBH_CDC_ECM_CLASS + default y + +endif # NETWORKING + +configdefault NET_DRIVERS + default n + +configdefault NET_L2_ETHERNET + default y + # Use thread local storage by default so that this feature gets more CI coverage. config THREAD_LOCAL_STORAGE default y diff --git a/boards/qemu/riscv32/board.cmake b/boards/qemu/riscv32/board.cmake index bac8478bb54f7..06632f1434d1c 100644 --- a/boards/qemu/riscv32/board.cmake +++ b/boards/qemu/riscv32/board.cmake @@ -34,4 +34,6 @@ set(QEMU_FLAGS_${ARCH} -cpu ${qemu_riscv_cpu} ) +set(QEMU_PCI_OHCI_FLAGS addr=2.0) + include(${ZEPHYR_BASE}/boards/common/qemu.board.cmake) diff --git a/boards/qemu/riscv32/qemu_riscv32.dts b/boards/qemu/riscv32/qemu_riscv32.dts index 2c38ca1da1d7c..8fabe27b2ee7a 100644 --- a/boards/qemu/riscv32/qemu_riscv32.dts +++ b/boards/qemu/riscv32/qemu_riscv32.dts @@ -9,9 +9,23 @@ zephyr,console = &uart0; zephyr,shell-uart = &uart0; zephyr,sram = &ram0; + zephyr,uhc = &zephyr_uhc0; }; }; &uart0 { status = "okay"; }; + +&pcie { + zephyr_uhc0: pcie@2,0 { + compatible = "generic-ohci"; + reg = <0x1000 0 0 0 0>; + vendor-id = <0x106b>; + device-id = <0x003f>; + interrupts = <1>; + interrupt-parent = <&pcie>; + maximum-speed = "full-speed"; + status = "okay"; + }; +}; diff --git a/boards/qemu/riscv32/qemu_riscv32_qemu_virt_riscv32_smp.dts b/boards/qemu/riscv32/qemu_riscv32_qemu_virt_riscv32_smp.dts index 2c38ca1da1d7c..8fabe27b2ee7a 100644 --- a/boards/qemu/riscv32/qemu_riscv32_qemu_virt_riscv32_smp.dts +++ b/boards/qemu/riscv32/qemu_riscv32_qemu_virt_riscv32_smp.dts @@ -9,9 +9,23 @@ zephyr,console = &uart0; 
zephyr,shell-uart = &uart0; zephyr,sram = &ram0; + zephyr,uhc = &zephyr_uhc0; }; }; &uart0 { status = "okay"; }; + +&pcie { + zephyr_uhc0: pcie@2,0 { + compatible = "generic-ohci"; + reg = <0x1000 0 0 0 0>; + vendor-id = <0x106b>; + device-id = <0x003f>; + interrupts = <1>; + interrupt-parent = <&pcie>; + maximum-speed = "full-speed"; + status = "okay"; + }; +}; diff --git a/cmake/emu/qemu.cmake b/cmake/emu/qemu.cmake index 38df6d3b3c3e0..b94fd758779d7 100644 --- a/cmake/emu/qemu.cmake +++ b/cmake/emu/qemu.cmake @@ -419,6 +419,40 @@ else() add_custom_target(qemu_nvme_disk) endif() +if(CONFIG_UHC_OHCI_PCI) + list(APPEND QEMU_EXTRA_FLAGS + -device pci-ohci,${QEMU_PCI_OHCI_FLAGS} + ) +endif() + +# If we are using a suitable ethernet driver inside qemu, then these options +# must be set, otherwise a zephyr instance cannot receive any network packets. +# The Qemu supported ethernet driver should define CONFIG_ETH_NIC_MODEL +# string that tells what nic model Qemu should use. +if(CONFIG_QEMU_TARGET) + if((CONFIG_NET_QEMU_ETHERNET OR CONFIG_NET_QEMU_USER) AND NOT CONFIG_ETH_NIC_MODEL) + message(FATAL_ERROR " + No Qemu ethernet driver configured! 
+ Enable Qemu supported ethernet driver like e1000 at drivers/ethernet" + ) + elseif(CONFIG_NET_QEMU_ETHERNET) + if(CONFIG_ETH_QEMU_EXTRA_ARGS) + set(NET_QEMU_ETH_EXTRA_ARGS ",${CONFIG_ETH_QEMU_EXTRA_ARGS}") + endif() + list(APPEND QEMU_EXTRA_FLAGS + -netdev tap,id=n1,script=no,downscript=no,ifname=${CONFIG_ETH_QEMU_IFACE_NAME}${NET_QEMU_ETH_EXTRA_ARGS} -device ${CONFIG_ETH_NIC_MODEL},netdev=n1 + ) + elseif(CONFIG_NET_QEMU_USER) + list(APPEND QEMU_EXTRA_FLAGS + -netdev user,id=n1,${CONFIG_NET_QEMU_USER_EXTRA_ARGS} -device ${CONFIG_ETH_NIC_MODEL},netdev=n1 + ) + else() + list(APPEND QEMU_EXTRA_FLAGS + -net none + ) + endif() +endif() + if(NOT QEMU_PIPE) set(QEMU_PIPE_COMMENT "\nTo exit from QEMU enter: 'CTRL+a, x'\n") endif() diff --git a/cmake/modules/kernel.cmake b/cmake/modules/kernel.cmake index 310a836eebcf5..018fed50304bf 100644 --- a/cmake/modules/kernel.cmake +++ b/cmake/modules/kernel.cmake @@ -184,34 +184,6 @@ foreach(dir ${BOARD_DIRECTORIES}) include(${dir}/board.cmake OPTIONAL) endforeach() -# If we are using a suitable ethernet driver inside qemu, then these options -# must be set, otherwise a zephyr instance cannot receive any network packets. -# The Qemu supported ethernet driver should define CONFIG_ETH_NIC_MODEL -# string that tells what nic model Qemu should use. -if(CONFIG_QEMU_TARGET) - if((CONFIG_NET_QEMU_ETHERNET OR CONFIG_NET_QEMU_USER) AND NOT CONFIG_ETH_NIC_MODEL) - message(FATAL_ERROR " - No Qemu ethernet driver configured! 
- Enable Qemu supported ethernet driver like e1000 at drivers/ethernet" - ) - elseif(CONFIG_NET_QEMU_ETHERNET) - if(CONFIG_ETH_QEMU_EXTRA_ARGS) - set(NET_QEMU_ETH_EXTRA_ARGS ",${CONFIG_ETH_QEMU_EXTRA_ARGS}") - endif() - list(APPEND QEMU_FLAGS_${ARCH} - -nic tap,model=${CONFIG_ETH_NIC_MODEL},script=no,downscript=no,ifname=${CONFIG_ETH_QEMU_IFACE_NAME}${NET_QEMU_ETH_EXTRA_ARGS} - ) - elseif(CONFIG_NET_QEMU_USER) - list(APPEND QEMU_FLAGS_${ARCH} - -nic user,model=${CONFIG_ETH_NIC_MODEL},${CONFIG_NET_QEMU_USER_EXTRA_ARGS} - ) - else() - list(APPEND QEMU_FLAGS_${ARCH} - -net none - ) - endif() -endif() - # General purpose Zephyr target. # This target can be used for custom zephyr settings that needs to be used elsewhere in the build system # diff --git a/drivers/usb/uhc/CMakeLists.txt b/drivers/usb/uhc/CMakeLists.txt index 69f1cea8433b2..3ec2886328a16 100644 --- a/drivers/usb/uhc/CMakeLists.txt +++ b/drivers/usb/uhc/CMakeLists.txt @@ -6,6 +6,7 @@ zephyr_library() zephyr_library_sources(uhc_common.c) zephyr_library_sources_ifdef(CONFIG_UHC_MAX3421E uhc_max3421e.c) +zephyr_library_sources_ifdef(CONFIG_UHC_OHCI uhc_ohci.c) zephyr_library_sources_ifdef(CONFIG_UHC_VIRTUAL uhc_virtual.c) zephyr_library_sources_ifdef(CONFIG_UHC_NXP_EHCI uhc_mcux_common.c uhc_mcux_ehci.c) zephyr_library_sources_ifdef(CONFIG_UHC_NXP_KHCI uhc_mcux_common.c uhc_mcux_khci.c) diff --git a/drivers/usb/uhc/Kconfig b/drivers/usb/uhc/Kconfig index a9cdbad42e4d2..5fc12c9c5b1d2 100644 --- a/drivers/usb/uhc/Kconfig +++ b/drivers/usb/uhc/Kconfig @@ -44,6 +44,7 @@ module-str = uhc drv source "subsys/logging/Kconfig.template.log_config" source "drivers/usb/uhc/Kconfig.max3421e" +source "drivers/usb/uhc/Kconfig.ohci" source "drivers/usb/uhc/Kconfig.virtual" source "drivers/usb/uhc/Kconfig.mcux" diff --git a/drivers/usb/uhc/Kconfig.ohci b/drivers/usb/uhc/Kconfig.ohci new file mode 100644 index 0000000000000..d11ed04da6fae --- /dev/null +++ b/drivers/usb/uhc/Kconfig.ohci @@ -0,0 +1,45 @@ +# 
SPDX-FileCopyrightText: Copyright The Zephyr Project Contributors +# SPDX-License-Identifier: Apache-2.0 + +config UHC_OHCI + bool "Generic OHCI USB host controller driver" + default y + depends on DT_HAS_GENERIC_OHCI_ENABLED + depends on !64BIT + select EVENTS + help + Generic OHCI USB host controller driver for any OHCI + controller (OpenHCI 1.0a compatible). Only supported on + 32 bit, as OHCI only uses 32 bit registers for addresses. + +if UHC_OHCI + +config UHC_OHCI_PCI + bool + default y + depends on $(dt_compat_on_bus,$(DT_COMPAT_GENERIC_OHCI),pcie) + select PCIE + select DYNAMIC_INTERRUPTS if !PCIE_CONTROLLER + +config UHC_OHCI_BULK_SLOTS + int "Number of concurrent bulk/interrupt transfer slots" + range 1 8 + default 3 + help + Number of concurrently active bulk/interrupt transfer slots. One slot + is always reserved for control transfers. Each additional slot allows + one more bulk or interrupt endpoint to stay posted simultaneously + (e.g. both IN and OUT for CDC-ECM data plus the interrupt notification + endpoint). Memory cost is one ED plus UHC_OHCI_MAX_TDS transfer + descriptors per slot. + +config UHC_OHCI_MAX_TDS + int "Maximum TDs per transfer slot" + range 4 128 + default 64 + help + Maximum number of OHCI transfer descriptors per slot. The driver + allocates one pool of this size for the control slot and for each + bulk/interrupt slot (see UHC_OHCI_BULK_SLOTS). 
+ +endif # UHC_OHCI diff --git a/drivers/usb/uhc/uhc_ohci.c b/drivers/usb/uhc/uhc_ohci.c new file mode 100644 index 0000000000000..b5a0a8f646bc4 --- /dev/null +++ b/drivers/usb/uhc/uhc_ohci.c @@ -0,0 +1,1183 @@ +/* + * SPDX-FileCopyrightText: Copyright The Zephyr Project Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +#define DT_DRV_COMPAT generic_ohci + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "uhc_common.h" + +#include +LOG_MODULE_REGISTER(uhc_ohci, CONFIG_UHC_DRIVER_LOG_LEVEL); + +/* Required by DEVICE_MMIO_NAMED_* macros */ +#define DEV_CFG(_dev) ((const struct ohci_config *)(_dev)->config) +#define DEV_DATA(_dev) ((struct ohci_data *)(uhc_get_private(_dev))) + +#define OHCI_REVISION 0x00U +#define OHCI_CONTROL 0x04U +#define OHCI_CMDSTATUS 0x08U +#define OHCI_INTRSTATUS 0x0cU +#define OHCI_INTRENABLE 0x10U +#define OHCI_INTRDISABLE 0x14U +#define OHCI_HCCA 0x18U +#define OHCI_CONTROL_HEAD_ED 0x20U +#define OHCI_BULK_HEAD_ED 0x28U +#define OHCI_FM_INTERVAL 0x34U +#define OHCI_PERIODIC_START 0x40U +#define OHCI_LS_THRESHOLD 0x44U +#define OHCI_RH_DESC_A 0x48U +#define OHCI_RH_STATUS 0x50U +#define OHCI_RH_PORT_STATUS(n) (0x54U + ((n) * 4U)) + +#define OHCI_CONTROL_PLE BIT(2) +#define OHCI_CONTROL_CLE BIT(4) +#define OHCI_CONTROL_BLE BIT(5) +#define OHCI_CONTROL_HCFS_MASK GENMASK(7, 6) +#define OHCI_CONTROL_HCFS_RESET (0U << 6) +#define OHCI_CONTROL_HCFS_RESUME (1U << 6) +#define OHCI_CONTROL_HCFS_OPERATIONAL (2U << 6) +#define OHCI_CONTROL_HCFS_SUSPEND (3U << 6) + +#define OHCI_CMDSTATUS_HCR BIT(0) +#define OHCI_CMDSTATUS_CLF BIT(1) +#define OHCI_CMDSTATUS_BLF BIT(2) + +#define OHCI_INTR_WDH BIT(1) +#define OHCI_INTR_RD BIT(3) +#define OHCI_INTR_UE BIT(4) +#define OHCI_INTR_RHSC BIT(6) +#define OHCI_INTR_MIE BIT(31) + +#define OHCI_RHDA_NDP_MASK GENMASK(7, 0) + +#define OHCI_RHPS_CCS BIT(0) +#define OHCI_RHPS_PRS BIT(4) +#define OHCI_RHPS_PPS BIT(8) +#define 
OHCI_RHPS_LSDA BIT(9) +#define OHCI_RHPS_CSC BIT(16) +#define OHCI_RHPS_PESC BIT(17) +#define OHCI_RHPS_PSSC BIT(18) +#define OHCI_RHPS_OCIC BIT(19) +#define OHCI_RHPS_PRSC BIT(20) +#define OHCI_RHPS_W1C_MASK (OHCI_RHPS_CSC | OHCI_RHPS_PESC | OHCI_RHPS_PSSC | \ + OHCI_RHPS_OCIC | OHCI_RHPS_PRSC) + +#define OHCI_ED_FA_SHIFT 0U +#define OHCI_ED_EN_SHIFT 7U +#define OHCI_ED_D_SHIFT 11U +#define OHCI_ED_D_FROM_TD 0U +#define OHCI_ED_D_OUT 1U +#define OHCI_ED_D_IN 2U +#define OHCI_ED_SPEED BIT(13) +#define OHCI_ED_SKIP BIT(14) +#define OHCI_ED_MPS_SHIFT 16U +#define OHCI_ED_HEAD_HALTED BIT(0) +#define OHCI_ED_HEAD_CARRY BIT(1) +#define OHCI_ED_HEAD_PTR_MASK GENMASK(31, 4) + +#define OHCI_TD_CC_SHIFT 28U +#define OHCI_TD_CC_MASK GENMASK(31, 28) +#define OHCI_TD_CC_NO_ERROR 0U +#define OHCI_TD_CC_DATA_UNDERRUN 9U +#define OHCI_TD_CC_NOT_ACCESSED 15U +#define OHCI_TD_T_SHIFT 24U +#define OHCI_TD_T_TOGGLE_ED 0U +#define OHCI_TD_T_DATA0 2U +#define OHCI_TD_T_DATA1 3U +#define OHCI_TD_DI_SHIFT 21U +#define OHCI_TD_DI_NO_INTERRUPT 7U +#define OHCI_TD_DP_SHIFT 19U +#define OHCI_TD_DP_SETUP 0U +#define OHCI_TD_DP_OUT 1U +#define OHCI_TD_DP_IN 2U +#define OHCI_TD_R BIT(18) +#define OHCI_TD_PTR_MASK GENMASK(31, 4) + +#define OHCI_RESET_TIMEOUT_US 10000U +#define OHCI_BUS_RESET_TIME_MS 50U +#define OHCI_DEFAULT_FMINTERVAL 0x2edfU +#define OHCI_DEFAULT_PERIODIC_START 0x2a2fU +#define OHCI_DEFAULT_LS_THRESHOLD 0x0628U +#define OHCI_PCI_CMD_INTX_DISABLE BIT(10) + +struct ohci_hw_ed { + uint32_t flags; + uint32_t tailp; + uint32_t headp; + uint32_t next; +} __aligned(16); + +struct ohci_hw_td { + uint32_t flags; + uint32_t cbp; + uint32_t next; + uint32_t be; +} __aligned(16); + +struct ohci_hcca { + uint32_t intr_table[32]; + uint16_t frame_no; + uint16_t pad; + uint32_t done_head; +} __aligned(256); + +struct ohci_td { + struct ohci_hw_td hw; + uintptr_t data; + uint16_t len; + uint8_t allow_short; + uint8_t dir_in; +}; + +struct ohci_config { + DEVICE_MMIO_NAMED_ROM(reg_base); 
+#ifdef CONFIG_UHC_OHCI_PCI + struct pcie_dev *pcie; +#endif + void (*irq_enable_func)(const struct device *dev); +}; + +/* + * One transfer slot: an ED plus its private TD pool. + * ed must be the first field so the struct's alignment satisfies the OHCI + * hardware requirement of 16-byte-aligned EDs. + */ +struct ohci_xfer_slot { + struct ohci_hw_ed ed; /* Must be first — requires 16-byte alignment */ + struct ohci_td tds[CONFIG_UHC_OHCI_MAX_TDS]; + struct uhc_transfer *xfer; + uint16_t td_count; +}; + +/* + * slot[OHCI_CTRL_SLOT] = control transfers (OHCI control list) + * slot[1 .. OHCI_MAX_SLOTS-1] = bulk/interrupt transfers (OHCI bulk list) + */ +#define OHCI_CTRL_SLOT 0U +#define OHCI_MAX_SLOTS (1U + CONFIG_UHC_OHCI_BULK_SLOTS) + +struct ohci_data { + DEVICE_MMIO_NAMED_RAM(reg_base); + struct ohci_hcca hcca; + struct ohci_xfer_slot slots[OHCI_MAX_SLOTS]; + uint8_t port_count; + uint8_t port_connected; + uint8_t bus_suspended; + uint8_t bulk_toggle[128][32]; +}; + +static inline uintptr_t ohci_base(const struct device *dev) +{ + return DEVICE_MMIO_NAMED_GET(dev, reg_base); +} + +static inline uint32_t ohci_read(const struct device *dev, uint32_t reg) +{ + return sys_read32(ohci_base(dev) + reg); +} + +static inline void ohci_write(const struct device *dev, uint32_t reg, uint32_t value) +{ + sys_write32(value, ohci_base(dev) + reg); +} + +static inline uint32_t ohci_dma_read32(const uint32_t *value) +{ + return *(volatile const uint32_t *)value; +} + +static inline uint32_t ohci_head_ptr(const struct ohci_hw_ed *ed) +{ + return ohci_dma_read32(&ed->headp) & OHCI_ED_HEAD_PTR_MASK; +} + +static inline uint32_t ohci_td_cc(const struct ohci_hw_td *td) +{ + return FIELD_GET(OHCI_TD_CC_MASK, ohci_dma_read32(&td->flags)); +} + +static inline uint32_t ohci_td_phys(const struct ohci_td *td) +{ + return (uint32_t)k_mem_phys_addr((void *)&td->hw); +} + +static inline uint8_t ohci_ep_toggle_idx(uint8_t ep) +{ + return USB_EP_GET_IDX(ep) + (USB_EP_DIR_IS_IN(ep) ? 
16U : 0U); +} + +static inline uint32_t ohci_bulk_toggle_get(struct ohci_data *data, struct uhc_transfer *xfer) +{ + return data->bulk_toggle[xfer->udev->addr][ohci_ep_toggle_idx(xfer->ep)] != 0U ? + OHCI_TD_T_DATA1 : OHCI_TD_T_DATA0; +} + +static inline void ohci_bulk_toggle_save(struct ohci_data *data, struct uhc_transfer *xfer, + uint32_t ed_headp) +{ + data->bulk_toggle[xfer->udev->addr][ohci_ep_toggle_idx(xfer->ep)] = + (ed_headp & OHCI_ED_HEAD_CARRY) != 0U; +} + +static inline uint32_t ohci_phys(const void *addr) +{ + return (uint32_t)k_mem_phys_addr((void *)addr); +} + +static int ohci_lock(const struct device *dev) +{ + return uhc_lock_internal(dev, K_FOREVER); +} + +static int ohci_unlock(const struct device *dev) +{ + return uhc_unlock_internal(dev); +} + +static int ohci_td_status_to_errno(uint32_t cc, bool allow_short) +{ + switch (cc) { + case OHCI_TD_CC_NO_ERROR: + return 0; + case OHCI_TD_CC_DATA_UNDERRUN: + return allow_short ? 0 : -EIO; + case 4: + return -EPIPE; + case 8: + return -EOVERFLOW; + case 3: + return -EAGAIN; + case 5: + return -ETIMEDOUT; + default: + return -EIO; + } +} + +static size_t ohci_td_actual_len(const struct ohci_td *td) +{ + uint32_t cbp; + + if (td->len == 0U) { + return 0U; + } + + cbp = ohci_dma_read32(&td->hw.cbp); + if (cbp == 0U) { + return td->len; + } + + if (cbp <= td->data) { + return 0U; + } + + return MIN((size_t)(cbp - td->data), (size_t)td->len); +} + +static void ohci_clear_slot(struct ohci_xfer_slot *slot) +{ + /* + * Reset the ED so the HC skips it cleanly while it holds no TDs. + * + * Order matters: + * 1. Set OHCI_ED_SKIP first so the HC will skip this ED if it is + * traversing the list concurrently. + * 2. Clear headp/tailp to mark the TD queue as empty. + * 3. Leave ed.next UNTOUCHED — bulk slot EDs are pre-linked into a + * permanent chain by ohci_setup_bulk_list(); zeroing next would + * break the chain the moment any slot completes a transfer. 
+ * + * For the control slot (slot 0) ed.next is always 0, so no-op. + */ + slot->ed.flags = OHCI_ED_SKIP; + compiler_barrier(); + slot->ed.headp = 0U; + slot->ed.tailp = 0U; + /* ed.next deliberately not modified */ + + memset(slot->tds, 0, sizeof(slot->tds)); + slot->td_count = 0U; + slot->xfer = NULL; +} + +static void ohci_clear_all_slots(struct ohci_data *data) +{ + for (unsigned int i = 0U; i < OHCI_MAX_SLOTS; i++) { + ohci_clear_slot(&data->slots[i]); + } +} + +static int ohci_wait_reset(const struct device *dev) +{ + for (int i = 0; i < OHCI_RESET_TIMEOUT_US; i += 10) { + if ((ohci_read(dev, OHCI_CMDSTATUS) & OHCI_CMDSTATUS_HCR) == 0U) { + return 0; + } + + k_busy_wait(10); + } + + return -ETIMEDOUT; +} + +static size_t ohci_chunk_len(uintptr_t addr, size_t remaining, uint16_t mps) +{ + size_t page_remaining = 0x1000U - (addr & 0xfffU); + size_t chunk = MIN(remaining, (size_t)mps); + + return MIN(chunk, page_remaining); +} + +static void ohci_fill_td(struct ohci_td *td, uintptr_t data, size_t len, + uint32_t dp, uint32_t toggle, bool allow_short, + bool dir_in, bool interrupt_on_done) +{ + uint32_t flags = FIELD_PREP(OHCI_TD_CC_MASK, OHCI_TD_CC_NOT_ACCESSED) | + FIELD_PREP(GENMASK(25, 24), toggle) | + FIELD_PREP(GENMASK(20, 19), dp) | + FIELD_PREP(GENMASK(23, 21), + interrupt_on_done ? 0U : OHCI_TD_DI_NO_INTERRUPT); + + if (allow_short) { + flags |= OHCI_TD_R; + } + + td->hw.flags = flags; + td->hw.cbp = len ? ohci_phys((void *)data) : 0U; + td->hw.be = len ? ohci_phys((void *)(data + len - 1U)) : 0U; + td->data = len ? 
ohci_phys((void *)data) : 0U; + td->len = (uint16_t)len; + td->allow_short = allow_short; + td->dir_in = dir_in; + td->hw.next = 0U; +} + +static int ohci_build_control_chain(struct ohci_xfer_slot *slot, struct uhc_transfer *xfer) +{ + uint8_t *buffer = NULL; + size_t remaining = 0U; + uintptr_t ptr = 0U; + uint16_t td_idx = 0U; + uint32_t toggle = OHCI_TD_T_DATA1; + bool dir_in = USB_EP_DIR_IS_IN(xfer->ep); + const struct usb_setup_packet *setup = (const struct usb_setup_packet *)xfer->setup_pkt; + uint16_t w_length = sys_le16_to_cpu(setup->wLength); + + if (CONFIG_UHC_OHCI_MAX_TDS < 3) { + return -ENOMEM; + } + + ohci_fill_td(&slot->tds[td_idx++], (uintptr_t)xfer->setup_pkt, sizeof(xfer->setup_pkt), + OHCI_TD_DP_SETUP, OHCI_TD_T_DATA0, false, false, false); + + if (xfer->buf != NULL) { + if (dir_in) { + buffer = net_buf_tail(xfer->buf); + remaining = MIN((size_t)w_length, net_buf_tailroom(xfer->buf)); + } else { + buffer = xfer->buf->data; + remaining = MIN((size_t)w_length, (size_t)xfer->buf->len); + } + + ptr = (uintptr_t)buffer; + while (remaining != 0U) { + size_t chunk; + + if (td_idx >= (CONFIG_UHC_OHCI_MAX_TDS - 1)) { + return -ENOMEM; + } + + chunk = ohci_chunk_len(ptr, remaining, xfer->mps); + ohci_fill_td(&slot->tds[td_idx++], ptr, chunk, + dir_in ? OHCI_TD_DP_IN : OHCI_TD_DP_OUT, + toggle, dir_in, dir_in, false); + ptr += chunk; + remaining -= chunk; + toggle = (toggle == OHCI_TD_T_DATA1) ? OHCI_TD_T_DATA0 : OHCI_TD_T_DATA1; + } + } + + if (!xfer->no_status) { + if (td_idx >= (CONFIG_UHC_OHCI_MAX_TDS - 1)) { + return -ENOMEM; + } + + ohci_fill_td(&slot->tds[td_idx++], 0U, 0U, + dir_in ? 
OHCI_TD_DP_OUT : OHCI_TD_DP_IN, + OHCI_TD_T_DATA1, false, !dir_in, true); + } else { + slot->tds[td_idx - 1U].hw.flags &= ~GENMASK(23, 21); + } + + slot->td_count = td_idx + 1U; + return 0; +} + +static int ohci_build_bulk_chain(struct ohci_xfer_slot *slot, struct uhc_transfer *xfer) +{ + uint8_t *buffer; + size_t remaining; + uintptr_t ptr; + uint16_t td_idx = 0U; + bool dir_in = USB_EP_DIR_IS_IN(xfer->ep); + + if (xfer->buf == NULL) { + return -EINVAL; + } + + if (dir_in) { + buffer = net_buf_tail(xfer->buf); + remaining = net_buf_tailroom(xfer->buf); + } else { + buffer = xfer->buf->data; + remaining = xfer->buf->len; + } + + ptr = (uintptr_t)buffer; + while (remaining != 0U) { + size_t chunk; + + if (td_idx >= (CONFIG_UHC_OHCI_MAX_TDS - 1)) { + return -ENOMEM; + } + + /* + * Bulk TDs can carry multiple USB packets; splitting by endpoint MPS + * causes excessive TD chaining and may coalesce multiple Ethernet + * frames into one completion. Keep TDs page-bounded, not MPS-bounded. + */ + chunk = ohci_chunk_len(ptr, remaining, UINT16_MAX); + /* + * For IN transfers every TD must have DI=0 (interrupt on done). + * When a short packet arrives the OHCI HC sets headP = tailP and + * adds the TD to the done list. If that TD has DI=7 (no + * interrupt) the WritebackDoneHead interrupt never fires and the + * driver never learns data arrived. Setting DI=0 on every IN TD + * ensures WDH fires on the very first short-packet completion. + * For OUT transfers only the last TD generates an interrupt. + */ + ohci_fill_td(&slot->tds[td_idx], ptr, chunk, + dir_in ? 
OHCI_TD_DP_IN : OHCI_TD_DP_OUT, + OHCI_TD_T_TOGGLE_ED, + dir_in, dir_in, dir_in); + ptr += chunk; + remaining -= chunk; + td_idx++; + } + + if (td_idx == 0U) { + return -EINVAL; + } + + slot->tds[td_idx - 1U].hw.flags &= ~GENMASK(23, 21); + slot->td_count = td_idx + 1U; + return 0; +} + +static void ohci_link_chain(struct ohci_data *data, struct ohci_xfer_slot *slot, + struct uhc_transfer *xfer) +{ + uint32_t ed_flags = FIELD_PREP(GENMASK(6, 0), xfer->udev->addr) | + FIELD_PREP(GENMASK(10, 7), USB_EP_GET_IDX(xfer->ep)) | + FIELD_PREP(GENMASK(31, 16), xfer->mps) | + FIELD_PREP(GENMASK(12, 11), OHCI_ED_D_FROM_TD); + + for (uint16_t i = 0U; i < (slot->td_count - 1U); i++) { + slot->tds[i].hw.next = ohci_td_phys(&slot->tds[i + 1U]); + } + memset(&slot->tds[slot->td_count - 1U], 0, sizeof(slot->tds[0])); + + if (xfer->udev->speed == USB_SPEED_SPEED_LS) { + ed_flags |= OHCI_ED_SPEED; + } + + /* + * Preserve the next pointer so the pre-linked bulk chain written at + * enable time is not clobbered. The ed.next field is set up once in + * ohci_setup_bulk_list() and must not be touched here. + */ + slot->ed.flags = ed_flags; + slot->ed.headp = ohci_td_phys(&slot->tds[0]); + if (xfer->type == USB_EP_TYPE_BULK && ohci_bulk_toggle_get(data, xfer) == OHCI_TD_T_DATA1) { + slot->ed.headp |= OHCI_ED_HEAD_CARRY; + } + slot->ed.tailp = ohci_td_phys(&slot->tds[slot->td_count - 1U]); +} + +/* Return true if xfer is currently being processed in any slot. */ +static bool ohci_xfer_is_active(const struct ohci_data *data, + const struct uhc_transfer *xfer) +{ + for (unsigned int i = 0U; i < OHCI_MAX_SLOTS; i++) { + if (data->slots[i].xfer == xfer) { + return true; + } + } + return false; +} + +/* Return true if endpoint ep already has an active slot. 
*/ +static bool ohci_ep_is_active(const struct ohci_data *data, uint8_t ep) +{ + for (unsigned int i = 0U; i < OHCI_MAX_SLOTS; i++) { + if (data->slots[i].xfer != NULL && data->slots[i].xfer->ep == ep) { + return true; + } + } + return false; +} + +/* + * Write the permanent bulk-list head once and pre-link all bulk slot EDs with + * the SKIP bit set. The head register never needs to be updated afterwards; + * scheduling a new bulk transfer just clears the SKIP bit and sets BLF. + */ +static void ohci_setup_bulk_list(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + + for (unsigned int i = 1U; i < OHCI_MAX_SLOTS; i++) { + data->slots[i].ed.flags = OHCI_ED_SKIP; + data->slots[i].ed.headp = 0U; + data->slots[i].ed.tailp = 0U; + data->slots[i].ed.next = + (i + 1U < OHCI_MAX_SLOTS) + ? ohci_phys(&data->slots[i + 1U].ed) + : 0U; + } + compiler_barrier(); + ohci_write(dev, OHCI_BULK_HEAD_ED, + (OHCI_MAX_SLOTS > 1U) ? ohci_phys(&data->slots[1U].ed) : 0U); +} + +static int ohci_finish_slot(const struct device *dev, struct ohci_xfer_slot *slot) +{ + struct ohci_data *data = uhc_get_private(dev); + struct uhc_transfer *xfer = slot->xfer; + uint32_t ed_headp; + int err = 0; + size_t actual_in = 0U; + bool is_ctrl; + + if (xfer == NULL) { + return 0; + } + + compiler_barrier(); + ed_headp = ohci_dma_read32(&slot->ed.headp); + if ((ed_headp & OHCI_ED_HEAD_HALTED) == 0U && + (ed_headp & OHCI_ED_HEAD_PTR_MASK) != slot->ed.tailp) { + return 0; + } + + for (uint16_t i = 0U; i < (slot->td_count - 1U); i++) { + uint32_t cc = ohci_td_cc(&slot->tds[i].hw); + + /* + * NOT_ACCESSED means the HC never processed this TD — normal + * when a short-packet on an earlier TD terminated the transfer + * by setting headP = tailP. Treat as end-of-transfer, no error. 
+ */ + if (cc == OHCI_TD_CC_NOT_ACCESSED) { + break; + } + + err = ohci_td_status_to_errno(cc, slot->tds[i].allow_short != 0U); + if (err != 0) { + LOG_INF("TD %u completion code %u -> err %d", i, cc, err); + break; + } + + if (slot->tds[i].dir_in != 0U) { + actual_in += ohci_td_actual_len(&slot->tds[i]); + } + } + + if (err == 0 && xfer->type == USB_EP_TYPE_BULK) { + ohci_bulk_toggle_save(data, xfer, ed_headp); + } + + if (err == 0 && xfer->buf != NULL && USB_EP_DIR_IS_IN(xfer->ep) && actual_in != 0U) { + net_buf_add(xfer->buf, actual_in); + } + + is_ctrl = (slot == &data->slots[OHCI_CTRL_SLOT]); + LOG_DBG("Complete xfer ep 0x%02x err %d td_count %u actual_in %zu slot %u", + xfer->ep, err, slot->td_count, actual_in, + (unsigned int)(slot - data->slots)); + + if (is_ctrl) { + ohci_write(dev, OHCI_CONTROL_HEAD_ED, 0U); + } + + /* + * ohci_clear_slot() preserves ed.next and sets OHCI_ED_SKIP, so the + * HC will skip this ED cleanly after the clear. + */ + compiler_barrier(); + ohci_clear_slot(slot); + uhc_xfer_return(dev, xfer, err); + + return err; +} + +/* Find a free slot for the given transfer type. Returns NULL if none free. */ +static struct ohci_xfer_slot *ohci_alloc_slot(struct ohci_data *data, bool is_ctrl) +{ + if (is_ctrl) { + return (data->slots[OHCI_CTRL_SLOT].xfer == NULL) + ? 
&data->slots[OHCI_CTRL_SLOT] : NULL; + } + for (unsigned int i = 1U; i < OHCI_MAX_SLOTS; i++) { + if (data->slots[i].xfer == NULL) { + return &data->slots[i]; + } + } + return NULL; +} + +static int ohci_try_schedule_next(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + struct uhc_data *uhc = dev->data; + struct uhc_transfer *xfer, *tmp; + struct ohci_xfer_slot *slot; + int ret; + bool any_ctrl_free, any_bulk_free; + + if (!uhc_is_enabled(dev)) { + return 0; + } + + any_ctrl_free = (data->slots[OHCI_CTRL_SLOT].xfer == NULL); + any_bulk_free = (ohci_alloc_slot(data, false) != NULL); + + if (!any_ctrl_free && !any_bulk_free) { + return 0; + } + + /* + * Walk the pending queue. Use the _SAFE variant because + * uhc_xfer_return() may remove the node mid-iteration. + */ + SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&uhc->ctrl_xfers, xfer, tmp, node) { + bool is_ctrl = (xfer->type == USB_EP_TYPE_CONTROL); + + /* Skip transfers already being processed in a slot. */ + if (ohci_xfer_is_active(data, xfer)) { + continue; + } + + /* Drain transfers cancelled before they were scheduled. */ + if (xfer->err == -ECONNRESET) { + uhc_xfer_return(dev, xfer, -ECONNRESET); + continue; + } + + /* Check whether a suitable slot is available. */ + slot = ohci_alloc_slot(data, is_ctrl); + if (slot == NULL) { + continue; + } + + /* Don't schedule a second transfer for the same endpoint. 
*/ + if (ohci_ep_is_active(data, xfer->ep)) { + continue; + } + + LOG_DBG("Schedule xfer ep 0x%02x type %u mps %u slot %u", + xfer->ep, xfer->type, xfer->mps, + (unsigned int)(slot - data->slots)); + + if (is_ctrl) { + ret = ohci_build_control_chain(slot, xfer); + } else if (xfer->type == USB_EP_TYPE_BULK || + xfer->type == USB_EP_TYPE_INTERRUPT) { + ret = ohci_build_bulk_chain(slot, xfer); + } else { + ret = -ENOTSUP; + } + + if (ret != 0) { + LOG_DBG("Failed to build chain ep 0x%02x err %d", xfer->ep, ret); + uhc_xfer_return(dev, xfer, ret); + continue; + } + + ohci_link_chain(data, slot, xfer); + slot->xfer = xfer; + + LOG_DBG("ED flags=0x%08x head=0x%08x tail=0x%08x count=%u", + slot->ed.flags, slot->ed.headp, slot->ed.tailp, slot->td_count); + + /* Ensure all TD/ED writes reach hardware before kicking the HC. */ + compiler_barrier(); + + if (is_ctrl) { + ohci_write(dev, OHCI_CONTROL_HEAD_ED, ohci_phys(&slot->ed)); + ohci_write(dev, OHCI_CMDSTATUS, OHCI_CMDSTATUS_CLF); + } else { + /* Clear the SKIP bit to make this ED visible to the HC. */ + slot->ed.flags &= ~OHCI_ED_SKIP; + compiler_barrier(); + ohci_write(dev, OHCI_CMDSTATUS, OHCI_CMDSTATUS_BLF); + } + + /* Refresh free-slot flags after allocation. 
*/ + any_ctrl_free = (data->slots[OHCI_CTRL_SLOT].xfer == NULL); + any_bulk_free = (ohci_alloc_slot(data, false) != NULL); + if (!any_ctrl_free && !any_bulk_free) { + break; + } + } + + return 0; +} + +static void ohci_handle_root_hub_change(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + bool connected_now = false; + bool low_speed = false; + enum uhc_event_type evt; + + if (data->port_count == 0U) { + return; + } + + for (uint8_t port = 0U; port < data->port_count; port++) { + uint32_t status = ohci_read(dev, OHCI_RH_PORT_STATUS(port)); + uint32_t clear = status & OHCI_RHPS_W1C_MASK; + + LOG_DBG("RH port%u status 0x%08x (connected=%u)", port, status, + data->port_connected); + + if ((status & OHCI_RHPS_CCS) != 0U) { + connected_now = true; + if ((status & OHCI_RHPS_LSDA) != 0U) { + low_speed = true; + } + } + + if (clear != 0U) { + ohci_write(dev, OHCI_RH_PORT_STATUS(port), clear); + } + } + + if (connected_now && data->port_connected == 0U) { + data->port_connected = 1U; + evt = low_speed ? UHC_EVT_DEV_CONNECTED_LS : UHC_EVT_DEV_CONNECTED_FS; + LOG_DBG("Submit connect event %d from RH scan", evt); + uhc_submit_event(dev, evt, 0); + } else if (!connected_now && data->port_connected != 0U) { + data->port_connected = 0U; + uhc_submit_event(dev, UHC_EVT_DEV_REMOVED, 0); + } +} + +static void ohci_irq_handler(const struct device *dev, uint32_t irqs) +{ + struct ohci_data *data = uhc_get_private(dev); + + if ((irqs & OHCI_INTR_RHSC) != 0U) { + ohci_handle_root_hub_change(dev); + } + + if ((irqs & OHCI_INTR_WDH) != 0U) { + uint32_t done_head = ohci_dma_read32(&data->hcca.done_head); + + data->hcca.done_head = 0U; + compiler_barrier(); + + if (done_head != 0U) { + /* Check all slots; more than one may have completed. 
*/ + for (unsigned int i = 0U; i < OHCI_MAX_SLOTS; i++) { + ohci_finish_slot(dev, &data->slots[i]); + } + } + } + + if ((irqs & OHCI_INTR_RD) != 0U) { + uhc_submit_event(dev, UHC_EVT_RWUP, 0); + } + + if ((irqs & OHCI_INTR_UE) != 0U) { + uhc_submit_event(dev, UHC_EVT_ERROR, -EIO); + } + + (void)ohci_try_schedule_next(dev); +} + +static void ohci_isr(const struct device *dev) +{ + uint32_t irqs; + + while (true) { + irqs = ohci_read(dev, OHCI_INTRSTATUS); + + if (irqs == UINT32_MAX) { + /* Spurious interrupt with all bits set, ignore */ + return; + } + /* Only handle interrupts that are currently enabled (Linux pattern) */ + irqs &= ohci_read(dev, OHCI_INTRENABLE); + if (irqs == 0U) { + return; + } + + ohci_irq_handler(dev, irqs); + + ohci_write(dev, OHCI_INTRSTATUS, irqs); + } +} + +static int ohci_init(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + + memset(&data->hcca, 0, sizeof(data->hcca)); + memset(data->bulk_toggle, 0, sizeof(data->bulk_toggle)); + ohci_clear_all_slots(data); + data->port_connected = 0U; + data->bus_suspended = 0U; + + return 0; +} + +static int ohci_enable(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + uint32_t control; + + ohci_write(dev, OHCI_CMDSTATUS, OHCI_CMDSTATUS_HCR); + if (ohci_wait_reset(dev) != 0) { + return -ETIMEDOUT; + } + + memset(&data->hcca, 0, sizeof(data->hcca)); + memset(data->bulk_toggle, 0, sizeof(data->bulk_toggle)); + ohci_clear_all_slots(data); + ohci_write(dev, OHCI_INTRDISABLE, 0xffffffffU); + ohci_write(dev, OHCI_INTRSTATUS, 0xffffffffU); + ohci_write(dev, OHCI_FM_INTERVAL, OHCI_DEFAULT_FMINTERVAL); + ohci_write(dev, OHCI_PERIODIC_START, OHCI_DEFAULT_PERIODIC_START); + ohci_write(dev, OHCI_LS_THRESHOLD, OHCI_DEFAULT_LS_THRESHOLD); + ohci_write(dev, OHCI_HCCA, ohci_phys(&data->hcca)); + + /* Set up the permanent bulk-list chain before enabling the HC. 
*/ + ohci_setup_bulk_list(dev); + + control = ohci_read(dev, OHCI_CONTROL); + control &= ~OHCI_CONTROL_HCFS_MASK; + control |= OHCI_CONTROL_HCFS_OPERATIONAL | OHCI_CONTROL_CLE | OHCI_CONTROL_BLE; + ohci_write(dev, OHCI_CONTROL, control); + + for (uint8_t port = 0U; port < data->port_count; port++) { + ohci_write(dev, OHCI_RH_PORT_STATUS(port), OHCI_RHPS_PPS); + } + + ohci_write(dev, OHCI_INTRENABLE, + OHCI_INTR_MIE | OHCI_INTR_WDH | OHCI_INTR_RD | OHCI_INTR_RHSC | OHCI_INTR_UE); + + ohci_handle_root_hub_change(dev); + + return ohci_try_schedule_next(dev); +} + +static int ohci_disable(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + + ohci_write(dev, OHCI_INTRDISABLE, UINT32_MAX); + ohci_write(dev, OHCI_CONTROL, + (ohci_read(dev, OHCI_CONTROL) & ~OHCI_CONTROL_HCFS_MASK) | + OHCI_CONTROL_HCFS_RESET); + + for (unsigned int i = 0U; i < OHCI_MAX_SLOTS; i++) { + if (data->slots[i].xfer != NULL) { + struct uhc_transfer *xfer = data->slots[i].xfer; + + ohci_clear_slot(&data->slots[i]); + uhc_xfer_return(dev, xfer, -ECONNRESET); + } + } + + return 0; +} + +static int ohci_shutdown(const struct device *dev) +{ + return 0; +} + +static int ohci_bus_reset(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + int ret; + + if (data->port_count == 0U) { + return -ENODEV; + } + + for (uint8_t port = 0U; port < data->port_count; port++) { + uint32_t status = ohci_read(dev, OHCI_RH_PORT_STATUS(port)); + + if ((status & OHCI_RHPS_CCS) != 0U) { + ohci_write(dev, OHCI_RH_PORT_STATUS(port), OHCI_RHPS_PRS); + } + } + + k_msleep(OHCI_BUS_RESET_TIME_MS); + + for (uint8_t port = 0U; port < data->port_count; port++) { + ohci_write(dev, OHCI_RH_PORT_STATUS(port), OHCI_RHPS_PRSC); + } + + ret = uhc_submit_event(dev, UHC_EVT_RESETED, 0); + if (ret != 0) { + return ret; + } + + /* Re-check port status after reset to catch delayed presence updates. 
*/ + ohci_handle_root_hub_change(dev); + + return 0; +} + +static int ohci_sof_enable(const struct device *dev) +{ + ARG_UNUSED(dev); + return 0; +} + +static int ohci_bus_suspend(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + uint32_t control; + + if (data->bus_suspended != 0U) { + return -EALREADY; + } + + control = ohci_read(dev, OHCI_CONTROL) & ~OHCI_CONTROL_HCFS_MASK; + ohci_write(dev, OHCI_CONTROL, control | OHCI_CONTROL_HCFS_SUSPEND); + data->bus_suspended = 1U; + + return uhc_submit_event(dev, UHC_EVT_SUSPENDED, 0); +} + +static int ohci_bus_resume(const struct device *dev) +{ + struct ohci_data *data = uhc_get_private(dev); + uint32_t control; + + if (data->bus_suspended == 0U) { + return -EALREADY; + } + + control = ohci_read(dev, OHCI_CONTROL) & ~OHCI_CONTROL_HCFS_MASK; + ohci_write(dev, OHCI_CONTROL, control | OHCI_CONTROL_HCFS_RESUME); + k_msleep(20); + ohci_write(dev, OHCI_CONTROL, control | OHCI_CONTROL_HCFS_OPERATIONAL | + OHCI_CONTROL_CLE | OHCI_CONTROL_BLE); + data->bus_suspended = 0U; + + return uhc_submit_event(dev, UHC_EVT_RESUMED, 0); +} + +static int ohci_ep_enqueue(const struct device *dev, struct uhc_transfer *const xfer) +{ + int ret; + + ret = uhc_xfer_append(dev, xfer); + if (ret != 0) { + LOG_DBG("Append xfer ep 0x%02x failed %d", xfer->ep, ret); + return ret; + } + + ret = ohci_try_schedule_next(dev); + if (ret != 0) { + LOG_DBG("Schedule xfer ep 0x%02x failed %d", xfer->ep, ret); + } + + return ret; +} + +static int ohci_ep_dequeue(const struct device *dev, struct uhc_transfer *const xfer) +{ + struct ohci_data *data = uhc_get_private(dev); + + for (unsigned int i = 0U; i < OHCI_MAX_SLOTS; i++) { + if (data->slots[i].xfer != xfer) { + continue; + } + /* + * CH9 request timeout paths expect an in-flight transfer to be + * cancelable. Stop the relevant list, free the slot, and return + * the transfer with -ECONNRESET so upper layers can recover. 
+ */ + if (i == OHCI_CTRL_SLOT) { + ohci_write(dev, OHCI_CONTROL_HEAD_ED, 0U); + } else { + data->slots[i].ed.flags |= OHCI_ED_SKIP; + compiler_barrier(); + } + ohci_clear_slot(&data->slots[i]); + uhc_xfer_return(dev, xfer, -ECONNRESET); + (void)ohci_try_schedule_next(dev); + return 0; + } + + if (xfer->queued != 0U) { + uhc_xfer_return(dev, xfer, -ECONNRESET); + return 0; + } + + return -ENOENT; +} + +static int ohci_driver_init_register(const struct device *dev) +{ +#ifdef CONFIG_UHC_OHCI_PCI + const struct ohci_config *config = dev->config; + + if (config->pcie != NULL) { + struct pcie_bar mbar; + + if (config->pcie->bdf == PCIE_BDF_NONE) { + return -EINVAL; + } + + pcie_probe_mbar(config->pcie->bdf, 0, &mbar); + pcie_set_cmd(config->pcie->bdf, PCIE_CONF_CMDSTAT_MEM, true); + device_map(DEVICE_MMIO_NAMED_RAM_PTR(dev, reg_base), mbar.phys_addr, mbar.size, + K_MEM_CACHE_NONE); + pcie_set_cmd(config->pcie->bdf, PCIE_CONF_CMDSTAT_MASTER, true); + + /* Ensure INTx signaling is enabled for legacy PCI interrupt delivery. 
*/ + uint32_t cmdstat = pcie_conf_read(config->pcie->bdf, PCIE_CONF_CMDSTAT); + + if ((cmdstat & OHCI_PCI_CMD_INTX_DISABLE) != 0U) { + pcie_conf_write(config->pcie->bdf, PCIE_CONF_CMDSTAT, + cmdstat & ~OHCI_PCI_CMD_INTX_DISABLE); + cmdstat = pcie_conf_read(config->pcie->bdf, PCIE_CONF_CMDSTAT); + } + LOG_DBG("PCI cmdstat after init 0x%08x", cmdstat); + return 0; + } +#endif /* CONFIG_UHC_OHCI_PCI */ + DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE); + return 0; +} + +static int ohci_driver_init(const struct device *dev) +{ + const struct ohci_config *config = dev->config; + struct ohci_data *priv = uhc_get_private(dev); + struct uhc_data *data = dev->data; + int ret; + + ret = ohci_driver_init_register(dev); + if (ret < 0) { + return ret; + } + + k_mutex_init(&data->mutex); + + priv->port_count = (uint8_t)(ohci_read(dev, OHCI_RH_DESC_A) & OHCI_RHDA_NDP_MASK); + + LOG_DBG("OHCI revision 0x%08x ports %u", ohci_read(dev, OHCI_REVISION), priv->port_count); + + config->irq_enable_func(dev); + + return 0; +} + +static const struct uhc_api ohci_api = { + .lock = ohci_lock, + .unlock = ohci_unlock, + .init = ohci_init, + .enable = ohci_enable, + .disable = ohci_disable, + .shutdown = ohci_shutdown, + .bus_reset = ohci_bus_reset, + .sof_enable = ohci_sof_enable, + .bus_suspend = ohci_bus_suspend, + .bus_resume = ohci_bus_resume, + .ep_enqueue = ohci_ep_enqueue, + .ep_dequeue = ohci_ep_dequeue, +}; + +#define OHCI_DECLARE_PCIE(n) IF_ENABLED(DT_INST_ON_BUS(n, pcie), (DEVICE_PCIE_INST_DECLARE(n))) + +#define OHCI_IRQ_FLAGS(n) COND_CODE_1(DT_INST_IRQ_HAS_CELL(n, sense), (DT_INST_IRQ(n, sense)), (0)) + +#define OHCI_IRQ_ENABLE_PCIE0(n) \ + static void ohci_irq_enable_func_##n(const struct device *dev) \ + { \ + ARG_UNUSED(dev); \ + IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), ohci_isr, \ + DEVICE_DT_INST_GET(n), OHCI_IRQ_FLAGS(n)); \ + irq_enable(DT_INST_IRQN(n)); \ + } + +#ifdef CONFIG_PCIE_CONTROLLER +#define OHCI_IRQ_ENABLE_PCIE1(n) OHCI_IRQ_ENABLE_PCIE0(n) 
+#else +#define OHCI_IRQ_ENABLE_PCIE1(n) \ + static void ohci_irq_enable_func_##n(const struct device *dev) \ + { \ + /* No firmware, IRQ assigned by BIOS/ACPI, read from PCI config at runtime */ \ + const struct ohci_config *config = dev->config; \ + unsigned int irq = pcie_alloc_irq(config->pcie->bdf); \ + LOG_DBG("Allocated PCI IRQ %u for bdf 0x%x", irq, config->pcie->bdf); \ + if (irq != PCIE_CONF_INTR_IRQ_NONE) { \ + pcie_connect_dynamic_irq(config->pcie->bdf, irq, DT_INST_IRQ(n, priority), \ + (void (*)(const void *))ohci_isr, \ + DEVICE_DT_INST_GET(n), OHCI_IRQ_FLAGS(n)); \ + pcie_irq_enable(config->pcie->bdf, irq); \ + } else { \ + LOG_DBG("No PCI IRQ assigned for bdf 0x%x", config->pcie->bdf); \ + } \ + } +#endif /* CONFIG_PCIE_CONTROLLER */ + +#define OHCI_IRQ_ENABLE(n) \ + COND_CODE_1(DT_INST_ON_BUS(n, pcie), (OHCI_IRQ_ENABLE_PCIE1(n)), (OHCI_IRQ_ENABLE_PCIE0(n))) + +#define OHCI_REG_INIT(n) \ + COND_CODE_1(DT_INST_ON_BUS(n, pcie), (DEVICE_PCIE_INST_INIT(n, pcie)), \ + (DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)))) + +#define OHCI_DEVICE_DEFINE(n) \ + OHCI_DECLARE_PCIE(n); \ + OHCI_IRQ_ENABLE(n); \ + \ + static const struct ohci_config ohci_config_##n = { \ + OHCI_REG_INIT(n), \ + .irq_enable_func = ohci_irq_enable_func_##n, \ + }; \ + \ + static struct ohci_data ohci_priv_##n; \ + static struct uhc_data ohci_data_##n = { \ + .priv = &ohci_priv_##n, \ + }; \ + \ + DEVICE_DT_INST_DEFINE(n, ohci_driver_init, NULL, &ohci_data_##n, &ohci_config_##n, \ + POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &ohci_api) + +DT_INST_FOREACH_STATUS_OKAY(OHCI_DEVICE_DEFINE) diff --git a/dts/bindings/usb/generic-ohci.yaml b/dts/bindings/usb/generic-ohci.yaml new file mode 100644 index 0000000000000..66169d637827a --- /dev/null +++ b/dts/bindings/usb/generic-ohci.yaml @@ -0,0 +1,8 @@ +# SPDX-FileCopyrightText: Copyright The Zephyr Project Contributors +# SPDX-License-Identifier: Apache-2.0 + +description: Generic OHCI USB host controller + +compatible: 
"generic-ohci" + +include: [usb-controller.yaml, pcie-device.yaml] diff --git a/dts/riscv/qemu/virt-riscv.dtsi b/dts/riscv/qemu/virt-riscv.dtsi index 4b64bc4144edb..025f9947aecca 100644 --- a/dts/riscv/qemu/virt-riscv.dtsi +++ b/dts/riscv/qemu/virt-riscv.dtsi @@ -18,6 +18,10 @@ compatible = "riscv-virtio"; model = "riscv-virtio,qemu"; + chosen { + zephyr,pcie-controller = &pcie; + }; + flash@20000000 { bank-width = <0x04>; reg = <0x20000000 0x2000000 0x22000000 0x2000000>; @@ -205,5 +209,35 @@ reg = <0x200bff8 0x8 0x2004000 0x8>; reg-names = "mtime", "mtimecmp"; }; + + pcie: pcie@30000000 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + reg = <0x30000000 0x10000000>; + #size-cells = <0x02>; + #address-cells = <0x03>; + ranges = <0x1000000 0x00 0x00 0x3000000 0x00 0x10000 + 0x2000000 0x00 0x40000000 0x40000000 0x00 0x40000000>; + #interrupt-cells = <0x01>; + interrupt-map-mask = <0x1800 0x00 0x00 0x07>; + interrupt-map = <0x0000 0x00 0x00 1 &plic 32 1 + 0x0000 0x00 0x00 2 &plic 33 1 + 0x0000 0x00 0x00 3 &plic 34 1 + 0x0000 0x00 0x00 4 &plic 35 1 + 0x0800 0x00 0x00 1 &plic 33 1 + 0x0800 0x00 0x00 2 &plic 34 1 + 0x0800 0x00 0x00 3 &plic 35 1 + 0x0800 0x00 0x00 4 &plic 32 1 + 0x1000 0x00 0x00 1 &plic 34 1 + 0x1000 0x00 0x00 2 &plic 35 1 + 0x1000 0x00 0x00 3 &plic 32 1 + 0x1000 0x00 0x00 4 &plic 33 1 + 0x1800 0x00 0x00 1 &plic 35 1 + 0x1800 0x00 0x00 2 &plic 32 1 + 0x1800 0x00 0x00 3 &plic 33 1 + 0x1800 0x00 0x00 4 &plic 34 1>; + bus-range = <0x00 0xff>; + status = "okay"; + }; }; }; diff --git a/include/zephyr/usb/class/usb_cdc.h b/include/zephyr/usb/class/usb_cdc.h index d07dd76bfcb80..3a041b76ec459 100644 --- a/include/zephyr/usb/class/usb_cdc.h +++ b/include/zephyr/usb/class/usb_cdc.h @@ -2,6 +2,7 @@ /* * Copyright (c) 2017 PHYTEC Messtechnik GmbH + * Copyright (c) 2025 - 2026 NXP * * SPDX-License-Identifier: Apache-2.0 */ @@ -69,13 +70,22 @@ * @brief PSTN Subclass Class-Specific Notification Codes * @note PSTN120.pdf, 6.5, Table 30 */ -#define 
USB_CDC_NETWORK_CONNECTION 0x00 -#define USB_CDC_RESPONSE_AVAILABLE 0x01 -#define USB_CDC_AUX_JACK_HOOK_STATE 0x08 -#define USB_CDC_RING_DETECT 0x09 -#define USB_CDC_SERIAL_STATE 0x20 -#define USB_CDC_CALL_STATE_CHANGE 0x28 -#define USB_CDC_LINE_STATE_CHANGE 0x23 +/** Network connection notification */ +#define USB_CDC_NETWORK_CONNECTION 0x00 +/** Response available notification */ +#define USB_CDC_RESPONSE_AVAILABLE 0x01 +/** Auxiliary jack hook state notification */ +#define USB_CDC_AUX_JACK_HOOK_STATE 0x08 +/** Ring detect notification */ +#define USB_CDC_RING_DETECT 0x09 +/** Serial state notification */ +#define USB_CDC_SERIAL_STATE 0x20 +/** Line state change notification */ +#define USB_CDC_LINE_STATE_CHANGE 0x23 +/** Call state change notification */ +#define USB_CDC_CALL_STATE_CHANGE 0x28 +/** Connection speed change notification */ +#define USB_CDC_CONNECTION_SPEED_CHANGE 0x2A /** * @brief PSTN UART State Bitmap Values @@ -165,6 +175,70 @@ #define PACKET_TYPE_ALL_MULTICAST 0x02 #define PACKET_TYPE_PROMISCUOUS 0x01 +/** + * @brief ECM Subclass Ethernet Statistics Feature Selector Codes + * @note ECM120.pdf, 6.2.5, Table 9 + */ + +/** Frames transmitted without errors */ +#define ETHERNET_STAT_XMIT_OK 0x01 +/** Frames received without errors */ +#define ETHERNET_STAT_RCV_OK 0x02 +/** Frames transmitted with errors */ +#define ETHERNET_STAT_XMIT_ERROR 0x03 +/** Frames received with errors */ +#define ETHERNET_STAT_RCV_ERROR 0x04 +/** Frames missed due to no buffers */ +#define ETHERNET_STAT_RCV_NO_BUFFER 0x05 +/** Directed bytes transmitted without errors */ +#define ETHERNET_STAT_DIRECTED_BYTES_XMIT 0x06 +/** Directed frames transmitted without errors */ +#define ETHERNET_STAT_DIRECTED_FRAMES_XMIT 0x07 +/** Multicast bytes transmitted without errors */ +#define ETHERNET_STAT_MULTICAST_BYTES_XMIT 0x08 +/** Multicast frames transmitted without errors */ +#define ETHERNET_STAT_MULTICAST_FRAMES_XMIT 0x09 +/** Broadcast bytes transmitted without errors */ 
+#define ETHERNET_STAT_BROADCAST_BYTES_XMIT 0x0A +/** Broadcast frames transmitted without errors */ +#define ETHERNET_STAT_BROADCAST_FRAMES_XMIT 0x0B +/** Directed bytes received without errors */ +#define ETHERNET_STAT_DIRECTED_BYTES_RCV 0x0C +/** Directed frames received without errors */ +#define ETHERNET_STAT_DIRECTED_FRAMES_RCV 0x0D +/** Multicast bytes received without errors */ +#define ETHERNET_STAT_MULTICAST_BYTES_RCV 0x0E +/** Multicast frames received without errors */ +#define ETHERNET_STAT_MULTICAST_FRAMES_RCV 0x0F +/** Broadcast bytes received without errors */ +#define ETHERNET_STAT_BROADCAST_BYTES_RCV 0x10 +/** Broadcast frames received without errors */ +#define ETHERNET_STAT_BROADCAST_FRAMES_RCV 0x11 +/** Frames received with CRC or FCS error */ +#define ETHERNET_STAT_RCV_CRC_ERROR 0x12 +/** Length of transmit queue */ +#define ETHERNET_STAT_TRANSMIT_QUEUE_LENGTH 0x13 +/** Frames received with alignment error */ +#define ETHERNET_STAT_RCV_ERROR_ALIGNMENT 0x14 +/** Frames transmitted with exactly one collision */ +#define ETHERNET_STAT_XMIT_ONE_COLLISION 0x15 +/** Frames transmitted with more than one collision */ +#define ETHERNET_STAT_XMIT_MORE_COLLISIONS 0x16 +/** Frames transmitted after deferral */ +#define ETHERNET_STAT_XMIT_DEFERRED 0x17 +/** Frames not transmitted due to excessive collisions */ +#define ETHERNET_STAT_XMIT_MAX_COLLISIONS 0x18 +/** Frames not received due to overrun */ +#define ETHERNET_STAT_RCV_OVERRUN 0x19 +/** Frames not transmitted due to underrun */ +#define ETHERNET_STAT_XMIT_UNDERRUN 0x1A +/** Frames transmitted with heartbeat failure */ +#define ETHERNET_STAT_XMIT_HEARTBEAT_FAILURE 0x1B +/** Times carrier sense signal lost during transmission */ +#define ETHERNET_STAT_XMIT_TIMES_CRS_LOST 0x1C +/** Frames transmitted with late collisions */ +#define ETHERNET_STAT_XMIT_LATE_COLLISIONS 0x1D + /** Header Functional Descriptor */ struct cdc_header_descriptor { uint8_t bFunctionLength; diff --git 
a/subsys/usb/host/CMakeLists.txt b/subsys/usb/host/CMakeLists.txt index 72976f4aeea8e..ae0f33d3df2d8 100644 --- a/subsys/usb/host/CMakeLists.txt +++ b/subsys/usb/host/CMakeLists.txt @@ -19,6 +19,11 @@ zephyr_library_sources_ifdef( usbh_shell.c ) +zephyr_library_sources_ifdef( + CONFIG_USBH_CDC_ECM_CLASS + class/usbh_cdc_ecm.c +) + zephyr_library_sources_ifdef( CONFIG_USBH_VIDEO_CLASS class/usbh_uvc.c diff --git a/subsys/usb/host/class/Kconfig b/subsys/usb/host/class/Kconfig index 1a68b415273e3..3dbcb67082fcd 100644 --- a/subsys/usb/host/class/Kconfig +++ b/subsys/usb/host/class/Kconfig @@ -1,4 +1,5 @@ # SPDX-FileCopyrightText: Copyright Nordic Semiconductor ASA # SPDX-License-Identifier: Apache-2.0 +rsource "Kconfig.cdc_ecm" rsource "Kconfig.uvc" diff --git a/subsys/usb/host/class/Kconfig.cdc_ecm b/subsys/usb/host/class/Kconfig.cdc_ecm new file mode 100644 index 0000000000000..7a9545fe4b6b2 --- /dev/null +++ b/subsys/usb/host/class/Kconfig.cdc_ecm @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 - 2026 NXP +# SPDX-License-Identifier: Apache-2.0 + +config USBH_CDC_ECM_CLASS + bool "USB Communication Device Class - Ethernet Control Model (CDC-ECM) implementation [EXPERIMENTAL]" + select EXPERIMENTAL + select NET_L2_ETHERNET + select CRC + help + USB Host Communication Device Class - Ethernet Control Model (CDC-ECM) implementation. + +if USBH_CDC_ECM_CLASS + +config ETH_NIC_MODEL + string + default "usb-net" + help + Tells what Qemu network model to use. This value is given as + a parameter to -nic qemu command line option. + +config USBH_CDC_ECM_ETH_DRV_NAME + string "USB CDC-ECM ethernet interface name" + default "zeth" + help + This option sets the driver name and name of the network interface + in your host system. If there are multiple network interfaces defined, + then this value is used as a prefix and "(usbh_cdc_ecm)" is used as a suffix, + and the interface names will be "zeth0 (usbh_cdc_ecm)", + "zeth1 (usbh_cdc_ecm)", etc.
+ +config USBH_CDC_ECM_DATA_BUF_POOL_SIZE + int "USB CDC-ECM buffer pool size" + default 1514 + range 60 9000 + help + USB CDC-ECM buffer pool size in bytes. + +config USBH_CDC_ECM_DATA_TX_CONCURRENT_NUM + int "USB CDC-ECM maximum concurrent transmissions" + default 4 + help + Number of USB CDC-ECM maximum concurrent transmissions. + +config USBH_CDC_ECM_HW_STATS + bool "CDC-ECM hardware network statistics (if the USB device supports it)" + help + Collection of hardware network statistics for CDC-ECM devices. + +config USBH_CDC_ECM_HW_STATS_INTERVAL + int "CDC-ECM hardware network statistics interval" + depends on USBH_CDC_ECM_HW_STATS + default 5 + help + CDC-ECM hardware network statistics collection interval in seconds. + +module = USBH_CDC_ECM +module-str = "usbh cdc_ecm" +default-count = 1 +source "subsys/logging/Kconfig.template.log_config" +source "subsys/usb/common/Kconfig.template.instances_count" + +endif # USBH_CDC_ECM_CLASS diff --git a/subsys/usb/host/class/usbh_cdc_ecm.c b/subsys/usb/host/class/usbh_cdc_ecm.c new file mode 100644 index 0000000000000..5ee2755dfc552 --- /dev/null +++ b/subsys/usb/host/class/usbh_cdc_ecm.c @@ -0,0 +1,1742 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2025 - 2026 NXP + * SPDX-FileCopyrightText: Copyright (c) 2026 Linumiz + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include + +#include "usbh_class.h" +#include "usbh_desc.h" +#include "usbh_ch9.h" + +LOG_MODULE_REGISTER(usbh_cdc_ecm, CONFIG_USBH_CDC_ECM_LOG_LEVEL); + +struct usbh_cdc_ecm_multicast_filter { + unsigned int ignored_addrs; + sys_dlist_t addrs_list; + struct k_mutex mutex; +}; + +struct usbh_cdc_ecm_multicast_filter_addr { + sys_dnode_t node; + struct net_eth_addr mac_addr; + uint8_t hash; + unsigned int hash_ref; +}; + +struct usbh_cdc_ecm_pkt_filter { + uint16_t bitmap; + struct k_mutex mutex; +}; + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) +struct usbh_cdc_ecm_stats { + struct net_stats_eth data; 
+#if defined(CONFIG_USBH_CDC_ECM_HW_STATS) + k_timepoint_t last_tp; +#endif + struct k_mutex mutex; +}; +#endif + +struct usbh_cdc_ecm_xfer_list { + sys_dlist_t list; + struct k_mutex mutex; +}; + +struct usbh_cdc_ecm_xfer_cb_priv { + sys_dnode_t node; + struct usbh_cdc_ecm_ctx *ctx; + struct uhc_transfer *xfer; +}; + +struct usbh_cdc_ecm_ctx { + struct net_if *eth_iface; + struct usb_device *udev; + uint8_t comm_if_num; + uint8_t data_if_num; + uint8_t data_alt_num; + uint8_t comm_in_ep_addr; + uint8_t data_in_ep_addr; + uint8_t data_out_ep_addr; + uint16_t data_out_ep_mps; + uint8_t mac_str_desc_idx; +#if defined(CONFIG_NET_STATISTICS_ETHERNET) && defined(CONFIG_USBH_CDC_ECM_HW_STATS) + uint32_t stats_hw_caps; +#endif + uint16_t max_segment_size; + bool mc_filter_is_imperfect; + uint16_t mc_filter_supported_num; + uint8_t mc_filter_crc32_shift; + struct usbh_cdc_ecm_multicast_filter mc_filters; + struct usbh_cdc_ecm_pkt_filter pkt_filter; +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + struct usbh_cdc_ecm_stats stats; +#endif + struct usbh_cdc_ecm_xfer_list queued_xfers; + bool available; + bool auto_restart_rx_xfer; + uint32_t upload_speed; + uint32_t download_speed; + struct k_mutex mutex; + struct k_sem data_tx_sem; +}; + +USB_BUF_POOL_DEFINE(usbh_cdc_ecm_data_pool, + (CONFIG_USBH_CDC_ECM_INSTANCES_COUNT * + CONFIG_USBH_CDC_ECM_DATA_TX_CONCURRENT_NUM) + + CONFIG_NET_PKT_RX_COUNT, + CONFIG_USBH_CDC_ECM_DATA_BUF_POOL_SIZE, 0, NULL); + +K_MEM_SLAB_DEFINE_STATIC(usbh_cdc_ecm_xfer_cb_priv_pool, sizeof(struct usbh_cdc_ecm_xfer_cb_priv), + CONFIG_USBH_CDC_ECM_INSTANCES_COUNT * + (2 + CONFIG_USBH_CDC_ECM_DATA_TX_CONCURRENT_NUM * 2), + sizeof(void *)); + +static int usbh_cdc_ecm_xfer_comm_in_cb(struct usb_device *const udev, + struct uhc_transfer *const xfer); +static int usbh_cdc_ecm_xfer_data_in_cb(struct usb_device *const udev, + struct uhc_transfer *const xfer); +static int usbh_cdc_ecm_xfer_data_out_cb(struct usb_device *const udev, + struct uhc_transfer *const 
xfer); + +static int usbh_cdc_ecm_parse_descriptors(struct usbh_cdc_ecm_ctx *const ctx, + const struct usb_if_descriptor *if_desc) +{ + const struct usb_if_descriptor *comm_if_desc = NULL; + const struct usb_if_descriptor *data_if_desc = NULL; + const struct cdc_header_descriptor *cdc_header_desc = NULL; + const struct cdc_union_descriptor *cdc_union_desc = NULL; + const struct cdc_ecm_descriptor *cdc_ecm_desc = NULL; + const struct usb_ep_descriptor *comm_in_ep_desc = NULL; + const struct usb_ep_descriptor *data_in_ep_desc = NULL; + const struct usb_ep_descriptor *data_out_ep_desc = NULL; + const struct usb_desc_header *current_desc = (const struct usb_desc_header *)if_desc; + const struct usb_if_descriptor *current_if_desc = NULL; + const struct cdc_header_descriptor *current_cdc_if_desc; + const struct cdc_union_descriptor *current_cdc_union_desc; + const struct usb_ep_descriptor *current_ep_desc; + + while (current_desc != NULL) { + switch (current_desc->bDescriptorType) { + case USB_DESC_INTERFACE: + current_if_desc = (const struct usb_if_descriptor *)current_desc; + if (current_if_desc->bInterfaceClass == USB_BCC_CDC_CONTROL && + current_if_desc->bInterfaceSubClass == ECM_SUBCLASS && + current_if_desc->bInterfaceProtocol == 0 && + current_if_desc->bNumEndpoints == 1) { + comm_if_desc = current_if_desc; + } + if (current_if_desc->bInterfaceClass == USB_BCC_CDC_DATA && + cdc_union_desc != NULL && + current_if_desc->bInterfaceNumber == + cdc_union_desc->bSubordinateInterface0 && + current_if_desc->bNumEndpoints == 2) { + data_if_desc = current_if_desc; + } + break; + case USB_DESC_CS_INTERFACE: + current_cdc_if_desc = (const struct cdc_header_descriptor *)current_desc; + if (comm_if_desc == NULL) { + break; + } + if (current_cdc_if_desc->bDescriptorSubtype == HEADER_FUNC_DESC) { + cdc_header_desc = current_cdc_if_desc; + } + if (current_cdc_if_desc->bDescriptorSubtype == UNION_FUNC_DESC && + cdc_header_desc != NULL) { + current_cdc_union_desc = + (const struct 
cdc_union_descriptor *)current_desc; + if (current_cdc_union_desc->bControlInterface == + comm_if_desc->bInterfaceNumber && + current_cdc_union_desc->bFunctionLength == 5) { + cdc_union_desc = + (const struct cdc_union_descriptor *)current_desc; + } + } + if (current_cdc_if_desc->bDescriptorSubtype == ETHERNET_FUNC_DESC && + cdc_union_desc != NULL) { + cdc_ecm_desc = (const struct cdc_ecm_descriptor *)current_desc; + } + break; + case USB_DESC_ENDPOINT: + current_ep_desc = (const struct usb_ep_descriptor *)current_desc; + if (current_if_desc == NULL) { + break; + } + if (current_if_desc == comm_if_desc && + ((current_ep_desc->bEndpointAddress & USB_EP_DIR_MASK) == + USB_EP_DIR_IN)) { + comm_in_ep_desc = current_ep_desc; + } + if (current_if_desc == data_if_desc) { + if ((current_ep_desc->bEndpointAddress & USB_EP_DIR_MASK) == + USB_EP_DIR_IN) { + data_in_ep_desc = current_ep_desc; + } else { + data_out_ep_desc = current_ep_desc; + } + } + break; + default: + break; + } + + current_desc = usbh_desc_get_next(current_desc); + } + + if (comm_if_desc == NULL || data_if_desc == NULL || cdc_header_desc == NULL || + cdc_union_desc == NULL || cdc_ecm_desc == NULL || comm_in_ep_desc == NULL || + data_in_ep_desc == NULL || data_out_ep_desc == NULL) { + LOG_WRN("missing required CDC-ECM descriptors"); + return -ENOTSUP; + } + + k_mutex_lock(&ctx->mutex, K_FOREVER); + + ctx->comm_if_num = comm_if_desc->bInterfaceNumber; + ctx->data_if_num = data_if_desc->bInterfaceNumber; + ctx->data_alt_num = data_if_desc->bAlternateSetting; + ctx->comm_in_ep_addr = comm_in_ep_desc->bEndpointAddress; + ctx->data_in_ep_addr = data_in_ep_desc->bEndpointAddress; + ctx->data_out_ep_addr = data_out_ep_desc->bEndpointAddress; + ctx->data_out_ep_mps = sys_le16_to_cpu(data_out_ep_desc->wMaxPacketSize); + ctx->mac_str_desc_idx = cdc_ecm_desc->iMACAddress; +#if defined(CONFIG_NET_STATISTICS_ETHERNET) && defined(CONFIG_USBH_CDC_ECM_HW_STATS) + ctx->stats_hw_caps = 
sys_le32_to_cpu(cdc_ecm_desc->bmEthernetStatistics); +#endif + ctx->max_segment_size = sys_le16_to_cpu(cdc_ecm_desc->wMaxSegmentSize); + ctx->mc_filter_is_imperfect = + (bool)(sys_le16_to_cpu(cdc_ecm_desc->wNumberMCFilters) & BIT(15)); + ctx->mc_filter_supported_num = sys_le16_to_cpu(cdc_ecm_desc->wNumberMCFilters) & 0x7FFF; + ctx->mc_filter_crc32_shift = 0; + if (ctx->mc_filter_supported_num > 0 && ctx->mc_filter_is_imperfect) { + ctx->mc_filter_crc32_shift = + 32 - (31 - __builtin_clz((uint32_t)ctx->mc_filter_supported_num)); + } + + k_mutex_unlock(&ctx->mutex); + + return 0; +} + +static int usbh_cdc_ecm_get_mac_address(struct usbh_cdc_ecm_ctx *const ctx, + struct net_eth_addr *const eth_mac) +{ + uint8_t mac_str_desc_idx; + struct usb_device *udev; + uint16_t lang_ids[126]; + uint8_t supported_langs; + struct net_buf *mac_str_desc_buf = NULL; + char mac_str[NET_ETH_ADDR_LEN * 2 + 1]; + bool found_mac = false; + int ret; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (ctx->mac_str_desc_idx == 0) { + LOG_ERR("MAC address string descriptor index is 0"); + k_mutex_unlock(&ctx->mutex); + return -ENODEV; + } + udev = ctx->udev; + mac_str_desc_idx = ctx->mac_str_desc_idx; + k_mutex_unlock(&ctx->mutex); + + ret = usbh_desc_get_supported_langs(udev, lang_ids, ARRAY_SIZE(lang_ids)); + if (ret < 0) { + goto cleanup; + } + + supported_langs = ret; + if (supported_langs == 0) { + LOG_ERR("no supported language IDs found"); + ret = -ENODEV; + goto cleanup; + } + + mac_str_desc_buf = usbh_xfer_buf_alloc(udev, 26); + if (mac_str_desc_buf == NULL) { + ret = -ENOMEM; + goto cleanup; + } + + for (unsigned int i = 0; i < supported_langs; i++) { + net_buf_reset(mac_str_desc_buf); + ret = usbh_req_desc_str(udev, mac_str_desc_idx, lang_ids[i], mac_str_desc_buf); + if (ret != 0) { + LOG_WRN("failed to read String Descriptor for language 0x%04X (%d)", + lang_ids[i], ret); + continue; + } + + ret = usbh_desc_str_utfle16_to_ascii(mac_str_desc_buf, mac_str, + ARRAY_SIZE(mac_str)); + if 
(ret != 0) { + continue; + } + + if (hex2bin(mac_str, strlen(mac_str), eth_mac->addr, NET_ETH_ADDR_LEN) != + NET_ETH_ADDR_LEN) { + continue; + } + + if (net_eth_is_addr_valid(eth_mac)) { + found_mac = true; + break; + } + } + + if (!found_mac) { + ret = -ENODEV; + } else { + ret = 0; + } + +cleanup: + if (mac_str_desc_buf != NULL) { + usbh_xfer_buf_free(udev, mac_str_desc_buf); + } + + return ret; +} + +static int usbh_cdc_ecm_update_packet_filter(struct usbh_cdc_ecm_ctx *const ctx, bool enable, + uint16_t eth_pkt_filter_bitmap) +{ + struct usb_device *udev; + uint16_t old_filter_bitmap, new_filter_bitmap; + int ret; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (!ctx->available) { + k_mutex_unlock(&ctx->mutex); + return -ENODEV; + } + udev = ctx->udev; + k_mutex_unlock(&ctx->mutex); + + k_mutex_lock(&ctx->pkt_filter.mutex, K_FOREVER); + + old_filter_bitmap = ctx->pkt_filter.bitmap; + if (enable) { + new_filter_bitmap = old_filter_bitmap | eth_pkt_filter_bitmap; + } else { + new_filter_bitmap = old_filter_bitmap & ~eth_pkt_filter_bitmap; + } + if (old_filter_bitmap == new_filter_bitmap) { + k_mutex_unlock(&ctx->pkt_filter.mutex); + return 0; + } + + ret = usbh_req_setup(udev, + (USB_REQTYPE_DIR_TO_DEVICE << 7) | (USB_REQTYPE_TYPE_CLASS << 5) | + USB_REQTYPE_RECIPIENT_INTERFACE, + SET_ETHERNET_PACKET_FILTER, new_filter_bitmap, ctx->comm_if_num, 0, + NULL); + if (ret != 0) { + LOG_ERR("set Ethernet Packet Filter bitmap [0x%04x -> 0x%04x] error (%d)", + old_filter_bitmap, new_filter_bitmap, ret); + } else { + ctx->pkt_filter.bitmap = new_filter_bitmap; + } + + k_mutex_unlock(&ctx->pkt_filter.mutex); + + return ret; +} + +static int usbh_cdc_ecm_add_multicast_group(struct usbh_cdc_ecm_ctx *const ctx, + const struct net_eth_addr *mac_addr) +{ + uint16_t mc_filter_supported_num; + bool mc_filter_is_imperfect; + uint8_t mc_filter_crc32_shift; + uint8_t comm_if_num; + uint32_t addr_hash; + struct usbh_cdc_ecm_multicast_filter_addr *mc_addr, *added_mc_addr = NULL; + struct 
usb_device *udev; + uint16_t mc_filters_num; + struct net_buf *mc_filters_buf = NULL; + int ret = 0; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (!ctx->available) { + k_mutex_unlock(&ctx->mutex); + return -ENODEV; + } + mc_filter_supported_num = ctx->mc_filter_supported_num; + mc_filter_is_imperfect = ctx->mc_filter_is_imperfect; + mc_filter_crc32_shift = ctx->mc_filter_crc32_shift; + comm_if_num = ctx->comm_if_num; + udev = ctx->udev; + k_mutex_unlock(&ctx->mutex); + + if (mc_filter_supported_num > 0) { + ret = usbh_cdc_ecm_update_packet_filter(ctx, true, PACKET_TYPE_MULTICAST); + if (ret != 0) { + return ret; + } + } else { + ret = usbh_cdc_ecm_update_packet_filter(ctx, true, PACKET_TYPE_ALL_MULTICAST); + if (ret != 0) { + return ret; + } + + k_mutex_lock(&ctx->mc_filters.mutex, K_FOREVER); + ctx->mc_filters.ignored_addrs++; + k_mutex_unlock(&ctx->mc_filters.mutex); + return 0; + } + + k_mutex_lock(&ctx->mc_filters.mutex, K_FOREVER); + + addr_hash = 0; + if (mc_filter_is_imperfect) { + addr_hash = crc32_ieee(mac_addr->addr, NET_ETH_ADDR_LEN) >> mc_filter_crc32_shift; + } + + SYS_DLIST_FOR_EACH_CONTAINER(&ctx->mc_filters.addrs_list, mc_addr, node) { + if (mc_filter_is_imperfect) { + if (mc_addr->hash == addr_hash) { + mc_addr->hash_ref++; + goto done; + } + } else { + if (memcmp(&mc_addr->mac_addr, mac_addr, sizeof(struct net_eth_addr)) == + 0) { + goto done; + } + } + } + + added_mc_addr = k_malloc(sizeof(struct usbh_cdc_ecm_multicast_filter_addr)); + if (added_mc_addr == NULL) { + LOG_ERR("failed to allocate multicast address node"); + ret = -ENOMEM; + goto done; + } + memcpy(&added_mc_addr->mac_addr, mac_addr, sizeof(struct net_eth_addr)); + if (mc_filter_is_imperfect) { + added_mc_addr->hash = addr_hash; + added_mc_addr->hash_ref = 1; + } + + mc_filters_num = sys_dlist_len(&ctx->mc_filters.addrs_list); + if (mc_filters_num >= mc_filter_supported_num) { + LOG_WRN("multicast filters are full, current number is %u", mc_filters_num); + k_free(added_mc_addr); + 
ret = -ENOTSUP; + goto done; + } + sys_dlist_append(&ctx->mc_filters.addrs_list, &added_mc_addr->node); + mc_filters_num = sys_dlist_len(&ctx->mc_filters.addrs_list); + if (mc_filters_num > UINT16_MAX / NET_ETH_ADDR_LEN) { + ret = -EINVAL; + goto recover_filter; + } + mc_filters_buf = usbh_xfer_buf_alloc(udev, mc_filters_num * NET_ETH_ADDR_LEN); + if (mc_filters_buf == NULL) { + ret = -ENOMEM; + goto recover_filter; + } + SYS_DLIST_FOR_EACH_CONTAINER(&ctx->mc_filters.addrs_list, mc_addr, node) { + net_buf_add_mem(mc_filters_buf, mc_addr->mac_addr.addr, NET_ETH_ADDR_LEN); + } + + ret = usbh_req_setup(udev, + (USB_REQTYPE_DIR_TO_DEVICE << 7) | (USB_REQTYPE_TYPE_CLASS << 5) | + USB_REQTYPE_RECIPIENT_INTERFACE, + SET_ETHERNET_MULTICAST_FILTERS, mc_filters_num, comm_if_num, + mc_filters_num * NET_ETH_ADDR_LEN, mc_filters_buf); + if (ret != 0) { + LOG_ERR("add ethernet multicast filters error (%d)", ret); + goto recover_filter; + } + + goto done; + +recover_filter: + if (added_mc_addr != NULL) { + sys_dlist_remove(&added_mc_addr->node); + k_free(added_mc_addr); + } + +done: + if (mc_filters_buf != NULL) { + usbh_xfer_buf_free(udev, mc_filters_buf); + } + + mc_filters_num = sys_dlist_len(&ctx->mc_filters.addrs_list); + + k_mutex_unlock(&ctx->mc_filters.mutex); + + if (mc_filters_num == 0 && ret != 0) { + usbh_cdc_ecm_update_packet_filter(ctx, false, PACKET_TYPE_MULTICAST); + } + + return ret; +} + +static int usbh_cdc_ecm_leave_multicast_group(struct usbh_cdc_ecm_ctx *const ctx, + const struct net_eth_addr *mac_addr) +{ + uint16_t mc_filter_supported_num; + bool mc_filter_is_imperfect; + uint8_t mc_filter_crc32_shift; + uint8_t comm_if_num; + bool disable_all_multicast = false; + uint32_t addr_hash; + struct usbh_cdc_ecm_multicast_filter_addr *mc_addr, *removed_mc_addr = NULL; + struct usb_device *udev; + uint16_t mc_filters_num; + struct net_buf *mc_filters_buf = NULL; + int ret = 0; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (!ctx->available) { + 
k_mutex_unlock(&ctx->mutex); + return -ENODEV; + } + mc_filter_supported_num = ctx->mc_filter_supported_num; + mc_filter_is_imperfect = ctx->mc_filter_is_imperfect; + mc_filter_crc32_shift = ctx->mc_filter_crc32_shift; + comm_if_num = ctx->comm_if_num; + udev = ctx->udev; + k_mutex_unlock(&ctx->mutex); + + if (mc_filter_supported_num == 0) { + k_mutex_lock(&ctx->mc_filters.mutex, K_FOREVER); + if (ctx->mc_filters.ignored_addrs == 0) { + k_mutex_unlock(&ctx->mc_filters.mutex); + return -EINVAL; + } + ctx->mc_filters.ignored_addrs--; + if (ctx->mc_filters.ignored_addrs == 0) { + disable_all_multicast = true; + } + k_mutex_unlock(&ctx->mc_filters.mutex); + + if (disable_all_multicast) { + ret = usbh_cdc_ecm_update_packet_filter(ctx, false, + PACKET_TYPE_ALL_MULTICAST); + if (ret != 0) { + k_mutex_lock(&ctx->mc_filters.mutex, K_FOREVER); + ctx->mc_filters.ignored_addrs++; + k_mutex_unlock(&ctx->mc_filters.mutex); + } + } + return ret; + } + + k_mutex_lock(&ctx->mc_filters.mutex, K_FOREVER); + + addr_hash = 0; + if (mc_filter_is_imperfect) { + addr_hash = crc32_ieee(mac_addr->addr, NET_ETH_ADDR_LEN) >> mc_filter_crc32_shift; + } + SYS_DLIST_FOR_EACH_CONTAINER(&ctx->mc_filters.addrs_list, mc_addr, node) { + if (mc_filter_is_imperfect) { + if (mc_addr->hash == addr_hash) { + mc_addr->hash_ref--; + if (mc_addr->hash_ref > 0) { + goto done; + } + removed_mc_addr = mc_addr; + break; + } + } else { + if (memcmp(&mc_addr->mac_addr, mac_addr, sizeof(struct net_eth_addr)) == + 0) { + removed_mc_addr = mc_addr; + break; + } + } + } + if (removed_mc_addr == NULL) { + ret = -EINVAL; + goto done; + } + + mc_filters_num = sys_dlist_len(&ctx->mc_filters.addrs_list); + if (mc_filters_num > mc_filter_supported_num) { + LOG_WRN("multicast filters exceed the maximum supported, current number is %u", + mc_filters_num); + } + sys_dlist_remove(&removed_mc_addr->node); + mc_filters_num = sys_dlist_len(&ctx->mc_filters.addrs_list); + if (mc_filters_num > 0) { + if (mc_filters_num > UINT16_MAX 
/ NET_ETH_ADDR_LEN) { + ret = -EINVAL; + goto recover_filter; + } + mc_filters_buf = usbh_xfer_buf_alloc(udev, mc_filters_num * NET_ETH_ADDR_LEN); + if (mc_filters_buf == NULL) { + ret = -ENOMEM; + goto recover_filter; + } + SYS_DLIST_FOR_EACH_CONTAINER(&ctx->mc_filters.addrs_list, mc_addr, node) { + net_buf_add_mem(mc_filters_buf, mc_addr->mac_addr.addr, NET_ETH_ADDR_LEN); + } + } + + ret = usbh_req_setup(udev, + (USB_REQTYPE_DIR_TO_DEVICE << 7) | (USB_REQTYPE_TYPE_CLASS << 5) | + USB_REQTYPE_RECIPIENT_INTERFACE, + SET_ETHERNET_MULTICAST_FILTERS, mc_filters_num, comm_if_num, + mc_filters_num * NET_ETH_ADDR_LEN, mc_filters_buf); + if (ret != 0) { + LOG_ERR("leave ethernet multicast filters error (%d)", ret); + goto recover_filter; + } + + k_free(removed_mc_addr); + + goto done; + +recover_filter: + if (removed_mc_addr != NULL) { + if (mc_filter_is_imperfect) { + removed_mc_addr->hash_ref++; + } + sys_dlist_append(&ctx->mc_filters.addrs_list, &removed_mc_addr->node); + } + +done: + if (mc_filters_buf != NULL) { + usbh_xfer_buf_free(udev, mc_filters_buf); + } + + mc_filters_num = sys_dlist_len(&ctx->mc_filters.addrs_list); + + k_mutex_unlock(&ctx->mc_filters.mutex); + + if (mc_filters_num == 0 && ret == 0) { + usbh_cdc_ecm_update_packet_filter(ctx, false, PACKET_TYPE_MULTICAST); + } + + return ret; +} + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) && defined(CONFIG_USBH_CDC_ECM_HW_STATS) +static int usbh_cdc_ecm_update_stats(struct usbh_cdc_ecm_ctx *const ctx) +{ + uint32_t stats_hw_caps; + uint8_t comm_if_num; + struct usb_device *udev; + struct net_buf *stats_buf = NULL; + uint32_t stats_data; + uint32_t sent_bytes[3] = {0}; + uint8_t sent_mask = 0; + uint32_t recv_bytes[3] = {0}; + uint8_t recv_mask = 0; + uint32_t collisions[3] = {0}; + uint8_t collisions_mask = 0; + int ret = 0; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (!ctx->available) { + k_mutex_unlock(&ctx->mutex); + return -ENODEV; + } + stats_hw_caps = ctx->stats_hw_caps; + comm_if_num = 
ctx->comm_if_num; + udev = ctx->udev; + k_mutex_unlock(&ctx->mutex); + + stats_buf = usbh_xfer_buf_alloc(udev, 4); + if (!stats_buf) { + return -ENOMEM; + } + + for (unsigned int i = 0; i < 29; i++) { + if ((stats_hw_caps & BIT(i)) == 0) { + continue; + } + + net_buf_reset(stats_buf); + + ret = usbh_req_setup(udev, + (USB_REQTYPE_DIR_TO_HOST << 7) | + (USB_REQTYPE_TYPE_CLASS << 5) | + USB_REQTYPE_RECIPIENT_INTERFACE, + GET_ETHERNET_STATISTIC, i + 1, comm_if_num, 4, stats_buf); + if (ret != 0) { + LOG_ERR("get ethernet statistic for feature %u error (%d)", i + 1, ret); + break; + } + + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + stats_data = sys_get_le32(stats_buf->data); + switch (i + 1) { + case ETHERNET_STAT_XMIT_OK: + ctx->stats.data.pkts.tx = stats_data; + break; + case ETHERNET_STAT_RCV_OK: + ctx->stats.data.pkts.rx = stats_data; + break; + case ETHERNET_STAT_XMIT_ERROR: + ctx->stats.data.errors.tx = stats_data; + break; + case ETHERNET_STAT_RCV_ERROR: + ctx->stats.data.errors.rx = stats_data; + break; + case ETHERNET_STAT_RCV_NO_BUFFER: + ctx->stats.data.error_details.rx_no_buffer_count = stats_data; + break; + case ETHERNET_STAT_DIRECTED_BYTES_XMIT: + sent_mask |= BIT(0); + sent_bytes[0] = stats_data; + break; + case ETHERNET_STAT_DIRECTED_FRAMES_XMIT: + break; + case ETHERNET_STAT_MULTICAST_BYTES_XMIT: + sent_mask |= BIT(1); + sent_bytes[1] = stats_data; + break; + case ETHERNET_STAT_MULTICAST_FRAMES_XMIT: + ctx->stats.data.multicast.tx = stats_data; + break; + case ETHERNET_STAT_BROADCAST_BYTES_XMIT: + sent_mask |= BIT(2); + sent_bytes[2] = stats_data; + break; + case ETHERNET_STAT_BROADCAST_FRAMES_XMIT: + ctx->stats.data.broadcast.tx = stats_data; + break; + case ETHERNET_STAT_DIRECTED_BYTES_RCV: + recv_mask |= BIT(0); + recv_bytes[0] = stats_data; + break; + case ETHERNET_STAT_DIRECTED_FRAMES_RCV: + break; + case ETHERNET_STAT_MULTICAST_BYTES_RCV: + recv_mask |= BIT(1); + recv_bytes[1] = stats_data; + break; + case ETHERNET_STAT_MULTICAST_FRAMES_RCV: + 
ctx->stats.data.multicast.rx = stats_data; + break; + case ETHERNET_STAT_BROADCAST_BYTES_RCV: + recv_mask |= BIT(2); + recv_bytes[2] = stats_data; + break; + case ETHERNET_STAT_BROADCAST_FRAMES_RCV: + ctx->stats.data.broadcast.rx = stats_data; + break; + case ETHERNET_STAT_RCV_CRC_ERROR: + ctx->stats.data.error_details.rx_crc_errors = stats_data; + break; + case ETHERNET_STAT_TRANSMIT_QUEUE_LENGTH: + break; + case ETHERNET_STAT_RCV_ERROR_ALIGNMENT: + ctx->stats.data.error_details.rx_align_errors = stats_data; + break; + case ETHERNET_STAT_XMIT_ONE_COLLISION: + collisions_mask |= BIT(0); + collisions[0] = stats_data; + break; + case ETHERNET_STAT_XMIT_MORE_COLLISIONS: + collisions_mask |= BIT(1); + collisions[1] = stats_data; + break; + case ETHERNET_STAT_XMIT_DEFERRED: + break; + case ETHERNET_STAT_XMIT_MAX_COLLISIONS: + ctx->stats.data.error_details.tx_aborted_errors = stats_data; + break; + case ETHERNET_STAT_RCV_OVERRUN: + ctx->stats.data.error_details.rx_over_errors = stats_data; + break; + case ETHERNET_STAT_XMIT_UNDERRUN: + ctx->stats.data.error_details.tx_fifo_errors = stats_data; + break; + case ETHERNET_STAT_XMIT_HEARTBEAT_FAILURE: + ctx->stats.data.error_details.tx_heartbeat_errors = stats_data; + break; + case ETHERNET_STAT_XMIT_TIMES_CRS_LOST: + ctx->stats.data.error_details.tx_carrier_errors = stats_data; + break; + case ETHERNET_STAT_XMIT_LATE_COLLISIONS: + collisions_mask |= BIT(2); + collisions[2] = stats_data; + break; + default: + break; + } + k_mutex_unlock(&ctx->stats.mutex); + } + + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + if (sent_mask == 0x07) { + ctx->stats.data.bytes.sent = sent_bytes[0] + sent_bytes[1] + sent_bytes[2]; + } + if (recv_mask == 0x07) { + ctx->stats.data.bytes.received = recv_bytes[0] + recv_bytes[1] + recv_bytes[2]; + } + if (collisions_mask == 0x07) { + ctx->stats.data.collisions = collisions[0] + collisions[1] + collisions[2]; + } + k_mutex_unlock(&ctx->stats.mutex); + + if (stats_buf != NULL) { + 
usbh_xfer_buf_free(udev, stats_buf); + } + + return ret; +} +#endif + +static int usbh_cdc_ecm_xfer(struct usbh_cdc_ecm_ctx *const ctx, uint8_t ep_addr, + struct net_buf *const buf, + struct usbh_cdc_ecm_xfer_cb_priv **const cb_priv) +{ + uint8_t comm_in_ep_addr; + uint8_t data_in_ep_addr; + uint8_t data_out_ep_addr; + struct usb_device *udev; + usbh_udev_cb_t cb; + struct usbh_cdc_ecm_xfer_cb_priv *_cb_priv = NULL; + struct uhc_transfer *xfer; + int ret = 0; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (!ctx->available) { + k_mutex_unlock(&ctx->mutex); + return -ENODEV; + } + comm_in_ep_addr = ctx->comm_in_ep_addr; + data_in_ep_addr = ctx->data_in_ep_addr; + data_out_ep_addr = ctx->data_out_ep_addr; + udev = ctx->udev; + k_mutex_unlock(&ctx->mutex); + + if (ep_addr == ctx->comm_in_ep_addr) { + cb = usbh_cdc_ecm_xfer_comm_in_cb; + } else if (ep_addr == data_in_ep_addr) { + cb = usbh_cdc_ecm_xfer_data_in_cb; + } else if (ep_addr == data_out_ep_addr) { + cb = usbh_cdc_ecm_xfer_data_out_cb; + } else { + return -EINVAL; + } + + ret = k_mem_slab_alloc(&usbh_cdc_ecm_xfer_cb_priv_pool, (void **)&_cb_priv, K_NO_WAIT); + if (ret != 0) { + LOG_WRN("failed to allocate transfer callback private data of endpoint 0x%02x", + ep_addr); + return -ENOMEM; + } + + xfer = usbh_xfer_alloc(udev, ep_addr, cb, _cb_priv); + if (xfer == NULL) { + LOG_WRN("failed to allocate transfer of endpoint 0x%02x", ep_addr); + ret = -ENOMEM; + goto cleanup; + } + + ret = usbh_xfer_buf_add(udev, xfer, buf); + if (ret != 0) { + goto cleanup; + } + + _cb_priv->ctx = ctx; + _cb_priv->xfer = xfer; + + ret = usbh_xfer_enqueue(udev, xfer); + if (ret != 0) { + goto cleanup; + } + + k_mutex_lock(&ctx->queued_xfers.mutex, K_FOREVER); + sys_dlist_append(&ctx->queued_xfers.list, &_cb_priv->node); + k_mutex_unlock(&ctx->queued_xfers.mutex); + + if (cb_priv != NULL) { + *cb_priv = _cb_priv; + } + + return 0; + +cleanup: + if (xfer != NULL) { + usbh_xfer_free(udev, xfer); + } + + if (_cb_priv != NULL) { + 
k_mem_slab_free(&usbh_cdc_ecm_xfer_cb_priv_pool, _cb_priv); + } + + return ret; +} + +static int usbh_cdc_ecm_start_comm_in_xfer(struct usbh_cdc_ecm_ctx *const ctx) +{ + struct usb_device *udev; + uint8_t comm_in_ep_addr; + struct net_buf *buf; + int ret; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + udev = ctx->udev; + comm_in_ep_addr = ctx->comm_in_ep_addr; + k_mutex_unlock(&ctx->mutex); + + buf = usbh_xfer_buf_alloc(udev, sizeof(struct usb_setup_packet) + 8); + if (buf == NULL) { + LOG_WRN("failed to allocate data buffer for notification reception"); + return -ENOMEM; + } + + ret = usbh_cdc_ecm_xfer(ctx, comm_in_ep_addr, buf, NULL); + if (ret != 0) { + usbh_xfer_buf_free(udev, buf); + } + + return ret; +} + +static int usbh_cdc_ecm_start_data_in_xfer(struct usbh_cdc_ecm_ctx *const ctx) +{ + struct net_buf *buf; + uint8_t data_in_ep_addr; + int ret; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + data_in_ep_addr = ctx->data_in_ep_addr; + k_mutex_unlock(&ctx->mutex); + + buf = net_buf_alloc(&usbh_cdc_ecm_data_pool, K_NO_WAIT); + if (buf == NULL) { + LOG_WRN("failed to allocate data buffer for data reception"); + return -ENOMEM; + } + + ret = usbh_cdc_ecm_xfer(ctx, data_in_ep_addr, buf, NULL); + if (ret != 0) { + net_buf_unref(buf); + } + + return ret; +} + +static int usbh_cdc_ecm_start_data_out_xfer(struct usbh_cdc_ecm_ctx *const ctx, + struct net_buf *const buf) +{ + struct usb_device *udev; + uint8_t data_out_ep_addr; + struct usbh_cdc_ecm_xfer_cb_priv *cb_priv; + bool need_zlp; + int ret; + + if (buf == NULL) { + return -EINVAL; + } + + if (buf->frags != NULL) { + return -EINVAL; + } + + k_mutex_lock(&ctx->mutex, K_FOREVER); + data_out_ep_addr = ctx->data_out_ep_addr; + udev = ctx->udev; + k_mutex_unlock(&ctx->mutex); + + need_zlp = (bool)((buf->len % ctx->data_out_ep_mps) == 0); + + k_sem_take(&ctx->data_tx_sem, K_FOREVER); + + ret = usbh_cdc_ecm_xfer(ctx, data_out_ep_addr, buf, &cb_priv); + if (ret != 0) { + k_sem_give(&ctx->data_tx_sem); + return ret; + } + + if 
(need_zlp) { + ret = usbh_cdc_ecm_xfer(ctx, data_out_ep_addr, NULL, NULL); + if (ret != 0) { + LOG_WRN("request data OUT ZLP transfer error (%d)", ret); + + usbh_xfer_dequeue(udev, cb_priv->xfer); + } + } + + return ret; +} + +static int usbh_cdc_ecm_start_rx(struct usbh_cdc_ecm_ctx *const ctx) +{ + int ret; + + ret = usbh_cdc_ecm_start_comm_in_xfer(ctx); + if (ret != 0) { + LOG_ERR("start receiving failed, comm IN transfer error (%d)", ret); + return ret; + } + + ret = usbh_cdc_ecm_start_data_in_xfer(ctx); + if (ret != 0) { + LOG_ERR("start receiving failed, data IN transfer error (%d)", ret); + return ret; + } + + return 0; +} + +static int usbh_cdc_ecm_xfer_comm_in_cb(struct usb_device *const udev, + struct uhc_transfer *const xfer) +{ + struct usbh_cdc_ecm_xfer_cb_priv *cb_priv = xfer->priv; + struct usbh_cdc_ecm_ctx *ctx = cb_priv->ctx; + struct usb_setup_packet *notif; + uint32_t *link_speeds; + bool trigger_next = false; + int ret = 0; + int restart_err; + + if (xfer->err != 0) { + if (xfer->err != -EIO) { + LOG_WRN("comm IN transfer error (%d)", xfer->err); + } + goto restart_transfer; + } + + notif = (struct usb_setup_packet *)xfer->buf->data; + switch (notif->bRequest) { + case USB_CDC_NETWORK_CONNECTION: + if (xfer->buf->len != sizeof(struct usb_setup_packet)) { + ret = -EBADMSG; + goto restart_transfer; + } + + if (sys_le16_to_cpu(notif->wValue) == 1) { + net_eth_carrier_on(ctx->eth_iface); + } else if (sys_le16_to_cpu(notif->wValue) == 0) { + net_eth_carrier_off(ctx->eth_iface); + } else { + LOG_WRN("unknown CDC Network Connection value 0x%02x", + sys_le16_to_cpu(notif->wValue)); + } + break; + case USB_CDC_CONNECTION_SPEED_CHANGE: + if (xfer->buf->len != (sizeof(struct usb_setup_packet) + 8)) { + ret = -EBADMSG; + goto restart_transfer; + } + + k_mutex_lock(&ctx->mutex, K_FOREVER); + link_speeds = (uint32_t *)(notif + 1); + ctx->download_speed = sys_le32_to_cpu(link_speeds[0]); + ctx->upload_speed = sys_le32_to_cpu(link_speeds[1]); + LOG_INF("network 
link %s, speed [UL %u bps / DL %u bps]", + net_if_is_carrier_ok(ctx->eth_iface) ? "up" : "down", ctx->upload_speed, + ctx->download_speed); + k_mutex_unlock(&ctx->mutex); + break; + default: + ret = -ENOTSUP; + break; + } + +restart_transfer: + usbh_xfer_buf_free(udev, xfer->buf); + usbh_xfer_free(udev, xfer); + + k_mutex_lock(&ctx->queued_xfers.mutex, K_FOREVER); + sys_dlist_remove(&cb_priv->node); + k_mutex_unlock(&ctx->queued_xfers.mutex); + k_mem_slab_free(&usbh_cdc_ecm_xfer_cb_priv_pool, cb_priv); + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (ctx->available && ctx->auto_restart_rx_xfer) { + trigger_next = true; + } + k_mutex_unlock(&ctx->mutex); + if (trigger_next) { + restart_err = usbh_cdc_ecm_start_comm_in_xfer(ctx); + if (restart_err != 0) { + LOG_ERR("restart comm IN transfer error (%d)", restart_err); + } + } + + return ret; +} + +static int usbh_cdc_ecm_xfer_data_in_cb(struct usb_device *const udev, + struct uhc_transfer *const xfer) +{ + struct usbh_cdc_ecm_xfer_cb_priv *cb_priv = xfer->priv; + struct usbh_cdc_ecm_ctx *ctx = cb_priv->ctx; + uint16_t max_segment_size; + struct net_pkt *pkt; + bool trigger_next = false; +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + bool is_broadcast; + bool is_multicast; +#endif + int ret = 0; + int restart_err; + + if (xfer->err != 0) { + if (xfer->err != -EIO) { + LOG_WRN("data IN transfer error (%d)", xfer->err); + } + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + ctx->stats.data.errors.rx++; + if (xfer->err == -EPIPE) { + ctx->stats.data.error_details.rx_over_errors++; + } + k_mutex_unlock(&ctx->stats.mutex); +#endif + + goto restart_transfer; + } + + k_mutex_lock(&ctx->mutex, K_FOREVER); + max_segment_size = ctx->max_segment_size; + k_mutex_unlock(&ctx->mutex); + + if (xfer->buf->len == 0) { + goto restart_transfer; + } + + if (xfer->buf->len > max_segment_size) { + LOG_WRN("dropped received data which length [%u] exceeding max segment size [%u]", + xfer->buf->len, 
max_segment_size); + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + ctx->stats.data.errors.rx++; + ctx->stats.data.error_details.rx_length_errors++; + k_mutex_unlock(&ctx->stats.mutex); +#endif + + goto restart_transfer; + } + + pkt = net_pkt_rx_alloc(K_NO_WAIT); + if (pkt == NULL) { + LOG_WRN("failed to allocate net packet and lost received data"); + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + ctx->stats.data.errors.rx++; + ctx->stats.data.error_details.rx_no_buffer_count++; + k_mutex_unlock(&ctx->stats.mutex); +#endif + + goto restart_transfer; + } + + pkt->buffer = xfer->buf; + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + is_broadcast = net_eth_is_addr_broadcast((struct net_eth_addr *)xfer->buf->data); + is_multicast = net_eth_is_addr_multicast((struct net_eth_addr *)xfer->buf->data); +#endif + + ret = net_recv_data(ctx->eth_iface, pkt); + if (ret != 0) { + LOG_ERR("passed data into network stack error (%d)", ret); + + net_pkt_unref(pkt); + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + ctx->stats.data.errors.rx++; + k_mutex_unlock(&ctx->stats.mutex); +#endif + } else { +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + ctx->stats.data.pkts.rx++; + ctx->stats.data.bytes.received += xfer->buf->len; + if (is_broadcast) { + ctx->stats.data.broadcast.rx++; + } + if (is_multicast) { + ctx->stats.data.multicast.rx++; + } + k_mutex_unlock(&ctx->stats.mutex); +#endif + } + + xfer->buf = NULL; + +restart_transfer: + if (xfer->buf != NULL) { + net_buf_unref(xfer->buf); + } + usbh_xfer_free(udev, xfer); + + k_mutex_lock(&ctx->queued_xfers.mutex, K_FOREVER); + sys_dlist_remove(&cb_priv->node); + k_mutex_unlock(&ctx->queued_xfers.mutex); + k_mem_slab_free(&usbh_cdc_ecm_xfer_cb_priv_pool, cb_priv); + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (ctx->available && ctx->auto_restart_rx_xfer) { + 
trigger_next = true; + } + k_mutex_unlock(&ctx->mutex); + if (trigger_next) { + restart_err = usbh_cdc_ecm_start_data_in_xfer(ctx); + if (restart_err != 0) { + LOG_ERR("restart data IN transfer error (%d)", restart_err); + } + } + + return ret; +} + +static int usbh_cdc_ecm_xfer_data_out_cb(struct usb_device *const udev, + struct uhc_transfer *const xfer) +{ + struct usbh_cdc_ecm_xfer_cb_priv *cb_priv = xfer->priv; + struct usbh_cdc_ecm_ctx *ctx = cb_priv->ctx; +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + bool is_broadcast; + bool is_multicast; +#endif + + if (xfer->err != 0) { + if (xfer->err != -EIO) { + LOG_WRN("data OUT transfer error (%d)", xfer->err); + } + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + ctx->stats.data.errors.tx++; + if (xfer->err == -EPIPE) { + ctx->stats.data.error_details.tx_fifo_errors++; + } else if (xfer->err == -ECONNABORTED || xfer->err == -ENODEV) { + ctx->stats.data.error_details.tx_aborted_errors++; + } else { + ctx->stats.data.error_details.tx_carrier_errors++; + } + k_mutex_unlock(&ctx->stats.mutex); +#endif + + goto cleanup; + } + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + if (xfer->buf != NULL && xfer->buf->len > 0) { + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + ctx->stats.data.pkts.tx++; + ctx->stats.data.bytes.sent += xfer->buf->len; + is_broadcast = net_eth_is_addr_broadcast((struct net_eth_addr *)xfer->buf->data); + is_multicast = net_eth_is_addr_multicast((struct net_eth_addr *)xfer->buf->data); + if (is_broadcast) { + ctx->stats.data.broadcast.tx++; + } + if (is_multicast) { + ctx->stats.data.multicast.tx++; + } + k_mutex_unlock(&ctx->stats.mutex); + } +#endif + +cleanup: + if (xfer->buf != NULL) { + net_buf_unref(xfer->buf); + k_sem_give(&ctx->data_tx_sem); + } + usbh_xfer_free(udev, xfer); + + k_mutex_lock(&ctx->queued_xfers.mutex, K_FOREVER); + sys_dlist_remove(&cb_priv->node); + k_mutex_unlock(&ctx->queued_xfers.mutex); + 
k_mem_slab_free(&usbh_cdc_ecm_xfer_cb_priv_pool, cb_priv); + + return 0; +} + +static void usbh_cdc_ecm_cleanup_xfers(struct usbh_cdc_ecm_ctx *const ctx) +{ + struct usb_device *udev; + struct usbh_cdc_ecm_xfer_cb_priv *cb_priv, *cb_priv_next; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + udev = ctx->udev; + k_mutex_unlock(&ctx->mutex); + + k_mutex_lock(&ctx->queued_xfers.mutex, K_FOREVER); + SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&ctx->queued_xfers.list, cb_priv, cb_priv_next, node) { + usbh_xfer_dequeue(udev, cb_priv->xfer); + } + k_mutex_unlock(&ctx->queued_xfers.mutex); +} + +static void usbh_cdc_ecm_cleanup_mc_filters(struct usbh_cdc_ecm_ctx *const ctx) +{ + struct usbh_cdc_ecm_multicast_filter_addr *mc_addr, *mc_addr_next; + + k_mutex_lock(&ctx->mc_filters.mutex, K_FOREVER); + SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&ctx->mc_filters.addrs_list, mc_addr, mc_addr_next, + node) { + sys_dlist_remove(&mc_addr->node); + k_free(mc_addr); + } + ctx->mc_filters.ignored_addrs = 0; + k_mutex_unlock(&ctx->mc_filters.mutex); +} + +static int usbh_cdc_ecm_init(struct usbh_class_data *const c_data) +{ + ARG_UNUSED(c_data); + + return 0; +} + +static int usbh_cdc_ecm_completion_cb(struct usbh_class_data *const c_data, + struct uhc_transfer *const xfer) +{ + ARG_UNUSED(c_data); + ARG_UNUSED(xfer); + + return 0; +} + +static int usbh_cdc_ecm_probe(struct usbh_class_data *const c_data, struct usb_device *const udev, + const uint8_t iface) +{ + struct usbh_cdc_ecm_ctx *ctx = c_data->priv; + const struct usb_if_descriptor *if_desc; + const struct usb_association_descriptor *assoc_desc; + uint8_t data_if_num; + uint8_t data_alt_num; + struct net_if *eth_iface; + struct net_eth_addr eth_mac; + int ret; + + if (iface == USBH_CLASS_IFNUM_DEVICE) { + return -ENOTSUP; + } + + k_mutex_lock(&ctx->mutex, K_FOREVER); + ctx->udev = udev; + eth_iface = ctx->eth_iface; + ctx->available = false; + ctx->auto_restart_rx_xfer = false; + ctx->upload_speed = 0; + ctx->download_speed = 0; + 
k_mutex_unlock(&ctx->mutex); + + k_mutex_lock(&ctx->pkt_filter.mutex, K_FOREVER); + ctx->pkt_filter.bitmap = 0; + k_mutex_unlock(&ctx->pkt_filter.mutex); + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + memset(&ctx->stats.data, 0, sizeof(struct net_stats_eth)); + k_mutex_unlock(&ctx->stats.mutex); +#endif + + if_desc = usbh_desc_get_iface(udev, iface); + if (if_desc == NULL) { + LOG_ERR("no descriptor found for interface %u", iface); + ret = -ENOTSUP; + goto done; + } + + if (if_desc->bDescriptorType == USB_DESC_INTERFACE_ASSOC) { + assoc_desc = (const struct usb_association_descriptor *)if_desc; + if_desc = usbh_desc_get_iface(udev, assoc_desc->bFirstInterface); + if (if_desc == NULL) { + LOG_ERR("no descriptor (IAD) found for interface %u", + assoc_desc->bFirstInterface); + ret = -ENOTSUP; + goto done; + } + } + + ret = usbh_cdc_ecm_parse_descriptors(ctx, if_desc); + if (ret != 0) { + goto done; + } + + k_mutex_lock(&ctx->mutex, K_FOREVER); + data_if_num = ctx->data_if_num; + data_alt_num = ctx->data_alt_num; + k_mutex_unlock(&ctx->mutex); + + if (data_alt_num > 0) { + ret = usbh_device_interface_set(udev, data_if_num, data_alt_num, false); + if (ret != 0) { + LOG_ERR("set data interface alternate setting error (%d)", ret); + goto done; + } + } + + ret = usbh_cdc_ecm_get_mac_address(ctx, ð_mac); + if (ret != 0) { + LOG_ERR("get valid MAC address error (%d)", ret); + goto done; + } + + ret = net_if_set_link_addr(eth_iface, eth_mac.addr, NET_ETH_ADDR_LEN, NET_LINK_ETHERNET); + if (ret != 0) { + LOG_ERR("set MAC address error (%d)", ret); + goto done; + } + + k_mutex_lock(&ctx->mutex, K_FOREVER); + + LOG_INF("the USB device information is summarized below\r\n" + "Device Information:\r\n" + "\tCommunication: interface %u, endpoint [IN 0x%02x]\r\n" + "\tData: interface %u (alt %d), endpoint [IN 0x%02x, OUT 0x%02x (MPS %u)]\r\n" + "\twMaxSegmentSize %u bytes, MAC string descriptor index %u " + 
"[%02X:%02X:%02X:%02X:%02X:%02X]\r\n" + "\tHardware Multicast Filters: %u (%s), CRC shift %u bits", + ctx->comm_if_num, ctx->comm_in_ep_addr, ctx->data_if_num, ctx->data_alt_num, + ctx->data_in_ep_addr, ctx->data_out_ep_addr, ctx->data_out_ep_mps, + ctx->max_segment_size, ctx->mac_str_desc_idx, eth_mac.addr[0], eth_mac.addr[1], + eth_mac.addr[2], eth_mac.addr[3], eth_mac.addr[4], eth_mac.addr[5], + ctx->mc_filter_supported_num, ctx->mc_filter_is_imperfect ? "imperfect" : "perfect", + ctx->mc_filter_crc32_shift); + + ctx->available = true; + ctx->auto_restart_rx_xfer = true; + + k_mutex_unlock(&ctx->mutex); + + ret = usbh_cdc_ecm_update_packet_filter(ctx, true, + PACKET_TYPE_BROADCAST | PACKET_TYPE_DIRECTED | + PACKET_TYPE_ALL_MULTICAST); + if (ret != 0) { + goto done; + } + + ret = usbh_cdc_ecm_start_rx(ctx); + if (ret != 0) { + goto done; + } + +done: + if (ret != 0) { + k_mutex_lock(&ctx->mutex, K_FOREVER); + ctx->available = false; + k_mutex_unlock(&ctx->mutex); + } + + return ret; +} + +static int usbh_cdc_ecm_removed(struct usbh_class_data *const c_data) +{ + struct usbh_cdc_ecm_ctx *ctx = c_data->priv; + + usbh_cdc_ecm_cleanup_xfers(ctx); + usbh_cdc_ecm_cleanup_mc_filters(ctx); + + net_eth_carrier_off(ctx->eth_iface); + + k_mutex_lock(&ctx->mutex, K_FOREVER); + ctx->available = false; + ctx->udev = NULL; + k_mutex_unlock(&ctx->mutex); + + return 0; +} + +static void eth_usbh_cdc_ecm_iface_init(struct net_if *iface) +{ + const struct device *dev = net_if_get_device(iface); + struct usbh_cdc_ecm_ctx *ctx = dev->data; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + ctx->eth_iface = iface; + k_mutex_unlock(&ctx->mutex); + + ethernet_init(iface); + net_if_carrier_off(iface); +} + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) +struct net_stats_eth *eth_usbh_cdc_ecm_get_stats(const struct device *dev) +{ + struct usbh_cdc_ecm_ctx *ctx = dev->data; + +#if defined(CONFIG_USBH_CDC_ECM_HW_STATS) + bool need_update; + + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + 
need_update = sys_timepoint_expired(ctx->stats.last_tp); + if (need_update) { + ctx->stats.last_tp = + sys_timepoint_calc(K_SECONDS(CONFIG_USBH_CDC_ECM_HW_STATS_INTERVAL)); + } + k_mutex_unlock(&ctx->stats.mutex); + + if (need_update) { + usbh_cdc_ecm_update_stats(ctx); + } +#endif + + return &ctx->stats.data; +} +#endif + +static int eth_usbh_cdc_ecm_start(const struct device *dev) +{ + struct usbh_cdc_ecm_ctx *ctx = dev->data; + struct usb_device *udev; + uint8_t data_if_num; + uint8_t data_alt_num; + int ret; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (!ctx->available) { + k_mutex_unlock(&ctx->mutex); + return 0; + } + udev = ctx->udev; + data_if_num = ctx->data_if_num; + data_alt_num = ctx->data_alt_num; + k_mutex_unlock(&ctx->mutex); + + ret = usbh_device_interface_set(udev, data_if_num, data_alt_num, false); + if (ret != 0) { + return ret; + } + + k_mutex_lock(&ctx->mutex, K_FOREVER); + ctx->auto_restart_rx_xfer = true; + k_mutex_unlock(&ctx->mutex); + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_lock(&ctx->stats.mutex, K_FOREVER); + memset(&ctx->stats.data, 0, sizeof(ctx->stats.data)); + k_mutex_unlock(&ctx->stats.mutex); +#endif + + ret = usbh_cdc_ecm_start_rx(ctx); + if (ret != 0) { + goto error_recovery; + } + + return 0; + +error_recovery: + k_mutex_lock(&ctx->mutex, K_FOREVER); + ctx->auto_restart_rx_xfer = false; + k_mutex_unlock(&ctx->mutex); + usbh_cdc_ecm_cleanup_xfers(ctx); + usbh_device_interface_set(udev, data_if_num, 0, false); + + return ret; +} + +static int eth_usbh_cdc_ecm_stop(const struct device *dev) +{ + struct usbh_cdc_ecm_ctx *ctx = dev->data; + struct usb_device *udev; + uint8_t data_if_num; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (!ctx->available) { + k_mutex_unlock(&ctx->mutex); + return 0; + } + udev = ctx->udev; + data_if_num = ctx->data_if_num; + ctx->auto_restart_rx_xfer = false; + k_mutex_unlock(&ctx->mutex); + + usbh_cdc_ecm_cleanup_xfers(ctx); + + return usbh_device_interface_set(udev, data_if_num, 0, 
false); +} + +static enum ethernet_hw_caps eth_usbh_cdc_ecm_get_capabilities(const struct device *dev) +{ + ARG_UNUSED(dev); + + return ETHERNET_LINK_10BASE | ETHERNET_LINK_100BASE | ETHERNET_HW_FILTERING +#if defined(CONFIG_NET_PROMISCUOUS_MODE) + | ETHERNET_PROMISC_MODE +#endif + ; +} + +static int eth_usbh_cdc_ecm_set_config(const struct device *dev, enum ethernet_config_type type, + const struct ethernet_config *config) +{ + struct usbh_cdc_ecm_ctx *ctx = dev->data; + uint8_t mac_addr[NET_ETH_ADDR_LEN]; + int ret = 0; + + switch (type) { + case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: + memcpy(mac_addr, config->mac_address.addr, NET_ETH_ADDR_LEN); + ret = net_if_set_link_addr(ctx->eth_iface, mac_addr, NET_ETH_ADDR_LEN, + NET_LINK_ETHERNET); + break; + case ETHERNET_CONFIG_TYPE_FILTER: + if (config->filter.set) { + ret = usbh_cdc_ecm_add_multicast_group(ctx, &config->filter.mac_address); + } else { + ret = usbh_cdc_ecm_leave_multicast_group(ctx, &config->filter.mac_address); + } + break; +#if defined(CONFIG_NET_PROMISCUOUS_MODE) + case ETHERNET_CONFIG_TYPE_PROMISC_MODE: + ret = usbh_cdc_ecm_update_packet_filter(ctx, config->promisc_mode, + PACKET_TYPE_PROMISCUOUS); + break; +#endif + default: + ret = -ENOTSUP; + break; + } + + return ret; +} + +static int eth_usbh_cdc_ecm_send(const struct device *dev, struct net_pkt *pkt) +{ + struct usbh_cdc_ecm_ctx *ctx = dev->data; + struct net_buf *buf = pkt->buffer; + uint16_t max_segment_size; + struct net_buf *tx_buf = NULL; + size_t total_len; + int ret; + + k_mutex_lock(&ctx->mutex, K_FOREVER); + if (!ctx->available) { + k_mutex_unlock(&ctx->mutex); + return -ENODEV; + } + max_segment_size = ctx->max_segment_size; + k_mutex_unlock(&ctx->mutex); + + total_len = net_buf_frags_len(buf); + if (total_len > max_segment_size || total_len > CONFIG_USBH_CDC_ECM_DATA_BUF_POOL_SIZE) { + return -EMSGSIZE; + } + + tx_buf = net_buf_alloc(&usbh_cdc_ecm_data_pool, K_NO_WAIT); + if (tx_buf == NULL) { + LOG_WRN("failed to allocate data buffer 
for transmitting"); + return -ENOMEM; + } + + if (net_buf_linearize(tx_buf->data, net_buf_tailroom(tx_buf), buf, 0, total_len) != + total_len) { + LOG_ERR("data linearization failed for transmitting"); + ret = -EIO; + goto cleanup; + } + + net_buf_add(tx_buf, total_len); + + ret = usbh_cdc_ecm_start_data_out_xfer(ctx, tx_buf); + if (ret != 0) { + goto cleanup; + } + + return 0; + +cleanup: + if (tx_buf != NULL) { + net_buf_unref(tx_buf); + } + + return ret; +} + +static struct usbh_class_api usbh_cdc_ecm_api = { + .init = usbh_cdc_ecm_init, + .completion_cb = usbh_cdc_ecm_completion_cb, + .probe = usbh_cdc_ecm_probe, + .removed = usbh_cdc_ecm_removed, +}; + +static const struct ethernet_api eth_usbh_cdc_ecm_api = { + .iface_api.init = eth_usbh_cdc_ecm_iface_init, +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + .get_stats = eth_usbh_cdc_ecm_get_stats, +#endif + .start = eth_usbh_cdc_ecm_start, + .stop = eth_usbh_cdc_ecm_stop, + .get_capabilities = eth_usbh_cdc_ecm_get_capabilities, + .set_config = eth_usbh_cdc_ecm_set_config, + .send = eth_usbh_cdc_ecm_send, +}; + +static struct usbh_class_filter cdc_ecm_filters[] = { + { + .flags = USBH_CLASS_MATCH_CODE_TRIPLE, + .class = USB_BCC_CDC_CONTROL, + .sub = ECM_SUBCLASS, + }, +}; + +static int eth_net_device_init_fn(const struct device *dev) +{ + struct usbh_cdc_ecm_ctx *ctx = dev->data; + + ctx->available = false; + + k_mutex_init(&ctx->mutex); + k_mutex_init(&ctx->queued_xfers.mutex); + k_mutex_init(&ctx->mc_filters.mutex); + k_mutex_init(&ctx->pkt_filter.mutex); +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + k_mutex_init(&ctx->stats.mutex); +#endif + sys_dlist_init(&ctx->queued_xfers.list); + sys_dlist_init(&ctx->mc_filters.addrs_list); + + k_sem_init(&ctx->data_tx_sem, CONFIG_USBH_CDC_ECM_DATA_TX_CONCURRENT_NUM, + CONFIG_USBH_CDC_ECM_DATA_TX_CONCURRENT_NUM); + + return 0; +} + +#define USBH_CDC_ECM_DEVICE_DEFINE(x, _) \ + static struct usbh_cdc_ecm_ctx cdc_ecm_ctx_##x; \ + \ + ETH_NET_DEVICE_INIT(eth_usbh_cdc_ecm_##x, 
\ + CONFIG_USBH_CDC_ECM_ETH_DRV_NAME #x " (usbh_cdc_ecm)", \ + eth_net_device_init_fn, NULL, &cdc_ecm_ctx_##x, NULL, \ + CONFIG_ETH_INIT_PRIORITY, ð_usbh_cdc_ecm_api, NET_ETH_MTU); \ + \ + USBH_DEFINE_CLASS(cdc_ecm_c_data_##x, &usbh_cdc_ecm_api, &cdc_ecm_ctx_##x, cdc_ecm_filters) + +LISTIFY(CONFIG_USBH_CDC_ECM_INSTANCES_COUNT, USBH_CDC_ECM_DEVICE_DEFINE, (;), _); + +#if DT_HAS_CHOSEN(zephyr_uhc) +USBH_CONTROLLER_DEFINE(cdc_ecm_uhs_ctx, DEVICE_DT_GET(DT_CHOSEN(zephyr_uhc))); + +static int cdc_ecm_usbh_start(void) +{ + int ret; + + ret = usbh_init(&cdc_ecm_uhs_ctx); + if (ret != 0) { + LOG_ERR("Failed to init USB host: %d", ret); + return ret; + } + + ret = usbh_enable(&cdc_ecm_uhs_ctx); + if (ret != 0) { + LOG_ERR("Failed to enable USB host: %d", ret); + return ret; + } + + return 0; +} + +/* Just directly after iface init */ +SYS_INIT(cdc_ecm_usbh_start, APPLICATION, 91); +#endif /* DT_HAS_CHOSEN(zephyr_uhc) */ diff --git a/subsys/usb/host/usbh_ch9.c b/subsys/usb/host/usbh_ch9.c index 80783e430559b..db2af35db647f 100644 --- a/subsys/usb/host/usbh_ch9.c +++ b/subsys/usb/host/usbh_ch9.c @@ -180,6 +180,15 @@ int usbh_req_desc_cfg(struct usb_device *const udev, return ret; } +int usbh_req_desc_str(struct usb_device *const udev, + const uint8_t index, const uint16_t lang_id, + struct net_buf *const desc_buf) +{ + uint16_t len = MIN(net_buf_tailroom(desc_buf), UINT8_MAX); + + return usbh_req_desc(udev, USB_DESC_STRING, index, lang_id, len, desc_buf); +} + int usbh_req_set_address(struct usb_device *const udev, const uint8_t addr) { diff --git a/subsys/usb/host/usbh_ch9.h b/subsys/usb/host/usbh_ch9.h index 19879dae6e2df..ae4292623e8c6 100644 --- a/subsys/usb/host/usbh_ch9.h +++ b/subsys/usb/host/usbh_ch9.h @@ -41,6 +41,10 @@ int usbh_req_desc_cfg(struct usb_device *const udev, const uint16_t len, struct usb_cfg_descriptor *const desc); +int usbh_req_desc_str(struct usb_device *const udev, + const uint8_t index, const uint16_t lang_id, + struct net_buf *const desc_buf); + 
int usbh_req_set_alt(struct usb_device *const udev, const uint8_t iface, const uint8_t alt); diff --git a/subsys/usb/host/usbh_desc.c b/subsys/usb/host/usbh_desc.c index cf82b1bba1b89..f927335aa32dc 100644 --- a/subsys/usb/host/usbh_desc.c +++ b/subsys/usb/host/usbh_desc.c @@ -1,6 +1,6 @@ /* * SPDX-FileCopyrightText: Copyright Nordic Semiconductor ASA - * SPDX-FileCopyrightText: Copyright 2025 NXP + * SPDX-FileCopyrightText: Copyright 2025 - 2026 NXP * SPDX-License-Identifier: Apache-2.0 */ @@ -10,6 +10,8 @@ #include "usbh_class.h" #include "usbh_desc.h" +#include "usbh_ch9.h" +#include "usbh_device.h" LOG_MODULE_REGISTER(usbh_desc, CONFIG_USBH_LOG_LEVEL); @@ -59,6 +61,12 @@ bool usbh_desc_is_valid_endpoint(const void *const desc) USB_DESC_ENDPOINT); } +bool usbh_desc_is_valid_string(const void *const desc) +{ + return usbh_desc_is_valid(desc, sizeof(struct usb_string_descriptor), + USB_DESC_STRING); +} + const void *usbh_desc_get_next(const void *const desc) { const struct usb_desc_header *const head = desc; @@ -218,3 +226,74 @@ const void *usbh_desc_get_next_function(const void *const desc) return NULL; } + +int usbh_desc_get_supported_langs(struct usb_device *const udev, uint16_t *const lang_ids, + const uint8_t lang_ids_len) +{ + struct net_buf *buf; + uint16_t len; + int ret; + + buf = usbh_xfer_buf_alloc(udev, lang_ids_len * sizeof(uint16_t) + 2); + if (buf == NULL) { + return -ENOMEM; + } + + ret = usbh_req_desc_str(udev, 0, 0, buf); + if (ret != 0) { + goto done; + } + + if (!usbh_desc_is_valid_string(buf->data)) { + ret = -EBADMSG; + goto done; + } + + len = net_buf_pull_u8(buf) - 2; + net_buf_pull_u8(buf); + len = MIN(len, buf->len) / 2; + for (ret = 0; ret < len; ret++) { + lang_ids[ret] = net_buf_pull_le16(buf); + } + +done: + if (buf != NULL) { + usbh_xfer_buf_free(udev, buf); + } + + return ret; +} + +int usbh_desc_str_utfle16_to_ascii(const struct net_buf *const buf, char *const ascii_buf, + const uint16_t ascii_buf_len) +{ + uint16_t len; + uint16_t 
utf16le_code;
+	struct net_buf_simple tmp_buf;
+
+	if (!usbh_desc_is_valid_string(buf->data)) {
+		return -EINVAL;
+	}
+
+	net_buf_simple_clone(&buf->b, &tmp_buf);
+
+	len = net_buf_simple_pull_u8(&tmp_buf) - 2;
+	net_buf_simple_pull_u8(&tmp_buf);
+	len = MIN(MIN(tmp_buf.len, len) / 2, ascii_buf_len - 1);
+	for (unsigned int i = 0; i < len; i++) {
+		utf16le_code = net_buf_simple_pull_le16(&tmp_buf);
+
+		if (utf16le_code > 0x7F) {
+			return -EINVAL;
+		}
+
+		ascii_buf[i] = (char)utf16le_code;
+
+		if (utf16le_code == 0) {
+			break;
+		}
+	}
+	ascii_buf[len] = '\0';
+
+	return 0;
+}
diff --git a/subsys/usb/host/usbh_desc.h b/subsys/usb/host/usbh_desc.h
index 487b3f7b7853f..63de37d4fabcb 100644
--- a/subsys/usb/host/usbh_desc.h
+++ b/subsys/usb/host/usbh_desc.h
@@ -1,5 +1,6 @@
 /*
  * SPDX-FileCopyrightText: Copyright Nordic Semiconductor ASA
+ * SPDX-FileCopyrightText: Copyright 2025 - 2026 NXP
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -121,6 +122,16 @@ bool usbh_desc_is_valid_association(const void *const desc);
  */
 bool usbh_desc_is_valid_endpoint(const void *const desc);
 
+/**
+ * @brief Checks that the pointed descriptor is a string descriptor.
+ *
+ * @param[in] desc The descriptor to validate
+ *
+ * @return true if the descriptor size and type are correct
+ * @return false if the descriptor size or type is wrong
+ */
+bool usbh_desc_is_valid_string(const void *const desc);
+
 /**
  * @brief Get the next function in the descriptor list.
  *
@@ -151,4 +162,36 @@ const void *usbh_desc_get_next_function(const void *const desc);
+/**
+ * @brief Get supported USB LANGIDs.
+ *
+ * Retrieves the list of language IDs supported by the USB device.
+ *
+ * @param[in] udev Pointer to the USB device.
+ * @param[out] lang_ids Array to store the supported LANGIDs.
+ * @param[in] lang_ids_len Length of the LANGIDs array.
+ *
+ * @retval number of supported IDs.
+ * @retval Negative error code from getting the string descriptor.
+ * @retval -ENOMEM if memory allocation failed.
+ * @retval -EBADMSG if the descriptor is invalid.
+ */
+int usbh_desc_get_supported_langs(struct usb_device *const udev, uint16_t *const lang_ids,
+				  const uint8_t lang_ids_len);
+
+/**
+ * @brief Convert UTF16LE encoded string descriptor to ASCII.
+ *
+ * Converts the UTF16LE encoded string descriptor to an ASCII string.
+ * The ASCII string is always null-terminated.
+ *
+ * @param[in] buf Buffer containing the string descriptor.
+ * @param[out] ascii_buf Buffer to store the converted ASCII string.
+ * @param[in] ascii_buf_len Maximum length of the ASCII buffer (including null terminator).
+ *
+ * @retval 0 on success.
+ * @retval -EINVAL if descriptor is malformed or conversion is not possible.
+ */
+int usbh_desc_str_utfle16_to_ascii(const struct net_buf *const buf, char *const ascii_buf,
+				   const uint16_t ascii_buf_len);
 #endif /* ZEPHYR_INCLUDE_USBH_DESC_H */
diff --git a/subsys/usb/host/usbh_device.c b/subsys/usb/host/usbh_device.c
index b43c47464b7a7..255547706115e 100644
--- a/subsys/usb/host/usbh_device.c
+++ b/subsys/usb/host/usbh_device.c
@@ -418,7 +418,7 @@ int usbh_device_set_configuration(struct usb_device *const udev, const uint8_t n
 		goto error;
 	}
 
-	err = usbh_req_set_cfg(udev, num);
+	err = usbh_req_set_cfg(udev, cfg_desc.bConfigurationValue);
 	if (err) {
 		LOG_ERR("Set Configuration %u request failed", num);
 		goto error;
@@ -503,7 +503,27 @@ void usbh_device_connect(struct usbh_context *const ctx,
 		return;
 	}
 
-	usbh_class_probe_device(udev);
+	/* Try each configuration in order; stop as soon as a class driver binds */
+	for (uint8_t cfg = 1; cfg <= udev->dev_desc.bNumConfigurations; cfg++) {
+		if (cfg > 1) {
+			err = usbh_device_set_configuration(udev, cfg);
+			if (err) {
+				continue;
+			}
+		}
+
+		usbh_class_probe_device(udev);
+
+		STRUCT_SECTION_FOREACH(usbh_class_node, c_node) {
+			if (c_node->state == USBH_CLASS_STATE_BOUND &&
c_node->c_data->udev == udev) { + return; + } + } + + /* No class accepted this config; undo any partial probe before trying next */ + usbh_class_remove_all(udev); + } } void usbh_device_disconnect(struct usbh_context *ctx, struct usb_device *udev)