@@ -95,6 +95,8 @@ struct mspi_dw_data {
 
 struct mspi_dw_config {
	DEVICE_MMIO_ROM;
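+	/* Vendor-specific wrapper registers (initialized from the instance's reg address). */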
+	void *wrapper_regs;
	void (*irq_config)(void);
	uint32_t clock_frequency;
 #if defined(CONFIG_PINCTRL)
@@ -112,6 +114,12 @@ struct mspi_dw_config {
	uint8_t max_queued_dummy_bytes;
	uint8_t tx_fifo_threshold;
	uint8_t rx_fifo_threshold;
+#ifdef CONFIG_MSPI_DMA
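+	/* DMATDLR/DMARDLR values, in FIFO entries (taken from devicetree). */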
+	uint8_t dma_tx_data_level;
+	uint8_t dma_rx_data_level;
+#endif
+	const void *vendor_specific_data;
	DECLARE_REG_ACCESS();
	bool sw_multi_periph;
	enum mspi_op_mode op_mode;
@@ -139,6 +147,12 @@ DEFINE_MM_REG_RD_WR(dr, 0x60)
 DEFINE_MM_REG_WR(rx_sample_dly, 0xf0)
 DEFINE_MM_REG_WR(spi_ctrlr0, 0xf4)
 DEFINE_MM_REG_WR(txd_drive_edge, 0xf8)
+#if defined(CONFIG_MSPI_DMA)
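+/* DMA control and data-level registers of the DW_apb_ssi core. */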
+DEFINE_MM_REG_WR(dmacr, 0x4c)
+DEFINE_MM_REG_WR(dmatdlr, 0x50)
+DEFINE_MM_REG_WR(dmardlr, 0x54)
+#endif
 
 #if defined(CONFIG_MSPI_XIP)
 DEFINE_MM_REG_WR(xip_incr_inst, 0x100)
@@ -172,8 +186,6 @@ static void call_user_callback_with_context(const struct device *dev,
		return;
	}
 
-	LOG_DBG("Calling user function with evt_type: %u", evt_type);
-
	cb_ctx->mspi_evt.evt_type = evt_type;
	cb_ctx->mspi_evt.evt_data.controller = dev;
	cb_ctx->mspi_evt.evt_data.dev_id = dev_data->dev_id;
@@ -541,6 +553,25 @@ static void fifo_work_handler(struct k_work *work)
 
 static void mspi_dw_isr(const struct device *dev)
 {
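+	/* In DMA mode the FIFOs are serviced by the DMA engine; the only
+	 * interrupt handled here is the vendor-reported end of transfer.
+	 */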
+#if defined(CONFIG_MSPI_DMA)
+	/* Braced so that dev_data does not clash with the declaration below. */
+	{
+		struct mspi_dw_data *dev_data = dev->data;
+
+		if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+			if (vendor_specific_read_dma_irq(dev)) {
+				set_imr(dev, 0);
+				handle_end_of_packet(dev_data);
+			}
+			vendor_specific_irq_clear(dev);
+			return;
+		}
+	}
+#endif
+
 #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE)
	struct mspi_dw_data *dev_data = dev->data;
	int rc;
@@ -1067,7 +1098,8 @@ static int start_next_packet(const struct device *dev)
				       (false));
	unsigned int key;
	uint32_t packet_frames;
-	uint32_t imr;
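+	/* Initialized here because the DMA path below never assigns it. */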
+	uint32_t imr = 0;
	int rc = 0;
 
	if (packet->num_bytes == 0 &&
@@ -1115,6 +1147,18 @@ static int start_next_packet(const struct device *dev)
		return -EINVAL;
	}
 
+#if defined(CONFIG_MSPI_DMA)
+	if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+		/* Check if the packet buffer is accessible */
+		if (packet->num_bytes > 0 &&
+		    !vendor_specific_dma_accessible_check(dev, packet->data_buf)) {
+			LOG_ERR("Buffer not DMA accessible: ptr=0x%lx, size=%u",
+				(uintptr_t)packet->data_buf, packet->num_bytes);
+			return -EINVAL;
+		}
+	}
+#endif
+
	if (packet->dir == MSPI_TX || packet->num_bytes == 0) {
		imr = IMR_TXEIM_BIT;
		dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK,
@@ -1123,6 +1167,15 @@ static int start_next_packet(const struct device *dev)
					       dev_data->xfer.tx_dummy);
 
		write_rxftlr(dev, 0);
+#if defined(CONFIG_MSPI_DMA)
+	} else if (dev_data->xfer.xfer_mode == MSPI_DMA) {
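+		/* RX via DMA mirrors the TMOD/wait-cycle setup of the PIO RX
+		 * path; the FIFO and DMA levels are programmed in the DMA
+		 * branch further below.
+		 */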
+		dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, CTRLR0_TMOD_RX);
+		dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK,
+						   dev_data->xfer.rx_dummy);
+#endif
	} else {
		uint32_t tmod;
		uint8_t rx_fifo_threshold;
@@ -1211,95 +1264,131 @@ static int start_next_packet(const struct device *dev)
		irq_unlock(key);
	}
 
-	dev_data->buf_pos = packet->data_buf;
-	dev_data->buf_end = &packet->data_buf[packet->num_bytes];
-
-	/* Set the TX FIFO threshold and its transmit start level. */
-	if (packet->num_bytes) {
-		/* If there is some data to send/receive, set the threshold to
-		 * the value configured for the driver instance and the start
-		 * level to the maximum possible value (it will be updated later
-		 * in tx_fifo() or tx_dummy_bytes() when TX is to be finished).
-		 * This helps avoid a situation when the TX FIFO becomes empty
-		 * before the transfer is complete and the SSI core finishes the
-		 * transaction and deactivates the CE line. This could occur
-		 * right before the data phase in enhanced SPI modes, when the
-		 * clock stretching feature does not work yet, or in Standard
-		 * SPI mode, where the clock stretching is not available at all.
-		 */
-		uint8_t start_level = dev_data->dummy_bytes != 0
-				    ? dev_config->max_queued_dummy_bytes - 1
-				    : dev_config->tx_fifo_depth_minus_1;
+	if (dev_data->xfer.xfer_mode == MSPI_PIO) {
+		dev_data->buf_pos = packet->data_buf;
+		dev_data->buf_end = &packet->data_buf[packet->num_bytes];
+		/* Set the TX FIFO threshold and its transmit start level. */
+		if (packet->num_bytes) {
+			/* If there is some data to send/receive, set the threshold to
+			 * the value configured for the driver instance and the start
+			 * level to the maximum possible value (it will be updated later
+			 * in tx_fifo() or tx_dummy_bytes() when TX is to be finished).
+			 * This helps avoid a situation when the TX FIFO becomes empty
+			 * before the transfer is complete and the SSI core finishes the
+			 * transaction and deactivates the CE line. This could occur
+			 * right before the data phase in enhanced SPI modes, when the
+			 * clock stretching feature does not work yet, or in Standard
+			 * SPI mode, where the clock stretching is not available at all.
+			 */
+			uint8_t start_level = dev_data->dummy_bytes != 0
+					    ? dev_config->max_queued_dummy_bytes - 1
+					    : dev_config->tx_fifo_depth_minus_1;
 
-	write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) |
-		     FIELD_PREP(TXFTLR_TFT_MASK,
-				dev_config->tx_fifo_threshold));
-	} else {
-		uint32_t total_tx_entries = 0;
+			write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) |
+				     FIELD_PREP(TXFTLR_TFT_MASK,
+						dev_config->tx_fifo_threshold));
 
-		/* It the whole transfer is to contain only the command and/or
-		 * address, set up the transfer to start right after entries
-		 * for those appear in the TX FIFO, and the threshold to 0,
-		 * so that the interrupt occurs when the TX FIFO gets emptied.
-		 */
-		if (dev_data->xfer.cmd_length) {
-			if (dev_data->standard_spi) {
-				total_tx_entries += dev_data->xfer.cmd_length;
-			} else {
-				total_tx_entries += 1;
+		} else {
+			uint32_t total_tx_entries = 0;
+
+			/* If the whole transfer is to contain only the command and/or
+			 * address, set up the transfer to start right after entries
+			 * for those appear in the TX FIFO, and the threshold to 0,
+			 * so that the interrupt occurs when the TX FIFO gets emptied.
+			 */
+			if (dev_data->xfer.cmd_length) {
+				if (dev_data->standard_spi) {
+					total_tx_entries += dev_data->xfer.cmd_length;
+				} else {
+					total_tx_entries += 1;
+				}
			}
-		}
 
-		if (dev_data->xfer.addr_length) {
-			if (dev_data->standard_spi) {
-				total_tx_entries += dev_data->xfer.addr_length;
-			} else {
-				total_tx_entries += 1;
+			if (dev_data->xfer.addr_length) {
+				if (dev_data->standard_spi) {
+					total_tx_entries += dev_data->xfer.addr_length;
+				} else {
+					total_tx_entries += 1;
+				}
			}
+
+			write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK,
+						     total_tx_entries - 1));
		}
 
-		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK,
-					     total_tx_entries - 1));
-	}
+		/* Ensure that there will be no interrupt from the controller yet. */
+		write_imr(dev, 0);
+		/* Enable the controller. This must be done before DR is written. */
+		write_ssienr(dev, SSIENR_SSIC_EN_BIT);
 
-	/* Ensure that there will be no interrupt from the controller yet. */
-	write_imr(dev, 0);
-	/* Enable the controller. This must be done before DR is written. */
-	write_ssienr(dev, SSIENR_SSIC_EN_BIT);
+		/* Since the FIFO depth in SSI is always at least 8, it can be safely
+		 * assumed that the command and address fields (max. 2 and 4 bytes,
+		 * respectively) can be written here before the TX FIFO gets filled up.
+		 */
+		if (dev_data->standard_spi) {
+			if (dev_data->xfer.cmd_length) {
+				tx_control_field(dev, packet->cmd,
+						 dev_data->xfer.cmd_length);
+			}
 
-	/* Since the FIFO depth in SSI is always at least 8, it can be safely
-	 * assumed that the command and address fields (max. 2 and 4 bytes,
-	 * respectively) can be written here before the TX FIFO gets filled up.
-	 */
-	if (dev_data->standard_spi) {
-		if (dev_data->xfer.cmd_length) {
-			tx_control_field(dev, packet->cmd,
-					 dev_data->xfer.cmd_length);
-		}
+			if (dev_data->xfer.addr_length) {
+				tx_control_field(dev, packet->address,
+						 dev_data->xfer.addr_length);
+			}
+		} else {
+			if (dev_data->xfer.cmd_length) {
+				write_dr(dev, packet->cmd);
+			}
 
-		if (dev_data->xfer.addr_length) {
-			tx_control_field(dev, packet->address,
-					 dev_data->xfer.addr_length);
-		}
-	} else {
-		if (dev_data->xfer.cmd_length) {
-			write_dr(dev, packet->cmd);
+			if (dev_data->xfer.addr_length) {
+				write_dr(dev, packet->address);
+			}
		}
 
-	if (dev_data->xfer.addr_length) {
-		write_dr(dev, packet->address);
+		/* Prefill TX FIFO with any data we can */
+		if (dev_data->dummy_bytes && tx_dummy_bytes(dev, NULL)) {
+			imr = IMR_RXFIM_BIT;
+		} else if (packet->dir == MSPI_TX && packet->num_bytes) {
+			tx_data(dev, packet);
		}
-	}
 
-	/* Prefill TX FIFO with any data we can */
-	if (dev_data->dummy_bytes && tx_dummy_bytes(dev, NULL)) {
-		imr = IMR_RXFIM_BIT;
-	} else if (packet->dir == MSPI_TX && packet->num_bytes) {
-		tx_data(dev, packet);
+		/* Enable interrupts now and wait until the packet is done unless async. */
+		write_imr(dev, imr);
+
	}
+#if defined(CONFIG_MSPI_DMA)
+	else {
+		/* For DMA mode, set start level based on transfer length to prevent underflow */
+		uint32_t total_transfer_bytes = packet->num_bytes + dev_data->xfer.addr_length +
+						dev_data->xfer.cmd_length;
+		uint32_t transfer_frames = total_transfer_bytes >> dev_data->bytes_per_frame_exp;
 
-	/* Enable interrupts now */
-	write_imr(dev, imr);
+		/* Use the minimum of the transfer length and the FIFO depth, but at least 1. */
+		uint8_t dma_start_level = MIN(transfer_frames - 1,
+					      dev_config->tx_fifo_depth_minus_1);
+
+		dma_start_level = (dma_start_level > 0 ? dma_start_level : 1);
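+		/* Illustrative example: 16 data bytes plus 1 cmd and 3 addr bytes
+		 * in single-byte frames gives transfer_frames = 20, so with a
+		 * 32-entry FIFO the start level is MIN(20 - 1, 31) = 19.
+		 */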
+
+		/* Only TXFTHR needs to be set to the minimum number of frames */
+		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, dma_start_level));
+		write_dmatdlr(dev, FIELD_PREP(DMATDLR_DMATDL_MASK, dev_config->dma_tx_data_level));
+		write_dmardlr(dev, FIELD_PREP(DMARDLR_DMARDL_MASK, dev_config->dma_rx_data_level));
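+		/* TDMAE/RDMAE enable only the SSI side of the DMA handshake;
+		 * the DMA channel itself is set up by the vendor layer below.
+		 */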
+		write_dmacr(dev, DMACR_TDMAE_BIT | DMACR_RDMAE_BIT);
+		write_imr(dev, 0);
+		write_ssienr(dev, SSIENR_SSIC_EN_BIT);
+
+		vendor_specific_start_dma_xfer(dev);
+
+	}
+#endif
	/* Write SER to start transfer */
	write_ser(dev, BIT(dev_data->dev_id->dev_idx));
 
@@ -1867,9 +1955,23 @@ static DEVICE_API(mspi, drv_api) = {
	DT_INST_PROP_OR(inst, rx_fifo_threshold,			\
			1 * RX_FIFO_DEPTH(inst) / 8 - 1)
 
+#define MSPI_DW_DMA_DATA_LEVELS(inst)					\
+	.dma_tx_data_level =						\
+		DT_INST_PROP_OR(inst, dma_transmit_data_level, 0),	\
+	.dma_rx_data_level =						\
+		DT_INST_PROP_OR(inst, dma_receive_data_level, 0)
+
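+/* Example devicetree fragment (node label hypothetical) supplying the levels:
+ *
+ *	&mspi0 {
+ *		dma-transmit-data-level = <8>;
+ *		dma-receive-data-level = <8>;
+ *	};
+ */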
 #define MSPI_DW_INST(inst)						\
	PM_DEVICE_DT_INST_DEFINE(inst, dev_pm_action_cb);		\
	IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);))	\
+	VENDOR_SPECIFIC_DATA_DEFINE(inst);				\
	static void irq_config##inst(void)				\
	{								\
		LISTIFY(DT_INST_NUM_IRQS(inst),				\
@@ -1878,13 +1980,16 @@ static DEVICE_API(mspi, drv_api) = {
	static struct mspi_dw_data dev##inst##_data;			\
	static const struct mspi_dw_config dev##inst##_config = {	\
		MSPI_DW_MMIO_ROM_INIT(DT_DRV_INST(inst)),		\
+		.wrapper_regs = (void *)DT_INST_REG_ADDR(inst),		\
		.irq_config = irq_config##inst,				\
		.clock_frequency = MSPI_DW_CLOCK_FREQUENCY(inst),	\
		IF_ENABLED(CONFIG_PINCTRL,				\
			(.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),)) \
		IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, ce_gpios),	\
			(MSPI_DW_CE_GPIOS(inst),))			\
		MSPI_DW_FIFO_PROPS(inst),				\
+		IF_ENABLED(CONFIG_MSPI_DMA, (MSPI_DW_DMA_DATA_LEVELS(inst),)) \
+		.vendor_specific_data = VENDOR_SPECIFIC_DATA_GET(inst),	\
		DEFINE_REG_ACCESS(inst)					\
		.sw_multi_periph =					\
			DT_INST_PROP(inst, software_multiperipheral),	\