Commit ca68f2b
drivers: mspi: mspi_dw: Add support for nrf-mspi peripheral
The nrf-mspi peripheral is similar to EXMIF on nrf54h20 but supports DMA and slave mode. The wrapper around the SSI IP is also different and adds DMA features. Signed-off-by: David Jewsbury [email protected]
1 parent 36ddea5 commit ca68f2b

File tree

2 files changed: +204 -6 lines

drivers/mspi/mspi_dw_vendor_specific.h

Lines changed: 196 additions & 6 deletions
@@ -13,22 +13,22 @@
 
 #include <nrf.h>
 
-static inline void vendor_specific_init(const struct device *dev)
+static inline void vendor_specific_init(const struct device *dev, const struct mspi_dw_config *config)
 {
         ARG_UNUSED(dev);
 
         NRF_EXMIF->EVENTS_CORE = 0;
         NRF_EXMIF->INTENSET = BIT(EXMIF_INTENSET_CORE_Pos);
 }
 
-static inline void vendor_specific_suspend(const struct device *dev)
+static inline void vendor_specific_suspend(const struct device *dev, const struct mspi_dw_config *config)
 {
         ARG_UNUSED(dev);
 
         NRF_EXMIF->TASKS_STOP = 1;
 }
 
-static inline void vendor_specific_resume(const struct device *dev)
+static inline void vendor_specific_resume(const struct device *dev, const struct mspi_dw_config *config)
 {
         ARG_UNUSED(dev);
 
@@ -46,15 +46,15 @@ static inline void vendor_specific_resume(const struct device *dev)
         } while (rxftlr != rxftlr_mod);
 }
 
-static inline void vendor_specific_irq_clear(const struct device *dev)
+static inline void vendor_specific_irq_clear(const struct device *dev, const struct mspi_dw_config *config)
 {
         ARG_UNUSED(dev);
 
         NRF_EXMIF->EVENTS_CORE = 0;
 }
 
 #if defined(CONFIG_MSPI_XIP)
-static inline int vendor_specific_xip_enable(const struct device *dev,
+static inline int vendor_specific_xip_enable(const struct device *dev, const struct mspi_dw_config *config,
                                              const struct mspi_dev_id *dev_id,
                                              const struct mspi_xip_cfg *cfg)
 {
@@ -77,7 +77,7 @@ static inline int vendor_specific_xip_enable(const struct device *dev,
         return 0;
 }
 
-static inline int vendor_specific_xip_disable(const struct device *dev,
+static inline int vendor_specific_xip_disable(const struct device *dev, const struct mspi_dw_config *config,
                                               const struct mspi_dev_id *dev_id,
                                               const struct mspi_xip_cfg *cfg)
 {
@@ -97,3 +97,193 @@ static inline int vendor_specific_xip_disable(const struct device *dev,
 #endif /* defined(CONFIG_MSPI_XIP) */
 
 #endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_exmif) */
+
+#define MSPI_DT_DRV_COMPAT nordic_nrf_mspi
+#if DT_HAS_COMPAT_STATUS_OKAY(MSPI_DT_DRV_COMPAT)
+
+#include <nrf.h>
+
+static inline void vendor_specific_init(const struct device *dev, const struct mspi_dw_config *config)
+{
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+        preg->EVENTS_CORE = 0;
+        preg->EVENTS_DMA.DONE = 0;
+
+        preg->INTENSET |= BIT(QSPI_INTENSET_CORE_Pos);
+        preg->INTENSET |= BIT(QSPI_INTENSET_DMADONE_Pos);
+
+}
+
+static inline void vendor_specific_suspend(const struct device *dev, const struct mspi_dw_config *config)
+{
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+        ARG_UNUSED(dev);
+
+        preg->ENABLE = 0;
+}
+
+static inline void vendor_specific_resume(const struct device *dev, const struct mspi_dw_config *config)
+{
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+        ARG_UNUSED(dev);
+
+        preg->ENABLE = 1;
+
+        /* Try to write an SSI register and wait until the write is successful
+         * to ensure that the clock that drives the SSI core is ready.
+         */
+        uint32_t rxftlr = read_rxftlr(dev);
+        uint32_t rxftlr_mod = rxftlr ^ 1;
+
+        do {
+                write_rxftlr(dev, rxftlr_mod);
+                rxftlr = read_rxftlr(dev);
+        } while (rxftlr != rxftlr_mod);
+}
+
+static inline void vendor_specific_irq_clear(const struct device *dev, const struct mspi_dw_config *config)
+{
+        ARG_UNUSED(dev);
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+        preg->EVENTS_CORE = 0;
+        preg->EVENTS_DMA.DONE = 0;
+}
+
+/* DMA support */
+
+#define EVDMA_ATTR_LEN_Pos (0UL)
+#define EVDMA_ATTR_LEN_Msk (0x00FFFFFFUL)
+
+#define EVDMA_ATTR_ATTR_Pos (24UL)
+#define EVDMA_ATTR_ATTR_Msk (0x3FUL << EVDMA_ATTR_ATTR_Pos)
+
+#define EVDMA_ATTR_32AXI_Pos (30UL)
+#define EVDMA_ATTR_32AXI_Msk (0x1UL << EVDMA_ATTR_32AXI_Pos)
+
+#define EVDMA_ATTR_EVENTS_Pos (31UL)
+#define EVDMA_ATTR_EVENTS_Msk (0x1UL << EVDMA_ATTR_EVENTS_Pos)
+
+typedef enum {
+        EVDMA_BYTE_SWAP = 0,
+        EVDMA_JOBLIST = 1,
+        EVDMA_BUFFER_FILL = 2,
+        EVDMA_FIXED_ATTR = 3,
+        EVDMA_STATIC_ADDR = 4,
+        EVDMA_PLAIN_DATA_BUF_WR = 5,
+
+        EVDMA_PLAIN_DATA = 0x3f
+} EVDMA_ATTR_Type;
+
+typedef struct {
+        uint8_t *addr;
+        uint32_t attr;
+} EVDMA_JOB_Type;
+
+#define EVDMA_JOB(BUFFER, SIZE, ATTR) \
+        (EVDMA_JOB_Type) { .addr = (uint8_t *)BUFFER, .attr = (ATTR << EVDMA_ATTR_ATTR_Pos | SIZE) }
+#define EVDMA_NULL_JOB() \
+        (EVDMA_JOB_Type) { .addr = (uint8_t *)0, .attr = 0 }
+typedef struct {
+        EVDMA_JOB_Type *tx_job;
+        EVDMA_JOB_Type *rx_job;
+} QSPI_TRANSFER_LIST_Type;
+
+/* Number of jobs needed for a transmit transaction */
+#define NUM_JOBS 5
+/* Support just one transaction per peripheral, as concurrent transactions aren't supported yet. */
+#define MAX_CONCURR_TRANSACTIONS 1
+#define NUM_LISTS DT_NUM_INST_STATUS_OKAY(MSPI_DT_DRV_COMPAT)
+#define DMA_TRANSFER_LIST_SIZE (sizeof(QSPI_TRANSFER_LIST_Type) + sizeof(EVDMA_JOB_Type) * \
+                                NUM_JOBS * MAX_CONCURR_TRANSACTIONS)
+#define DMA_TRANSFER_LIST_ALIGN 4
+K_MEM_SLAB_DEFINE(dma_transfer_list_slab, DMA_TRANSFER_LIST_SIZE, NUM_LISTS, DMA_TRANSFER_LIST_ALIGN);
+
+static inline void vendor_specific_enable_dma_irq(const struct device *dev, const struct mspi_dw_config *config)
+{
+        ARG_UNUSED(dev);
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+        preg->INTENSET = BIT(QSPI_INTENSET_DMADONE_Pos);
+}
+
+static inline void vendor_specific_start_dma_xfer(const struct device *dev, const struct mspi_dw_config *config)
+{
+        ARG_UNUSED(dev);
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+        preg->TASKS_START = 1;
+}
+
+/* Temporarily hard-coded, as these are not in the MDK yet */
+#define QSPI_TMOD_OFFSET (0x490UL)
+#define QSPI_TMOD_RX_ONLY (0x2)
+static inline int vendor_specific_setup_dma_xfer(const struct device *dev, const struct mspi_dw_config *config,
+                const struct mspi_xfer_packet *packet, const struct mspi_xfer *xfer, struct mspi_dw_data *dev_data)
+{
+        ARG_UNUSED(dev);
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+        void *transfer_list_ptr;
+        int rc = k_mem_slab_alloc(&dma_transfer_list_slab, &transfer_list_ptr, K_NO_WAIT);
+        if (rc < 0) {
+                return rc;
+        }
+
+        /* Create the DMA transfer list based on whether it is an RX or TX transfer */
+        QSPI_TRANSFER_LIST_Type *transfer_list = (QSPI_TRANSFER_LIST_Type *)transfer_list_ptr;
+        dev_data->dma_transfer_list = (void *)transfer_list;
+        /* The job list is placed right after transfer_list */
+        EVDMA_JOB_Type *joblist = (EVDMA_JOB_Type *)(transfer_list + 1);
+
+        if (packet->dir == MSPI_TX) {
+                preg->CONFIG.RXTRANSFERLENGTH = 0;
+                joblist[0] = EVDMA_JOB(&packet->cmd, xfer->cmd_length, EVDMA_PLAIN_DATA);
+                joblist[1] = EVDMA_JOB(&packet->address, xfer->addr_length, EVDMA_PLAIN_DATA);
+                joblist[2] = EVDMA_JOB(packet->data_buf, packet->num_bytes, EVDMA_PLAIN_DATA);
+                joblist[3] = EVDMA_NULL_JOB();
+                joblist[4] = EVDMA_NULL_JOB();
+                transfer_list->tx_job = &joblist[0];
+                transfer_list->rx_job = &joblist[3];
+        }
+        else {
+                preg->CONFIG.RXTRANSFERLENGTH = ((packet->num_bytes + xfer->addr_length + xfer->cmd_length) >> dev_data->bytes_per_frame_exp) - 1;
+                joblist[0] = EVDMA_JOB(packet->data_buf, packet->num_bytes, EVDMA_PLAIN_DATA);
+                joblist[1] = EVDMA_NULL_JOB();
+                transfer_list->tx_job = &joblist[1];
+                transfer_list->rx_job = &joblist[0];
+                /*
+                 * In slave mode, a TMOD register in the wrapper also needs to be set.
+                 * Its address is not in the MDK yet, so this is a temporary fix.
+                 */
+                uintptr_t addr = (uintptr_t)preg + QSPI_TMOD_OFFSET;
+
+                sys_write32(QSPI_TMOD_RX_ONLY, addr);
+        }
+
+        preg->CONFIG.TXBURSTLENGTH = (config->tx_fifo_depth_minus_1 + 1) - config->dma_tx_data_level;
+        preg->CONFIG.RXBURSTLENGTH = config->dma_rx_data_level + 1;
+        preg->DMA.CONFIG.LISTPTR = (uint32_t)transfer_list;
+        preg->INTEN = BIT(QSPI_INTEN_DMADONE_Pos);
+
+        return 0;
+}
+
+static inline void vendor_specific_free_dma_transfer_list(const struct device *dev, struct mspi_dw_data *dev_data) {
+        ARG_UNUSED(dev);
+        k_mem_slab_free(&dma_transfer_list_slab, dev_data->dma_transfer_list);
+}
+
+static inline bool vendor_specific_dma_accessible_check(const struct device *dev,
+                const struct mspi_dw_config *config, uint8_t *data_buf) {
+        ARG_UNUSED(dev);
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+        return nrf_dma_accessible_check(preg, data_buf);
+}
+
+static inline bool vendor_specific_read_dma_irq(const struct device *dev,
+                const struct mspi_dw_config *config) {
+        ARG_UNUSED(dev);
+        NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+        return (bool)preg->EVENTS_DMA.DONE;
+}
+
+#endif /* DT_HAS_COMPAT_STATUS_OKAY(MSPI_DT_DRV_COMPAT) */
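For orientation, here is a rough sketch (not part of this commit) of the order in which the core mspi_dw driver would be expected to drive the new DMA hooks for one transfer packet. The wrapper function and its name below are hypothetical; only the vendor_specific_* calls come from the header above, and the dev/config/dev_data/xfer/packet objects are assumed to be the driver's existing ones.

/* Illustrative sketch only, not part of the commit: intended call order of the
 * DMA hooks added above. Error handling is reduced to the minimum.
 */
static int nrf_mspi_dma_packet_sketch(const struct device *dev,
                                      const struct mspi_dw_config *config,
                                      struct mspi_dw_data *dev_data,
                                      const struct mspi_xfer *xfer,
                                      const struct mspi_xfer_packet *packet)
{
        /* DMA is only usable for buffers the peripheral can actually reach. */
        if (!vendor_specific_dma_accessible_check(dev, config, packet->data_buf)) {
                return -ENOTSUP; /* the real driver would fall back to a non-DMA transfer */
        }

        /* Allocate and fill the EVDMA job list, program burst lengths and LISTPTR. */
        int rc = vendor_specific_setup_dma_xfer(dev, config, packet, xfer, dev_data);

        if (rc < 0) {
                return rc;
        }

        vendor_specific_enable_dma_irq(dev, config);
        vendor_specific_start_dma_xfer(dev, config);

        /* Later, in the ISR, once vendor_specific_read_dma_irq() reports DONE: */
        vendor_specific_irq_clear(dev, config);
        vendor_specific_free_dma_transfer_list(dev, dev_data);

        return 0;
}

Because the job-list memory comes from a K_MEM_SLAB sized for one transaction per enabled instance (MAX_CONCURR_TRANSACTIONS is 1), the free call has to happen before the next DMA transfer is set up on the same peripheral.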
New devicetree binding file: Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+# Copyright (c) 2025 Nordic Semiconductor ASA
+# SPDX-License-Identifier: Apache-2.0
+
+description: Nordic MSPI Interface using SSI IP
+
+compatible: "nordic,nrf-mspi"
+
+include: snps,designware-ssi.yaml
