//! Single Cycle Input and Output (SIO)
//!
//! To hand out ownership of parts of the SIO block to other modules:
//!
//! ```no_run
//! use rp2040_hal::{gpio::Pins, pac, sio::Sio};
//!
//! let mut peripherals = pac::Peripherals::take().unwrap();
//! let sio = Sio::new(peripherals.SIO);
//! ```
//!
//! And then, for example:
//!
//! ```no_run
//! # use rp2040_hal::{gpio::Pins, pac, sio::Sio};
//! # let mut peripherals = pac::Peripherals::take().unwrap();
//! # let sio = Sio::new(peripherals.SIO);
//! let pins = Pins::new(peripherals.IO_BANK0, peripherals.PADS_BANK0, sio.gpio_bank0, &mut peripherals.RESETS);
//! ```
use super::*;
use core::convert::Infallible;
/// Marker struct for ownership of SIO gpio bank0
pub struct SioGpioBank0 {
_private: (),
}
/// Marker struct for ownership of SIO FIFO
pub struct SioFifo {
_private: (),
}
/// Marker struct for ownership of SIO gpio qspi
pub struct SioGpioQspi {
_private: (),
}
/// Marker struct for ownership of divide/modulo module
pub struct HwDivider {
_private: (),
}
/// Result of divide/modulo operation
pub struct DivResult<T> {
/// The quotient of divide/modulo operation
pub quotient: T,
/// The remainder of divide/modulo operation
pub remainder: T,
}
/// Struct containing ownership markers for the individual parts of the SIO registers.
pub struct Sio {
_sio: pac::SIO,
/// GPIO Bank 0 registers
pub gpio_bank0: SioGpioBank0,
/// GPIO QSPI registers
pub gpio_qspi: SioGpioQspi,
/// 8-cycle hardware divide/modulo module
pub hwdivider: HwDivider,
/// Inter-core FIFO
pub fifo: SioFifo,
// we can hand out other things here, for example:
// interp0
// interp1
}
impl Sio {
/// Create `Sio` from the PAC.
pub fn new(sio: pac::SIO) -> Self {
Self {
_sio: sio,
gpio_bank0: SioGpioBank0 { _private: () },
gpio_qspi: SioGpioQspi { _private: () },
fifo: SioFifo { _private: () },
hwdivider: HwDivider { _private: () },
}
}
/// Returns `0` if called from Core 0, or `1` if called from Core 1.
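///
/// A minimal sketch of branching on the current core:
///
/// ```no_run
/// use rp2040_hal::sio::Sio;
///
/// if Sio::core() == 0 {
///     // Code that should only run on Core 0
/// } else {
///     // Code that should only run on Core 1
/// }
/// ```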
pub fn core() -> u8 {
// Safety: it is always safe to read this read-only register
unsafe { (*pac::SIO::ptr()).cpuid.read().bits() as u8 }
}
}
impl SioFifo {
/// Check if the inter-core FIFO has valid data for reading.
///
/// Returning `true` means there is valid data, `false` means it is empty
/// and you must not read from it.
pub fn is_read_ready(&mut self) -> bool {
let sio = unsafe { &(*pac::SIO::ptr()) };
sio.fifo_st.read().vld().bit_is_set()
}
/// Check if the inter-core FIFO is ready to receive data.
///
/// Returning `true` means there is room, `false` means it is full and you
/// must not write to it.
pub fn is_write_ready(&mut self) -> bool {
let sio = unsafe { &(*pac::SIO::ptr()) };
sio.fifo_st.read().rdy().bit_is_set()
}
/// Return the FIFO status, as an integer.
pub fn status(&self) -> u32 {
let sio = unsafe { &(*pac::SIO::ptr()) };
sio.fifo_st.read().bits()
}
/// Write to the inter-core FIFO.
///
/// You must ensure the FIFO has space by calling `is_write_ready`
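///
/// A short sketch of the check-then-write pattern (assuming `sio` was created
/// with `Sio::new` as in the module-level example):
///
/// ```no_run
/// # use rp2040_hal::{pac, sio::Sio};
/// # let peripherals = pac::Peripherals::take().unwrap();
/// # let mut sio = Sio::new(peripherals.SIO);
/// if sio.fifo.is_write_ready() {
///     sio.fifo.write(0x1234_5678);
/// }
/// ```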
pub fn write(&mut self, value: u32) {
let sio = unsafe { &(*pac::SIO::ptr()) };
sio.fifo_wr.write(|w| unsafe { w.bits(value) });
// Fire off an event to the other core.
// This is required as the other core may be `wfe` (waiting for event)
cortex_m::asm::sev();
}
/// Read from the inter-core FIFO.
///
/// Will return `Some(data)`, or `None` if the FIFO is empty.
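///
/// For example, a non-blocking poll (a sketch, assuming `sio` was created as
/// in the module-level example):
///
/// ```no_run
/// # use rp2040_hal::{pac, sio::Sio};
/// # let peripherals = pac::Peripherals::take().unwrap();
/// # let mut sio = Sio::new(peripherals.SIO);
/// if let Some(word) = sio.fifo.read() {
///     // A word from the other core was waiting for us
///     let _ = word;
/// }
/// ```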
pub fn read(&mut self) -> Option<u32> {
if self.is_read_ready() {
let sio = unsafe { &(*pac::SIO::ptr()) };
Some(sio.fifo_rd.read().bits())
} else {
None
}
}
/// Read from the FIFO until it is empty, throwing the contents away.
pub fn drain(&mut self) {
while self.read().is_some() {
// Retry until FIFO empty
}
}
/// Push to the FIFO, spinning if there's no space.
pub fn write_blocking(&mut self, value: u32) {
// We busy-wait for the FIFO to have some space
while !self.is_write_ready() {
cortex_m::asm::nop();
}
// Write the value to the FIFO - the other core will now be able to
// pop it off its end of the FIFO. `write` also fires off the `sev`
// event that wakes the other core if it is sleeping in `wfe`.
self.write(value);
}
/// Pop from the FIFO, spinning if there's currently no data.
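///
/// A minimal sketch of a blocking round-trip, where the other core is assumed
/// to answer with its own `write_blocking` (again assuming `sio` was created
/// as in the module-level example):
///
/// ```no_run
/// # use rp2040_hal::{pac, sio::Sio};
/// # let peripherals = pac::Peripherals::take().unwrap();
/// # let mut sio = Sio::new(peripherals.SIO);
/// // Tell the other core we are ready...
/// sio.fifo.write_blocking(1);
/// // ...and sleep (`wfe`) until it answers.
/// let _response = sio.fifo.read_blocking();
/// ```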
pub fn read_blocking(&mut self) -> u32 {
// Keep trying until FIFO has data
loop {
// Have we got something?
if let Some(data) = self.read() {
// Yes, return it right away
return data;
} else {
// No, so sleep the CPU. We expect the sending core to `sev`
// on write.
cortex_m::asm::wfe();
}
}
}
}
macro_rules! safe_division {
(
fn $name:ident(
$arg1:ident: $arg1ty:ty,
$arg2:ident: $arg2ty:ty
) -> $ret:ty {
$($body:literal),*
}
) => {
#[naked]
extern "aapcs" fn $name(
$arg1: $arg1ty,
$arg2: $arg2ty
) -> $ret {
unsafe {
core::arch::asm!(
"ldr r2, =({sio_base})",
// Check the DIRTY state of the divider by shifting it into
// the C status bit
"ldr r3, [r2, #0x078]", // DIV_CSR
"lsrs r3, #2", // DIRTY = 1, so shift 2 down
// We only need to save the state when DIRTY, otherwise we
// can just do the division directly.
"bcs 2f",
// Do the actual division now, we're either not DIRTY, or
// we've saved the state and branched back here so it's
// safe now.
"1:",
$($body),* ,
"bx lr",
"2:",
// Since we can't save the signed-ness of the calculation, we have to
// make sure that there's at least an 8 cycle delay before we read the
// result. The push takes 5 cycles, and we've already spent
// at least 7 cycles checking the DIRTY state to get here.
"push {{r4-r6, lr}}",
// Read the quotient last, since that's what clears the dirty flag.
// This means we can't just use a LDMIA, since it's out of order.
"ldr r3, [r2, #0x060]", // DIV_UDIVIDEND
"ldr r4, [r2, #0x064]", // DIV_UDIVISOR
"ldr r5, [r2, #0x074]", // DIV_REMAINDER
"ldr r6, [r2, #0x070]", // DIV_QUOTIENT
// If we get interrupted here (before a write sets the DIRTY flag) it's
// fine, since we have the full state, so the interruptor doesn't have
// to restore it. Once the write happens and the DIRTY flag is set, the
// interruptor becomes responsible for restoring our state.
"bl 1b",
// If we are interrupted here, then the interruptor will start an
// incorrect calculation using a wrong divisor, but we'll restore the
// divisor and result ourselves correctly. This sets DIRTY, so any
// interruptor will save the state.
"str r3, [r2, #0x060]", // DIV_UDIVIDEND
// If we are interrupted here, the interruptor may start the
// calculation using incorrectly signed inputs, but we'll restore the
// result ourselves. This sets DIRTY, so any interruptor will save
// the state.
"str r4, [r2, #0x064]", // DIV_UDIVISOR
// If we are interrupted here, the interruptor will have restored
// everything but the quotient may be wrongly signed. If the
// calculation started by the above writes is still ongoing it is
// stopped, so it won't replace the result we're restoring. DIRTY
// and READY set, but only DIRTY matters to make the interruptor save
// the state.
"str r5, [r2, #0x074]", // DIV_REMAINDER
// State fully restored after the quotient write. This sets both
// DIRTY and READY, so whatever we may have interrupted can read
// the result.
"str r6, [r2, #0x070]", // DIV_QUOTIENT
"pop {{r4-r6, pc}}",
sio_base = const unsafe { core::mem::transmute::<_, u32>(pac::SIO::ptr()) },
options(noreturn)
)
}
}
};
}
// This takes advantage of how AAPCS defines a 64-bit return on 32-bit registers
// by packing it into r0[0:31] and r1[32:63]. So all we need to do is put
// the remainder in the high order 32 bits of a 64 bit result.
safe_division! {
fn unsigned_divmod(n: u32, d: u32) -> u64 {
"str r0, [r2, #0x060]", // DIV_UDIVIDEND
"str r1, [r2, #0x064]", // DIV_UDIVISOR
// Wait for the result
"nop",
"nop",
"nop",
"nop",
"nop",
"nop",
"nop",
"nop",
// Read the quotient last, since that's what clears the dirty flag.
"ldr r1, [r2, #0x074]", // DIV_REMAINDER
"ldr r0, [r2, #0x070]" // DIV_QUOTIENT
}
}
safe_division! {
fn signed_divmod(n: i32, d: i32) -> u64 {
"str r0, [r2, #0x068]", // DIV_SDIVIDEND
"str r1, [r2, #0x06c]", // DIV_SDIVISOR
// Wait for the result
"nop",
"nop",
"nop",
"nop",
"nop",
"nop",
"nop",
"nop",
// Read the quotient last, since that's what clears the dirty flag.
"ldr r1, [r2, #0x074]", // DIV_REMAINDER
"ldr r0, [r2, #0x070]" // DIV_QUOTIENT
}
}
fn divider_unsigned(n: u32, d: u32) -> DivResult<u32> {
let packed = unsigned_divmod(n, d);
DivResult {
quotient: packed as u32,
remainder: (packed >> 32) as u32,
}
}
fn divider_signed(n: i32, d: i32) -> DivResult<i32> {
let packed = signed_divmod(n, d);
// Double casts to avoid sign extension
DivResult {
quotient: packed as u32 as i32,
remainder: (packed >> 32) as u32 as i32,
}
}
impl HwDivider {
/// Perform hardware unsigned divide/modulo operation
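///
/// A small usage sketch (assuming `sio` was created with `Sio::new` as in the
/// module-level example):
///
/// ```no_run
/// # use rp2040_hal::{pac, sio::Sio};
/// # let peripherals = pac::Peripherals::take().unwrap();
/// # let sio = Sio::new(peripherals.SIO);
/// let result = sio.hwdivider.unsigned(27, 4);
/// assert_eq!(result.quotient, 6);
/// assert_eq!(result.remainder, 3);
/// ```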
pub fn unsigned(&self, dividend: u32, divisor: u32) -> DivResult<u32> {
divider_unsigned(dividend, divisor)
}
/// Perform hardware signed divide/modulo operation
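///
/// The quotient rounds towards zero and the remainder takes the sign of the
/// dividend, matching Rust's `/` and `%` (this is relied upon by the
/// `__divsi3`/`__modsi3` intrinsics below). A small sketch:
///
/// ```no_run
/// # use rp2040_hal::{pac, sio::Sio};
/// # let peripherals = pac::Peripherals::take().unwrap();
/// # let sio = Sio::new(peripherals.SIO);
/// let result = sio.hwdivider.signed(-7, 2);
/// assert_eq!(result.quotient, -3);
/// assert_eq!(result.remainder, -1);
/// ```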
pub fn signed(&self, dividend: i32, divisor: i32) -> DivResult<i32> {
divider_signed(dividend, divisor)
}
}
intrinsics! {
extern "C" fn __udivsi3(n: u32, d: u32) -> u32 {
divider_unsigned(n, d).quotient
}
extern "C" fn __umodsi3(n: u32, d: u32) -> u32 {
divider_unsigned(n, d).remainder
}
extern "C" fn __udivmodsi4(n: u32, d: u32, rem: Option<&mut u32>) -> u32 {
let quo_rem = divider_unsigned(n, d);
if let Some(rem) = rem {
*rem = quo_rem.remainder;
}
quo_rem.quotient
}
extern "C" fn __divsi3(n: i32, d: i32) -> i32 {
divider_signed(n, d).quotient
}
extern "C" fn __modsi3(n: i32, d: i32) -> i32 {
divider_signed(n, d).remainder
}
extern "C" fn __divmodsi4(n: i32, d: i32, rem: &mut i32) -> i32 {
let quo_rem = divider_signed(n, d);
*rem = quo_rem.remainder;
quo_rem.quotient
}
// As mentioned above, these rely on how AAPCS packs a 64-bit result into
// r0/r1. We can also alias the plain division intrinsics to the divmod
// variants for a similar reason: the quotient comes back in r0 either way,
// and r1 is a scratch register, so the caller can't assume it still holds
// the second argument.
#[aeabi = __aeabi_uidiv]
extern "aapcs" fn __aeabi_uidivmod(n: u32, d: u32) -> u64 {
unsigned_divmod(n, d)
}
#[aeabi = __aeabi_idiv]
extern "aapcs" fn __aeabi_idivmod(n: i32, d: i32) -> u64 {
signed_divmod(n, d)
}
}
/// Trait used to limit `Spinlock` to the valid hardware spinlock numbers `0..=31`
pub trait SpinlockValid {}
/// Hardware based spinlock.
///
/// You can claim this lock by calling either [`claim`], [`try_claim`] or
/// [`claim_async`]. These spin-locks are hardware backed, so if you lock
/// e.g. `Spinlock<6>`, then any other part of your application using
/// `Spinlock<6>` will contend for the same lock, without them needing to
/// share a reference or otherwise communicate with each other.
///
/// When the obtained spinlock goes out of scope, it is automatically unlocked.
///
/// ```no_run
/// use rp2040_hal::sio::Spinlock0;
/// static mut SOME_GLOBAL_VAR: u32 = 0;
///
/// /// This function is safe to call from two different cores, but is not safe
/// /// to call from an interrupt routine!
/// fn update_global_var() {
/// // Do not say `let _ = ` here - it will immediately unlock!
/// let _lock = Spinlock0::claim();
/// // Do your thing here that Core 0 and Core 1 might want to do at the
/// // same time, like update this global variable:
/// unsafe { SOME_GLOBAL_VAR += 1 };
/// // The lock is dropped here.
/// }
/// ```
///
/// **Warning**: These spinlocks are not re-entrant, meaning that the
/// following code will cause a deadlock:
///
/// ```no_run
/// use rp2040_hal::sio::Spinlock0;
/// let lock_1 = Spinlock0::claim();
/// let lock_2 = Spinlock0::claim(); // deadlock here
/// ```
///
/// **Note:** The `critical-section` implementation uses Spinlock 31.
///
/// [`claim`]: #method.claim
/// [`try_claim`]: #method.try_claim
/// [`claim_async`]: #method.claim_async
pub struct Spinlock<const N: usize>(core::marker::PhantomData<()>)
where
Spinlock<N>: SpinlockValid;
impl<const N: usize> Spinlock<N>
where
Spinlock<N>: SpinlockValid,
{
/// Try to claim the spinlock. Will return `Some(Self)` if the lock is obtained, and `None` if the lock is
/// already in use somewhere else.
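///
/// A short sketch of non-blocking use:
///
/// ```no_run
/// use rp2040_hal::sio::Spinlock0;
///
/// if let Some(_lock) = Spinlock0::try_claim() {
///     // We hold the lock until `_lock` is dropped at the end of this block
/// } else {
///     // Someone else (e.g. the other core) currently holds the lock
/// }
/// ```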
pub fn try_claim() -> Option<Self> {
// Safety: we only create a shared reference to the SIO register block here.
let sio = unsafe { &*pac::SIO::ptr() };
// Reading a SPINLOCK register attempts to claim the lock: the read returns
// a non-zero value if the claim succeeded, and zero if the lock is already
// held elsewhere.
let lock = sio.spinlock[N].read().bits();
if lock > 0 {
Some(Self(core::marker::PhantomData))
} else {
None
}
}
/// Claim the spinlock, blocking the current core until the lock is available.
///
/// Note that claiming the same spinlock again while you still hold it will deadlock.
pub fn claim() -> Self {
loop {
if let Some(result) = Self::try_claim() {
break result;
}
}
}
/// Try to claim the spinlock. Will return `WouldBlock` until the spinlock is available.
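///
/// This pairs with the `nb` crate's helpers (the `nb` crate is already used
/// for the return type); a sketch:
///
/// ```no_run
/// use rp2040_hal::sio::Spinlock0;
///
/// // Spin until the lock is free; `WouldBlock` just means "try again".
/// let _lock = nb::block!(Spinlock0::claim_async()).unwrap();
/// ```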
pub fn claim_async() -> nb::Result<Self, Infallible> {
Self::try_claim().ok_or(nb::Error::WouldBlock)
}
/// Clear a locked spin-lock.
///
/// # Safety
///
/// Only call this function if you hold the spin-lock.
pub unsafe fn release() {
let sio = &*pac::SIO::ptr();
// Write (any value): release the lock
sio.spinlock[N].write_with_zero(|b| b.bits(1));
}
}
impl<const N: usize> Drop for Spinlock<N>
where
Spinlock<N>: SpinlockValid,
{
fn drop(&mut self) {
// This is safe because we own the object, and hence hold the lock.
unsafe { Self::release() }
}
}
/// Spinlock number 0
pub type Spinlock0 = Spinlock<0>;
impl SpinlockValid for Spinlock<0> {}
/// Spinlock number 1
pub type Spinlock1 = Spinlock<1>;
impl SpinlockValid for Spinlock<1> {}
/// Spinlock number 2
pub type Spinlock2 = Spinlock<2>;
impl SpinlockValid for Spinlock<2> {}
/// Spinlock number 3
pub type Spinlock3 = Spinlock<3>;
impl SpinlockValid for Spinlock<3> {}
/// Spinlock number 4
pub type Spinlock4 = Spinlock<4>;
impl SpinlockValid for Spinlock<4> {}
/// Spinlock number 5
pub type Spinlock5 = Spinlock<5>;
impl SpinlockValid for Spinlock<5> {}
/// Spinlock number 6
pub type Spinlock6 = Spinlock<6>;
impl SpinlockValid for Spinlock<6> {}
/// Spinlock number 7
pub type Spinlock7 = Spinlock<7>;
impl SpinlockValid for Spinlock<7> {}
/// Spinlock number 8
pub type Spinlock8 = Spinlock<8>;
impl SpinlockValid for Spinlock<8> {}
/// Spinlock number 9
pub type Spinlock9 = Spinlock<9>;
impl SpinlockValid for Spinlock<9> {}
/// Spinlock number 10
pub type Spinlock10 = Spinlock<10>;
impl SpinlockValid for Spinlock<10> {}
/// Spinlock number 11
pub type Spinlock11 = Spinlock<11>;
impl SpinlockValid for Spinlock<11> {}
/// Spinlock number 12
pub type Spinlock12 = Spinlock<12>;
impl SpinlockValid for Spinlock<12> {}
/// Spinlock number 13
pub type Spinlock13 = Spinlock<13>;
impl SpinlockValid for Spinlock<13> {}
/// Spinlock number 14
pub type Spinlock14 = Spinlock<14>;
impl SpinlockValid for Spinlock<14> {}
/// Spinlock number 15
pub type Spinlock15 = Spinlock<15>;
impl SpinlockValid for Spinlock<15> {}
/// Spinlock number 16
pub type Spinlock16 = Spinlock<16>;
impl SpinlockValid for Spinlock<16> {}
/// Spinlock number 17
pub type Spinlock17 = Spinlock<17>;
impl SpinlockValid for Spinlock<17> {}
/// Spinlock number 18
pub type Spinlock18 = Spinlock<18>;
impl SpinlockValid for Spinlock<18> {}
/// Spinlock number 19
pub type Spinlock19 = Spinlock<19>;
impl SpinlockValid for Spinlock<19> {}
/// Spinlock number 20
pub type Spinlock20 = Spinlock<20>;
impl SpinlockValid for Spinlock<20> {}
/// Spinlock number 21
pub type Spinlock21 = Spinlock<21>;
impl SpinlockValid for Spinlock<21> {}
/// Spinlock number 22
pub type Spinlock22 = Spinlock<22>;
impl SpinlockValid for Spinlock<22> {}
/// Spinlock number 23
pub type Spinlock23 = Spinlock<23>;
impl SpinlockValid for Spinlock<23> {}
/// Spinlock number 24
pub type Spinlock24 = Spinlock<24>;
impl SpinlockValid for Spinlock<24> {}
/// Spinlock number 25
pub type Spinlock25 = Spinlock<25>;
impl SpinlockValid for Spinlock<25> {}
/// Spinlock number 26
pub type Spinlock26 = Spinlock<26>;
impl SpinlockValid for Spinlock<26> {}
/// Spinlock number 27
pub type Spinlock27 = Spinlock<27>;
impl SpinlockValid for Spinlock<27> {}
/// Spinlock number 28
pub type Spinlock28 = Spinlock<28>;
impl SpinlockValid for Spinlock<28> {}
/// Spinlock number 29
pub type Spinlock29 = Spinlock<29>;
impl SpinlockValid for Spinlock<29> {}
/// Spinlock number 30
pub type Spinlock30 = Spinlock<30>;
impl SpinlockValid for Spinlock<30> {}
/// Spinlock number 31 - used by critical section implementation
pub(crate) type Spinlock31 = Spinlock<31>;
impl SpinlockValid for Spinlock<31> {}
/// Returns the current state of the spinlocks. Each index corresponds to the spinlock with the same number, e.g. if index `5` is `true`, then [`Spinlock5`] is currently locked.
///
/// Note that spinlocks can be claimed or released at any point, so this function cannot guarantee that a spinlock reported as free is still free by the time you try to claim it. It is mainly intended for debugging.
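///
/// For example, counting how many spinlocks are held (a sketch):
///
/// ```no_run
/// use rp2040_hal::sio::spinlock_state;
///
/// let held = spinlock_state().iter().filter(|&&locked| locked).count();
/// // `held` is how many of the 32 spinlocks were locked at the time of the read
/// # let _ = held;
/// ```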
pub fn spinlock_state() -> [bool; 32] {
// Safety: we're only reading from a register
let sio = unsafe { &*pac::SIO::ptr() };
// A bitmap containing the state of all 32 spinlocks (1=locked).
let register = sio.spinlock_st.read().bits();
let mut result = [false; 32];
#[allow(clippy::needless_range_loop)]
for i in 0..32 {
result[i] = (register & (1 << i)) > 0;
}
result
}