Skip to content

Commit e3801c9

Browse files
ldesroches
authored and
Nicolas Ferre
committed
dmaengine: at_xdmac: lock fixes
Using _bh variant for spin locks causes this kind of warning: Starting logging: ------------[ cut here ]------------ WARNING: CPU: 0 PID: 3 at /ssd_drive/linux/kernel/softirq.c:151 __local_bh_enable_ip+0xe8/0xf4() Modules linked in: CPU: 0 PID: 3 Comm: ksoftirqd/0 Not tainted 4.1.0-rc2+ torvalds#94 Hardware name: Atmel SAMA5 [<c0013c04>] (unwind_backtrace) from [<c00118a4>] (show_stack+0x10/0x14) [<c00118a4>] (show_stack) from [<c001bbcc>] (warn_slowpath_common+0x80/0xac) [<c001bbcc>] (warn_slowpath_common) from [<c001bc14>] (warn_slowpath_null+0x1c/0x24) [<c001bc14>] (warn_slowpath_null) from [<c001e28c>] (__local_bh_enable_ip+0xe8/0xf4) [<c001e28c>] (__local_bh_enable_ip) from [<c01fdbd0>] (at_xdmac_device_terminate_all+0xf4/0x100) [<c01fdbd0>] (at_xdmac_device_terminate_all) from [<c02221a4>] (atmel_complete_tx_dma+0x34/0xf4) [<c02221a4>] (atmel_complete_tx_dma) from [<c01fe4ac>] (at_xdmac_tasklet+0x14c/0x1ac) [<c01fe4ac>] (at_xdmac_tasklet) from [<c001de58>] (tasklet_action+0x68/0xb4) [<c001de58>] (tasklet_action) from [<c001dfdc>] (__do_softirq+0xfc/0x238) [<c001dfdc>] (__do_softirq) from [<c001e140>] (run_ksoftirqd+0x28/0x34) [<c001e140>] (run_ksoftirqd) from [<c0033a3c>] (smpboot_thread_fn+0x138/0x18c) [<c0033a3c>] (smpboot_thread_fn) from [<c0030e7c>] (kthread+0xdc/0xf0) [<c0030e7c>] (kthread) from [<c000f480>] (ret_from_fork+0x14/0x34) ---[ end trace b57b14a99c1d8812 ]--- It comes from the fact that devices can call some code from the DMA controller with irqs disabled. The _bh variant is not intended to be used in this case since it can enable irqs. Switch to the irqsave/irqrestore variant to avoid this situation. Signed-off-by: Ludovic Desroches <[email protected]> Cc: [email protected] # 4.0 and later
1 parent 9aa3498 commit e3801c9

File tree

1 file changed

+45
-32
lines changed

1 file changed

+45
-32
lines changed

Diff for: drivers/dma/at_xdmac.c

+45-32
Original file line numberDiff line numberDiff line change
@@ -415,8 +415,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
415415
struct at_xdmac_desc *desc = txd_to_at_desc(tx);
416416
struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
417417
dma_cookie_t cookie;
418+
unsigned long irqflags;
418419

419-
spin_lock_bh(&atchan->lock);
420+
spin_lock_irqsave(&atchan->lock, irqflags);
420421
cookie = dma_cookie_assign(tx);
421422

422423
dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -425,7 +426,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
425426
if (list_is_singular(&atchan->xfers_list))
426427
at_xdmac_start_xfer(atchan, desc);
427428

428-
spin_unlock_bh(&atchan->lock);
429+
spin_unlock_irqrestore(&atchan->lock, irqflags);
429430
return cookie;
430431
}
431432

@@ -563,6 +564,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
563564
struct scatterlist *sg;
564565
int i;
565566
unsigned int xfer_size = 0;
567+
unsigned long irqflags;
568+
struct dma_async_tx_descriptor *ret = NULL;
566569

567570
if (!sgl)
568571
return NULL;
@@ -578,7 +581,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
578581
flags);
579582

580583
/* Protect dma_sconfig field that can be modified by set_slave_conf. */
581-
spin_lock_bh(&atchan->lock);
584+
spin_lock_irqsave(&atchan->lock, irqflags);
582585

583586
/* Prepare descriptors. */
584587
for_each_sg(sgl, sg, sg_len, i) {
@@ -589,8 +592,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
589592
mem = sg_dma_address(sg);
590593
if (unlikely(!len)) {
591594
dev_err(chan2dev(chan), "sg data length is zero\n");
592-
spin_unlock_bh(&atchan->lock);
593-
return NULL;
595+
goto spin_unlock;
594596
}
595597
dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
596598
__func__, i, len, mem);
@@ -600,8 +602,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
600602
dev_err(chan2dev(chan), "can't get descriptor\n");
601603
if (first)
602604
list_splice_init(&first->descs_list, &atchan->free_descs_list);
603-
spin_unlock_bh(&atchan->lock);
604-
return NULL;
605+
goto spin_unlock;
605606
}
606607

607608
/* Linked list descriptor setup. */
@@ -645,13 +646,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
645646
xfer_size += len;
646647
}
647648

648-
spin_unlock_bh(&atchan->lock);
649649

650650
first->tx_dma_desc.flags = flags;
651651
first->xfer_size = xfer_size;
652652
first->direction = direction;
653+
ret = &first->tx_dma_desc;
653654

654-
return &first->tx_dma_desc;
655+
spin_unlock:
656+
spin_unlock_irqrestore(&atchan->lock, irqflags);
657+
return ret;
655658
}
656659

657660
static struct dma_async_tx_descriptor *
@@ -664,6 +667,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
664667
struct at_xdmac_desc *first = NULL, *prev = NULL;
665668
unsigned int periods = buf_len / period_len;
666669
int i;
670+
unsigned long irqflags;
667671
u32 cfg;
668672

669673
dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
@@ -683,16 +687,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
683687
for (i = 0; i < periods; i++) {
684688
struct at_xdmac_desc *desc = NULL;
685689

686-
spin_lock_bh(&atchan->lock);
690+
spin_lock_irqsave(&atchan->lock, irqflags);
687691
desc = at_xdmac_get_desc(atchan);
688692
if (!desc) {
689693
dev_err(chan2dev(chan), "can't get descriptor\n");
690694
if (first)
691695
list_splice_init(&first->descs_list, &atchan->free_descs_list);
692-
spin_unlock_bh(&atchan->lock);
696+
spin_unlock_irqrestore(&atchan->lock, irqflags);
693697
return NULL;
694698
}
695-
spin_unlock_bh(&atchan->lock);
699+
spin_unlock_irqrestore(&atchan->lock, irqflags);
696700
dev_dbg(chan2dev(chan),
697701
"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
698702
__func__, desc, &desc->tx_dma_desc.phys);
@@ -767,6 +771,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
767771
| AT_XDMAC_CC_SIF(0)
768772
| AT_XDMAC_CC_MBSIZE_SIXTEEN
769773
| AT_XDMAC_CC_TYPE_MEM_TRAN;
774+
unsigned long irqflags;
770775

771776
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
772777
__func__, &src, &dest, len, flags);
@@ -799,9 +804,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
799804

800805
dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
801806

802-
spin_lock_bh(&atchan->lock);
807+
spin_lock_irqsave(&atchan->lock, irqflags);
803808
desc = at_xdmac_get_desc(atchan);
804-
spin_unlock_bh(&atchan->lock);
809+
spin_unlock_irqrestore(&atchan->lock, irqflags);
805810
if (!desc) {
806811
dev_err(chan2dev(chan), "can't get descriptor\n");
807812
if (first)
@@ -887,6 +892,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
887892
int residue;
888893
u32 cur_nda, mask, value;
889894
u8 dwidth = 0;
895+
unsigned long flags;
890896

891897
ret = dma_cookie_status(chan, cookie, txstate);
892898
if (ret == DMA_COMPLETE)
@@ -895,7 +901,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
895901
if (!txstate)
896902
return ret;
897903

898-
spin_lock_bh(&atchan->lock);
904+
spin_lock_irqsave(&atchan->lock, flags);
899905

900906
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
901907

@@ -905,8 +911,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
905911
*/
906912
if (!desc->active_xfer) {
907913
dma_set_residue(txstate, desc->xfer_size);
908-
spin_unlock_bh(&atchan->lock);
909-
return ret;
914+
goto spin_unlock;
910915
}
911916

912917
residue = desc->xfer_size;
@@ -937,14 +942,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
937942
}
938943
residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
939944

940-
spin_unlock_bh(&atchan->lock);
941-
942945
dma_set_residue(txstate, residue);
943946

944947
dev_dbg(chan2dev(chan),
945948
"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
946949
__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
947950

951+
spin_unlock:
952+
spin_unlock_irqrestore(&atchan->lock, flags);
948953
return ret;
949954
}
950955

@@ -965,8 +970,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
965970
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
966971
{
967972
struct at_xdmac_desc *desc;
973+
unsigned long flags;
968974

969-
spin_lock_bh(&atchan->lock);
975+
spin_lock_irqsave(&atchan->lock, flags);
970976

971977
/*
972978
* If channel is enabled, do nothing, advance_work will be triggered
@@ -981,7 +987,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
981987
at_xdmac_start_xfer(atchan, desc);
982988
}
983989

984-
spin_unlock_bh(&atchan->lock);
990+
spin_unlock_irqrestore(&atchan->lock, flags);
985991
}
986992

987993
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1117,12 +1123,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
11171123
{
11181124
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
11191125
int ret;
1126+
unsigned long flags;
11201127

11211128
dev_dbg(chan2dev(chan), "%s\n", __func__);
11221129

1123-
spin_lock_bh(&atchan->lock);
1130+
spin_lock_irqsave(&atchan->lock, flags);
11241131
ret = at_xdmac_set_slave_config(chan, config);
1125-
spin_unlock_bh(&atchan->lock);
1132+
spin_unlock_irqrestore(&atchan->lock, flags);
11261133

11271134
return ret;
11281135
}
@@ -1131,18 +1138,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
11311138
{
11321139
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
11331140
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1141+
unsigned long flags;
11341142

11351143
dev_dbg(chan2dev(chan), "%s\n", __func__);
11361144

11371145
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
11381146
return 0;
11391147

1140-
spin_lock_bh(&atchan->lock);
1148+
spin_lock_irqsave(&atchan->lock, flags);
11411149
at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
11421150
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
11431151
& (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
11441152
cpu_relax();
1145-
spin_unlock_bh(&atchan->lock);
1153+
spin_unlock_irqrestore(&atchan->lock, flags);
11461154

11471155
return 0;
11481156
}
@@ -1151,16 +1159,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
11511159
{
11521160
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
11531161
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1162+
unsigned long flags;
11541163

11551164
dev_dbg(chan2dev(chan), "%s\n", __func__);
11561165

1157-
spin_lock_bh(&atchan->lock);
1158-
if (!at_xdmac_chan_is_paused(atchan))
1166+
spin_lock_irqsave(&atchan->lock, flags);
1167+
if (!at_xdmac_chan_is_paused(atchan)) {
1168+
spin_unlock_irqrestore(&atchan->lock, flags);
11591169
return 0;
1170+
}
11601171

11611172
at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
11621173
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1163-
spin_unlock_bh(&atchan->lock);
1174+
spin_unlock_irqrestore(&atchan->lock, flags);
11641175

11651176
return 0;
11661177
}
@@ -1170,10 +1181,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
11701181
struct at_xdmac_desc *desc, *_desc;
11711182
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
11721183
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1184+
unsigned long flags;
11731185

11741186
dev_dbg(chan2dev(chan), "%s\n", __func__);
11751187

1176-
spin_lock_bh(&atchan->lock);
1188+
spin_lock_irqsave(&atchan->lock, flags);
11771189
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
11781190
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
11791191
cpu_relax();
@@ -1183,7 +1195,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
11831195
at_xdmac_remove_xfer(atchan, desc);
11841196

11851197
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1186-
spin_unlock_bh(&atchan->lock);
1198+
spin_unlock_irqrestore(&atchan->lock, flags);
11871199

11881200
return 0;
11891201
}
@@ -1193,8 +1205,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
11931205
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
11941206
struct at_xdmac_desc *desc;
11951207
int i;
1208+
unsigned long flags;
11961209

1197-
spin_lock_bh(&atchan->lock);
1210+
spin_lock_irqsave(&atchan->lock, flags);
11981211

11991212
if (at_xdmac_chan_is_enabled(atchan)) {
12001213
dev_err(chan2dev(chan),
@@ -1225,7 +1238,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
12251238
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
12261239

12271240
spin_unlock:
1228-
spin_unlock_bh(&atchan->lock);
1241+
spin_unlock_irqrestore(&atchan->lock, flags);
12291242
return i;
12301243
}
12311244

0 commit comments

Comments
 (0)