@@ -415,8 +415,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		irqflags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 	cookie = dma_cookie_assign(tx);
 
 	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -425,7 +426,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (list_is_singular(&atchan->xfers_list))
 		at_xdmac_start_xfer(atchan, desc);
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
 	return cookie;
 }
 
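Note on the conversion above: the _bh lock variants only disable softirqs, so spin_unlock_bh() re-enables bottom halves unconditionally and trips the warning in __local_bh_enable_ip() when the caller already runs with hard interrupts disabled. The irqsave/irqrestore variants save and restore the interrupt state instead, which makes the critical section safe from any context. A minimal standalone sketch of the pattern (hypothetical lock and function names, not driver code):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static void example_touch_shared_state(void)
	{
		unsigned long flags;

		/* Save the current IRQ state and disable local interrupts;
		 * safe whether we run in process, softirq, or hard-irq context. */
		spin_lock_irqsave(&example_lock, flags);
		/* ... access data shared with an interrupt handler ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}
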
@@ -563,6 +564,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct scatterlist	*sg;
 	int			i;
 	unsigned int		xfer_size = 0;
+	unsigned long		irqflags;
+	struct dma_async_tx_descriptor	*ret = NULL;
 
 	if (!sgl)
 		return NULL;
@@ -578,7 +581,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		 flags);
 
 	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
@@ -589,8 +592,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		mem = sg_dma_address(sg);
 		if (unlikely(!len)) {
 			dev_err(chan2dev(chan), "sg data length is zero\n");
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
 			__func__, i, len, mem);
@@ -600,8 +602,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 
 		/* Linked list descriptor setup. */
@@ -645,13 +646,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		xfer_size += len;
 	}
 
-	spin_unlock_bh(&atchan->lock);
 
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = xfer_size;
 	first->direction = direction;
+	ret = &first->tx_dma_desc;
 
-	return &first->tx_dma_desc;
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
+	return ret;
 }
 
 static struct dma_async_tx_descriptor *
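The prep_slave_sg hunks above also restructure the exit paths: ret starts as NULL and every failure inside the loop jumps to the new spin_unlock label, so the lock is released exactly once on both the error and the success path. A generic sketch of the idiom, with hypothetical helper and type names:

	/* Single-exit unlock idiom; struct example_chan, example_prepare()
	 * and example_first_desc() are hypothetical, for illustration only. */
	static struct dma_async_tx_descriptor *example_prep(struct example_chan *chan)
	{
		struct dma_async_tx_descriptor *ret = NULL;	/* NULL = failure */
		unsigned long irqflags;

		spin_lock_irqsave(&chan->lock, irqflags);
		if (!example_prepare(chan))	/* hypothetical helper */
			goto spin_unlock;	/* error: ret stays NULL */
		ret = example_first_desc(chan);	/* hypothetical helper */
	spin_unlock:
		spin_unlock_irqrestore(&chan->lock, irqflags);
		return ret;	/* lock dropped exactly once on every path */
	}
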
@@ -664,6 +667,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	unsigned int		periods = buf_len / period_len;
 	int			i;
+	unsigned long		irqflags;
 	u32			cfg;
 
 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
@@ -683,16 +687,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	for (i = 0; i < periods; i++) {
 		struct at_xdmac_desc	*desc = NULL;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, irqflags);
 			return NULL;
 		}
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		dev_dbg(chan2dev(chan),
 			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
 			__func__, desc, &desc->tx_dma_desc.phys);
@@ -767,6 +771,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 					| AT_XDMAC_CC_SIF(0)
 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
 		__func__, &src, &dest, len, flags);
@@ -799,9 +804,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
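By contrast, the cyclic and memcpy preparation paths above only need the lock around at_xdmac_get_desc(), so they lock and unlock per descriptor allocation inside the loop, and their error paths unlock inline rather than jumping to a shared label.
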
@@ -887,6 +892,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	int			residue;
 	u32			cur_nda, mask, value;
 	u8			dwidth = 0;
+	unsigned long		flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -895,7 +901,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	if (!txstate)
 		return ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 
@@ -905,8 +911,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	if (!desc->active_xfer) {
 		dma_set_residue(txstate, desc->xfer_size);
-		spin_unlock_bh(&atchan->lock);
-		return ret;
+		goto spin_unlock;
 	}
 
 	residue = desc->xfer_size;
@@ -937,14 +942,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
 
-	spin_unlock_bh(&atchan->lock);
-
 	dma_set_residue(txstate, residue);
 
 	dev_dbg(chan2dev(chan),
 		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
 		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
 
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return ret;
 }
 
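With the spin_unlock label now placed at the end of at_xdmac_tx_status(), the residue read-back, dma_set_residue(), and the final dev_dbg() all execute under the channel lock, and the early !desc->active_xfer return reaches the same single unlock via goto.
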
@@ -965,8 +970,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc	*desc;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -981,7 +987,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 		at_xdmac_start_xfer(atchan, desc);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1117,12 +1123,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	int			ret;
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	ret = at_xdmac_set_slave_config(chan, config);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return ret;
 }
@@ -1131,18 +1138,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
 		return 0;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
 	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
 	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
 		cpu_relax();
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1151,16 +1159,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
-	if (!at_xdmac_chan_is_paused(atchan))
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (!at_xdmac_chan_is_paused(atchan)) {
+		spin_unlock_irqrestore(&atchan->lock, flags);
 		return 0;
+	}
 
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
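The resume hunk fixes more than the lock flavor: the old code returned 0 from the !at_xdmac_chan_is_paused() branch while still holding the channel lock. The rewritten branch drops the lock before returning.
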
@@ -1170,10 +1181,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
 		cpu_relax();
@@ -1183,7 +1195,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 		at_xdmac_remove_xfer(atchan, desc);
 
 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1193,8 +1205,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 	int			i;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
@@ -1225,7 +1238,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 spin_unlock:
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return i;
 }