@@ -96,16 +96,15 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
  */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* free ring buffers and the ring itself */
 	idpf_ctlq_dealloc_ring_res(hw, cq);
 
 	/* Set ring_size to 0 to indicate uninitialized queue */
 	cq->ring_size = 0;
 
-	mutex_unlock(&cq->cq_lock);
-	mutex_destroy(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 }
 
 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
 	idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-	mutex_init(&cq->cq_lock);
+	spin_lock_init(&cq->cq_lock);
 
 	list_add(&cq->cq_list, &hw->cq_list_head);
 
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	int err = 0;
 	int i;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	if (*clean_count > cq->ring_size)
 		return -EBADR;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
@@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	if (*buff_count > 0)
 		buffs_avail = true;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	if (tbp >= cq->ring_size)
 		tbp = 0;
@@ -524,7 +523,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		wr32(hw, cq->reg.tail, cq->next_to_post);
 	}
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 	u16 i;
 
 	/* take the lock before we start messing with the ring */
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	*num_q_msg = i;
 	if (*num_q_msg == 0)
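
Not shown in the hunks above is the matching type change for the lock itself. A minimal sketch of what the cq_lock declaration change would presumably look like (the header hunk is not included here; only the cq_lock member name and its use in struct idpf_ctlq_info are taken from the diff above):

-	struct mutex cq_lock;	/* control queue lock; a mutex can sleep and needs mutex_destroy() */
+	spinlock_t cq_lock;	/* control queue lock; a spinlock never sleeps and has no destroy step */

This is also consistent with idpf_ctlq_shutdown() dropping the mutex_destroy() call: spinlocks have no teardown counterpart, so spin_unlock() is the last operation the function needs to perform on the lock.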