
Commit b02bab6

NeilBrown authored and Vinod Koul committed
async_tx: use GFP_NOWAIT rather than GFP_IO
These async_XX functions are called from md/raid5 in an atomic section, between get_cpu() and put_cpu(), so they must not sleep. So use GFP_NOWAIT rather than GFP_IO.

Dan Williams writes: Longer term async_tx needs to be merged into md directly as we can allocate this unmap data statically per-stripe rather than per request.

Fixes: 7476bd7 ("async_pq: convert to dmaengine_unmap_data")
Cc: [email protected] (v3.13+)
Reported-and-tested-by: Stanislav Samsonov <[email protected]>
Acked-by: Dan Williams <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
Signed-off-by: Vinod Koul <[email protected]>
1 parent 16605e8 commit b02bab6
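
For readers less familiar with the GFP flags involved: get_cpu() disables preemption, so everything up to the matching put_cpu() runs in atomic context and must not sleep. GFP_NOIO allocations may still block on memory reclaim, whereas GFP_NOWAIT never sleeps and simply returns NULL under memory pressure, which the async_tx helpers already tolerate by falling back to their synchronous code paths. The sketch below is illustrative only; the function name, the map count of 2, and the control flow around the allocation are assumptions, not the actual md/raid5 call chain.

/*
 * Illustrative sketch, not the real md/raid5 caller.  The point is the
 * shape: between get_cpu() and put_cpu() preemption is disabled, so any
 * allocation made here must use a non-sleeping flag such as GFP_NOWAIT.
 */
static void sketch_atomic_caller(struct dma_device *device)
{
        struct dmaengine_unmap_data *unmap = NULL;
        int cpu = get_cpu();            /* atomic from here: must not sleep */

        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

        if (unmap) {
                /* hardware-offloaded path (descriptor setup elided) */
                dmaengine_unmap_put(unmap);
        } else {
                /* allocation failed or no DMA engine: software fallback */
        }

        put_cpu();                      /* preemption re-enabled */
        (void)cpu;
}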

File tree

4 files changed: +7 -7 lines changed

crypto/async_tx/async_memcpy.c  (+1 -1)

@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
         struct dmaengine_unmap_data *unmap = NULL;

         if (device)
-                unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+                unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

         if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
                 unsigned long dma_prep_flags = 0;
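
The reason a failed GFP_NOWAIT allocation is harmless here is already visible in the hunk above: unmap is only used when it is non-NULL and the buffers are DMA-aligned; otherwise async_memcpy() falls back to a plain CPU copy. A rough sketch of that shape follows; the fallback body is illustrative and simplified, not quoted from the file.

        struct dmaengine_unmap_data *unmap = NULL;

        if (device)
                /* never sleeps; may return NULL under memory pressure */
                unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

        if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
                /* map the pages, build and submit a DMA descriptor (elided) */
        } else {
                /* no descriptor or unaligned buffers: synchronous copy */
                void *dest_buf = kmap_atomic(dest) + dest_offset;
                void *src_buf  = kmap_atomic(src) + src_offset;

                memcpy(dest_buf, src_buf, len);

                kunmap_atomic(src_buf);
                kunmap_atomic(dest_buf);
        }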

crypto/async_tx/async_pq.c  (+2 -2)

@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
         BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

         if (device)
-                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

         /* XORing P/Q is only implemented in software */
         if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
         BUG_ON(disks < 4);

         if (device)
-                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

         if (unmap && disks <= dma_maxpq(device, 0) &&
             is_dma_pq_aligned(device, offset, 0, len)) {

crypto/async_tx/async_raid6_recov.c  (+2 -2)

@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
         u8 *a, *b, *c;

         if (dma)
-                unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+                unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

         if (unmap) {
                 struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
         u8 *d, *s;

         if (dma)
-                unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+                unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

         if (unmap) {
                 dma_addr_t dma_dest[2];

crypto/async_tx/async_xor.c  (+2 -2)

@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
         BUG_ON(src_cnt <= 1);

         if (device)
-                unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+                unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);

         if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
                 struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
         BUG_ON(src_cnt <= 1);

         if (device)
-                unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+                unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);

         if (unmap && src_cnt <= device->max_xor &&
             is_dma_xor_aligned(device, offset, 0, len)) {
