@@ -739,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
-	*mapping += bp->rx_dma_offset;
 	return page;
 }
 
@@ -781,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 		if (!page)
 			return -ENOMEM;
 
+		mapping += bp->rx_dma_offset;
 		rx_buf->data = page;
 		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
 	} else {
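
The first two hunks are one logical move: the rx_dma_offset adjustment leaves __bnxt_alloc_rx_page() and lands in its data-ring caller, bnxt_alloc_rx_data(). The helper therefore hands back the raw page_pool DMA address, which matters because a later hunk makes the aggregation ring call the same helper, and aggregation buffers are used from offset 0. A minimal sketch of the helper's resulting shape, reconstructed from the visible context (driver types come from bnxt.h; details may differ from the exact upstream body):

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	/* Map the whole page; *mapping is the raw address with no payload
	 * offset applied. Callers that need bp->rx_dma_offset (the data
	 * ring) now add it themselves.
	 */
	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	return page;
}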
@@ -841,33 +841,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	u16 sw_prod = rxr->rx_sw_agg_prod;
 	unsigned int offset = 0;
 
-	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-		page = rxr->rx_page;
-		if (!page) {
+	if (BNXT_RX_PAGE_MODE(bp)) {
+		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+		if (!page)
+			return -ENOMEM;
+
+	} else {
+		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+			page = rxr->rx_page;
+			if (!page) {
+				page = alloc_page(gfp);
+				if (!page)
+					return -ENOMEM;
+				rxr->rx_page = page;
+				rxr->rx_page_offset = 0;
+			}
+			offset = rxr->rx_page_offset;
+			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+			if (rxr->rx_page_offset == PAGE_SIZE)
+				rxr->rx_page = NULL;
+			else
+				get_page(page);
+		} else {
 			page = alloc_page(gfp);
 			if (!page)
 				return -ENOMEM;
-			rxr->rx_page = page;
-			rxr->rx_page_offset = 0;
 		}
-		offset = rxr->rx_page_offset;
-		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
-		if (rxr->rx_page_offset == PAGE_SIZE)
-			rxr->rx_page = NULL;
-		else
-			get_page(page);
-	} else {
-		page = alloc_page(gfp);
-		if (!page)
-			return -ENOMEM;
-	}
 
-	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-				     DMA_ATTR_WEAK_ORDERING);
-	if (dma_mapping_error(&pdev->dev, mapping)) {
-		__free_page(page);
-		return -EIO;
+		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+					     DMA_ATTR_WEAK_ORDERING);
+		if (dma_mapping_error(&pdev->dev, mapping)) {
+			__free_page(page);
+			return -EIO;
+		}
 	}
 
 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
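
In page (XDP) mode the aggregation ring now takes its buffers from the page_pool-backed helper and receives them already DMA-mapped, so only the legacy branch still splits a large page and maps it manually with dma_map_page_attrs(). A sketch of the kind of pool this path presumes, using the generic page_pool API; the function name and pool_size are illustrative, not the driver's actual values:

#include <net/page_pool.h>

/* Illustrative pool setup: no PP_FLAG_DMA_MAP, consistent with the
 * first hunk, where the driver maps pool pages itself and recycles
 * them back into the pool on mapping failure.
 */
static struct page_pool *bnxt_like_create_pool(struct device *dev, int nid)
{
	struct page_pool_params pp = {
		.pool_size	= 1024,	/* illustrative; sized to the ring */
		.nid		= nid,	/* allocate near the ring's NUMA node */
		.dev		= dev,
	};

	return page_pool_create(&pp);
}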
@@ -1105,7 +1113,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 	}
 
 	dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-			     DMA_FROM_DEVICE,
+			     bp->rx_dir,
 			     DMA_ATTR_WEAK_ORDERING);
 
 	total_frag_len += frag_len;
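
Switching the unmap direction from a hard-coded DMA_FROM_DEVICE to bp->rx_dir keeps it identical to the direction used at map time: in page mode the driver maps RX buffers bidirectionally so an attached XDP program may rewrite packet bytes. A minimal sketch of that invariant (function name hypothetical):

#include <linux/dma-mapping.h>

/* The direction chosen when mapping a buffer must be reused when
 * unmapping it; mismatched directions are a DMA-API violation.
 */
static void rx_dma_roundtrip(struct device *dev, struct page *page,
			     size_t len, enum dma_data_direction dir)
{
	dma_addr_t mapping = dma_map_page_attrs(dev, page, 0, len, dir,
						DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(dev, mapping))
		return;
	/* ... device DMA and, in XDP mode, possible buffer writes ... */
	dma_unmap_page_attrs(dev, mapping, len, dir, DMA_ATTR_WEAK_ORDERING);
}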
@@ -2936,14 +2944,23 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-				     DMA_ATTR_WEAK_ORDERING);
+		if (BNXT_RX_PAGE_MODE(bp)) {
+			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
+					     DMA_ATTR_WEAK_ORDERING);
+			rx_agg_buf->page = NULL;
+			__clear_bit(i, rxr->rx_agg_bmap);
 
-		rx_agg_buf->page = NULL;
-		__clear_bit(i, rxr->rx_agg_bmap);
+			page_pool_recycle_direct(rxr->page_pool, page);
+		} else {
+			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+					     DMA_ATTR_WEAK_ORDERING);
+			rx_agg_buf->page = NULL;
+			__clear_bit(i, rxr->rx_agg_bmap);
 
-		__free_page(page);
+			__free_page(page);
+		}
 	}
 
 skip_rx_agg_free:
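
The teardown split mirrors the allocation split: a page that came from the page_pool must be handed back through the pool so its state and accounting stay consistent, while a plain alloc_page() buffer is released with __free_page(). A small sketch of that ownership rule (helper name hypothetical):

#include <linux/mm.h>
#include <net/page_pool.h>

static void rx_agg_release_page(struct page_pool *pool, struct page *page,
				bool from_pool)
{
	if (from_pool)
		page_pool_recycle_direct(pool, page);	/* pool-owned page */
	else
		__free_page(page);			/* plain allocation */
}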