@@ -98,7 +98,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
 			__GFP_KSWAPD_RECLAIM;
 
 	if (!pool->use_dma_alloc) {
-		p = alloc_pages(gfp_flags, order);
+		p = alloc_pages_node(pool->nid, gfp_flags, order);
 		if (p)
 			p->private = order;
 		return p;
@@ -292,7 +292,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
 						  enum ttm_caching caching,
 						  unsigned int order)
 {
-	if (pool->use_dma_alloc)
+	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
 		return &pool->caching[caching].orders[order];
 
 #ifdef CONFIG_X86
@@ -550,29 +550,32 @@ EXPORT_SYMBOL(ttm_pool_free);
  *
  * @pool: the pool to initialize
  * @dev: device for DMA allocations and mappings
+ * @nid: NUMA node to use for allocations
  * @use_dma_alloc: true if coherent DMA alloc should be used
  * @use_dma32: true if GFP_DMA32 should be used
  *
  * Initialize the pool and its pool types.
  */
 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-		   bool use_dma_alloc, bool use_dma32)
+		   int nid, bool use_dma_alloc, bool use_dma32)
 {
 	unsigned int i, j;
 
 	WARN_ON(!dev && use_dma_alloc);
 
 	pool->dev = dev;
+	pool->nid = nid;
 	pool->use_dma_alloc = use_dma_alloc;
 	pool->use_dma32 = use_dma32;
 
-	if (use_dma_alloc) {
+	if (use_dma_alloc || nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
 			for (j = 0; j < TTM_DIM_ORDER; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
 }
+EXPORT_SYMBOL(ttm_pool_init);
 
 /**
  * ttm_pool_fini - Cleanup a pool
@@ -586,7 +589,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 {
 	unsigned int i, j;
 
-	if (pool->use_dma_alloc) {
+	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
 			for (j = 0; j < TTM_DIM_ORDER; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
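For context, a minimal driver-side sketch of how the new nid parameter could be used once this change is in place. example_pool_setup() is a hypothetical helper, not part of the patch; it only assumes the patched ttm_pool_init() signature shown above and the standard dev_to_node()/NUMA_NO_NODE kernel helpers.

#include <linux/device.h>      /* dev_to_node() */
#include <linux/numa.h>        /* NUMA_NO_NODE */
#include <drm/ttm/ttm_pool.h>  /* struct ttm_pool, ttm_pool_init() */

/*
 * Hypothetical setup: prefer the device's local NUMA node for the
 * pool's system-memory allocations when one is known, otherwise pass
 * NUMA_NO_NODE (no preference, behaviour as before the patch).
 */
static void example_pool_setup(struct ttm_pool *pool, struct device *dev)
{
	int nid = dev ? dev_to_node(dev) : NUMA_NO_NODE;

	/* use_dma_alloc and use_dma32 left false for this sketch */
	ttm_pool_init(pool, dev, nid, false, false);
}

Note that passing a node id other than NUMA_NO_NODE now also enables the per-pool page caching (the same branch as use_dma_alloc), since node-specific pages cannot be returned to the global pools.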