@@ -277,6 +277,20 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 	/* The object will be poisoned by kasan_poison_pages(). */
 }
 
+static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
+					gfp_t flags, bool init)
+{
+	/*
+	 * Unpoison the whole object. For kmalloc() allocations,
+	 * poison_kmalloc_redzone() will do precise poisoning.
+	 */
+	kasan_unpoison(object, cache->object_size, init);
+
+	/* Save alloc info (if possible) for non-kmalloc() allocations. */
+	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
+		kasan_save_alloc_info(cache, object, flags);
+}
+
 void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 					void *object, gfp_t flags, bool init)
 {
@@ -299,15 +313,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	tag = assign_tag(cache, object, false);
 	tagged_object = set_tag(object, tag);
 
-	/*
-	 * Unpoison the whole object.
-	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
-	 */
-	kasan_unpoison(tagged_object, cache->object_size, init);
-
-	/* Save alloc info (if possible) for non-kmalloc() allocations. */
-	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
-		kasan_save_alloc_info(cache, tagged_object, flags);
+	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+	unpoison_slab_object(cache, tagged_object, flags, init);
 
 	return tagged_object;
 }
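
[Editor's aside, not part of the patch: a minimal sketch of the division of labor this refactor preserves, using the internal hook names from mm/kasan/common.c. __kasan_slab_alloc() unpoisons the whole cache->object_size; for kmalloc() caches, __kasan_kmalloc() afterwards re-poisons the [size, object_size) tail as a redzone via poison_kmalloc_redzone(). sketch_kmalloc_path() is a hypothetical caller shown only to illustrate the ordering; the real callers live in the slab allocator.]

/* Sketch only: illustrates hook ordering on the kmalloc() path. */
static void *sketch_kmalloc_path(struct kmem_cache *cache, void *object,
				 size_t size, gfp_t flags)
{
	/* Whole-object unpoison; saves alloc info for non-kmalloc caches. */
	object = __kasan_slab_alloc(cache, object, flags, false);

	/* For kmalloc caches: precise redzone poisoning for size bytes. */
	return __kasan_kmalloc(cache, object, size, flags);
}
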
@@ -482,7 +489,30 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 
 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
 {
-	kasan_unpoison(ptr, size, false);
+	struct slab *slab;
+	gfp_t flags = 0; /* Might be executing under a lock. */
+
+	if (is_kfence_address(kasan_reset_tag(ptr)))
+		return;
+
+	slab = virt_to_slab(ptr);
+
+	/*
+	 * This function can be called for large kmalloc allocations that get
+	 * their memory from page_alloc.
+	 */
+	if (unlikely(!slab)) {
+		kasan_unpoison(ptr, size, false);
+		poison_kmalloc_large_redzone(ptr, size, flags);
+		return;
+	}
+
+	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+	unpoison_slab_object(slab->slab_cache, ptr, flags, false);
+
+	/* Poison the redzone and save alloc info for kmalloc() allocations. */
+	if (is_kmalloc_cache(slab->slab_cache))
+		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
 }
 
 bool __kasan_check_byte(const void *address, unsigned long ip)
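
[Editor's aside, not part of the patch: a hedged sketch of how the reworked hook is meant to be driven through the kasan_mempool_unpoison_object() wrapper from include/linux/kasan.h when a mempool hands back a stashed element. remove_element() is a hypothetical stand-in for the caller's own bookkeeping.]

/* Sketch only: taking a stashed element back out of a pool. */
static void *sketch_mempool_take(mempool_t *pool, size_t size)
{
	void *elem = remove_element(pool);	/* hypothetical helper */

	/*
	 * Re-mark the element as valid. With this patch, slab-backed
	 * kmalloc() objects also get their redzone re-poisoned and a
	 * fresh alloc stack trace recorded.
	 */
	kasan_mempool_unpoison_object(elem, size);
	return elem;
}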