@@ -880,18 +880,6 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
-/* Prevent CPU hotplug from freeing up the per-CPU acomp_ctx resources */
-static struct crypto_acomp_ctx *acomp_ctx_get_cpu(struct crypto_acomp_ctx __percpu *acomp_ctx)
-{
-	cpus_read_lock();
-	return raw_cpu_ptr(acomp_ctx);
-}
-
-static void acomp_ctx_put_cpu(void)
-{
-	cpus_read_unlock();
-}
-
 static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 			   struct zswap_pool *pool)
 {
@@ -905,7 +893,8 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	gfp_t gfp;
 	u8 *dst;
 
-	acomp_ctx = acomp_ctx_get_cpu(pool->acomp_ctx);
+	acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
+
 	mutex_lock(&acomp_ctx->mutex);
 
 	dst = acomp_ctx->buffer;
@@ -961,7 +950,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 		zswap_reject_alloc_fail++;
 
 	mutex_unlock(&acomp_ctx->mutex);
-	acomp_ctx_put_cpu();
 	return comp_ret == 0 && alloc_ret == 0;
 }
 
@@ -972,7 +960,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	struct crypto_acomp_ctx *acomp_ctx;
 	u8 *src;
 
-	acomp_ctx = acomp_ctx_get_cpu(entry->pool->acomp_ctx);
+	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
 	mutex_lock(&acomp_ctx->mutex);
 
 	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
@@ -1002,7 +990,6 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 
 	if (src != acomp_ctx->buffer)
 		zpool_unmap_handle(zpool, entry->handle);
-	acomp_ctx_put_cpu();
 }
 
 /*********************************
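Note: the hunks above drop the cpus_read_lock()-based acomp_ctx_get_cpu()/acomp_ctx_put_cpu() wrappers and return to a plain raw_cpu_ptr() lookup, with every use of the per-CPU context serialized by the existing acomp_ctx->mutex. The userspace sketch below only illustrates that access pattern by analogy; it is not kernel code, and the ctx struct, get_ctx() helper, NR_CTX and the pthread mutex are illustrative stand-ins for the per-CPU crypto_acomp_ctx and its mutex.

/* Build with: cc -pthread sketch.c -o sketch */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_CTX 4			/* stand-in for the number of possible CPUs */

struct ctx {
	pthread_mutex_t mutex;		/* analogue of acomp_ctx->mutex */
	char buffer[64];		/* analogue of acomp_ctx->buffer */
};

static struct ctx per_cpu_ctx[NR_CTX];

/* Analogue of raw_cpu_ptr(pool->acomp_ctx): no global (hotplug-style) lock taken. */
static struct ctx *get_ctx(void)
{
	int cpu = sched_getcpu();

	return &per_cpu_ctx[(cpu < 0 ? 0 : cpu) % NR_CTX];
}

/* All access to the context's buffer happens under that context's own mutex. */
static void use_ctx(const char *what)
{
	struct ctx *c = get_ctx();

	pthread_mutex_lock(&c->mutex);
	snprintf(c->buffer, sizeof(c->buffer), "%s", what);
	printf("ctx %d: %s\n", (int)(c - per_cpu_ctx), c->buffer);
	pthread_mutex_unlock(&c->mutex);
}

int main(void)
{
	for (int i = 0; i < NR_CTX; i++)
		pthread_mutex_init(&per_cpu_ctx[i].mutex, NULL);

	use_ctx("compress");
	use_ctx("decompress");
	return 0;
}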