@@ -119,8 +119,6 @@ mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
 			       create_mkey_callback, context);
 }
 
-static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
 
@@ -627,30 +625,10 @@ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
 	return NULL;
 }
 
-static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
+static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct mlx5_cache_ent *ent = mr->cache_ent;
 
-	mr->cache_ent = NULL;
-	spin_lock_irq(&ent->lock);
-	ent->total_mrs--;
-	spin_unlock_irq(&ent->lock);
-}
-
-void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
-{
-	struct mlx5_cache_ent *ent = mr->cache_ent;
-
-	if (!ent)
-		return;
-
-	if (mlx5_mr_cache_invalidate(mr)) {
-		detach_mr_from_cache(mr);
-		destroy_mkey(dev, mr);
-		kfree(mr);
-		return;
-	}
-
 	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);
 	ent->available_mrs++;
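
After this hunk mlx5_mr_cache_free() has a single job: put the MR back on its cache entry's free list under the entry lock and bump available_mrs. The failed-invalidation fallback that used to live here moves into the consolidated mlx5_ib_dereg_mr() hunk below. As a compilable userspace model of the pattern, with a pthread mutex standing in for spin_lock_irq and all names hypothetical:

#include <pthread.h>
#include <stddef.h>

struct mr_node {
	struct mr_node *next;
};

struct cache_ent {
	pthread_mutex_t lock;       /* models ent->lock (spin_lock_irq) */
	struct mr_node *head;       /* models the ent->head free list */
	unsigned int available_mrs; /* MRs currently parked in the list */
	unsigned int total_mrs;     /* parked + handed-out MRs */
};

/* Park a released MR in its cache entry instead of freeing it. The
 * kernel appends with list_add_tail(); this model pushes at the head,
 * which changes reuse order but not the accounting. */
static void cache_free(struct cache_ent *ent, struct mr_node *mr)
{
	pthread_mutex_lock(&ent->lock);
	mr->next = ent->head;
	ent->head = mr;
	ent->available_mrs++;
	pthread_mutex_unlock(&ent->lock);
}

int main(void)
{
	struct cache_ent ent = { .lock = PTHREAD_MUTEX_INITIALIZER,
				 .total_mrs = 1 };
	struct mr_node mr = { .next = NULL };

	cache_free(&ent, &mr);      /* ent.available_mrs is now 1 */
	return ent.available_mrs == 1 ? 0 : 1;
}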
@@ -1503,7 +1481,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
 		 */
 		err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
 		if (err) {
-			dereg_mr(dev, mr);
+			mlx5_ib_dereg_mr(&mr->ibmr, NULL);
 			return ERR_PTR(err);
 		}
 	}
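
The same substitution repeats in the ODP and dmabuf registration paths in the next two hunks: once the mkey exists, any later failure is unwound through the one real destructor, mlx5_ib_dereg_mr(), instead of the bespoke dereg_mr() helper. A minimal, compilable sketch of that create-then-single-destructor idiom (all names hypothetical):

#include <stdlib.h>

struct obj { int ready; };

static int obj_destroy(struct obj *o)   /* the one teardown path */
{
	free(o);
	return 0;
}

static int obj_finalize(struct obj *o)  /* a step that can fail late */
{
	o->ready = 1;
	return 0;
}

static struct obj *obj_register(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	if (obj_finalize(o)) {
		/* Same destructor the normal teardown uses, so partially
		 * and fully constructed objects never diverge. */
		obj_destroy(o);
		return NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = obj_register();

	return o ? obj_destroy(o) : 1;
}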
@@ -1560,7 +1538,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 	return &mr->ibmr;
 
 err_dereg_mr:
-	dereg_mr(dev, mr);
+	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
 	return ERR_PTR(err);
 }
 
@@ -1657,7 +1635,7 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
 	return &mr->ibmr;
 
 err_dereg_mr:
-	dereg_mr(dev, mr);
+	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
 	return ERR_PTR(err);
 }
 
@@ -1669,7 +1647,7 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
  * and any DMA inprogress will be completed. Failure of this function
  * indicates the HW has failed catastrophically.
  */
-int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
+static int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
 {
 	struct mlx5_umr_wr umrwr = {};
 
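
mlx5_mr_cache_invalidate() can become static because, after this consolidation, its only remaining callers are in this file: the mkey is revoked before the MR is either recycled into the cache or fully destroyed, and a failed revoke forces destruction. A compilable sketch of that decision, with a stub standing in for the UMR revoke and all names hypothetical:

#include <stdbool.h>
#include <stdlib.h>

struct mr {
	bool cache_backed;   /* models mr->cache_ent != NULL */
};

/* Stands in for revoking the mkey with a UMR work request; a nonzero
 * return means the hardware revoke failed. */
static int invalidate_mkey(struct mr *mr)
{
	(void)mr;
	return 0;
}

/* A cache-backed MR is recycled only if the revoke succeeds; after a
 * failed revoke the mkey cannot be trusted, so it must be destroyed. */
static bool can_recycle(struct mr *mr)
{
	return mr->cache_backed && invalidate_mkey(mr) == 0;
}

int main(void)
{
	struct mr mr = { .cache_backed = true };

	return can_recycle(&mr) ? 0 : 1;
}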
@@ -1941,69 +1919,82 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 	}
 }
 
-static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
-	if (mr->ibmr.type == IB_MR_TYPE_INTEGRITY) {
+	struct mlx5_ib_mr *mr = to_mmr(ibmr);
+	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+	int rc;
+
+	/*
+	 * Any async use of the mr must hold the refcount, once the refcount
+	 * goes to zero no other thread, such as ODP page faults, prefetch, any
+	 * UMR activity, etc can touch the mkey. Thus it is safe to destroy it.
+	 */
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+	    refcount_read(&mr->mmkey.usecount) != 0 &&
+	    xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
+		mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
+		xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
+			   NULL, GFP_KERNEL);
+
+		if (mr->mtt_mr) {
+			rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
+			if (rc)
+				return rc;
+			mr->mtt_mr = NULL;
+		}
+		if (mr->klm_mr) {
+			rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
+			if (rc)
+				return rc;
+			mr->klm_mr = NULL;
+		}
+
 		if (mlx5_core_destroy_psv(dev->mdev,
 					  mr->sig->psv_memory.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
 				     mr->sig->psv_memory.psv_idx);
-		if (mlx5_core_destroy_psv(dev->mdev,
-					  mr->sig->psv_wire.psv_idx))
+		if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
 				     mr->sig->psv_wire.psv_idx);
-		xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
 		kfree(mr->sig);
 		mr->sig = NULL;
 	}
 
+	/* Stop DMA */
+	if (mr->cache_ent) {
+		if (mlx5_mr_cache_invalidate(mr)) {
+			spin_lock_irq(&mr->cache_ent->lock);
+			mr->cache_ent->total_mrs--;
+			spin_unlock_irq(&mr->cache_ent->lock);
+			mr->cache_ent = NULL;
+		}
+	}
 	if (!mr->cache_ent) {
-		destroy_mkey(dev, mr);
-		mlx5_free_priv_descs(mr);
+		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
+		if (rc)
+			return rc;
 	}
-}
-
-static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
-{
-	struct ib_umem *umem = mr->umem;
 
-	/* Stop all DMA */
-	if (is_odp_mr(mr))
-		mlx5_ib_fence_odp_mr(mr);
-	else if (is_dmabuf_mr(mr))
-		mlx5_ib_fence_dmabuf_mr(mr);
-	else
-		clean_mr(dev, mr);
+	if (mr->umem) {
+		bool is_odp = is_odp_mr(mr);
 
-	if (umem) {
-		if (!is_odp_mr(mr))
-			atomic_sub(ib_umem_num_pages(umem),
+		if (!is_odp)
+			atomic_sub(ib_umem_num_pages(mr->umem),
 				   &dev->mdev->priv.reg_pages);
-		ib_umem_release(umem);
+		ib_umem_release(mr->umem);
+		if (is_odp)
+			mlx5_ib_free_odp_mr(mr);
 	}
 
-	if (mr->cache_ent)
+	if (mr->cache_ent) {
 		mlx5_mr_cache_free(dev, mr);
-	else
+	} else {
+		mlx5_free_priv_descs(mr);
 		kfree(mr);
-}
-
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
-{
-	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
-
-	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
-		dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
-		dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
-	}
-
-	if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
-		mlx5_ib_free_implicit_mr(mmr);
-		return 0;
 	}
-
-	dereg_mr(to_mdev(ibmr->device), mmr);
-
 	return 0;
 }
 
@@ -2175,10 +2166,10 @@ static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
 	destroy_mkey(dev, mr);
 	mlx5_free_priv_descs(mr);
 err_free_mtt_mr:
-	dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
+	mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
 	mr->mtt_mr = NULL;
 err_free_klm_mr:
-	dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
+	mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
 	mr->klm_mr = NULL;
 err_destroy_psv:
 	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
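
The error labels in mlx5_alloc_integrity_descs() unwind in reverse order of construction, each label releasing one more resource than the one after it; the only change in this hunk is that the child MRs are now torn down through mlx5_ib_dereg_mr(). A compilable sketch of the goto-unwind idiom itself (hypothetical resources):

#include <stdlib.h>

static int setup(void)
{
	void *a = NULL, *b = NULL, *c = NULL;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_free_a;
	c = malloc(16);
	if (!c)
		goto err_free_b;

	/* Success: the caller would own a, b and c; this model just
	 * releases them before returning. */
	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:             /* labels unwind in reverse acquisition order */
	free(b);
err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	return setup();
}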