@@ -2054,6 +2054,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
 {
	struct amdgpu_device *adev = ring->adev;
	u32 idx;
+	bool sched_work = false;
 
	if (!adev->gfx.enable_cleaner_shader)
		return;
@@ -2072,9 +2073,12 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
	mutex_lock(&adev->enforce_isolation_mutex);
	if (adev->enforce_isolation[idx]) {
		if (adev->kfd.init_complete)
-			amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+			sched_work = true;
	}
	mutex_unlock(&adev->enforce_isolation_mutex);
+
+	if (sched_work)
+		amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
 }
 
 /**
@@ -2090,6 +2094,7 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
 {
	struct amdgpu_device *adev = ring->adev;
	u32 idx;
+	bool sched_work = false;
 
	if (!adev->gfx.enable_cleaner_shader)
		return;
@@ -2105,9 +2110,12 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
	mutex_lock(&adev->enforce_isolation_mutex);
	if (adev->enforce_isolation[idx]) {
		if (adev->kfd.init_complete)
-			amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+			sched_work = true;
	}
	mutex_unlock(&adev->enforce_isolation_mutex);
+
+	if (sched_work)
+		amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
 }
 
 /*
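Both hunks make the same change: instead of calling amdgpu_gfx_kfd_sch_ctrl() while enforce_isolation_mutex is held, the decision is recorded in a local sched_work flag under the lock and the call is issued only after mutex_unlock(). Below is a minimal, self-contained sketch of that pattern in userspace C; the names (struct ctx, worker_ctrl, ring_begin_use) are illustrative stand-ins rather than amdgpu APIs, and the rationale (keeping the side-effecting call out from under the mutex, e.g. to avoid a lock dependency) is inferred from the shape of the diff, not stated in it.

/*
 * Sketch of "decide under the lock, act after unlock".
 * Build with: cc -pthread sketch.c
 */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

struct ctx {
	pthread_mutex_t lock;
	bool enforce;		/* stands in for adev->enforce_isolation[idx] */
	bool init_complete;	/* stands in for adev->kfd.init_complete */
};

/* Stand-in for amdgpu_gfx_kfd_sch_ctrl(); must not run under ctx->lock. */
static void worker_ctrl(struct ctx *c, bool enable)
{
	(void)c;
	printf("worker_ctrl: %s\n", enable ? "enable" : "disable");
}

static void ring_begin_use(struct ctx *c)
{
	bool sched_work = false;

	pthread_mutex_lock(&c->lock);
	if (c->enforce && c->init_complete)
		sched_work = true;	/* only record the decision here */
	pthread_mutex_unlock(&c->lock);

	if (sched_work)			/* act outside the critical section */
		worker_ctrl(c, false);
}

int main(void)
{
	struct ctx c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.enforce = true,
		.init_complete = true,
	};

	ring_begin_use(&c);
	return 0;
}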