cpufreq: amd-pstate: Add ->fast_switch() callback
author    Gautham R. Shenoy <gautham.shenoy@amd.com>
          Wed, 17 May 2023 16:28:15 +0000 (16:28 +0000)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Wed, 24 May 2023 17:39:16 +0000 (19:39 +0200)
Schedutil normally calls the adjust_perf callback for drivers that provide
an adjust_perf callback and have the fast_switch_possible flag set.
However, when frequency invariance is disabled, schedutil falls back to
invoking fast_switch instead. So there is a chance of a kernel crash if
this function pointer is not set. To protect against this scenario, add a
fast_switch callback to the amd_pstate driver.

Fixes: 1d215f0319c2 ("cpufreq: amd-pstate: Add fast switch function for AMD P-State")
Signed-off-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Signed-off-by: Wyes Karny <wyes.karny@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
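
For context, the crash path starts in schedutil's per-CPU update hook: when
frequency invariance is not available, the performance-level path falls back
to the frequency path, which ends up in cpufreq_driver_fast_switch() whenever
the driver set fast_switch_possible. A simplified sketch of that fallback,
abridged from kernel/sched/cpufreq_schedutil.c of that era (function names
and signatures are real, the body is heavily elided):

	static void sugov_update_single_perf(struct update_util_data *hook,
					     u64 time, unsigned int flags)
	{
		/*
		 * Without frequency invariance the utilization-to-performance
		 * mapping is unreliable, so fall back to the frequency path
		 * instead of calling the driver's ->adjust_perf() hook.
		 */
		if (!arch_scale_freq_invariant()) {
			sugov_update_single_freq(hook, time, flags);
			return;
		}

		/* ... frequency-invariant case: cpufreq_driver_adjust_perf() ... */
	}

The frequency path then calls cpufreq_driver_fast_switch() because
amd-pstate sets policy->fast_switch_possible at init time, even though, before
this patch, it never registered a ->fast_switch() callback.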
drivers/cpufreq/amd-pstate.c

index 5a3d4aa0f45a6a18e1abb3cceb6c972c4b3f3e19..45711fc0a856549d0073ab57ce1f3806b2016312 100644 (file)
@@ -444,9 +444,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
        return 0;
 }
 
-static int amd_pstate_target(struct cpufreq_policy *policy,
-                            unsigned int target_freq,
-                            unsigned int relation)
+static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+                                 unsigned int target_freq, bool fast_switch)
 {
        struct cpufreq_freqs freqs;
        struct amd_cpudata *cpudata = policy->driver_data;
@@ -465,14 +464,37 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
        des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
                                     cpudata->max_freq);
 
-       cpufreq_freq_transition_begin(policy, &freqs);
+       WARN_ON(fast_switch && !policy->fast_switch_enabled);
+       /*
+        * If fast_switch is desired, then there aren't any registered
+        * transition notifiers. See comment for
+        * cpufreq_enable_fast_switch().
+        */
+       if (!fast_switch)
+               cpufreq_freq_transition_begin(policy, &freqs);
+
        amd_pstate_update(cpudata, min_perf, des_perf,
-                         max_perf, false, policy->governor->flags);
-       cpufreq_freq_transition_end(policy, &freqs, false);
+                       max_perf, fast_switch, policy->governor->flags);
+
+       if (!fast_switch)
+               cpufreq_freq_transition_end(policy, &freqs, false);
 
        return 0;
 }
 
+static int amd_pstate_target(struct cpufreq_policy *policy,
+                            unsigned int target_freq,
+                            unsigned int relation)
+{
+       return amd_pstate_update_freq(policy, target_freq, false);
+}
+
+static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+                                 unsigned int target_freq)
+{
+       return amd_pstate_update_freq(policy, target_freq, true);
+}
+
 static void amd_pstate_adjust_perf(unsigned int cpu,
                                   unsigned long _min_perf,
                                   unsigned long target_perf,
@@ -715,6 +737,7 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
 
        freq_qos_remove_request(&cpudata->req[1]);
        freq_qos_remove_request(&cpudata->req[0]);
+       policy->fast_switch_possible = false;
        kfree(cpudata);
 
        return 0;
@@ -1309,6 +1332,7 @@ static struct cpufreq_driver amd_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
        .verify         = amd_pstate_verify,
        .target         = amd_pstate_target,
+       .fast_switch    = amd_pstate_fast_switch,
        .init           = amd_pstate_cpu_init,
        .exit           = amd_pstate_cpu_exit,
        .suspend        = amd_pstate_cpu_suspend,
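
Why the missing callback is fatal: on the fast-switch path the cpufreq core
invokes the driver hook directly, with no NULL check. A simplified excerpt,
paraphrased from drivers/cpufreq/cpufreq.c (limit/frequency bookkeeping after
the call elided):

	unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
	{
		unsigned int freq;

		target_freq = clamp_val(target_freq, policy->min, policy->max);
		/* Direct indirect call: a NULL ->fast_switch oopses here. */
		freq = cpufreq_driver->fast_switch(policy, target_freq);

		/* ... record the new frequency for the policy's CPUs ... */
		return freq;
	}

With this patch, amd_pstate_fast_switch() reuses the same update path as
amd_pstate_target() via amd_pstate_update_freq(), only skipping the transition
notifiers, and the driver clears fast_switch_possible again on CPU exit.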