[CPUFREQ] ondemand/conservative: sanitize sampling_rate restrictions
Limit the lowest settable sampling_rate to transition_latency * 100, or to the kernel's own limits where those are higher. If a user tries to set sampling_rate too low, the lowest allowed value is used instead.

Signed-off-by: Thomas Renninger <trenn@suse.de>
Signed-off-by: Dave Jones <davej@redhat.com>

parent 9411b4ef7f
commit 112124ab0a

3 changed files with 49 additions and 21 deletions
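For orientation, here is a minimal user-space sketch of the rule described above, assuming a made-up transition latency and kernel floor; the kernel itself implements this with def_sampling_rate and MIN_STAT_SAMPLING_RATE inside the governors, as the hunks below show.

#include <stdio.h>

static unsigned int umax(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int latency_us = 10;		/* assumed HW transition latency, in us */
	unsigned int kernel_floor_us = 20000;	/* assumed HZ/stat-derived kernel limit */

	/* lowest settable rate: transition_latency * 100, or the kernel limit if higher */
	unsigned int min_rate_us = umax(latency_us * 100, kernel_floor_us);

	/* a write below the minimum is raised to it instead of being rejected */
	unsigned int requested_us = 500;
	unsigned int effective_us = umax(requested_us, min_rate_us);

	printf("minimum=%u us, requested=%u us, effective=%u us\n",
	       min_rate_us, requested_us, effective_us);
	return 0;
}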
Documentation/cpu-freq/governors.txt
@@ -117,7 +117,19 @@ accessible parameters:
 sampling_rate: measured in uS (10^-6 seconds), this is how often you
 want the kernel to look at the CPU usage and to make decisions on
 what to do about the frequency. Typically this is set to values of
-around '10000' or more.
+around '10000' or more. Its default value is (cmp. with users-guide.txt):
+transition_latency * 1000
+The lowest value you can set is:
+transition_latency * 100, or it may get restricted to a value where it
+makes no sense for the kernel to poll that often anymore, which depends
+on your HZ config variable (HZ=1000: max=20000us, HZ=250: max=5000us).
+Be aware that transition latency is in ns and sampling_rate is in us, so you
+get the same sysfs value by default.
+The sampling rate should always be adjusted considering the transition latency.
+To set the sampling rate to 750 times the transition latency
+in bash (as said, 1000 is the default), do:
+echo $(($(cat cpuinfo_transition_latency) * 750 / 1000)) \
+    >ondemand/sampling_rate

 show_sampling_rate_(min|max): THIS INTERFACE IS DEPRECATED, DON'T USE IT.
 You can use wider ranges now and the general
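To make the formulas in the documentation text above concrete, a small worked example in C, assuming a CPU whose cpuinfo_transition_latency reads 10000 ns (the value is illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int latency_ns = 10000;		/* assumed cpuinfo_transition_latency */
	unsigned int latency_us = latency_ns / 1000;	/* 10 us */

	unsigned int default_rate = latency_us * 1000;	/* transition_latency * 1000 -> 10000 us */
	unsigned int lowest_rate  = latency_us * 100;	/* transition_latency * 100  ->  1000 us */
	unsigned int tuned_rate   = latency_ns * 750 / 1000;	/* the 750x example -> 7500 us */

	/* default_rate in us is numerically the same as latency_ns, which is
	 * what the documentation means by "the same sysfs value by default" */
	printf("default=%u us, lowest=%u us, 750x=%u us\n",
	       default_rate, lowest_rate, tuned_rate);
	return 0;
}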
drivers/cpufreq/cpufreq_conservative.c
@@ -54,8 +54,20 @@ static unsigned int def_sampling_rate;
 			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
 #define MIN_SAMPLING_RATE			\
 			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
+ * Define the minimal settable sampling rate to the greater of:
+ *   - "HW transition latency" * 100 (same as default sampling / 10)
+ *   - MIN_STAT_SAMPLING_RATE
+ * To avoid that userspace shoots itself.
+ */
+static unsigned int minimum_sampling_rate(void)
+{
+	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
+#define LATENCY_MULTIPLIER			(1000)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(10)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
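The MIN_STAT_SAMPLING_RATE floor used by minimum_sampling_rate() above is MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10), so it scales with the HZ config. A rough sketch of that scaling; the ratio constant is an assumed stand-in here, not taken from the patch:

#include <stdio.h>

/* approximate jiffies_to_usecs(): one jiffy lasts 1000000/HZ microseconds */
static unsigned int jiffies_to_usecs_approx(unsigned int jiffies, unsigned int hz)
{
	return jiffies * (1000000 / hz);
}

int main(void)
{
	const unsigned int ratio = 2;	/* assumed stand-in for MIN_SAMPLING_RATE_RATIO */
	const unsigned int hz_values[] = { 1000, 250, 100 };

	for (int i = 0; i < 3; i++) {
		unsigned int floor_us = ratio * jiffies_to_usecs_approx(10, hz_values[i]);
		printf("HZ=%u -> stat floor about %u us\n", hz_values[i], floor_us);
	}
	return 0;
}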
@@ -208,13 +220,11 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	ret = sscanf(buf, "%u", &input);

 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_SAMPLING_RATE ||
-			input < MIN_SAMPLING_RATE) {
+	if (ret != 1) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}

-	dbs_tuners_ins.sampling_rate = input;
+	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
 	mutex_unlock(&dbs_mutex);

 	return count;
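The user-visible effect of the hunk above: a too-low write no longer fails with EINVAL, it succeeds and the governor keeps a clamped value, so scripts should read sampling_rate back to see what actually took effect. A tiny sketch with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned int requested = 100;	/* deliberately below the minimum */
	unsigned int minimum   = 20000;	/* assumed minimum_sampling_rate() result */

	/* mirrors dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate()) */
	unsigned int stored = requested < minimum ? minimum : requested;

	printf("wrote %u us, governor keeps %u us\n", requested, stored);
	return 0;
}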
@@ -540,11 +550,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (latency == 0)
 			latency = 1;

-		def_sampling_rate = 10 * latency *
-				DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
-		if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
-			def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+		def_sampling_rate =
+			max(10 * latency * LATENCY_MULTIPLIER,
+			    MIN_STAT_SAMPLING_RATE);

 		dbs_tuners_ins.sampling_rate = def_sampling_rate;

drivers/cpufreq/cpufreq_ondemand.c
@@ -52,8 +52,20 @@ static unsigned int def_sampling_rate;
 			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
 #define MIN_SAMPLING_RATE			\
 			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
+ * Define the minimal settable sampling rate to the greater of:
+ *   - "HW transition latency" * 100 (same as default sampling / 10)
+ *   - MIN_STAT_SAMPLING_RATE
+ * To avoid that userspace shoots itself.
+ */
+static unsigned int minimum_sampling_rate(void)
+{
+	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
+#define LATENCY_MULTIPLIER			(1000)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

 static void do_dbs_timer(struct work_struct *work);
@@ -255,13 +267,11 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	ret = sscanf(buf, "%u", &input);

 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_SAMPLING_RATE
-			|| input < MIN_SAMPLING_RATE) {
+	if (ret != 1) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}

-	dbs_tuners_ins.sampling_rate = input;
+	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
 	mutex_unlock(&dbs_mutex);

 	return count;
@@ -607,11 +617,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (latency == 0)
 			latency = 1;

-		def_sampling_rate = latency *
-				DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
-		if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
-			def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+		def_sampling_rate =
+			max(latency * LATENCY_MULTIPLIER,
+			    MIN_STAT_SAMPLING_RATE);

 		dbs_tuners_ins.sampling_rate = def_sampling_rate;
 	}
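Taken together with the corresponding conservative hunk earlier, the only difference in the defaults is the factor of 10: conservative uses max(10 * latency * LATENCY_MULTIPLIER, MIN_STAT_SAMPLING_RATE), ondemand uses max(latency * LATENCY_MULTIPLIER, MIN_STAT_SAMPLING_RATE). A side-by-side sketch with an assumed 10 us latency and 20000 us stat floor:

#include <stdio.h>

static unsigned int umax(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int latency_us = 10;		/* assumed transition latency in us */
	unsigned int multiplier = 1000;		/* LATENCY_MULTIPLIER */
	unsigned int stat_floor = 20000;	/* assumed MIN_STAT_SAMPLING_RATE */

	unsigned int ondemand_def     = umax(latency_us * multiplier, stat_floor);
	unsigned int conservative_def = umax(10 * latency_us * multiplier, stat_floor);

	printf("ondemand default=%u us, conservative default=%u us\n",
	       ondemand_def, conservative_def);
	return 0;
}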