author    Rick Yiu <rickyiu@google.com>  2023-11-29 07:08:37 +0000
committer Rick Yiu <rickyiu@google.com>  2023-12-05 08:19:13 +0000
commit    4877a693370dcc15914ebda4ab2bf6089829376a (patch)
tree      e95e23feb6084f386ff4adea076fdc70a7c70d1d
parent    29d569e7ef9519e9ee8fd98ed1a659af4ecfaccd (diff)
download  gs-4877a693370dcc15914ebda4ab2bf6089829376a.tar.gz
vh: sched: Avoid cluster in CPD for RT placement
If a cluster is in CPD (cluster power down), it will have a longer
wake-up latency. This is bad for latency-sensitive tasks like RT, so
avoid such CPUs.

Bug: 309620400
Change-Id: I5f96f8f1fd63cc96e67f88bb4045d6beee7238e1
Signed-off-by: Rick Yiu <rickyiu@google.com>
-rw-r--r--  drivers/soc/google/cal-if/cal-if.c               | 16
-rw-r--r--  drivers/soc/google/vh/kernel/sched/core.c        | 10
-rw-r--r--  drivers/soc/google/vh/kernel/sched/init.c        | 20
-rw-r--r--  drivers/soc/google/vh/kernel/sched/rt.c          | 10
-rw-r--r--  drivers/soc/google/vh/kernel/sched/sched_priv.h  |  1
5 files changed, 51 insertions(+), 6 deletions(-)
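
For orientation, here is the flow the patch wires up, as a condensed sketch
(all names are taken from the diffs below; the arrows are illustrative, not
literal code):

	/* Power-state side (cal-if.c): mirror cluster state into the scheduler. */
	cal_cluster_disable(cluster)
		-> set_cluster_enabled_cb(cluster, 0)	/* callback owned by vh/sched */
			-> pixel_cluster_enabled[cluster] = 0;

	/* Placement side (rt.c): demote idle CPUs of powered-down clusters. */
	find_least_loaded_cpu()
		-> if the CPU's cluster has one CPU and !get_cluster_enabled(cluster):
			cpu_importance[cpu] = UINT_MAX;	/* ranked last */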
diff --git a/drivers/soc/google/cal-if/cal-if.c b/drivers/soc/google/cal-if/cal-if.c
index 832113cc0..f616cc95d 100644
--- a/drivers/soc/google/cal-if/cal-if.c
+++ b/drivers/soc/google/cal-if/cal-if.c
@@ -34,6 +34,9 @@
extern s32 gs_chipid_get_dvfs_version(void);
+/* The first argument is the cluster id; the second enables (1) or disables (0) it. */
+void (*set_cluster_enabled_cb)(int, int) = NULL;
+
static int (*exynos_cal_pd_bcm_sync)(unsigned int id, bool on);
static DEFINE_MUTEX(cal_pd_bcm_sync_mutex);
@@ -356,6 +359,14 @@ int cal_cpu_status(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cal_cpu_status);
+void register_set_cluster_enabled_cb(void (*func)(int, int))
+{
+ /* This callback can be registered only once. */
+ BUG_ON(set_cluster_enabled_cb);
+ set_cluster_enabled_cb = func;
+}
+EXPORT_SYMBOL_GPL(register_set_cluster_enabled_cb);
+
int cal_cluster_enable(unsigned int cluster)
{
int ret;
@@ -365,6 +376,9 @@ int cal_cluster_enable(unsigned int cluster)
ret = pmucal_cpu_cluster_enable(cluster);
spin_unlock(&pmucal_cpu_lock);
+ if (likely(ret == 0 && set_cluster_enabled_cb))
+ set_cluster_enabled_cb(cluster, 1);
+
scnprintf(clock_name, 32, "CAL_CLUSTER_ENABLE_%u", cluster);
trace_clock_set_rate(clock_name, 1, raw_smp_processor_id());
@@ -381,6 +395,8 @@ int cal_cluster_disable(unsigned int cluster)
ret = pmucal_cpu_cluster_disable(cluster);
spin_unlock(&pmucal_cpu_lock);
+ if (likely(ret == 0 && set_cluster_enabled_cb))
+ set_cluster_enabled_cb(cluster, 0);
scnprintf(clock_name, 32, "CAL_CLUSTER_ENABLE_%u", cluster);
trace_clock_set_rate(clock_name, 0, raw_smp_processor_id());
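
For reference, a minimal client of the new hook might look as follows.
register_set_cluster_enabled_cb() is the function added above; the callback
body is a hypothetical example (the real client is the vh scheduler,
registered in init.c below):

	/* Hypothetical client: log every cluster power transition. */
	static void example_cluster_cb(int cluster, int enabled)
	{
		pr_info("cluster %d %s\n", cluster, enabled ? "up" : "down");
	}

	static int __init example_init(void)
	{
		/* The BUG_ON() above means only one client may ever register. */
		register_set_cluster_enabled_cb(example_cluster_cb);
		return 0;
	}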
diff --git a/drivers/soc/google/vh/kernel/sched/core.c b/drivers/soc/google/vh/kernel/sched/core.c
index 4fc93affc..f0ac00a29 100644
--- a/drivers/soc/google/vh/kernel/sched/core.c
+++ b/drivers/soc/google/vh/kernel/sched/core.c
@@ -272,3 +272,13 @@ void rvh_rtmutex_prepare_setprio_pixel_mod(void *data, struct task_struct *p,
vp->uclamp_pi[UCLAMP_MIN] = uclamp_none(UCLAMP_MIN);
vp->uclamp_pi[UCLAMP_MAX] = uclamp_none(UCLAMP_MAX);
}
+
+void set_cluster_enabled_cb(int cluster, int enabled)
+{
+ pixel_cluster_enabled[cluster] = enabled;
+}
+
+int get_cluster_enabled(int cluster)
+{
+ return pixel_cluster_enabled[cluster];
+}
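
Note that these accessors run concurrently without a lock: the setter is
called from the cluster power path in cal-if.c, the getter from RT placement
in rt.c. The patch relies on aligned int loads and stores being atomic; a
more defensive variant (an alternative sketch, not what the patch does) would
annotate the accesses so the compiler cannot tear or cache them:

	void set_cluster_enabled_cb(int cluster, int enabled)
	{
		WRITE_ONCE(pixel_cluster_enabled[cluster], enabled);
	}

	int get_cluster_enabled(int cluster)
	{
		return READ_ONCE(pixel_cluster_enabled[cluster]);
	}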
diff --git a/drivers/soc/google/vh/kernel/sched/init.c b/drivers/soc/google/vh/kernel/sched/init.c
index eb1d2109c..6ac72e9b9 100644
--- a/drivers/soc/google/vh/kernel/sched/init.c
+++ b/drivers/soc/google/vh/kernel/sched/init.c
@@ -103,13 +103,15 @@ extern void rvh_setscheduler_pixel_mod(void *data, struct task_struct *p);
extern void rvh_find_lowest_rq_pixel_mod(void *data, struct task_struct *p,
struct cpumask *lowest_mask,
int ret, int *cpu);
-extern void rvh_update_misfit_status_pixel_mod(void *data, struct task_struct *p,
- struct rq *rq, bool *need_update);
+extern void rvh_update_misfit_status_pixel_mod(void *data, struct task_struct *p, struct rq *rq,
+ bool *need_update);
-extern struct cpufreq_governor sched_pixel_gov;
extern int pmu_poll_init(void);
+extern void set_cluster_enabled_cb(int cluster, int enabled);
+extern void register_set_cluster_enabled_cb(void (*func)(int, int));
+extern struct cpufreq_governor sched_pixel_gov;
extern bool wait_for_init;
int pixel_cpu_num;
@@ -118,6 +120,7 @@ int *pixel_cluster_start_cpu;
int *pixel_cluster_start_cpu;
int *pixel_cluster_cpu_num;
int *pixel_cpu_to_cluster;
+int *pixel_cluster_enabled;
bool pixel_cpu_init = false;
EXPORT_SYMBOL_GPL(pixel_cpu_num);
@@ -235,6 +238,11 @@ static int init_pixel_cpu(void)
if (!pixel_cluster_cpu_num)
goto out_no_pixel_cluster_cpu_num;
+ pixel_cluster_enabled = kmalloc_array(pixel_cluster_num, sizeof(int), GFP_KERNEL);
+ if (!pixel_cluster_enabled)
+ goto out_no_pixel_cluster_enabled;
+ memset(pixel_cluster_enabled, 1, pixel_cluster_num * sizeof(int));
+
cur_capacity = 0;
for_each_possible_cpu(i) {
if (arch_scale_cpu_capacity(i) > cur_capacity) {
@@ -247,11 +255,15 @@ static int init_pixel_cpu(void)
}
pixel_cpu_init = true;
+
+ register_set_cluster_enabled_cb(set_cluster_enabled_cb);
+
return 0;
+out_no_pixel_cluster_enabled:
+ kfree(pixel_cluster_cpu_num);
out_no_pixel_cluster_cpu_num:
kfree(pixel_cluster_start_cpu);
-
out_no_pixel_cluster_start_cpu:
kfree(pixel_cpu_to_cluster);
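
One subtlety in the init.c hunk above: memset() writes its value into every
byte, so each element of pixel_cluster_enabled becomes 0x01010101 rather than
1. The array is only ever tested for zero versus nonzero, so the patch behaves
correctly, but an explicit loop (a sketch, not part of the patch) would state
the intent more directly:

	int c;

	/* Assume every cluster is powered on until cal-if reports otherwise. */
	for (c = 0; c < pixel_cluster_num; c++)
		pixel_cluster_enabled[c] = 1;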
diff --git a/drivers/soc/google/vh/kernel/sched/rt.c b/drivers/soc/google/vh/kernel/sched/rt.c
index cba70b981..33eb24c92 100644
--- a/drivers/soc/google/vh/kernel/sched/rt.c
+++ b/drivers/soc/google/vh/kernel/sched/rt.c
@@ -18,9 +18,10 @@ extern int cpu_is_idle(int cpu);
extern int sched_cpu_idle(int cpu);
extern bool get_prefer_high_cap(struct task_struct *p);
-extern int ___update_load_sum(u64 now, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running);
+extern int ___update_load_sum(u64 now, struct sched_avg *sa, unsigned long load,
+ unsigned long runnable, int running);
extern void ___update_load_avg(struct sched_avg *sa, unsigned long load);
+extern int get_cluster_enabled(int cluster);
/*****************************************************************************/
/* Upstream Code Section */
@@ -188,6 +189,11 @@ static int find_least_loaded_cpu(struct task_struct *p, struct cpumask *lowest_m
if (is_idle)
util[cpu] = 0;
+ /* Avoid an idle CPU whose single-CPU cluster is in CPD. */
+ if (is_idle && pixel_cluster_cpu_num[pixel_cpu_to_cluster[cpu]] == 1 &&
+     !get_cluster_enabled(pixel_cpu_to_cluster[cpu]))
+ cpu_importance[cpu] = UINT_MAX;
+
if (task_fits[cpu]) {
fit_and_non_overutilized_found |= !overutilize[cpu];
fit_and_overutilized_found |= overutilize[cpu];
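
The new rt.c check demotes an idle CPU only when it is the sole CPU of its
cluster and that cluster is disabled: waking it would first require powering
the whole cluster back up. Writing UINT_MAX to cpu_importance ranks the CPU
last rather than excluding it, so it can still be picked if nothing better
exists. A self-contained sketch of the predicate (the arrays and getter are
from the patch; the wrapper function is illustrative):

	/* True if waking @cpu means powering its whole cluster back up first. */
	static bool cpu_in_powered_down_cluster(int cpu)
	{
		int cluster = pixel_cpu_to_cluster[cpu];

		return pixel_cluster_cpu_num[cluster] == 1 &&
		       !get_cluster_enabled(cluster);
	}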
diff --git a/drivers/soc/google/vh/kernel/sched/sched_priv.h b/drivers/soc/google/vh/kernel/sched/sched_priv.h
index 66c4c682a..d10611cab 100644
--- a/drivers/soc/google/vh/kernel/sched/sched_priv.h
+++ b/drivers/soc/google/vh/kernel/sched/sched_priv.h
@@ -49,6 +49,7 @@ extern int pixel_cluster_num;
extern int *pixel_cluster_start_cpu;
extern int *pixel_cluster_cpu_num;
extern int *pixel_cpu_to_cluster;
+extern int *pixel_cluster_enabled;
#define cpu_overutilized(cap, max, cpu) \
((cap) * sched_capacity_margin[cpu] > (max) << SCHED_CAPACITY_SHIFT)