author	Xiaoliang Wang <xiaoliang.wang@amlogic.com>	2019-01-17 11:34:45 (GMT)
committer	Gerrit Code Review <gituser@droid04>	2019-01-17 11:34:45 (GMT)
commit	1591bfb60df4a7b7746642f8cbf7055c91013073 (patch)
tree	3a37972af0b38e9b3f8a0fbb1275c3e9eef9a154
parent	ce5f3f319eb8916575fbbf1dd8e436e88f33a683 (diff)
parent	22ad15a9c328c2546338846276c99af307364d05 (diff)
Merge "cpu_hotplug: add cpu_hotplug_reserve_cpus interface [1/1]" into p-tv-atom
Diffstat
-rw-r--r--	drivers/amlogic/cpu_hotplug/cpu_hotplug.c	62
-rw-r--r--	include/linux/amlogic/cpu_hotplug.h	5
-rw-r--r--	kernel/sched/core.c	29
3 files changed, 93 insertions(+), 3 deletions(-)
diff --git a/drivers/amlogic/cpu_hotplug/cpu_hotplug.c b/drivers/amlogic/cpu_hotplug/cpu_hotplug.c
index c8cb0d9..6b819c4 100644
--- a/drivers/amlogic/cpu_hotplug/cpu_hotplug.c
+++ b/drivers/amlogic/cpu_hotplug/cpu_hotplug.c
@@ -50,6 +50,23 @@ struct cpu_hotplug_s {
static struct cpu_hotplug_s hpg;
+unsigned long reserved_cpus;
+
+static int __init setup_hotplug_reserved_cpus(char *str)
+{
+ int ret;
+
+ ret = kstrtoul(str, 16, &reserved_cpus);
+
+ if (ret)
+ pr_err("read hotplug_reserved_cpus err:%s\n", str);
+ else
+ pr_info("hotplug_reserved_cpus = 0x%lx\n", reserved_cpus);
+
+ return 1;
+}
+__setup("hotplug_reserved_cpus=", setup_hotplug_reserved_cpus);
+
int cpu_hotplug_cpumask_init(void)
{
int cpu, clstr;
@@ -150,13 +167,16 @@ int cpu_hotplug_gov(int clustr, int num, int flg, cpumask_t *mask)
static int __ref cpu_hotplug_thread(void *data)
{
- unsigned int clustr, cpu, flg, online;
+ unsigned int clustr, cpu = 0, flg, online;
int target, cnt;
unsigned long flags;
while (1) {
+ pr_info("%s()\n", __func__);
+
if (kthread_should_stop())
break;
+
mutex_lock(&hpg.mutex);
for (clustr = 0; clustr < hpg.clusters; clustr++) {
if (!hpg.flgs[clustr])
@@ -171,6 +191,10 @@ static int __ref cpu_hotplug_thread(void *data)
if (online >= hpg.gov_num[clustr] ||
online >= hpg.max_num[clustr])
break;
+
+ pr_info("%s() device_online: %d\n",
+ __func__,
+ cpu);
device_online(get_cpu_device(cpu));
cpumask_set_cpu(cpu,
&hpg.null_thread[clustr]->cpus_allowed);
@@ -183,25 +207,42 @@ static int __ref cpu_hotplug_thread(void *data)
break;
if (cnt++ > 20)
break;
+
raw_spin_lock_irqsave(
&hpg.null_thread[clustr]->pi_lock,
flags);
+
target = cpumask_next(
cpumask_first(&hpg.cpumask[clustr]),
&hpg.cpumask[clustr]);
+
target = select_cpu_for_hotplug(
hpg.null_thread[clustr],
target, SD_BALANCE_WAKE, 0);
+
raw_spin_unlock_irqrestore(
&hpg.null_thread[clustr]->pi_lock,
flags);
+
if (!cpumask_test_cpu(target,
&hpg.cpumask[clustr])) {
goto clear_cpu;
}
+
if (!cpu_online(target) ||
cpumask_first(hpg.cpumask) == target)
goto clear_cpu;
+
+ if (1 << target & reserved_cpus) {
+ pr_info("%d in resrved 0x%lx\n",
+ target,
+ reserved_cpus);
+ goto clear_cpu;
+ }
+
+ pr_info("%s() device_offline: %u\n",
+ __func__,
+ target);
device_offline(get_cpu_device(target));
clear_cpu:
cpumask_clear_cpu(target,
@@ -248,16 +289,33 @@ static ssize_t store_hotplug_max_cpus(struct kobject *kobj,
ret = kstrtouint(buf, 0, &input);
+ pr_info("%s() input = 0x%x\n", __func__, input);
+
for (c = 0; c < hpg.clusters; c++) {
max = input & 0xff;
- if (max)
+ if (max) {
+ pr_info("%s() max = %u, cluster = %u\n",
+ __func__, max, c);
cpu_hotplug_set_max(max, c);
+ }
input = input >> 8;
}
return count;
}
define_one_global_rw(hotplug_max_cpus);
+/*
+ * CPUs in the reserved cpumask can't be hot-unplugged.
+ * The mask is a bitmap indexed by cpu number,
+ * e.g.:
+ * cpu0 & cpu1 -> 0x3
+ * cpu3 -> 0x8
+ */
+void cpu_hotplug_reserve_cpus(unsigned long cpumask)
+{
+ pr_info("%s() mask = 0x%lx\n", __func__, cpumask);
+ reserved_cpus = cpumask;
+}
static int __init cpu_hotplug_init(void)
{
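
Note: setup_hotplug_reserved_cpus() above parses its argument with kstrtoul(str, 16, ...),
so the boot parameter is a hex bitmap indexed by cpu number, e.g.
"hotplug_reserved_cpus=8" reserves cpu3. A minimal user-space sketch of building
such a mask from a cpu list (the list here is illustrative, not part of this commit):

	#include <stdio.h>

	int main(void)
	{
		unsigned long mask = 0;
		const int reserved[] = { 0, 1 };	/* cpu0 & cpu1 -> 0x3 */
		unsigned int i;

		for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++)
			mask |= 1UL << reserved[i];

		printf("hotplug_reserved_cpus=%lx\n", mask);	/* prints ...=3 */
		return 0;
	}
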
diff --git a/include/linux/amlogic/cpu_hotplug.h b/include/linux/amlogic/cpu_hotplug.h
index 508d3e9..fa9b0d3 100644
--- a/include/linux/amlogic/cpu_hotplug.h
+++ b/include/linux/amlogic/cpu_hotplug.h
@@ -30,6 +30,8 @@ unsigned int cpu_hotplug_get_min(int clustr);
int cpu_hotplug_gov(int clustr, int num, int flg, cpumask_t *mask);
int select_cpu_for_hotplug(struct task_struct *p,
int cpu, int sd_flags, int wake_flags);
+void cpu_hotplug_reserve_cpus(unsigned long cpumask);
+extern unsigned long reserved_cpus;
#else
static inline void cpufreq_set_max_cpu_num(unsigned int n, unsigned int c)
{
@@ -56,6 +58,9 @@ static inline int select_cpu_for_hotplug(struct task_struct *p,
return -1;
}
+static inline void cpu_hotplug_reserve_cpus(unsigned long cpumask)
+{
+}
#endif
#endif
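
The header change exports two entry points: the reserved_cpus bitmap itself and
cpu_hotplug_reserve_cpus() for setting it at runtime. A hedged sketch of a
hypothetical in-kernel caller (the initcall name is illustrative, not part of
this commit):

	#include <linux/init.h>
	#include <linux/amlogic/cpu_hotplug.h>

	static int __init board_reserve_cpus(void)
	{
		/* keep cpu3 from being hot-unplugged: bit 3 -> 0x8 */
		cpu_hotplug_reserve_cpus(0x8);
		return 0;
	}
	late_initcall(board_reserve_cpus);
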
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bac9c21..baa18db 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -93,6 +93,8 @@
#include <trace/events/sched.h>
#include "walt.h"
+#include <linux/amlogic/cpu_hotplug.h>
+
DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -1113,6 +1115,9 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
struct rq *rq = task_rq(p);
bool queued, running;
+ struct cpumask mask;
+
+ cpumask_copy(&mask, new_mask);
lockdep_assert_held(&p->pi_lock);
@@ -1130,7 +1135,29 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
if (running)
put_prev_task(rq, p);
- p->sched_class->set_cpus_allowed(p, new_mask);
+ if (reserved_cpus & 0x8) {
+ if (p->mm && *cpumask_bits(&p->cpus_allowed) == 0x8) {
+ *cpumask_bits(&mask) = 0x8;
+ pr_info("keep cpu3 bind: %d:%s 0x%lx->0x%lx:0x%lx\n",
+ p->pid, p->comm,
+ *cpumask_bits(&p->cpus_allowed),
+ *cpumask_bits(new_mask),
+ *cpumask_bits(&mask));
+ }
+
+ if (*cpumask_bits(&mask) == 0x8) {
+ pr_info("set affinity() %d:%s 0x%lx->0x%lx:0x%lx\n",
+ p->pid, p->comm,
+ *cpumask_bits(&p->cpus_allowed),
+ *cpumask_bits(new_mask),
+ *cpumask_bits(&mask));
+ dump_stack();
+ } else
+ cpumask_clear_cpu(3, &mask);
+
+ }
+
+ p->sched_class->set_cpus_allowed(p, &mask);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE);
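
The do_set_cpus_allowed() hook above only acts when bit 3 of reserved_cpus is
set: a user task (p->mm != NULL) already bound to cpu3 alone keeps that binding,
a resulting mask of exactly cpu3 is logged with a stack dump, and every other
requested mask has cpu3 cleared. A stand-alone sketch of that decision, using a
plain unsigned long in place of struct cpumask (the function name is
illustrative):

	/* Returns the effective affinity mask for a requested mask. */
	static unsigned long filter_reserved_cpu3(unsigned long cur_mask,
						  unsigned long req_mask,
						  bool has_mm)
	{
		unsigned long mask = req_mask;

		if (has_mm && cur_mask == 0x8)	/* keep an explicit cpu3-only bind */
			mask = 0x8;
		if (mask != 0x8)		/* otherwise strip cpu3 */
			mask &= ~(1UL << 3);
		return mask;
	}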