author    Mark Rutland <mark.rutland@arm.com>  2013-08-14 17:20:24 +0100
committer Sireesh Tripurari <sireesh.tripurari@linaro.org>  2013-12-10 10:47:10 +0000
commit    8c3ddfb6b2659ac2bc5931833ef79bcf20623c49 (patch)
tree      eb7efca014dd1b496d593930f24e23278a0bd720
parent    6ab4e741f92e235e8d565b17847738b96c28556c (diff)
download  juice-8c3ddfb6b2659ac2bc5931833ef79bcf20623c49.tar.gz
arm64: reorganise smp_enable_ops
For hotplug support, we're going to want a place to store operations that
do more than bring CPUs online, and it makes sense to group these with our
current smp_enable_ops. This patch renames smp_enable_ops to smp_operations
to make the intended use of the structure clearer. While we're at it, fix
up instances of the cpu parameter to be an unsigned int, drop the init
markings and rename the *_cpu functions to cpu_* to reduce future churn
when smp_operations is extended.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
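For reference, the rename amounts to the following (a sketch reconstructed
from the commit message and the asm/smp.h hunk below; the old field names
follow the *_cpu convention the message describes):

	/* before (per the commit message; sketch) */
	struct smp_enable_ops {
		const char	*name;
		int		(*init_cpu)(struct device_node *, int);
		int		(*prepare_cpu)(int);
	};

	/* after (as added to asm/smp.h below) */
	struct smp_operations {
		const char	*name;
		int		(*cpu_init)(struct device_node *, unsigned int);
		int		(*cpu_prepare)(unsigned int);
	};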
-rw-r--r--	arch/arm64/include/asm/smp.h		15
-rw-r--r--	arch/arm64/kernel/smp.c			226
-rw-r--r--	arch/arm64/kernel/smp_psci.c		53
-rw-r--r--	arch/arm64/kernel/smp_spin_table.c	89
4 files changed, 213 insertions(+), 170 deletions(-)
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a498f2cd2c2..90626b6a01f 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,14 +60,21 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
-extern void secondary_entry(void);
+extern void secondary_holding_pen(void);
+extern volatile unsigned long secondary_holding_pen_release;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-extern int __cpu_disable(void);
+struct device_node;
-extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
+struct smp_operations {
+ const char *name;
+ int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_prepare)(unsigned int);
+};
+
+extern const struct smp_operations smp_spin_table_ops;
+extern const struct smp_operations smp_psci_ops;
#endif /* ifndef __ASM_SMP_H */
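To illustrate the interface, a new enable method would provide the two
callbacks and export a named smp_operations instance for smp.c to match
against the device tree (hypothetical "foo" backend, not part of this
patch):

	#include <linux/of.h>
	#include <linux/smp.h>

	static int smp_foo_cpu_init(struct device_node *dn, unsigned int cpu)
	{
		/* parse any method-specific properties of the cpu node */
		return 0;
	}

	static int smp_foo_cpu_prepare(unsigned int cpu)
	{
		/* make the CPU runnable, e.g. point it at secondary_holding_pen */
		return 0;
	}

	const struct smp_operations smp_foo_ops = {
		.name		= "foo",
		.cpu_init	= smp_foo_cpu_init,
		.cpu_prepare	= smp_foo_cpu_prepare,
	};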
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a0c2ca602cf..86f282173e5 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,7 +39,7 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
-#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -55,6 +55,7 @@
* where to place its SVC stack
*/
struct secondary_data secondary_data;
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
@@ -63,16 +64,61 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
+
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (secondary_holding_pen_release == INVALID_HWID)
+ break;
+ udelay(10);
+ }
+
+	/*
+	 * Now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	raw_spin_unlock(&boot_lock);
+
-	if (cpu_ops[cpu]->cpu_boot)
-		return cpu_ops[cpu]->cpu_boot(cpu);
-	return -EOPNOTSUPP;
+ return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
}
static DECLARE_COMPLETION(cpu_running);
@@ -142,12 +188,16 @@ asmlinkage void secondary_start_kernel(void)
preempt_disable();
trace_hardirqs_off();
+	/*
+	 * Let the primary processor know we're out of the
+	 * pen, then head off into the C entry point
+	 */
+	write_pen_release(INVALID_HWID);
+
-	if (cpu_ops[cpu]->cpu_postboot)
-		cpu_ops[cpu]->cpu_postboot();
	/*
-	 * Enable GIC and timers.
+	 * Synchronise with the boot thread.
	 */
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
+
+	/*
+	 * Enable GIC and timers.
+	 */
	notify_cpu_starting(cpu);
/*
@@ -158,9 +208,14 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
local_irq_enable();
local_fiq_enable();
- local_async_enable();
+
/*
* OK, it's off to the idle thread for us
@@ -168,57 +223,57 @@ asmlinkage void secondary_start_kernel(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int op_cpu_disable(unsigned int cpu)
+void __init smp_cpus_done(unsigned int max_cpus)
 {
-	/*
-	 * If we don't have a cpu_die method, abort before we reach the point
-	 * of no return. CPU0 may not have an cpu_ops, so test for it.
-	 */
-	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
-		return -EOPNOTSUPP;
+	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+}
 
-	/*
-	 * We may need to abort a hot unplug for some other mechanism-specific
-	 * reason.
-	 */
-	if (cpu_ops[cpu]->cpu_disable)
-		return cpu_ops[cpu]->cpu_disable(cpu);
+void __init smp_prepare_boot_cpu(void)
+{
+}
 
-	return 0;
-}
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
-/*
- * __cpu_disable runs on the processor to be shutdown.
- */
-int __cpu_disable(void)
-{
-	unsigned int cpu = smp_processor_id();
-	int ret;
+static const struct smp_operations *supported_smp_ops[] __initconst = {
+	&smp_spin_table_ops,
+	&smp_psci_ops,
+	NULL,
+};
 
-	ret = op_cpu_disable(cpu);
-	if (ret)
-		return ret;
+static const struct smp_operations *smp_ops[NR_CPUS];
 
-	/*
-	 * Take this CPU offline.  Once we clear this, we can't return,
-	 * and we must not schedule until we're ready to give up the cpu.
-	 */
-	set_cpu_online(cpu, false);
+static const struct smp_operations * __init smp_get_ops(const char *name)
+{
+	const struct smp_operations **ops = supported_smp_ops;
 
-	/*
-	 * OK - migrate IRQs away from this CPU
-	 */
-	migrate_irqs();
+	while (*ops) {
+		if (!strcmp(name, (*ops)->name))
+			return *ops;
 
-	/*
-	 * Remove this CPU from the vm mask set of all processes.
-	 */
-	clear_tasks_mm_cpumask(cpu);
+		ops++;
+	}
 
-	return 0;
-}
-
-static DECLARE_COMPLETION(cpu_died);
+	return NULL;
+}
@@ -231,49 +286,42 @@ void __cpu_die(unsigned int cpu)
-/*
- * called on the thread which is asking for a CPU to be shutdown -
- * waits until shutdown has completed, or it is timed out.
- */
-void __cpu_die(unsigned int cpu)
-{
-	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
-		pr_crit("CPU%u: cpu didn't die\n", cpu);
-		return;
-	}
-	pr_notice("CPU%u: shutdown\n", cpu);
-}
-
-/*
- * Called from the idle thread for the CPU which has been shutdown.
- *
- * Note that we disable IRQs here, but do not re-enable them
- * before returning to the caller. This is also the behaviour
- * of the other hotplug-cpu capable cores, so presumably coming
- * out of idle fixes this.
- */
-void cpu_die(void)
-{
-	unsigned int cpu = smp_processor_id();
-
-	idle_task_exit();
-
-	local_irq_disable();
-
-	/* Tell __cpu_die() that this CPU is now safe to dispose of */
-	complete(&cpu_died);
-
-	/*
-	 * Actually shutdown the CPU. This must never fail. The specific hotplug
-	 * mechanism must perform all required cache maintenance to ensure that
-	 * no dirty lines are lost in the process of shutting down the CPU.
-	 */
-	cpu_ops[cpu]->cpu_die(cpu);
-
-	BUG();
-}
-#endif
-
-void __init smp_cpus_done(unsigned int max_cpus)
-{
-	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-}
-
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-
 /*
  * Enumerate the possible CPU set from the device tree and build the
@@ -281,7 +329,15 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int);
  * cpu logical map array containing MPIDR values related to logical
  * cpus. Assumes that cpu_logical_map(0) has already been initialized.
  */
 void __init smp_init_cpus(void)
 {
+	const char *enable_method;
 	struct device_node *dn = NULL;
 	unsigned int i, cpu = 1;
 	bool bootcpu_valid = false;
@@ -352,10 +408,19 @@ void __init smp_init_cpus(void)
if (cpu >= NR_CPUS)
goto next;
+		/*
+		 * We currently support only the "spin-table" enable-method.
+		 */
+		enable_method = of_get_property(dn, "enable-method", NULL);
+		if (!enable_method) {
+			pr_err("%s: missing enable-method property\n",
+				dn->full_name);
+			goto next;
+		}
+
-		if (cpu_read_ops(dn, cpu) != 0)
-			goto next;
+		smp_ops[cpu] = smp_get_ops(enable_method);
+		if (!smp_ops[cpu])
+			goto next;
 
-		if (cpu_ops[cpu]->cpu_init(dn, cpu))
+		if (smp_ops[cpu]->cpu_init(dn, cpu))
 			goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -413,10 +478,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
-		if (!cpu_ops[cpu])
+		if (!smp_ops[cpu])
 			continue;
 
-		err = cpu_ops[cpu]->cpu_prepare(cpu);
+		err = smp_ops[cpu]->cpu_prepare(cpu);
 		if (err)
 			continue;
@@ -456,7 +524,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
prec >= 4 ? " " : "");
- for_each_online_cpu(cpu)
+ for_each_present_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
seq_printf(p, " %s\n", ipi_types[i]);
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
new file mode 100644
index 00000000000..2f0d3dd2164
--- /dev/null
+++ b/arch/arm64/kernel/smp_psci.c
@@ -0,0 +1,53 @@
+/*
+ * PSCI SMP initialisation
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+#include <asm/psci.h>
+#include <asm/smp_plat.h>
+
+static int smp_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ return 0;
+}
+
+static int smp_psci_cpu_prepare(unsigned int cpu)
+{
+ int err;
+
+ if (!psci_ops.cpu_on) {
+ pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
+ if (err) {
+ pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct smp_operations smp_psci_ops = {
+ .name = "psci",
+ .cpu_init = smp_psci_cpu_init,
+ .cpu_prepare = smp_psci_cpu_prepare,
+};
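A cpu node selects this backend when its enable-method property is "psci";
sketched in C terms (a condensed view of the call chain, not literal code
from the patch), the matching done by smp.c above then resolves to:

	/* in smp_init_cpus(), for a cpu node with enable-method = "psci": */
	smp_ops[cpu] = smp_get_ops("psci");	/* -> &smp_psci_ops */
	smp_ops[cpu]->cpu_init(dn, cpu);	/* nothing to parse for PSCI */

	/* later, in smp_prepare_cpus(): */
	smp_ops[cpu]->cpu_prepare(cpu);		/* PSCI CPU_ON into the holding pen */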
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 44c22805d2e..5fecffce1a0 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,37 +16,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
-#include <asm/cpu_ops.h>
-#include <asm/cputype.h>
-#include <asm/smp_plat.h>
-
-extern void secondary_holding_pen(void);
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
-
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
-
static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
{
@@ -72,16 +48,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
-
- /*
- * We write the release address as LE regardless of the native
- * endianess of the kernel. Therefore, any boot-loaders that
- * read this address need to convert this address to the
- * boot-loader's endianess before jumping. This is mandated by
- * the boot protocol.
- */
- release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
-
+ release_addr[0] = (void *)__pa(secondary_holding_pen);
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
/*
@@ -92,60 +59,8 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
return 0;
}
-static int smp_spin_table_cpu_boot(unsigned int cpu)
-{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
-
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
-
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
-}
-
-void smp_spin_table_cpu_postboot(void)
-{
- /*
- * Let the primary processor know we're out of the pen.
- */
- write_pen_release(INVALID_HWID);
-
- /*
- * Synchronise with the boot thread.
- */
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
-}
-
-const struct cpu_operations smp_spin_table_ops = {
+const struct smp_operations smp_spin_table_ops = {
.name = "spin-table",
.cpu_init = smp_spin_table_cpu_init,
.cpu_prepare = smp_spin_table_cpu_prepare,
- .cpu_boot = smp_spin_table_cpu_boot,
- .cpu_postboot = smp_spin_table_cpu_postboot,
};
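Taken together with the smp.c changes above, the spin-table boot sequence
after this patch is roughly as follows (a condensed sketch of code shown in
the diff; error handling and locking elided):

	/* smp_spin_table_cpu_prepare(): publish the pen's physical address */
	release_addr[0] = (void *)__pa(secondary_holding_pen);
	__flush_dcache_area(release_addr, sizeof(release_addr[0]));
	sev();					/* wake CPUs polling the release address */

	/* boot_secondary(): let one CPU out of the holding pen */
	write_pen_release(cpu_logical_map(cpu));
	sev();

	/* secondary_start_kernel(), on the incoming CPU */
	write_pen_release(INVALID_HWID);	/* tell the boot CPU we are out */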