author     Jon Medhurst <tixy@linaro.org>  2012-07-19 15:50:23 +0100
committer  Jon Medhurst <tixy@linaro.org>  2012-07-19 15:50:23 +0100
commit     4c0b0ef7cc5dd201afcc4f9cbd75752701c05661 (patch)
tree       b6a9c056a283194c9569e23ae419b6efcfba8c3d
parent     24656aad90ff50d08a40078ae2e24082091afece (diff)
parent     527ccf55a87d6515d49ad2505a8e51f5f1a0479d (diff)
Merge branch 'tracking-armlt-tc2-pm' of git://git.linaro.org/landing-teams/working/arm/kernel into armlt-linaro-android-3.5-alt
Conflicts:
	drivers/misc/Kconfig
	drivers/misc/Makefile
-rw-r--r--  arch/arm/Kconfig                                  1
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts      12
-rw-r--r--  arch/arm/common/gic.c                             8
-rw-r--r--  arch/arm/include/asm/cacheflush.h                38
-rw-r--r--  arch/arm/kernel/arch_timer.c                      2
-rw-r--r--  arch/arm/kernel/sleep.S                          32
-rw-r--r--  arch/arm/kernel/smp.c                            56
-rw-r--r--  arch/arm/kernel/suspend.c                        23
-rw-r--r--  arch/arm/mach-vexpress/Kconfig                   15
-rw-r--r--  arch/arm/mach-vexpress/Makefile                   1
-rw-r--r--  arch/arm/mach-vexpress/cpuidle-tc2.c            281
-rw-r--r--  arch/arm/mach-vexpress/hotplug-asm.S             31
-rw-r--r--  arch/arm/mach-vexpress/tc2-sleep.S               60
-rw-r--r--  arch/arm/mm/cache-v7.S                           43
-rw-r--r--  arch/arm/mm/proc-macros.S                         7
-rw-r--r--  drivers/cpufreq/Kconfig.arm                       9
-rw-r--r--  drivers/cpufreq/Makefile                          1
-rw-r--r--  drivers/cpufreq/vexpress_bL_cpufreq.c           276
-rw-r--r--  drivers/cpuidle/Kconfig                           3
-rw-r--r--  drivers/cpuidle/Makefile                          1
-rw-r--r--  drivers/cpuidle/coupled.c                       715
-rw-r--r--  drivers/cpuidle/cpuidle.c                        68
-rw-r--r--  drivers/cpuidle/cpuidle.h                        32
-rw-r--r--  drivers/misc/Kconfig                              4
-rw-r--r--  drivers/misc/Makefile                             3
-rw-r--r--  drivers/misc/arm-cci.c                          130
-rw-r--r--  drivers/misc/vexpress/Kconfig                     2
-rw-r--r--  drivers/misc/vexpress/Makefile                    1
-rw-r--r--  drivers/misc/vexpress/arm-spc.c                 371
-rw-r--r--  include/linux/arm-cci.h                          30
-rw-r--r--  include/linux/cpuidle.h                          11
-rw-r--r--  include/linux/vexpress.h                         50
32 files changed, 2284 insertions, 33 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e9e5aeb098a..b26f8c08f39 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -303,6 +303,7 @@ config ARCH_VERSATILE
config ARCH_VEXPRESS
bool "ARM Ltd. Versatile Express family"
+ select ARCH_HAS_CPUFREQ
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_TIMER_SP804
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
index b947cb6b48b..68e8fc6948f 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
@@ -42,6 +42,7 @@
cluster0: cluster@0 {
reg = <0>;
+ freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
cores {
#address-cells = <1>;
#size-cells = <0>;
@@ -59,6 +60,7 @@
cluster1: cluster@1 {
reg = <1>;
+ freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
cores {
#address-cells = <1>;
#size-cells = <0>;
@@ -148,6 +150,16 @@
framebuffer = <0xff000000 0x01000000>;
};
+ spc@7fff0000 {
+ compatible = "arm,spc";
+ reg = <0x7FFF0000 0x1000>;
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci";
+ reg = <0x2c090000 0x8000>;
+ };
+
memory-controller@2b0a0000 {
compatible = "arm,pl341", "arm,primecell";
reg = <0x2b0a0000 0x1000>;
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index d108cc4e542..7fa82240d08 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -590,6 +590,14 @@ static void gic_cpu_save(unsigned int gic_nr)
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+ /*
+ * Disable the GIC CPU interface and IRQ bypass. When a CPU is shut down
+ * we must ensure that it does not exit wfi if an IRQ is pending on the
+ * interface. The GIC allows this operation by disabling the GIC CPU
+ * interface and the IRQ bypass mode. The raw IRQ line is still delivered
+ * to the power controller, which uses the IRQ to wake up the respective
+ * core.
+ */
+ writel_relaxed(0x1e0, cpu_base + GIC_CPU_CTRL);
}
static void gic_cpu_restore(unsigned int gic_nr)
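For reference, the magic 0x1e0 decodes as the four bypass-disable bits of the GIC CPU interface control register, with both group enables left clear; a sketch using GICv2 architectural bit names (these macros are not defined by this patch):

    /* GICv2 CPU interface control register (GICC_CTLR) bits, illustrative */
    #define GICC_CTRL_ENABLE_GRP0       (1U << 0)
    #define GICC_CTRL_ENABLE_GRP1       (1U << 1)
    #define GICC_CTRL_FIQ_BYP_DIS_GRP0  (1U << 5)
    #define GICC_CTRL_IRQ_BYP_DIS_GRP0  (1U << 6)
    #define GICC_CTRL_FIQ_BYP_DIS_GRP1  (1U << 7)
    #define GICC_CTRL_IRQ_BYP_DIS_GRP1  (1U << 8)

    /*
     * 0x1e0 == all four bypass-disable bits set, both enables clear: the
     * interface is off and the raw IRQ/FIQ lines no longer reach the CPU,
     * but the power controller still sees them as wake-up events.
     */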
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index c3099b1b698..1b1a43b2266 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -50,6 +50,10 @@
*
* Unconditionally clean and invalidate the entire cache.
*
+ * flush_kern_dcache_level(level)
+ *
+ * Flush data cache levels up to the level input parameter.
+ *
* flush_user_all()
*
* Clean and invalidate all user space cache entries
@@ -98,6 +102,7 @@
struct cpu_cache_fns {
void (*flush_icache_all)(void);
void (*flush_kern_all)(void);
+ void (*flush_kern_dcache_level)(int);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -200,6 +205,39 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
#define __flush_icache_preferred __flush_icache_all_generic
#endif
+#if __LINUX_ARM_ARCH__ >= 7
+/*
+ * Hotplug and CPU idle code needs to flush only the cache levels
+ * impacted by power-down operations. On v7 the upper level is
+ * retrieved by reading the LoUIS field of CLIDR, since inner
+ * shareability represents the cache boundaries affected by per-CPU
+ * shutdown operations on the most common platforms.
+ */
+#define __cache_level_v7_uis ({ \
+ u32 val; \
+ asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r"(val)); \
+ ((val & 0xe00000) >> 21); })
+
+#define flush_cache_level_preferred() __cache_level_v7_uis
+#else
+#define flush_cache_level_preferred() (-1)
+#endif
+
+static inline int flush_cache_level_cpu(void)
+{
+ return flush_cache_level_preferred();
+}
+/*
+ * Flush data cache up to a certain cache level
+ * level - upper cache level to clean
+ * if level == -1, default to flush_kern_all
+ */
+#ifdef MULTI_CACHE
+#define flush_dcache_level(level) cpu_cache.flush_kern_dcache_level(level)
+#else
+#define flush_dcache_level(level) __cpuc_flush_kern_all()
+#endif
+
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
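A typical caller pairs the two new helpers, as the smp.c and suspend.c hunks later in this patch do:

    /* flush only the levels this CPU can lose on power-down (LoUIS on v7) */
    flush_dcache_level(flush_cache_level_cpu());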
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 0925c702b0e..cd8c80983fb 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -137,7 +137,7 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
/* Be safe... */
arch_timer_disable();
- clk->features = CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->set_mode = arch_timer_set_mode;
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415..c8952daf410 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,6 +4,7 @@
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
+#include "entry-header.S"
.text
/*
@@ -30,9 +31,8 @@ ENTRY(__cpu_suspend)
mov r2, r5 @ virtual SP
ldr r3, =sleep_save_sp
#ifdef CONFIG_SMP
- ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
- ALT_UP(mov lr, #0)
- and lr, lr, #15
+ get_thread_info r5
+ ldr lr, [r5, #TI_CPU] @ cpu logical index
add r3, r3, lr, lsl #2
#endif
bl __cpu_suspend_save
@@ -82,10 +82,13 @@ ENDPROC(cpu_resume_after_mmu)
.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
+ mov r1, #0 @ fall-back logical index for UP
+ ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+ ALT_UP_B(1f)
+ bfc r0, #24, #8
+ bl cpu_logical_index @ return logical index in r1
+1:
adr r0, sleep_save_sp
- ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
- ALT_UP(mov r1, #0)
- and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
@@ -102,3 +105,20 @@ sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr
+
+#ifdef CONFIG_SMP
+cpu_logical_index:
+ adr r3, cpu_map_ptr
+ ldr r2, [r3]
+ add r3, r3, r2 @ virt_to_phys(__cpu_logical_map)
+ mov r1, #0
+1:
+ ldr r2, [r3, r1, lsl #2]
+ cmp r2, r0
+ moveq pc, lr
+ add r1, r1, #1
+ b 1b
+
+cpu_map_ptr:
+ .long __cpu_logical_map - .
+#endif
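The cpu_logical_index loop above translates a hardware MPIDR (with the top affinity byte already cleared by the bfc) into a kernel logical index. A rough C equivalent, assuming the u32 __cpu_logical_map[] array the assembly references; the real loop runs with the MMU off, hence the PC-relative virt_to_phys dance via cpu_map_ptr:

    extern u32 __cpu_logical_map[];

    static int cpu_logical_index(u32 mpidr)  /* aff2 already masked off */
    {
    	int i;

    	for (i = 0; ; i++)  /* a matching entry is assumed to exist */
    		if (__cpu_logical_map[i] == mpidr)
    			return i;
    }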
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 083ecceb406..04b2d76b0b7 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
+#include <linux/cpufreq.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
@@ -136,7 +137,7 @@ int __cpu_disable(void)
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
*/
- flush_cache_all();
+ flush_dcache_level(flush_cache_level_cpu());
local_flush_tlb_all();
clear_tasks_mm_cpumask(cpu);
@@ -641,3 +642,56 @@ int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ int cpu = freq->cpu;
+
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return NOTIFY_OK;
+
+ if (!per_cpu(l_p_j_ref, cpu)) {
+ per_cpu(l_p_j_ref, cpu) =
+ per_cpu(cpu_data, cpu).loops_per_jiffy;
+ per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+ if (!global_l_p_j_ref) {
+ global_l_p_j_ref = loops_per_jiffy;
+ global_l_p_j_ref_freq = freq->old;
+ }
+ }
+
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+ (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+ loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+ global_l_p_j_ref_freq,
+ freq->new);
+ per_cpu(cpu_data, cpu).loops_per_jiffy =
+ cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+ per_cpu(l_p_j_ref_freq, cpu),
+ freq->new);
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+ .notifier_call = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
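The notifier above rescales loops_per_jiffy against a reference calibration. Modulo the 32-bit overflow care the real cpufreq_scale() takes, it is a straight ratio; conceptually (a sketch, not the kernel implementation):

    /* conceptual only: new_lpj = ref_lpj * new_freq / ref_freq */
    static unsigned long lpj_scale(unsigned long ref_lpj,
    			       unsigned int ref_khz, unsigned int new_khz)
    {
    	return (unsigned long)div_u64((u64)ref_lpj * new_khz, ref_khz);
    }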
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 1794cc3b0f1..8b3278bff99 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -17,16 +17,29 @@ extern void cpu_resume_mmu(void);
*/
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
+ u32 *ctx = ptr;
+
*save_ptr = virt_to_phys(ptr);
/* This must correspond to the LDM in cpu_resume() assembly */
- *ptr++ = virt_to_phys(idmap_pgd);
- *ptr++ = sp;
- *ptr++ = virt_to_phys(cpu_do_resume);
+ *ctx++ = virt_to_phys(idmap_pgd);
+ *ctx++ = sp;
+ *ctx++ = virt_to_phys(cpu_do_resume);
- cpu_do_suspend(ptr);
+ cpu_do_suspend(ctx);
- flush_cache_all();
+ flush_dcache_level(flush_cache_level_cpu());
+ /*
+ * flush_dcache_level does not guarantee that
+ * save_ptr and ptr are cleaned to main memory,
+ * just up to the required cache level.
+ * Since the context pointer and the context itself
+ * are to be retrieved with the MMU off, that data
+ * must be cleaned from all cache levels
+ * to main memory using "area" cache primitives.
+ */
+ __cpuc_flush_dcache_area(ptr, ptrsz);
+ __cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));
outer_clean_range(*save_ptr, *save_ptr + ptrsz);
outer_clean_range(virt_to_phys(save_ptr),
virt_to_phys(save_ptr) + sizeof(*save_ptr));
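For reference, the layout written here and consumed by the LDM in cpu_resume() (as the in-code comment notes) is:

    /*
     * *save_ptr = virt_to_phys(ptr); the array at ptr then holds:
     *   ptr[0]   phys address of idmap_pgd
     *   ptr[1]   virtual stack pointer to restore
     *   ptr[2]   phys address of cpu_do_resume
     *   ptr[3..] CPU-specific state saved by cpu_do_suspend
     */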
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index cf8730d35e7..98f9c88442a 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -12,6 +12,21 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
build a working kernel, you must also enable relevant core
tile support or Flattened Device Tree based support options.
+config ARCH_VEXPRESS_TC2_PM
+ bool "Power Management Support for TC2 test-chip (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on CPU_IDLE && PM
+ select ARM_CPU_SUSPEND
+ select ARCH_NEEDS_CPU_IDLE_COUPLED
+ select ARM_SPC
+ select ARM_CCI
+ help
+ Provides code that enables CPU idle power management on the
+ TC2 testchip. It enables the CPU idle driver so that the kernel
+ can enter cluster power down states provided by the power
+ controller. Code is built on top of coupled C-state idle code
+ since all CPUs need to be idle to enter cluster shutdown.
+
config ARCH_VEXPRESS_CA9X4
bool "Versatile Express Cortex-A9x4 tile"
select ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 90551b9780a..5067ef487bd 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -6,3 +6,4 @@ obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += cpuidle-tc2.o hotplug-asm.o tc2-sleep.o
diff --git a/arch/arm/mach-vexpress/cpuidle-tc2.c b/arch/arm/mach-vexpress/cpuidle-tc2.c
new file mode 100644
index 00000000000..493f3464a53
--- /dev/null
+++ b/arch/arm/mach-vexpress/cpuidle-tc2.c
@@ -0,0 +1,281 @@
+/*
+ * TC2 CPU idle driver.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/bitmap.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/clockchips.h>
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tick.h>
+#include <linux/vexpress.h>
+#include <asm/cpuidle.h>
+#include <asm/cputype.h>
+#include <asm/idmap.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+
+#include <mach/motherboard.h>
+
+static int tc2_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ ktime_t time_start, time_end;
+ s64 diff;
+
+ time_start = ktime_get();
+
+ cpu_do_idle();
+
+ time_end = ktime_get();
+
+ local_irq_enable();
+
+ diff = ktime_to_us(ktime_sub(time_end, time_start));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int) diff;
+
+ return index;
+}
+
+static int tc2_enter_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx);
+
+static struct cpuidle_state tc2_cpuidle_set[] __initdata = {
+ [0] = {
+ .enter = tc2_cpuidle_simple_enter,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "WFI",
+ .desc = "ARM WFI",
+ },
+ [1] = {
+ .enter = tc2_enter_coupled,
+ .exit_latency = 300,
+ .target_residency = 1000,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_COUPLED,
+ .name = "C1",
+ .desc = "ARM power down",
+ },
+};
+
+struct cpuidle_driver tc2_idle_driver = {
+ .name = "tc2_idle",
+ .owner = THIS_MODULE,
+ .safe_state_index = 0
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, tc2_idle_dev);
+
+#define NR_CLUSTERS 2
+static cpumask_t cluster_mask = CPU_MASK_NONE;
+
+extern void disable_clean_inv_dcache(int);
+static atomic_t abort_barrier[NR_CLUSTERS];
+
+extern void tc2_cpu_resume(void);
+extern void disable_snoops(void);
+
+int tc2_coupled_finisher(unsigned long arg)
+{
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cpu = smp_processor_id();
+ unsigned int cluster = (mpidr >> 8) & 0xf;
+ unsigned int weight = cpumask_weight(topology_core_cpumask(cpu));
+ u8 wfi_weight = 0;
+
+ cpuidle_coupled_parallel_barrier((struct cpuidle_device *)arg,
+ &abort_barrier[cluster]);
+ if (mpidr & 0xf) {
+ disable_clean_inv_dcache(0);
+ wfi();
+ /* not reached */
+ }
+
+ while (wfi_weight != (weight - 1)) {
+ wfi_weight = vexpress_spc_wfi_cpustat(cluster);
+ wfi_weight = hweight8(wfi_weight);
+ }
+
+ vexpress_spc_powerdown_enable(cluster, 1);
+ disable_clean_inv_dcache(1);
+ disable_cci(cluster);
+ disable_snoops();
+ return 1;
+}
+
+/*
+ * tc2_enter_coupled - Programs CPU to enter the specified state
+ * @dev: cpuidle device
+ * @drv: The target state to be programmed
+ * @idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static int tc2_enter_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ struct timespec ts_preidle, ts_postidle, ts_idle;
+ int ret;
+ int cluster = (read_cpuid_mpidr() >> 8) & 0xf;
+ /* Used to keep track of the total time in idle */
+ getnstimeofday(&ts_preidle);
+
+ if (!cpu_isset(cluster, cluster_mask))
+ goto shallow_out;
+
+ BUG_ON(!irqs_disabled());
+
+ cpu_pm_enter();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+
+ ret = cpu_suspend((unsigned long) dev, tc2_coupled_finisher);
+
+ if (ret)
+ BUG();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+
+ cpu_pm_exit();
+
+shallow_out:
+ getnstimeofday(&ts_postidle);
+ ts_idle = timespec_sub(ts_postidle, ts_preidle);
+
+ dev->last_residency = ts_idle.tv_nsec / NSEC_PER_USEC +
+ ts_idle.tv_sec * USEC_PER_SEC;
+ return idx;
+}
+
+static int idle_mask_show(struct seq_file *f, void *p)
+{
+ char buf[256];
+ bitmap_scnlistprintf(buf, 256, cpumask_bits(&cluster_mask),
+ NR_CLUSTERS);
+
+ seq_printf(f, "%s\n", buf);
+
+ return 0;
+}
+
+static int idle_mask_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, idle_mask_show, inode->i_private);
+}
+
+static const struct file_operations cpuidle_fops = {
+ .open = idle_mask_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int idle_debug_set(void *data, u64 val)
+{
+ int i;
+
+ if (val >= NR_CLUSTERS && val != 0xff) {
+ pr_warning("Wrong parameter passed\n");
+ return -EINVAL;
+ }
+ cpuidle_pause_and_lock();
+ if (val == 0xff) {
+ cpumask_clear(&cluster_mask);
+ cpuidle_resume_and_unlock();
+ return 0;
+ }
+
+ for (i = 0; i < NR_CLUSTERS; i++)
+ if (val == i)
+ cpumask_set_cpu(i, &cluster_mask);
+ cpuidle_resume_and_unlock();
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(idle_debug_fops, NULL, idle_debug_set, "%llu\n");
+
+/*
+ * tc2_idle_init
+ *
+ * Registers the TC2 specific cpuidle driver with the cpuidle
+ * framework with the valid set of states.
+ */
+int __init tc2_idle_init(void)
+{
+ struct cpuidle_device *dev;
+ int i, cpu_id;
+ struct dentry *idle_debug, *file_debug;
+ struct cpuidle_driver *drv = &tc2_idle_driver;
+
+ drv->state_count = ARRAY_SIZE(tc2_cpuidle_set);
+
+ for (i = 0; i < drv->state_count; i++) {
+ memcpy(&drv->states[i], &tc2_cpuidle_set[i],
+ sizeof(struct cpuidle_state));
+ }
+
+ cpuidle_register_driver(drv);
+
+ for_each_cpu(cpu_id, cpu_online_mask) {
+ pr_err("CPUidle for CPU%d registered\n", cpu_id);
+ dev = &per_cpu(tc2_idle_dev, cpu_id);
+ dev->cpu = cpu_id;
+ dev->safe_state_index = 0;
+
+ cpumask_copy(&dev->coupled_cpus,
+ topology_core_cpumask(cpu_id));
+ dev->state_count = drv->state_count;
+
+ if (cpuidle_register_device(dev)) {
+ printk(KERN_ERR "%s: Cpuidle register device failed\n",
+ __func__);
+ return -EIO;
+ }
+ }
+
+ idle_debug = debugfs_create_dir("idle_debug", NULL);
+
+ if (IS_ERR_OR_NULL(idle_debug)) {
+ printk(KERN_INFO "Error in creating idle debugfs directory\n");
+ return 0;
+ }
+
+ file_debug = debugfs_create_file("enable_idle", S_IRUGO | S_IWGRP,
+ idle_debug, NULL, &idle_debug_fops);
+
+ if (IS_ERR_OR_NULL(file_debug)) {
+ printk(KERN_INFO "Error in creating enable_idle file\n");
+ return 0;
+ }
+
+ file_debug = debugfs_create_file("enable_mask", S_IRUGO | S_IWGRP,
+ idle_debug, NULL, &cpuidle_fops);
+
+ if (IS_ERR_OR_NULL(file_debug))
+ printk(KERN_INFO "Error in creating enable_mask file\n");
+
+ /* enable all wake-up IRQs by default */
+ vexpress_spc_set_wake_intr(0x7ff);
+ v2m_flags_set(virt_to_phys(tc2_cpu_resume));
+
+ return 0;
+}
+
+late_initcall(tc2_idle_init);
diff --git a/arch/arm/mach-vexpress/hotplug-asm.S b/arch/arm/mach-vexpress/hotplug-asm.S
new file mode 100644
index 00000000000..c51d2310e39
--- /dev/null
+++ b/arch/arm/mach-vexpress/hotplug-asm.S
@@ -0,0 +1,31 @@
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+ .text
+ENTRY(disable_clean_inv_dcache)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+
+ mrc p15, 0, r3, c1, c0, 0
+ bic r3, #4 @ clear C bit
+ mcr p15, 0, r3, c1, c0, 0
+ dsb
+ isb
+ cmp r0, #0 @ r0 == 0: flush to LoUIS, else to LoC
+ mrc p15, 1, r0, c0, c0, 1 @ read CLIDR
+ andeq r0, r0, #0xe00000 @ extract LoUIS, CLIDR[23:21]
+ moveq r0, r0, lsr #21
+ andne r0, r0, #0x7000000 @ extract LoC, CLIDR[26:24]
+ movne r0, r0, lsr #24
+ movs r0, r0
+ blne v7_flush_dcache_level
+ clrex
+ mrc p15, 0, r3, c1, c0, 1
+ bic r3, #0x40 @ clear SMP bit
+ mcr p15, 0, r3, c1, c0, 1
+ isb
+ dsb
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(disable_clean_inv_dcache)
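In outline, the routine above does the following; a C paraphrase with hypothetical helper names (they are not kernel API):

    void disable_clean_inv_dcache(int flush_all)
    {
    	int level;

    	clear_sctlr_c_bit();    /* stop allocating into the D-cache */
    	/* LoUIS for a per-CPU shutdown, LoC when the whole cluster goes */
    	level = flush_all ? clidr_loc() : clidr_louis();
    	if (level)
    		v7_flush_dcache_level(level);
    	clear_actlr_smp_bit();  /* drop out of the coherency domain */
    }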
diff --git a/arch/arm/mach-vexpress/tc2-sleep.S b/arch/arm/mach-vexpress/tc2-sleep.S
new file mode 100644
index 00000000000..cc6b84144fa
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2-sleep.S
@@ -0,0 +1,60 @@
+#include <linux/linkage.h>
+
+ENTRY(tc2_cpu_resume)
+ mrc p15, 0, r0, c0, c0, 5
+ ands r0, r0, #0xff00
+ adr r0, value
+ addne r0, r0, #16
+ ldmia r0, {r1, r2, r3, r4} @ CCI address, SCC snoop control & val
+ mvn r3, r3 @ undo actions done at shutdown
+ ldr r0, [r2]
+ and r5, r0, r3
+ str r5, [r2]
+ mov r0, #3 @ enable CCI for the cluster
+ str r0, [r1]
+ adr r1, cci_ctrl
+ ldr r1, [r1]
+loop:
+ ldr r0, [r1]
+ ands r0, r0, #1
+ bne loop
+ mov r0, #0 @ disable power down enable
+ str r0, [r4]
+ b cpu_resume
+ENDPROC(tc2_cpu_resume)
+
+ENTRY(disable_snoops)
+ mrc p15, 0, r0, c0, c0, 5
+ ands r0, r0, #0xff00
+ adr r0, vvalue
+ addne r0, r0, #8
+ ldmia r0, {r2, r3} @ CCI address, SCC snoop control & val
+ ldr r1, scc_ptr
+ ldr r1, [r1]
+ add r2, r1, r2
+ ldr r0, [r2]
+ orr r0, r0, r3
+ dsb
+ isb
+ str r0, [r2]
+ wfi
+ENDPROC(disable_snoops)
+
+cci_ctrl:
+ .long 0x2c09000c @ CCI control/status, polled until change completes
+value: @ cluster 0 entries; cluster 1 entries at +16
+ .long 0x2c094000 @ CCI slave interface, cluster 0
+ .long 0x7fff0404 @ SCC snoop control register
+ .long 0x180 @ snoop control value
+ .long 0x7fff0b30 @ SPC power down enable register
+ .long 0x2c095000 @ CCI slave interface, cluster 1
+ .long 0x7fff0504 @ SCC snoop control register
+ .long 0x2000 @ snoop control value
+ .long 0x7fff0b34 @ SPC power down enable register
+vvalue: @ SCC offset/value pairs, cluster 0 then cluster 1
+ .long 0x404 @ snoop control offset, cluster 0
+ .long 0x180 @ snoop control value
+ .long 0x504 @ snoop control offset, cluster 1
+ .long 0x2000 @ snoop control value
+scc_ptr:
+ .long vscc
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 39e3fb3db80..5e648ea2aab 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -34,6 +34,23 @@ ENTRY(v7_flush_icache_all)
ENDPROC(v7_flush_icache_all)
/*
+ * v7_flush_dcache_level(level)
+ *
+ * Flush the D-cache up to the level passed as first input parameter.
+ *
+ * r0 - cache level
+ *
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ */
+
+ENTRY(v7_flush_dcache_level)
+ dmb
+ mov r3, r0, lsl #1 @ level * 2
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ b __flush_level
+ENDPROC(v7_flush_dcache_level)
+
+/*
* v7_flush_dcache_all()
*
* Flush the whole D-cache.
@@ -48,6 +65,7 @@ ENTRY(v7_flush_dcache_all)
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ left align loc bit field
beq finished @ if loc is 0, then no need to clean
+__flush_level:
mov r10, #0 @ start clean at cache level 0
loop1:
add r2, r10, r10, lsr #1 @ work out 3x current cache level
@@ -121,6 +139,29 @@ ENTRY(v7_flush_kern_cache_all)
ENDPROC(v7_flush_kern_cache_all)
/*
+ * v7_flush_kern_dcache_level(int level)
+ *
+ * Flush the data cache up to the level passed as a platform
+ * specific parameter.
+ * level - upper level that should be cleaned/invalidated
+ * (valid values: -1 and 0..7)
+ * level == -1 forces a flush_kern_cache_all
+ * level == 0 is a nop
+ * 1 <= level <= 7 flushes the dcache up to that level
+ENTRY(v7_flush_kern_dcache_level)
+ cmp r0, #-1 @ -1 defaults to flush all
+ beq v7_flush_kern_cache_all
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ sub r2, r0, #1
+ cmp r2, #6
+ blls v7_flush_dcache_level @ jump if 0 < level <=7
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(v7_flush_kern_dcache_level)
+
+/*
* v7_flush_cache_all()
*
* Flush all TLB entries in a particular address space
@@ -350,4 +391,4 @@ ENDPROC(v7_dma_unmap_area)
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v7
+ define_cache_functions v7, cachelevel=1
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 2d8ff3ad86d..f193bc374e2 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -293,12 +293,17 @@ ENTRY(\name\()_processor_functions)
.size \name\()_processor_functions, . - \name\()_processor_functions
.endm
-.macro define_cache_functions name:req
+.macro define_cache_functions name:req, cachelevel=0
.align 2
.type \name\()_cache_fns, #object
ENTRY(\name\()_cache_fns)
.long \name\()_flush_icache_all
.long \name\()_flush_kern_cache_all
+ .if \cachelevel
+ .long \name\()_flush_kern_dcache_level
+ .else
+ .long \name\()_flush_kern_cache_all
+ .endif
.long \name\()_flush_user_cache_all
.long \name\()_flush_user_cache_range
.long \name\()_coherent_kern_range
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5961e6415f0..d13195361a9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -76,3 +76,12 @@ config ARM_EXYNOS5250_CPUFREQ
help
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
+
+config ARM_VEXPRESS_BL_CPUFREQ
+ tristate "CPUfreq driver for ARM Vexpress big.LITTLE CPUs"
+ depends on ARCH_VEXPRESS && CPU_FREQ
+ help
+ This enables the CPUfreq driver for ARM Vexpress big.LITTLE
+ platform.
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 35835b7cc2d..478c08b795f 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_BL_CPUFREQ) += vexpress_bL_cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/vexpress_bL_cpufreq.c b/drivers/cpufreq/vexpress_bL_cpufreq.c
new file mode 100644
index 00000000000..88ac95ef85c
--- /dev/null
+++ b/drivers/cpufreq/vexpress_bL_cpufreq.c
@@ -0,0 +1,276 @@
+/*
+ * Vexpress big.LITTLE CPUFreq support
+ * Based on mach-integrator
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <linux/vexpress.h>
+
+#define VEXPRESS_MAX_CLUSTER 2
+
+static struct cpufreq_frequency_table *freq_table[VEXPRESS_MAX_CLUSTER];
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+
+/* Cached current cluster for each CPU to save on IPIs */
+static DEFINE_PER_CPU(unsigned int, cpu_cur_cluster);
+
+/*
+ * Functions to get the current status.
+ *
+ * Beware that the cluster for another CPU may change unexpectedly.
+ */
+
+static unsigned int get_local_cluster(void)
+{
+ unsigned int mpidr;
+ asm ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
+ return (mpidr >> 8) & 0xf;
+}
+
+static void __get_current_cluster(void *_data)
+{
+ unsigned int *_cluster = _data;
+ *_cluster = get_local_cluster();
+}
+
+static int get_current_cluster(unsigned int cpu)
+{
+ unsigned int cluster = 0;
+ smp_call_function_single(cpu, __get_current_cluster, &cluster, 1);
+ return cluster;
+}
+
+static int get_current_cached_cluster(unsigned int cpu)
+{
+ return per_cpu(cpu_cur_cluster, cpu);
+}
+
+/* Validate policy frequency range */
+static int vexpress_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+ uint32_t cur_cluster = get_current_cached_cluster(policy->cpu);
+
+ /* This call takes care of it all using freq_table */
+ return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+}
+
+/* Set clock frequency */
+static int vexpress_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ uint32_t cpu = policy->cpu;
+ struct cpufreq_freqs freqs;
+ uint32_t freq_tab_idx;
+ uint32_t cur_cluster;
+ int ret = 0;
+
+ /* Read current clock rate */
+ cur_cluster = get_current_cached_cluster(cpu);
+
+ if (vexpress_spc_get_performance(cur_cluster, &freq_tab_idx))
+ return -EIO;
+
+ freqs.old = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+ /* Make sure that target_freq is within supported range */
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+
+ /* Determine valid target frequency using freq_table */
+ cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
+ target_freq, relation, &freq_tab_idx);
+ freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+ freqs.cpu = policy->cpu;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("Requested Freq %d cpu %d\n", freqs.new, cpu);
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ ret = vexpress_spc_set_performance(cur_cluster, freq_tab_idx);
+ if (ret) {
+ pr_err("Error %d while setting required OPP\n", ret);
+ return ret;
+ }
+
+ policy->cur = freqs.new;
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+/* Get current clock frequency */
+static unsigned int vexpress_cpufreq_get(unsigned int cpu)
+{
+ uint32_t freq_tab_idx = 0;
+ uint32_t cur_cluster = get_current_cached_cluster(cpu);
+
+ /*
+ * Read current clock rate with vexpress_spc call
+ */
+ if (vexpress_spc_get_performance(cur_cluster, &freq_tab_idx))
+ return -EIO;
+
+ return freq_table[cur_cluster][freq_tab_idx].frequency;
+}
+
+/* translate the integer array into cpufreq_frequency_table entries */
+static inline void _cpufreq_copy_table_from_array(uint32_t *table,
+ struct cpufreq_frequency_table *freq_table, int size)
+{
+ int i;
+ for (i = 0; i < size; i++) {
+ freq_table[i].index = i;
+ freq_table[i].frequency = table[i] / 1000; /* in kHz */
+ }
+ freq_table[i].index = size;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+}
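Fed the freqs property added to the dts earlier in this patch, this yields, for example:

    /*
     * e.g. for cluster 0 in vexpress-v2p-ca15-tc2.dts:
     *   { .index = 0, .frequency =  500000 }   -- 500 MHz, stored in kHz
     *   ...
     *   { .index = 7, .frequency = 1200000 }   -- 1.2 GHz
     *   { .index = 8, .frequency = CPUFREQ_TABLE_END }
     */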
+
+static int vexpress_cpufreq_of_init(void)
+{
+ uint32_t cpu_opp_num;
+ struct cpufreq_frequency_table *freqtable[VEXPRESS_MAX_CLUSTER] = { NULL };
+ uint32_t *cpu_freqs;
+ int ret = 0, cluster_id = 0, len;
+ struct device_node *cluster = NULL;
+ const struct property *pp;
+ const u32 *hwid;
+
+ while ((cluster = of_find_node_by_name(cluster, "cluster"))) {
+ hwid = of_get_property(cluster, "reg", &len);
+ if (hwid && len == 4)
+ cluster_id = be32_to_cpup(hwid);
+
+ pp = of_find_property(cluster, "freqs", NULL);
+ if (!pp)
+ return -EINVAL;
+ cpu_opp_num = pp->length / sizeof(u32);
+ if (!cpu_opp_num)
+ return -ENODATA;
+
+ cpu_freqs = kzalloc(sizeof(uint32_t) * cpu_opp_num, GFP_KERNEL);
+ freqtable[cluster_id] =
+ kzalloc(sizeof(struct cpufreq_frequency_table) *
+ (cpu_opp_num + 1), GFP_KERNEL);
+ if (!cpu_freqs || !freqtable[cluster_id]) {
+ ret = -ENOMEM;
+ goto free_mem;
+ }
+ of_property_read_u32_array(cluster, "freqs",
+ cpu_freqs, cpu_opp_num);
+ _cpufreq_copy_table_from_array(cpu_freqs,
+ freqtable[cluster_id], cpu_opp_num);
+ freq_table[cluster_id] = freqtable[cluster_id];
+
+ kfree(cpu_freqs);
+ }
+ return ret;
+free_mem:
+ while (cluster_id >= 0)
+ kfree(freqtable[cluster_id--]);
+ kfree(cpu_freqs);
+ return ret;
+}
+
+/* Per-CPU initialization */
+static int vexpress_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+ uint32_t cur_cluster = get_current_cluster(policy->cpu);
+
+ if (atomic_inc_return(&freq_table_users) == 1)
+ result = vexpress_cpufreq_of_init();
+
+ if (result) {
+ atomic_dec_return(&freq_table_users);
+ pr_err("CPUFreq - CPU %d failed to initialize\n", policy->cpu);
+ return result;
+ }
+
+ result =
+ cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ if (result)
+ return result;
+
+ cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+
+ per_cpu(cpu_cur_cluster, policy->cpu) = cur_cluster;
+
+ /* set default policy and cpuinfo */
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms assumed */
+ policy->cur = vexpress_cpufreq_get(policy->cpu);
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ cpumask_copy(policy->related_cpus, policy->cpus);
+
+ pr_info("CPUFreq for CPU %d initialized\n", policy->cpu);
+ return result;
+}
+
+/* Export freq_table to sysfs */
+static struct freq_attr *vexpress_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver vexpress_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = vexpress_cpufreq_verify_policy,
+ .target = vexpress_cpufreq_set_target,
+ .get = vexpress_cpufreq_get,
+ .init = vexpress_cpufreq_init,
+ .name = "cpufreq_vexpress",
+ .attr = vexpress_cpufreq_attr,
+};
+
+static int __init vexpress_cpufreq_modinit(void)
+{
+ return cpufreq_register_driver(&vexpress_cpufreq_driver);
+}
+
+static void __exit vexpress_cpufreq_modexit(void)
+{
+ cpufreq_unregister_driver(&vexpress_cpufreq_driver);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for ARM vexpress big.LITTLE platform");
+MODULE_LICENSE("GPL");
+
+module_init(vexpress_cpufreq_modinit);
+module_exit(vexpress_cpufreq_modexit);
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 78a666d1e5f..a76b689e553 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -18,3 +18,6 @@ config CPU_IDLE_GOV_MENU
bool
depends on CPU_IDLE && NO_HZ
default y
+
+config ARCH_NEEDS_CPU_IDLE_COUPLED
+ def_bool n
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 5634f88379d..38c8f69f30c 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,3 +3,4 @@
#
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
+obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
new file mode 100644
index 00000000000..2c9bf269223
--- /dev/null
+++ b/drivers/cpuidle/coupled.c
@@ -0,0 +1,715 @@
+/*
+ * coupled.c - helper functions to enter the same idle state on multiple cpus
+ *
+ * Copyright (c) 2011 Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "cpuidle.h"
+
+/**
+ * DOC: Coupled cpuidle states
+ *
+ * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
+ * cpus cannot be independently powered down, either due to
+ * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
+ * power down), or due to HW bugs (on OMAP4460, a cpu powering up
+ * will corrupt the gic state unless the other cpu runs a work
+ * around). Each cpu has a power state that it can enter without
+ * coordinating with the other cpu (usually Wait For Interrupt, or
+ * WFI), and one or more "coupled" power states that affect blocks
+ * shared between the cpus (L2 cache, interrupt controller, and
+ * sometimes the whole SoC). Entering a coupled power state must
+ * be tightly controlled on both cpus.
+ *
+ * This file implements a solution, where each cpu will wait in the
+ * WFI state until all cpus are ready to enter a coupled state, at
+ * which point the coupled state function will be called on all
+ * cpus at approximately the same time.
+ *
+ * Once all cpus are ready to enter idle, they are woken by an smp
+ * cross call. At this point, there is a chance that one of the
+ * cpus will find work to do, and choose not to enter idle. A
+ * final pass is needed to guarantee that all cpus will call the
+ * power state enter function at the same time. During this pass,
+ * each cpu will increment the ready counter, and continue once the
+ * ready counter matches the number of online coupled cpus. If any
+ * cpu exits idle, the other cpus will decrement their counter and
+ * retry.
+ *
+ * requested_state stores the deepest coupled idle state each cpu
+ * is ready for. It is assumed that the states are indexed from
+ * shallowest (highest power, lowest exit latency) to deepest
+ * (lowest power, highest exit latency). The requested_state
+ * variable is not locked. It is only written from the cpu that
+ * it stores (or by the on/offlining cpu if that cpu is offline),
+ * and only read after all the cpus are ready for the coupled idle
+ * state and are no longer updating it.
+ *
+ * Three atomic counters are used. alive_count tracks the number
+ * of cpus in the coupled set that are currently or soon will be
+ * online. waiting_count tracks the number of cpus that are in
+ * the waiting loop, in the ready loop, or in the coupled idle state.
+ * ready_count tracks the number of cpus that are in the ready loop
+ * or in the coupled idle state.
+ *
+ * To use coupled cpuidle states, a cpuidle driver must:
+ *
+ * Set struct cpuidle_device.coupled_cpus to the mask of all
+ * coupled cpus, usually the same as cpu_possible_mask if all cpus
+ * are part of the same cluster. The coupled_cpus mask must be
+ * set in the struct cpuidle_device for each cpu.
+ *
+ * Set struct cpuidle_device.safe_state to a state that is not a
+ * coupled state. This is usually WFI.
+ *
+ * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
+ * state that affects multiple cpus.
+ *
+ * Provide a struct cpuidle_state.enter function for each state
+ * that affects multiple cpus. This function is guaranteed to be
+ * called on all cpus at approximately the same time. The driver
+ * should ensure that the cpus all abort together if any cpu tries
+ * to abort once the function is called. The function should return
+ * with interrupts still disabled.
+ */
+
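The TC2 driver earlier in this patch follows this recipe; in outline, the per-cpu setup a coupled driver needs looks like this (a sketch only, my_* names hypothetical):

    static DEFINE_PER_CPU(struct cpuidle_device, my_idle_dev);

    static int my_idle_register_cpu(int cpu)
    {
    	struct cpuidle_device *dev = &per_cpu(my_idle_dev, cpu);

    	dev->cpu = cpu;
    	dev->safe_state_index = 0;	/* plain WFI, never coupled */
    	/* every cpu in the set must carry the same mask */
    	cpumask_copy(&dev->coupled_cpus, topology_core_cpumask(cpu));
    	return cpuidle_register_device(dev);
    }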
+/**
+ * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
+ * @coupled_cpus: mask of cpus that are part of the coupled set
+ * @requested_state: array of requested states for cpus in the coupled set
+ * @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @online_count: count of cpus that are online
+ * @refcnt: reference count of cpuidle devices that are using this struct
+ * @prevent: flag to prevent coupled idle while a cpu is hotplugging
+ */
+struct cpuidle_coupled {
+ cpumask_t coupled_cpus;
+ int requested_state[NR_CPUS];
+ atomic_t ready_waiting_counts;
+ int online_count;
+ int refcnt;
+ int prevent;
+};
+
+#define WAITING_BITS 16
+#define MAX_WAITING_CPUS (1 << WAITING_BITS)
+#define WAITING_MASK (MAX_WAITING_CPUS - 1)
+#define READY_MASK (~WAITING_MASK)
+
+#define CPUIDLE_COUPLED_NOT_IDLE (-1)
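Both counts are packed into the single ready_waiting_counts atomic: waiting in the low 16 bits, ready in the high 16. The predicate helpers further down all reduce to these two reads (a paraphrase, not code from this patch):

    static inline int waiting_count(struct cpuidle_coupled *coupled)
    {
    	return atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
    }

    static inline int ready_count(struct cpuidle_coupled *coupled)
    {
    	return atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
    }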
+
+static DEFINE_MUTEX(cpuidle_coupled_lock);
+static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+
+/*
+ * The cpuidle_coupled_poked_mask mask is used to avoid calling
+ * __smp_call_function_single with the per cpu call_single_data struct already
+ * in use. This prevents a deadlock where two cpus are waiting for each others
+ * call_single_data struct to be available
+ */
+static cpumask_t cpuidle_coupled_poked_mask;
+
+/**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+ * @dev: cpuidle_device of the calling cpu
+ * @a: atomic variable to hold the barrier
+ *
+ * No caller to this function will return from this function until all online
+ * cpus in the same coupled group have called this function. Once any caller
+ * has returned from this function, the barrier is immediately available for
+ * reuse.
+ *
+ * The atomic variable a must be initialized to 0 before any cpu calls
+ * this function, and will be reset to 0 before any cpu returns from it.
+ *
+ * Must only be called from within a coupled idle state handler
+ * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
+ *
+ * Provides full smp barrier semantics before and after calling.
+ */
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+ int n = dev->coupled->online_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(a);
+
+ while (atomic_read(a) < n)
+ cpu_relax();
+
+ if (atomic_inc_return(a) == n * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > n)
+ cpu_relax();
+}
+
+/**
+ * cpuidle_state_is_coupled - check if a state is part of a coupled set
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @state: index of the target state in drv->states
+ *
+ * Returns true if the target state is coupled with cpus besides this one
+ */
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state)
+{
+ return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
+}
+
+/**
+ * cpuidle_coupled_set_ready - mark a cpu as ready
+ * @coupled: the struct coupled that contains the current cpu
+ */
+static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
+{
+ atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_set_not_ready - mark a cpu as not ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Decrements the ready counter, unless the ready (and thus the waiting) counter
+ * is equal to the number of online cpus. Prevents a race where one cpu
+ * decrements the waiting counter and then re-increments it just before another
+ * cpu has decremented its ready counter, leading to the ready counter going
+ * down from the number of online cpus without going through the coupled idle
+ * state.
+ *
+ * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
+ * counter was equal to the number of online cpus.
+ */
+static
+inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
+{
+ int all;
+ int ret;
+
+ /* both counters equal to online_count, packed into one value */
+ all = coupled->online_count | (coupled->online_count << WAITING_BITS);
+ ret = atomic_add_unless(&coupled->ready_waiting_counts,
+ -MAX_WAITING_CPUS, all);
+
+ return ret ? 0 : -EINVAL;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the ready loop.
+ */
+static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
+{
+ int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+ return r == 0;
+}
+
+/**
+ * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the ready loop
+ */
+static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
+{
+ int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+ return r == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the wait loop
+ */
+static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+ int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+ return w == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the waiting loop.
+ */
+static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+ int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+ return w == 0;
+}
+
+/**
+ * cpuidle_coupled_get_state - determine the deepest idle state
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns the deepest idle state that all coupled cpus can enter
+ */
+static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
+ struct cpuidle_coupled *coupled)
+{
+ int i;
+ int state = INT_MAX;
+
+ /*
+ * Read barrier ensures that read of requested_state is ordered after
+ * reads of ready_count. Matches the write barriers
+ * cpuidle_set_state_waiting.
+ */
+ smp_rmb();
+
+ for_each_cpu_mask(i, coupled->coupled_cpus)
+ if (cpu_online(i) && coupled->requested_state[i] < state)
+ state = coupled->requested_state[i];
+
+ return state;
+}
+
+static void cpuidle_coupled_poked(void *info)
+{
+ int cpu = (unsigned long)info;
+ cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
+}
+
+/**
+ * cpuidle_coupled_poke - wake up a cpu that may be waiting
+ * @cpu: target cpu
+ *
+ * Ensures that the target cpu exits its waiting idle state (if it is in it)
+ * and will see updates to waiting_count before it re-enters its waiting idle
+ * state.
+ *
+ * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
+ * either has or will soon have a pending IPI that will wake it out of idle,
+ * or it is currently processing the IPI and is not in idle.
+ */
+static void cpuidle_coupled_poke(int cpu)
+{
+ struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+
+ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+ __smp_call_function_single(cpu, csd, 0);
+}
+
+/**
+ * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Calls cpuidle_coupled_poke on all other online cpus.
+ */
+static void cpuidle_coupled_poke_others(int this_cpu,
+ struct cpuidle_coupled *coupled)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, coupled->coupled_cpus)
+ if (cpu != this_cpu && cpu_online(cpu))
+ cpuidle_coupled_poke(cpu);
+}
+
+/**
+ * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ * @next_state: the index in drv->states of the requested state for this cpu
+ *
+ * Updates the requested idle state for the specified cpuidle device,
+ * poking all coupled cpus out of idle if necessary to let them see the new
+ * state.
+ */
+static void cpuidle_coupled_set_waiting(int cpu,
+ struct cpuidle_coupled *coupled, int next_state)
+{
+ int w;
+
+ coupled->requested_state[cpu] = next_state;
+
+ /*
+ * If this is the last cpu to enter the waiting state, poke
+ * all the other cpus out of their waiting state so they can
+ * enter a deeper state. This can race with one of the cpus
+ * exiting the waiting state due to an interrupt and
+ * decrementing waiting_count, see comment below.
+ *
+ * The atomic_inc_return provides a write barrier to order the write
+ * to requested_state with the later write that increments ready_count.
+ */
+ w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
+ if (w == coupled->online_count)
+ cpuidle_coupled_poke_others(cpu, coupled);
+}
+
+/**
+ * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Removes the requested idle state for the specified cpuidle device.
+ */
+static void cpuidle_coupled_set_not_waiting(int cpu,
+ struct cpuidle_coupled *coupled)
+{
+ /*
+ * Decrementing waiting count can race with incrementing it in
+ * cpuidle_coupled_set_waiting, but that's OK. Worst case, some
+ * cpus will increment ready_count and then spin until they
+ * notice that this cpu has cleared its requested_state.
+ */
+ atomic_dec(&coupled->ready_waiting_counts);
+
+ coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
+}
+
+/**
+ * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Marks this cpu as no longer in the ready and waiting loops. Decrements
+ * the waiting count first to prevent another cpu looping back in and seeing
+ * this cpu as waiting just before it exits idle.
+ */
+static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
+{
+ cpuidle_coupled_set_not_waiting(cpu, coupled);
+ atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
+ * @cpu - this cpu
+ *
+ * Turns on interrupts and spins until any outstanding poke interrupts have
+ * been processed and the poke bit has been cleared.
+ *
+ * Other interrupts may also be processed while interrupts are enabled, so
+ * need_resched() must be tested after turning interrupts off again to make sure
+ * the interrupt didn't schedule work that should take the cpu out of idle.
+ *
+ * Returns 0 if need_resched was false, -EINTR if need_resched was true.
+ */
+static int cpuidle_coupled_clear_pokes(int cpu)
+{
+ local_irq_enable();
+ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
+ cpu_relax();
+ local_irq_disable();
+
+ return need_resched() ? -EINTR : 0;
+}
+
+/**
+ * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @next_state: index of the requested state in drv->states
+ *
+ * Coordinate with coupled cpus to enter the target state. This is a two
+ * stage process. In the first stage, the cpus are operating independently,
+ * and may call into cpuidle_enter_state_coupled at completely different times.
+ * To save as much power as possible, the first cpus to call this function will
+ * go to an intermediate state (the cpuidle_device's safe state), and wait for
+ * all the other cpus to call this function. Once all coupled cpus are idle,
+ * the second stage will start. Each coupled cpu will spin until all cpus have
+ * guaranteed that they will enter the target state.
+ *
+ * This function must be called with interrupts disabled. It may enable
+ * interrupts while preparing for idle, and it will always return with
+ * interrupts enabled.
+ */
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state)
+{
+ int entered_state = -1;
+ struct cpuidle_coupled *coupled = dev->coupled;
+
+ if (!coupled)
+ return -EINVAL;
+
+ while (coupled->prevent) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ local_irq_enable();
+ return entered_state;
+ }
+ entered_state = cpuidle_enter_state(dev, drv,
+ dev->safe_state_index);
+ }
+
+ /* Read barrier ensures online_count is read after prevent is cleared */
+ smp_rmb();
+
+ cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+
+retry:
+ /*
+ * Wait for all coupled cpus to be idle, using the deepest state
+ * allowed for a single cpu.
+ */
+ while (!cpuidle_coupled_cpus_waiting(coupled)) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ if (coupled->prevent) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ entered_state = cpuidle_enter_state(dev, drv,
+ dev->safe_state_index);
+ }
+
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ /*
+ * All coupled cpus are probably idle. There is a small chance that
+ * one of the other cpus just became active. Increment the ready count,
+ * and spin until all coupled cpus have incremented the counter. Once a
+ * cpu has incremented the ready counter, it cannot abort idle and must
+ * spin until either all cpus have incremented the ready counter, or
+ * another cpu leaves idle and decrements the waiting counter.
+ */
+
+ cpuidle_coupled_set_ready(coupled);
+ while (!cpuidle_coupled_cpus_ready(coupled)) {
+ /* Check if any other cpus bailed out of idle. */
+ if (!cpuidle_coupled_cpus_waiting(coupled))
+ if (!cpuidle_coupled_set_not_ready(coupled))
+ goto retry;
+
+ cpu_relax();
+ }
+
+ /* all cpus have acked the coupled state */
+ next_state = cpuidle_coupled_get_state(dev, coupled);
+
+ entered_state = cpuidle_enter_state(dev, drv, next_state);
+
+ cpuidle_coupled_set_done(dev->cpu, coupled);
+
+out:
+ /*
+ * Normal cpuidle states are expected to return with irqs enabled.
+ * That leads to an inefficiency where a cpu receiving an interrupt
+ * that brings it out of idle will process that interrupt before
+ * exiting the idle enter function and decrementing ready_count. All
+ * other cpus will need to spin waiting for the cpu that is processing
+ * the interrupt. If the driver returns with interrupts disabled,
+ * all other cpus will loop back into the safe idle state instead of
+ * spinning, saving power.
+ *
+ * Calling local_irq_enable here allows coupled states to return with
+ * interrupts disabled, but won't cause problems for drivers that
+ * exit with interrupts enabled.
+ */
+ local_irq_enable();
+
+ /*
+ * Wait until all coupled cpus have exited idle. There is no risk that
+ * a cpu exits and re-enters the ready state because this cpu has
+ * already decremented its waiting_count.
+ */
+ while (!cpuidle_coupled_no_cpus_ready(coupled))
+ cpu_relax();
+
+ return entered_state;
+}
+
+static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
+{
+ cpumask_t cpus;
+ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
+ coupled->online_count = cpumask_weight(&cpus);
+}
+
+/**
+ * cpuidle_coupled_register_device - register a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_register_device to handle coupled idle init. Finds the
+ * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
+ * exists yet.
+ */
+int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+ int cpu;
+ struct cpuidle_device *other_dev;
+ struct call_single_data *csd;
+ struct cpuidle_coupled *coupled;
+
+ if (cpumask_empty(&dev->coupled_cpus))
+ return 0;
+
+ for_each_cpu_mask(cpu, dev->coupled_cpus) {
+ other_dev = per_cpu(cpuidle_devices, cpu);
+ if (other_dev && other_dev->coupled) {
+ coupled = other_dev->coupled;
+ goto have_coupled;
+ }
+ }
+
+ /* No existing coupled info found, create a new one */
+ coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
+ if (!coupled)
+ return -ENOMEM;
+
+ coupled->coupled_cpus = dev->coupled_cpus;
+
+have_coupled:
+ dev->coupled = coupled;
+ if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
+ coupled->prevent++;
+
+ cpuidle_coupled_update_online_cpus(coupled);
+
+ coupled->refcnt++;
+
+ csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
+ csd->func = cpuidle_coupled_poked;
+ csd->info = (void *)(unsigned long)dev->cpu;
+
+ return 0;
+}
+
+/**
+ * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_unregister_device to tear down coupled idle. Removes the
+ * cpu from the coupled idle set, and frees the cpuidle_coupled_info struct if
+ * this was the last cpu in the set.
+ */
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+ struct cpuidle_coupled *coupled = dev->coupled;
+
+ if (cpumask_empty(&dev->coupled_cpus))
+ return;
+
+ if (--coupled->refcnt == 0)
+ kfree(coupled);
+ dev->coupled = NULL;
+}
+
+/**
+ * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
+{
+ int cpu = get_cpu();
+
+ /* Force all cpus out of the waiting loop. */
+ coupled->prevent++;
+ cpuidle_coupled_poke_others(cpu, coupled);
+ put_cpu();
+ while (!cpuidle_coupled_no_cpus_waiting(coupled))
+ cpu_relax();
+}
+
+/**
+ * cpuidle_coupled_allow_idle - allow cpus to enter a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Enables coupled cpuidle on a coupled set of cpus. Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
+{
+ int cpu = get_cpu();
+
+ /*
+ * Write barrier ensures readers see the new online_count when they
+ * see prevent == 0.
+ */
+ smp_wmb();
+ coupled->prevent--;
+ /* Force cpus out of the prevent loop. */
+ cpuidle_coupled_poke_others(cpu, coupled);
+ put_cpu();
+}
+
+/**
+ * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
+ * @nb: notifier block
+ * @action: hotplug transition
+ * @hcpu: target cpu number
+ *
+ * Called when a cpu is brought online or taken offline via hotplug. Updates
+ * the coupled cpu set appropriately.
+ */
+static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev;
+
+ mutex_lock(&cpuidle_lock);
+
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (!dev || !dev->coupled)
+ goto out;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ cpuidle_coupled_prevent_idle(dev->coupled);
+ break;
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ cpuidle_coupled_update_online_cpus(dev->coupled);
+ /* Fall through */
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ cpuidle_coupled_allow_idle(dev->coupled);
+ break;
+ }
+
+out:
+ mutex_unlock(&cpuidle_lock);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_coupled_cpu_notifier = {
+ .notifier_call = cpuidle_coupled_cpu_notify,
+};
+
+static int __init cpuidle_coupled_init(void)
+{
+ return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
+}
+core_initcall(cpuidle_coupled_init);
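
For context, a minimal sketch of the driver-side contract this file expects. All names below (tc2_*, abort_barrier) are illustrative assumptions, not part of this patch: a coupled state is marked CPUIDLE_FLAG_COUPLED in the driver's state table, each cpu's device carries the coupled_cpus mask and a safe_state_index, and the enter callback may return with interrupts disabled as described above.

    /* Hypothetical coupled-state driver skeleton (sketch only). */
    static DEFINE_PER_CPU(struct cpuidle_device, tc2_idle_dev);
    static atomic_t abort_barrier;

    static int tc2_enter_coupled(struct cpuidle_device *dev,
                                 struct cpuidle_driver *drv, int index)
    {
            /* All coupled cpus arrive here together; rendezvous once
             * more, then run the cluster power-down sequence. */
            cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
            /* ... platform cluster power-down would go here ... */
            return index;   /* returning with irqs disabled is allowed */
    }

    static int __init tc2_idle_init(void)
    {
            struct cpuidle_device *dev;
            int cpu, ret;

            for_each_possible_cpu(cpu) {
                    dev = &per_cpu(tc2_idle_dev, cpu);
                    dev->cpu = cpu;
                    dev->safe_state_index = 0;      /* plain WFI state */
                    cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
                    ret = cpuidle_register_device(dev);
                    if (ret)
                            return ret;
            }
            return 0;
    }
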
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d90519cec88..ed1dd859299 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -92,6 +92,34 @@ int cpuidle_play_dead(void)
}
/**
+ * cpuidle_enter_state - enter the state and update stats
+ * @dev: cpuidle device for this cpu
+ * @drv: cpuidle driver for this cpu
+ * @next_state: index into drv->states of the state to enter
+ */
+int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ int next_state)
+{
+ int entered_state;
+
+ entered_state = cpuidle_enter_ops(dev, drv, next_state);
+
+ if (entered_state >= 0) {
+ /*
+ * Update cpuidle counters. This can be moved to within the driver
+ * enter routine, but that results in multiple copies of the same
+ * code.
+ */
+ dev->states_usage[entered_state].time +=
+ (unsigned long long)dev->last_residency;
+ dev->states_usage[entered_state].usage++;
+ } else {
+ dev->last_residency = 0;
+ }
+
+ return entered_state;
+}
+
+/**
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
@@ -132,23 +160,15 @@ int cpuidle_idle_call(void)
trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle_rcuidle(next_state, dev->cpu);
- entered_state = cpuidle_enter_ops(dev, drv, next_state);
+ if (cpuidle_state_is_coupled(dev, drv, next_state))
+ entered_state = cpuidle_enter_state_coupled(dev, drv,
+ next_state);
+ else
+ entered_state = cpuidle_enter_state(dev, drv, next_state);
trace_power_end_rcuidle(dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
- if (entered_state >= 0) {
- /* Update cpuidle counters */
- /* This can be moved to within driver enter routine
- * but that results in multiple copies of same code.
- */
- dev->states_usage[entered_state].time +=
- (unsigned long long)dev->last_residency;
- dev->states_usage[entered_state].usage++;
- } else {
- dev->last_residency = 0;
- }
-
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev, entered_state);
@@ -376,13 +396,25 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- if ((ret = cpuidle_add_sysfs(cpu_dev))) {
- module_put(cpuidle_driver->owner);
- return ret;
- }
+ ret = cpuidle_add_sysfs(cpu_dev);
+ if (ret)
+ goto err_sysfs;
+
+ ret = cpuidle_coupled_register_device(dev);
+ if (ret)
+ goto err_coupled;
dev->registered = 1;
return 0;
+
+err_coupled:
+ cpuidle_remove_sysfs(cpu_dev);
+ wait_for_completion(&dev->kobj_unregister);
+err_sysfs:
+ list_del(&dev->device_list);
+ per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ module_put(cpuidle_driver->owner);
+ return ret;
}
/**
@@ -432,6 +464,8 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ cpuidle_coupled_unregister_device(dev);
+
cpuidle_resume_and_unlock();
module_put(cpuidle_driver->owner);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 7db186685c2..76e7f696ad8 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -14,6 +14,8 @@ extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;
extern int cpuidle_disabled(void);
+extern int cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state);
/* idle loop */
extern void cpuidle_install_idle_handler(void);
@@ -30,4 +32,34 @@ extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
extern int cpuidle_add_sysfs(struct device *dev);
extern void cpuidle_remove_sysfs(struct device *dev);
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state);
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state);
+int cpuidle_coupled_register_device(struct cpuidle_device *dev);
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
+#else
+static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state)
+{
+ return false;
+}
+
+static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state)
+{
+ return -1;
+}
+
+static inline int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+ return 0;
+}
+
+static inline void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+}
+#endif
+
#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0724b1f5aae..05e21981164 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -528,6 +528,9 @@ config WL127X_RFKILL
Creates an rfkill entry in sysfs for power control of Bluetooth
TI wl127x chips.
+config ARM_CCI
+ bool "ARM CCI driver support"
+ help
+ Support for the ARM CCI (Cache Coherent Interconnect) driver,
+ used to enable and disable cluster snoops on big.LITTLE
+ platforms such as Versatile Express TC2.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -537,4 +540,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vexpress/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index ca02974e4c9..64e9e7044cc 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -54,4 +54,5 @@ obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
-
+obj-$(CONFIG_ARCH_VEXPRESS) += vexpress/
+obj-$(CONFIG_ARM_CCI) += arm-cci.o
diff --git a/drivers/misc/arm-cci.c b/drivers/misc/arm-cci.c
new file mode 100644
index 00000000000..96afe77b9fe
--- /dev/null
+++ b/drivers/misc/arm-cci.c
@@ -0,0 +1,130 @@
+/*
+ * CCI support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/arm-cci.h>
+
+/* Snoop control register offsets for the two TC2 cluster slave interfaces */
+#define CCI400_EAG_OFFSET 0x4000 /* Eagle (Cortex-A15) cluster */
+#define CCI400_KF_OFFSET 0x5000 /* KingFisher (Cortex-A7) cluster */
+
+#define DRIVER_NAME "CCI"
+
+struct cci_drvdata {
+ void __iomem *baseaddr;
+ spinlock_t lock;
+};
+
+static struct cci_drvdata *info;
+
+void disable_cci(int cluster)
+{
+ u32 cci_reg = cluster ? CCI400_KF_OFFSET : CCI400_EAG_OFFSET;
+ writel_relaxed(0x0, info->baseaddr + cci_reg);
+
+ /* Wait while the CCI status register reports a pending change */
+ while (readl_relaxed(info->baseaddr + 0xc) & 0x1)
+ ;
+}
+EXPORT_SYMBOL_GPL(disable_cci);
+
+static int __devinit cci_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -EINVAL;
+ goto mem_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "address 0x%x in use\n", (u32) res->start);
+ ret = -EBUSY;
+ goto mem_free;
+ }
+
+ info->baseaddr = ioremap(res->start, resource_size(res));
+ if (!info->baseaddr) {
+ ret = -ENXIO;
+ goto ioremap_err;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ pr_info("CCI loaded at %p\n", info->baseaddr);
+ return ret;
+
+ioremap_err:
+ release_mem_region(res->start, resource_size(res));
+mem_free:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit cci_driver_remove(struct platform_device *pdev)
+{
+ struct cci_drvdata *info;
+ struct resource *res = pdev->resource;
+
+ info = platform_get_drvdata(pdev);
+ iounmap(info->baseaddr);
+ release_mem_region(res->start, resource_size(res));
+ kfree(info);
+
+ return 0;
+}
+
+static const struct of_device_id arm_cci_matches[] = {
+ {.compatible = "arm,cci"},
+ {},
+};
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_driver_probe,
+ .remove = cci_driver_remove,
+};
+
+static int __init cci_init(void)
+{
+ return platform_driver_register(&cci_platform_driver);
+}
+
+static void __exit cci_exit(void)
+{
+ platform_driver_unregister(&cci_platform_driver);
+}
+
+arch_initcall(cci_init);
+module_exit(cci_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCI support");
diff --git a/drivers/misc/vexpress/Kconfig b/drivers/misc/vexpress/Kconfig
new file mode 100644
index 00000000000..8fb52f640bf
--- /dev/null
+++ b/drivers/misc/vexpress/Kconfig
@@ -0,0 +1,2 @@
+config ARM_SPC
+ bool "ARM SPC driver support"
+ help
+ Support for the Serial Power Controller (SPC) found on ARM
+ Versatile Express TC2 platforms.
diff --git a/drivers/misc/vexpress/Makefile b/drivers/misc/vexpress/Makefile
new file mode 100644
index 00000000000..95b58166d0a
--- /dev/null
+++ b/drivers/misc/vexpress/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_SPC) += arm-spc.o
diff --git a/drivers/misc/vexpress/arm-spc.c b/drivers/misc/vexpress/arm-spc.c
new file mode 100644
index 00000000000..23cee9df576
--- /dev/null
+++ b/drivers/misc/vexpress/arm-spc.c
@@ -0,0 +1,371 @@
+/*
+ * Serial Power Controller (SPC) support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author(s): Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ * Achin Gupta <achin.gupta@arm.com>
+ * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vexpress.h>
+
+#define SNOOP_CTL_A15 0x404
+#define SNOOP_CTL_A7 0x504
+#define PERF_LVL_A15 0xB00
+#define PERF_REQ_A15 0xB04
+#define PERF_LVL_A7 0xB08
+#define PERF_REQ_A7 0xB0c
+#define COMMS 0xB10
+#define COMMS_REQ 0xB14
+#define PWC_STATUS 0xB18
+#define PWC_FLAG 0xB1c
+#define WAKE_INT_MASK 0xB24
+#define WAKE_INT_RAW 0xB28
+#define WAKE_INT_STAT 0xB2c
+#define A15_PWRDN_EN 0xB30
+#define A7_PWRDN_EN 0xB34
+#define A15_A7_ISOLATE 0xB38
+#define STANDBYWFI_STAT 0xB3c
+#define A15_CACTIVE 0xB40
+#define A15_PWRDNREQ 0xB44
+#define A15_PWRDNACK 0xB48
+#define A7_CACTIVE 0xB4c
+#define A7_PWRDNREQ 0xB50
+#define A7_PWRDNACK 0xB54
+#define A15_RESET_HOLD 0xB58
+#define A7_RESET_HOLD 0xB5c
+#define A15_RESET_STAT 0xB60
+#define A7_RESET_STAT 0xB64
+#define A15_CONF 0x400
+#define A7_CONF 0x500
+
+#define DRIVER_NAME "SPC"
+#define TIME_OUT 100
+
+struct vexpress_spc_drvdata {
+ void __iomem *baseaddr;
+ spinlock_t lock;
+};
+
+static struct vexpress_spc_drvdata *info;
+
+/* SCC virtual address */
+u32 vscc;
+
+/* Poll @reg until it no longer reads @status; -EAGAIN if @timeout expires */
+static inline int read_wait_to(void __iomem *reg, int status, int timeout)
+{
+ while (timeout-- && readl(reg) == status) {
+ cpu_relax();
+ udelay(2);
+ }
+ if (timeout < 0)
+ return -EAGAIN;
+ return 0;
+}
+
+int vexpress_spc_get_performance(int cluster, int *perf)
+{
+ u32 perf_cfg_reg = 0;
+ u32 a15_clusid = 0;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ perf_cfg_reg = cluster != a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15;
+
+ spin_lock(&info->lock);
+ *perf = readl(info->baseaddr + perf_cfg_reg);
+ spin_unlock(&info->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_performance);
+
+int vexpress_spc_set_performance(int cluster, int perf)
+{
+ u32 perf_cfg_reg = 0;
+ u32 perf_stat_reg = 0;
+ u32 a15_clusid = 0;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ perf_cfg_reg = cluster != a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15;
+ perf_stat_reg = cluster != a15_clusid ? PERF_REQ_A7 : PERF_REQ_A15;
+
+ if (perf < 0 || perf > 7)
+ return -EINVAL;
+
+ spin_lock(&info->lock);
+ writel(perf, info->baseaddr + perf_cfg_reg);
+ if (read_wait_to(info->baseaddr + perf_stat_reg, 1, TIME_OUT))
+ ret = -EAGAIN;
+ spin_unlock(&info->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_performance);
+
+void vexpress_spc_set_wake_intr(u32 mask)
+{
+ if (!IS_ERR_OR_NULL(info))
+ writel(mask & VEXPRESS_SPC_WAKE_INTR_MASK,
+ info->baseaddr + WAKE_INT_MASK);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_wake_intr);
+
+u32 vexpress_spc_get_wake_intr(int raw)
+{
+ u32 wake_intr_reg = raw ? WAKE_INT_RAW : WAKE_INT_STAT;
+
+ if (!IS_ERR_OR_NULL(info))
+ return readl(info->baseaddr + wake_intr_reg);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_wake_intr);
+
+void vexpress_spc_powerdown_enable(int cluster, int enable)
+{
+ u32 pwdrn_reg = 0;
+ u32 a15_clusid = 0;
+
+ if (!IS_ERR_OR_NULL(info)) {
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ pwdrn_reg = cluster != a15_clusid ? A7_PWRDN_EN : A15_PWRDN_EN;
+ writel(!!enable, info->baseaddr + pwdrn_reg);
+ }
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_powerdown_enable);
+
+void vexpress_spc_adb400_pd_enable(int cluster, int enable)
+{
+ u32 pwdrn_reg = 0;
+ u32 a15_clusid = 0;
+ u32 val = enable ? 0xF : 0x0; /* 0xF: all ADB-400 bridges */
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ pwdrn_reg = cluster != a15_clusid ? A7_PWRDNREQ : A15_PWRDNREQ;
+
+ spin_lock(&info->lock);
+ writel(val, info->baseaddr + pwdrn_reg);
+ spin_unlock(&info->lock);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_adb400_pd_enable);
+
+void vexpress_scc_ctl_snoops(int cluster, int enable)
+{
+ u32 val;
+ u32 snoop_reg = 0;
+ u32 a15_clusid = 0;
+ u32 mask = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ snoop_reg = cluster != a15_clusid ? SNOOP_CTL_A7 : SNOOP_CTL_A15;
+ mask = cluster != a15_clusid ? 0x2000 : 0x180;
+
+ val = readl_relaxed(info->baseaddr + snoop_reg);
+ if (enable)
+ val &= ~mask;
+ else
+ val |= mask;
+ writel_relaxed(val, info->baseaddr + snoop_reg);
+}
+EXPORT_SYMBOL_GPL(vexpress_scc_ctl_snoops);
+
+void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable)
+{
+ u32 rsthold_reg;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+
+ if (cluster != a15_clusid)
+ rsthold_reg = A7_RESET_HOLD;
+ else
+ rsthold_reg = A15_RESET_HOLD;
+ val = readl_relaxed(info->baseaddr + rsthold_reg);
+ if (enable)
+ val |= (1 << cpu);
+ else
+ val &= ~(1 << cpu);
+ writel_relaxed(val, info->baseaddr + rsthold_reg);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cpureset);
+
+void vexpress_spc_wfi_cluster_reset(int cluster, int enable)
+{
+ u32 rsthold_reg, shift;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+
+ if (cluster != a15_clusid) {
+ rsthold_reg = A7_RESET_HOLD;
+ shift = 6;
+ } else {
+ rsthold_reg = A15_RESET_HOLD;
+ shift = 4;
+ }
+ spin_lock(&info->lock);
+ val = readl(info->baseaddr + rsthold_reg);
+ if (enable)
+ val |= 1 << shift;
+ else
+ val &= ~(1 << shift);
+ writel(val, info->baseaddr + rsthold_reg);
+ spin_unlock(&info->lock);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cluster_reset);
+
+int vexpress_spc_wfi_cpustat(int cluster)
+{
+ u32 rststat_reg;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return 0;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ rststat_reg = STANDBYWFI_STAT;
+
+ val = readl_relaxed(info->baseaddr + rststat_reg);
+ return cluster != a15_clusid ? ((val & 0x38) >> 3) : (val & 0x3);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cpustat);
+
+static int __devinit vexpress_spc_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -EINVAL;
+ goto mem_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "address 0x%x in use\n", (u32) res->start);
+ ret = -EBUSY;
+ goto mem_free;
+ }
+
+ info->baseaddr = ioremap(res->start, resource_size(res));
+ if (!info->baseaddr) {
+ ret = -ENXIO;
+ goto ioremap_err;
+ }
+ vscc = (u32) info->baseaddr;
+ spin_lock_init(&info->lock);
+ platform_set_drvdata(pdev, info);
+
+ pr_info("vexpress_spc loaded at %p\n", info->baseaddr);
+ return ret;
+
+ioremap_err:
+ release_mem_region(res->start, resource_size(res));
+mem_free:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit vexpress_spc_driver_remove(struct platform_device *pdev)
+{
+ struct vexpress_spc_drvdata *info;
+ struct resource *res = pdev->resource;
+
+ info = platform_get_drvdata(pdev);
+ iounmap(info->baseaddr);
+ release_mem_region(res->start, resource_size(res));
+ kfree(info);
+
+ return 0;
+}
+
+static const struct of_device_id arm_vexpress_spc_matches[] = {
+ {.compatible = "arm,spc"},
+ {},
+};
+
+static struct platform_driver vexpress_spc_platform_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = arm_vexpress_spc_matches,
+ },
+ .probe = vexpress_spc_driver_probe,
+ .remove = vexpress_spc_driver_remove,
+};
+
+static int __init vexpress_spc_init(void)
+{
+ return platform_driver_register(&vexpress_spc_platform_driver);
+}
+
+static void __exit vexpress_spc_exit(void)
+{
+ platform_driver_unregister(&vexpress_spc_platform_driver);
+}
+
+arch_initcall(vexpress_spc_init);
+module_exit(vexpress_spc_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial Power Controller (SPC) support");
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
new file mode 100644
index 00000000000..ce3f705fb6d
--- /dev/null
+++ b/include/linux/arm-cci.h
@@ -0,0 +1,30 @@
+/*
+ * CCI support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_ARM_CCI_H
+#define __LINUX_ARM_CCI_H
+
+#ifdef CONFIG_ARM_CCI
+extern void disable_cci(int cluster);
+#else
+static inline void disable_cci(int cluster) { }
+#endif
+
+#endif
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 6c26a3da0e0..5ab7183313c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -57,6 +57,7 @@ struct cpuidle_state {
/* Idle State Flags */
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
+#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
@@ -100,6 +101,12 @@ struct cpuidle_device {
struct list_head device_list;
struct kobject kobj;
struct completion kobj_unregister;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+ int safe_state_index;
+ cpumask_t coupled_cpus;
+ struct cpuidle_coupled *coupled;
+#endif
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -176,6 +183,10 @@ static inline int cpuidle_play_dead(void) {return -ENODEV; }
#endif
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#endif
+
/******************************
* CPUIDLE GOVERNOR INTERFACE *
******************************/
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
new file mode 100644
index 00000000000..61f5651103c
--- /dev/null
+++ b/include/linux/vexpress.h
@@ -0,0 +1,50 @@
+/*
+ * include/linux/vexpress.h
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Versatile Express register definitions and exported functions
+ */
+
+#define VEXPRESS_SPC_WAKE_INTR_IRQ(cluster, cpu) \
+ (1 << (4 * (cluster) + (cpu)))
+#define VEXPRESS_SPC_WAKE_INTR_FIQ(cluster, cpu) \
+ (1 << (7 * (cluster) + (cpu)))
+#define VEXPRESS_SPC_WAKE_INTR_SWDOG (1 << 10)
+#define VEXPRESS_SPC_WAKE_INTR_GTIMER (1 << 11)
+#define VEXPRESS_SPC_WAKE_INTR_MASK 0xFFF
+
+#ifdef CONFIG_ARM_SPC
+extern int vexpress_spc_get_performance(int cluster, int *perf);
+extern int vexpress_spc_set_performance(int cluster, int perf);
+extern void vexpress_spc_set_wake_intr(u32 mask);
+extern u32 vexpress_spc_get_wake_intr(int raw);
+extern void vexpress_spc_powerdown_enable(int cluster, int enable);
+extern void vexpress_spc_adb400_pd_enable(int cluster, int enable);
+extern void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable);
+extern int vexpress_spc_wfi_cpustat(int cluster);
+extern void vexpress_spc_wfi_cluster_reset(int cluster, int enable);
+extern void vexpress_scc_ctl_snoops(int cluster, int enable);
+#else
+static inline int vexpress_spc_get_performance(int cluster, int *perf)
+{
+ return -EINVAL;
+}
+static inline int vexpress_spc_set_performance(int cluster, int perf)
+{
+ return -EINVAL;
+}
+static inline void vexpress_spc_set_wake_intr(u32 mask) { }
+static inline u32 vexpress_spc_get_wake_intr(int raw) { return 0; }
+static inline void vexpress_spc_powerdown_enable(int cluster, int enable) { }
+static inline void vexpress_spc_adb400_pd_enable(int cluster, int enable) { }
+static inline void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable)
+{ }
+static inline int vexpress_spc_wfi_cpustat(int cluster) { return 0; }
+static inline void vexpress_spc_wfi_cluster_reset(int cluster, int enable) { }
+static inline void vexpress_scc_ctl_snoops(int cluster, int enable) { }
+#endif
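
Putting the wake-interrupt macros together, a hypothetical suspend path might arm the SPC as below; the cluster/cpu numbers are illustrative:

    /* Wake on IRQ activity for cpu0 of cluster0, or on the watchdog. */
    vexpress_spc_set_wake_intr(VEXPRESS_SPC_WAKE_INTR_IRQ(0, 0) |
                               VEXPRESS_SPC_WAKE_INTR_SWDOG);
    vexpress_spc_powerdown_enable(0, 1);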