/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
#include <linux/init.h>

#include <asm/idle.h>
#include <asm/pm-cps.h>

/* Enumeration of the various idle states this driver may enter */
enum cps_idle_state {
	STATE_WAIT = 0,		/* MIPS wait instruction, coherent */
	STATE_NC_WAIT,		/* MIPS wait instruction, non-coherent */
	STATE_CLOCK_GATED,	/* Core clock gated */
	STATE_POWER_GATED,	/* Core power gated */
	STATE_COUNT
};

static int cps_nc_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	enum cps_pm_state pm_state;
	int err;

	/*
	 * At least one core must remain powered up & clocked in order for the
	 * system to have any hope of functioning.
	 *
	 * TODO: don't treat core 0 specially, just prevent the final core
	 * TODO: remap interrupt affinity temporarily
	 */
	if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT))
		index = STATE_NC_WAIT;

	/* Select the appropriate cps_pm_state */
	switch (index) {
	case STATE_NC_WAIT:
		pm_state = CPS_PM_NC_WAIT;
		break;
	case STATE_CLOCK_GATED:
		pm_state = CPS_PM_CLOCK_GATED;
		break;
	case STATE_POWER_GATED:
		pm_state = CPS_PM_POWER_GATED;
		break;
	default:
		BUG();
		return -EINVAL;
	}

	/* Notify listeners the CPU is about to power down */
	if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter())
		return -EINTR;

	/* Enter that state */
	err = cps_pm_enter_state(pm_state);

	/* Notify listeners the CPU is back up */
	if (pm_state == CPS_PM_POWER_GATED)
		cpu_pm_exit();

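	/*
	 * cps_pm_enter_state() returns 0 on success, so "err ?: index"
	 * reports the index of the state actually entered, or the error.
	 */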
	return err ?: index;
}

static struct cpuidle_driver cps_driver = {
	.name			= "cpc_cpuidle",
	.owner			= THIS_MODULE,
	.states = {
		[STATE_WAIT]		= MIPS_CPUIDLE_WAIT_STATE,
		[STATE_NC_WAIT] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 200,
			.target_residency	= 450,
			.name	= "nc-wait",
			.desc	= "non-coherent MIPS wait",
		},
		[STATE_CLOCK_GATED] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 300,
			.target_residency	= 700,
			.flags	= CPUIDLE_FLAG_TIMER_STOP,
			.name	= "clock-gated",
			.desc	= "core clock gated",
		},
		[STATE_POWER_GATED] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 600,
			.target_residency	= 1000,
			.flags	= CPUIDLE_FLAG_TIMER_STOP,
			.name	= "power-gated",
			.desc	= "core power gated",
		},
	},
	.state_count		= STATE_COUNT,
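	/*
	 * STATE_WAIT (index 0) needs no coordination between CPUs, so it is
	 * used as the safe state while coupled CPUs synchronise entry.
	 */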
	.safe_state_index	= 0,
};

static void __init cps_cpuidle_unregister(void)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_possible_cpu(cpu) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(&cps_driver);
}

static int __init cps_cpuidle_init(void)
{
	int err, cpu, core, i;
	struct cpuidle_device *device;

	/* Detect supported states */
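	/*
	 * These checks run from deepest to shallowest, so state_count ends up
	 * reflecting the deepest state the platform actually supports.
	 */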
	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		cps_driver.state_count = STATE_CLOCK_GATED + 1;
	if (!cps_pm_support_state(CPS_PM_CLOCK_GATED))
		cps_driver.state_count = STATE_NC_WAIT + 1;
	if (!cps_pm_support_state(CPS_PM_NC_WAIT))
		cps_driver.state_count = STATE_WAIT + 1;

	/* Inform the user if some states are unavailable */
	if (cps_driver.state_count < STATE_COUNT) {
		pr_info("cpuidle-cps: limited to ");
		switch (cps_driver.state_count - 1) {
		case STATE_WAIT:
			pr_cont("coherent wait\n");
			break;
		case STATE_NC_WAIT:
			pr_cont("non-coherent wait\n");
			break;
		case STATE_CLOCK_GATED:
			pr_cont("clock gating\n");
			break;
		}
	}

	/*
	 * Set the coupled flag on the appropriate states if this system
	 * requires it.
	 */
	if (coupled_coherence)
		for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++)
			cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED;

	err = cpuidle_register_driver(&cps_driver);
	if (err) {
		pr_err("Failed to register CPS cpuidle driver\n");
		return err;
	}

	for_each_possible_cpu(cpu) {
		core = cpu_data[cpu].core;
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif

		err = cpuidle_register_device(device);
		if (err) {
			pr_err("Failed to register CPU%d cpuidle device\n",
			       cpu);
			goto err_out;
		}
	}

	return 0;
err_out:
	cps_cpuidle_unregister();
	return err;
}
device_initcall(cps_cpuidle_init);