/*
 * Renesas IRQC Driver
 *
 * Copyright (C) 2013 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#define IRQC_IRQ_MAX	32	/* maximum 32 interrupts per driver instance */

#define IRQC_REQ_STS	0x00	/* Interrupt Request Status Register */
#define IRQC_EN_STS	0x04	/* Interrupt Enable Status Register */
#define IRQC_EN_SET	0x08	/* Interrupt Enable Set Register */
#define IRQC_INT_CPU_BASE(n)	(0x000 + ((n) * 0x10))
				/* SYS-CPU vs. RT-CPU */
#define DETECT_STATUS	0x100	/* IRQn Detect Status Register */
#define MONITOR		0x104	/* IRQn Signal Level Monitor Register */
#define HLVL_STS	0x108	/* IRQn High Level Detect Status Register */
#define LLVL_STS	0x10c	/* IRQn Low Level Detect Status Register */
#define S_R_EDGE_STS	0x110	/* IRQn Sync Rising Edge Detect Status Reg. */
#define S_F_EDGE_STS	0x114	/* IRQn Sync Falling Edge Detect Status Reg. */
#define A_R_EDGE_STS	0x118	/* IRQn Async Rising Edge Detect Status Reg. */
#define A_F_EDGE_STS	0x11c	/* IRQn Async Falling Edge Detect Status Reg. */
#define CHTEN_STS	0x120	/* Chattering Reduction Status Register */
#define IRQC_CONFIG(n)	(0x180 + ((n) * 0x04))
				/* IRQn Configuration Register */

struct irqc_irq {
	int hw_irq;
	int requested_irq;
	struct irqc_priv *p;
};

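/*
 * Per-controller state: the mapped register window, the enable-register
 * base for the CPU bank used (bank 0), one irqc_irq per external pin,
 * and the irq domain plus generic chip that expose them to the kernel.
 */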
struct irqc_priv {
	void __iomem *iomem;
	void __iomem *cpu_int_base;
	struct irqc_irq irq[IRQC_IRQ_MAX];
	unsigned int number_of_irqs;
	struct platform_device *pdev;
	struct irq_chip_generic *gc;
	struct irq_domain *irq_domain;
	struct clk *clk;
};

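/* The irq domain carries a pointer back to this driver data as host_data. */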
static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

static void irqc_dbg(struct irqc_irq *i, char *str)
{
	dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
		str, i->requested_irq, i->hw_irq);
}

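/*
 * Translate Linux trigger types into the sense-select value written to the
 * low bits of IRQC_CONFIG(n). A zero entry marks an unsupported type and
 * makes irqc_irq_set_type() return -EINVAL.
 */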
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW]	= 0x01,
	[IRQ_TYPE_LEVEL_HIGH]	= 0x02,
	[IRQ_TYPE_EDGE_FALLING]	= 0x04,	/* Synchronous */
	[IRQ_TYPE_EDGE_RISING]	= 0x08,	/* Synchronous */
	[IRQ_TYPE_EDGE_BOTH]	= 0x0c,	/* Synchronous */
};

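/* Reprogram the sense bits (low six bits) of one pin's IRQC_CONFIG register. */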
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);
	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
	u32 tmp;

	irqc_dbg(&p->irq[hw_irq], "sense");

	if (!value)
		return -EINVAL;

	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
	tmp &= ~0x3f;
	tmp |= value;
	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
	return 0;
}

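/*
 * Forward wake-up configuration to the parent interrupt and, if a functional
 * clock was found at probe time, keep it enabled while the pin is armed as a
 * wake-up source.
 */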
static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);

	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);

	if (!p->clk)
		return 0;

	if (on)
		clk_enable(p->clk);
	else
		clk_disable(p->clk);

	return 0;
}

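/*
 * Demux handler, one instance per parent interrupt: if this pin's bit is set
 * in DETECT_STATUS, acknowledge it by writing the bit back and hand off to
 * the virtual interrupt mapped in the irq domain.
 */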
static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
{
	struct irqc_irq *i = dev_id;
	struct irqc_priv *p = i->p;
	u32 bit = BIT(i->hw_irq);

	irqc_dbg(i, "demux1");

	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
		iowrite32(bit, p->iomem + DETECT_STATUS);
		irqc_dbg(i, "demux2");
		generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

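/*
 * Probe sequence: allocate driver data, optionally grab the functional clock,
 * enable runtime PM, collect the MEM resource and up to IRQC_IRQ_MAX parent
 * IRQ resources, map the registers, create a linear irq domain backed by a
 * single generic chip, then request each parent interrupt with
 * irqc_irq_handler() as the demux.
 */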
static int irqc_probe(struct platform_device *pdev)
{
	struct irqc_priv *p;
	struct resource *io;
	struct resource *irq;
	const char *name = dev_name(&pdev->dev);
	int ret;
	int k;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		ret = -ENOMEM;
		goto err0;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	p->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(p->clk)) {
		dev_warn(&pdev->dev, "unable to get clock\n");
		p->clk = NULL;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* get hold of mandatory IOMEM */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io) {
		dev_err(&pdev->dev, "not enough IOMEM resources\n");
		ret = -EINVAL;
		goto err1;
	}

	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
	for (k = 0; k < IRQC_IRQ_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].hw_irq = k;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(&pdev->dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err1;
	}

	/* ioremap the register window */
	p->iomem = ioremap_nocache(io->start, resource_size(io));
	if (!p->iomem) {
		dev_err(&pdev->dev, "failed to remap IOMEM\n");
		ret = -ENXIO;
		goto err2;
	}

	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */

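	/*
	 * Linear domain sized to the number of parent IRQs found; hwirq
	 * numbers 0..number_of_irqs-1 correspond directly to the pins.
	 */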
	p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
					      p->number_of_irqs,
					      &irq_generic_chip_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "cannot initialize irq domain\n");
		goto err2;
	}

	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
					     1, name, handle_level_irq,
					     0, 0, IRQ_GC_INIT_NESTED_LOCK);
	if (ret) {
		dev_err(&pdev->dev, "cannot allocate generic chip\n");
		goto err3;
	}

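	/*
	 * Mask/unmask go through the generic chip helpers: masking writes the
	 * pin's bit to IRQC_EN_STS (used here as the disable register),
	 * unmasking writes it to IRQC_EN_SET. IRQCHIP_MASK_ON_SUSPEND keeps
	 * non-wakeup pins masked across suspend.
	 */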
	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
	p->gc->reg_base = p->cpu_int_base;
	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
	p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
	p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
	p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;

	/* request interrupts one by one */
	for (k = 0; k < p->number_of_irqs; k++) {
		if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
				0, name, &p->irq[k])) {
			dev_err(&pdev->dev, "failed to request IRQ\n");
			ret = -ENOENT;
			goto err4;
		}
	}

	dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);

	return 0;
err4:
	while (--k >= 0)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

err3:
	irq_domain_remove(p->irq_domain);
err2:
	iounmap(p->iomem);
err1:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
err0:
	return ret;
}

static int irqc_remove(struct platform_device *pdev)
{
	struct irqc_priv *p = platform_get_drvdata(pdev);
	int k;

	for (k = 0; k < p->number_of_irqs; k++)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

	irq_domain_remove(p->irq_domain);
	iounmap(p->iomem);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
	return 0;
}

static const struct of_device_id irqc_dt_ids[] = {
	{ .compatible = "renesas,irqc", },
	{},
};
MODULE_DEVICE_TABLE(of, irqc_dt_ids);

static struct platform_driver irqc_device_driver = {
	.probe		= irqc_probe,
	.remove		= irqc_remove,
	.driver		= {
		.name		= "renesas_irqc",
		.of_match_table	= irqc_dt_ids,
	}
};

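/*
 * Register at postcore_initcall time so the interrupt controller can be up
 * before the devices that use these pins start probing.
 */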
static int __init irqc_init(void)
{
	return platform_driver_register(&irqc_device_driver);
}
postcore_initcall(irqc_init);

static void __exit irqc_exit(void)
{
	platform_driver_unregister(&irqc_device_driver);
}
module_exit(irqc_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas IRQC Driver");
MODULE_LICENSE("GPL v2");