/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <asm/errno.h>
#include <misc/cxl-base.h>
#include <linux/of_platform.h>
#include "cxl.h"

/* protected by rcu */
static struct cxl_calls *cxl_calls;

atomic_t cxl_use_count = ATOMIC_INIT(0);
EXPORT_SYMBOL(cxl_use_count);

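/*
 * When the cxl driver is built as a module, cxl_calls_get()/cxl_calls_put()
 * pin that module for the duration of a call through the cxl_calls table:
 * the pointer is read under rcu_read_lock() and try_module_get() ensures
 * the module cannot be unloaded while a call is in flight.  When cxl is
 * built in, the table can never disappear and the helpers collapse to
 * trivial accessors.
 */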
#ifdef CONFIG_CXL_MODULE

static inline struct cxl_calls *cxl_calls_get(void)
{
	struct cxl_calls *calls = NULL;

	rcu_read_lock();
	calls = rcu_dereference(cxl_calls);
	if (calls && !try_module_get(calls->owner))
		calls = NULL;
	rcu_read_unlock();

	return calls;
}

static inline void cxl_calls_put(struct cxl_calls *calls)
{
	BUG_ON(calls != cxl_calls);

	/* we don't need to rcu this, as we hold a reference to the module */
	module_put(cxl_calls->owner);
}

#else /* !defined CONFIG_CXL_MODULE */

static inline struct cxl_calls *cxl_calls_get(void)
{
	return cxl_calls;
}

static inline void cxl_calls_put(struct cxl_calls *calls) { }

#endif /* CONFIG_CXL_MODULE */

/*
 * AFU refcount management
 *
 * cxl_afu_get() takes a reference on the AFU's embedded struct device and
 * returns the AFU (or NULL if no reference could be taken); cxl_afu_put()
 * drops that reference again.
 */
struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
{
	return (get_device(&afu->dev) == NULL) ? NULL : afu;
}
EXPORT_SYMBOL_GPL(cxl_afu_get);

void cxl_afu_put(struct cxl_afu *afu)
{
	put_device(&afu->dev);
}
EXPORT_SYMBOL_GPL(cxl_afu_put);

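/*
 * Called by powerpc architecture code when SLB entries covering @mm are
 * invalidated.  The flush is only forwarded to the cxl module when it is
 * loaded and at least one cxl context is in use.
 */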
void cxl_slbia(struct mm_struct *mm)
{
	struct cxl_calls *calls;

	calls = cxl_calls_get();
	if (!calls)
		return;

	if (cxl_ctx_in_use())
		calls->cxl_slbia(mm);

	cxl_calls_put(calls);
}

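/*
 * register_cxl_calls() publishes the cxl module's entry points to this
 * built-in stub; only one table may be registered at a time.  The module is
 * expected to do something along these lines at init time (the handler name
 * below is illustrative, the fields come from struct cxl_calls):
 *
 *	static struct cxl_calls my_calls = {
 *		.cxl_slbia = my_slbia_handler,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	rc = register_cxl_calls(&my_calls);
 */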
int register_cxl_calls(struct cxl_calls *calls)
{
	if (cxl_calls)
		return -EBUSY;

	rcu_assign_pointer(cxl_calls, calls);
	return 0;
}
EXPORT_SYMBOL_GPL(register_cxl_calls);

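/*
 * unregister_cxl_calls() clears the table and waits out an RCU grace period,
 * after which no new caller can observe the stale pointer; callers already
 * in flight hold a module reference taken in cxl_calls_get() and remain
 * safe until they drop it.
 */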
void unregister_cxl_calls(struct cxl_calls *calls)
{
	BUG_ON(cxl_calls->owner != calls->owner);
	RCU_INIT_POINTER(cxl_calls, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_cxl_calls);

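/*
 * Thin, always-built-in wrapper around of_update_property() for use by the
 * cxl module when it needs to update device tree properties.
 */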
int cxl_update_properties(struct device_node *dn,
			  struct property *new_prop)
{
	return of_update_property(dn, new_prop);
}
EXPORT_SYMBOL_GPL(cxl_update_properties);

/*
 * API calls into the driver that may be called from the PHB code and must be
 * built in.
 */
bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu)
{
	bool ret;
	struct cxl_calls *calls;

	calls = cxl_calls_get();
	if (!calls)
		return false;

	ret = calls->cxl_pci_associate_default_context(dev, afu);

	cxl_calls_put(calls);

	return ret;
}
EXPORT_SYMBOL_GPL(cxl_pci_associate_default_context);

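/*
 * Notify the cxl module that a PCI device sitting on a cxl PHB is being
 * disabled, so it can release any state tied to that device.  A no-op when
 * the module is not loaded.
 */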
void cxl_pci_disable_device(struct pci_dev *dev)
{
	struct cxl_calls *calls;

	calls = cxl_calls_get();
	if (!calls)
		return;

	calls->cxl_pci_disable_device(dev);

	cxl_calls_put(calls);
}
EXPORT_SYMBOL_GPL(cxl_pci_disable_device);

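/*
 * Map the next MSI for @pdev to a cxl hardware IRQ on behalf of the PHB MSI
 * setup code; @ctx and @afu_irq carry the iteration state between calls.
 * Returns -EBUSY when the cxl module is not loaded.
 */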
int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
{
	int ret;
	struct cxl_calls *calls;

	calls = cxl_calls_get();
	if (!calls)
		return -EBUSY;

	ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq);

	cxl_calls_put(calls);

	return ret;
}
EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq);

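/*
 * Forward MSI allocation requests for a cx4 device from the PHB code to the
 * cxl module.  Returns -EBUSY when the module is not loaded, mirroring
 * cxl_next_msi_hwirq() above.
 */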
int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	int ret;
	struct cxl_calls *calls;

	calls = cxl_calls_get();
	if (!calls)
		return -EBUSY;

	ret = calls->cxl_cx4_setup_msi_irqs(pdev, nvec, type);

	cxl_calls_put(calls);

	return ret;
}
EXPORT_SYMBOL_GPL(cxl_cx4_setup_msi_irqs);

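/*
 * Tear down the MSIs previously set up via cxl_cx4_setup_msi_irqs().  A
 * no-op when the cxl module is not loaded.
 */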
void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct cxl_calls *calls;

	calls = cxl_calls_get();
	if (!calls)
		return;

	calls->cxl_cx4_teardown_msi_irqs(pdev);

	cxl_calls_put(calls);
}
EXPORT_SYMBOL_GPL(cxl_cx4_teardown_msi_irqs);

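/*
 * When running as a guest (i.e. not in hypervisor mode), scan the device
 * tree for "ibm,coherent-platform-facility" nodes and create a platform
 * device for each one so the cxl platform driver can bind to them.
 */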
static int __init cxl_base_init(void)
{
	struct device_node *np;
	struct platform_device *dev;
	int count = 0;

	/*
	 * Scan for compatible devices in guest only
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;

	for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") {
		dev = of_platform_device_create(np, NULL, NULL);
		if (dev)
			count++;
	}
	pr_devel("Found %d cxl device(s)\n", count);
	return 0;
}
device_initcall(cxl_base_init);