/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"

#define CPORT_FLAGS_E2EFC	(1)
#define CPORT_FLAGS_CSD_N	(2)
#define CPORT_FLAGS_CSV_N	(4)

enum gb_svc_state {
	GB_SVC_STATE_RESET,
	GB_SVC_STATE_PROTOCOL_VERSION,
	GB_SVC_STATE_SVC_HELLO,
};

struct gb_svc {
	struct gb_connection	*connection;
	enum gb_svc_state	state;
	struct ida		device_id_map;
};

struct svc_hotplug {
	struct work_struct	work;
	struct gb_connection	*connection;
	struct gb_svc_intf_hotplug_request	data;
};


/*
 * The AP's SVC cport is required early to get messages from the SVC. This
 * happens even before the Endo is created, and hence before any modules or
 * interfaces.
 *
 * This is a temporary connection, used only at initial bootup.
 */
struct gb_connection *
gb_ap_svc_connection_create(struct greybus_host_device *hd)
{
	struct gb_connection *connection;

	connection = gb_connection_create_range(hd, NULL, hd->parent,
						GB_SVC_CPORT_ID,
						GREYBUS_PROTOCOL_SVC,
						GB_SVC_CPORT_ID,
						GB_SVC_CPORT_ID + 1);

	return connection;
}

/*
 * We know the endo-type and the AP's interface id now, so let's create a
 * proper SVC connection (and its interface/bundle) and get rid of the
 * initial, partially initialized SVC connection.
 */
static struct gb_interface *
gb_ap_interface_create(struct greybus_host_device *hd,
		       struct gb_connection *connection, u8 interface_id)
{
	struct gb_interface *intf;
	struct device *dev = &hd->endo->dev;

	intf = gb_interface_create(hd, interface_id);
	if (!intf) {
		dev_err(dev, "%s: Failed to create interface with id %hhu\n",
			__func__, interface_id);
		return NULL;
	}

	intf->device_id = GB_DEVICE_ID_AP;
	svc_update_connection(intf, connection);

	/* It's no longer a partially initialized connection */
	hd->initial_svc_connection = NULL;

	return intf;
}

static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	struct gb_svc_intf_device_id_request request;

	request.intf_id = intf_id;
	request.device_id = device_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
				 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_reset_request request;

	request.intf_id = intf_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
				 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_reset);
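
/*
 * Illustrative sketch (not part of the driver): a caller that holds a
 * pointer to the SVC, for example via its host device, could request an
 * interface reset like this. The hd and intf_id variables below are
 * hypothetical placeholders.
 *
 *	struct gb_svc *svc = hd->svc;
 *	int ret;
 *
 *	ret = gb_svc_intf_reset(svc, intf_id);
 *	if (ret)
 *		dev_err(&svc->connection->dev,
 *			"interface %u reset failed: %d\n", intf_id, ret);
 */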

int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->connection->dev,
			"failed to get DME attribute (%hhu %hx %hu) %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->connection->dev,
			"Unipro error %hu while getting DME attribute (%hhu %hx %hu)\n",
			result, intf_id, attr, selector);
		return -EINVAL;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->connection->dev,
			"failed to set DME attribute (%hhu %hx %hu %u) %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->connection->dev,
			"Unipro error %hu while setting DME attribute (%hhu %hx %hu %u)\n",
			result, intf_id, attr, selector, value);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
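
/*
 * Illustrative sketch (not part of the driver): reading a peer DME
 * attribute, modifying it and writing it back through the SVC. The attr,
 * selector, hd and intf_id variables are hypothetical placeholders; real
 * callers would use attribute ids defined by the UniPro/Greybus specs.
 *
 *	struct gb_svc *svc = hd->svc;
 *	u32 val;
 *	int ret;
 *
 *	ret = gb_svc_dme_peer_get(svc, intf_id, attr, selector, &val);
 *	if (ret)
 *		return ret;
 *
 *	ret = gb_svc_dme_peer_set(svc, intf_id, attr, selector, val | BIT(0));
 *	if (ret)
 *		return ret;
 */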

int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cport1_id;
	request.intf2_id = intf2_id;
	request.cport2_id = cport2_id;
	/*
	 * XXX: fix connection parameters to TC0 and all CPort flags for now.
	 */
	request.tc = 0;
	request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cport1_id;
	request.intf2_id = intf2_id;
	request.cport2_id = cport2_id;

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&connection->dev,
			"failed to destroy connection (%hhx:%hx %hhx:%hx) %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
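
/*
 * Illustrative sketch (not part of the driver): a connection created through
 * the SVC is torn down with the matching destroy call, using the same
 * interface/cport tuple. The id variables below are hypothetical
 * placeholders.
 *
 *	ret = gb_svc_connection_create(svc, ap_intf_id, ap_cport_id,
 *				       intf_id, cport_id);
 *	if (ret)
 *		return ret;
 *
 *	... use the connection ...
 *
 *	gb_svc_connection_destroy(svc, ap_intf_id, ap_cport_id,
 *				  intf_id, cport_id);
 */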

/* Creates bi-directional routes between the devices */
static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			       u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->connection->dev,
			"failed to destroy route (%hhx %hhx) %d\n",
			intf1_id, intf2_id, ret);
	}
}

static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_protocol_version_response *version;
	struct device *dev = &connection->dev;

	version = op->request->payload;

	if (version->major > GB_SVC_VERSION_MAJOR) {
		dev_err(&connection->dev,
			"unsupported major version (%hhu > %hhu)\n",
			version->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	connection->module_major = version->major;
	connection->module_minor = version->minor;

	if (!gb_operation_response_alloc(op, sizeof(*version), GFP_KERNEL)) {
		dev_err(dev, "%s: error allocating response\n",
			__func__);
		return -ENOMEM;
	}

	version = op->response->payload;
	version->major = connection->module_major;
	version->minor = connection->module_minor;

	return 0;
}

static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct greybus_host_device *hd = connection->hd;
	struct gb_svc_hello_request *hello_request;
	struct device *dev = &connection->dev;
	struct gb_interface *intf;
	u16 endo_id;
	u8 interface_id;
	int ret;

	/*
	 * The SVC sends information about the endo and the interface-id in
	 * the hello request; use that to create the endo.
	 */
	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_err(dev, "%s: Illegal size of hello request (%zu < %zu)\n",
			__func__, op->request->payload_size,
			sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	endo_id = le16_to_cpu(hello_request->endo_id);
	interface_id = hello_request->interface_id;

	/* Set up the Endo */
	ret = greybus_endo_setup(hd, endo_id, interface_id);
	if (ret)
		return ret;

	/*
	 * The Endo and its modules are ready now, fix the AP's partially
	 * initialized svc protocol and its connection.
	 */
	intf = gb_ap_interface_create(hd, connection, interface_id);
	if (!intf) {
		gb_endo_remove(hd->endo);
		return -EINVAL;
	}

	return 0;
}

/*
 * 'struct svc_hotplug' must be freed by svc_process_hotplug() before it
 * returns, irrespective of success or failure in bringing up the module.
 */
static void svc_process_hotplug(struct work_struct *work)
{
	struct svc_hotplug *svc_hotplug = container_of(work, struct svc_hotplug,
							work);
	struct gb_svc_intf_hotplug_request *hotplug = &svc_hotplug->data;
	struct gb_connection *connection = svc_hotplug->connection;
	struct gb_svc *svc = connection->private;
	struct greybus_host_device *hd = connection->hd;
	struct device *dev = &connection->dev;
	struct gb_interface *intf;
	u8 intf_id;
	int device_id;
	int ret;

	/*
	 * Grab the information we need.
	 */
	intf_id = hotplug->intf_id;

	intf = gb_interface_create(hd, intf_id);
	if (!intf) {
		dev_err(dev, "%s: Failed to create interface with id %hhu\n",
			__func__, intf_id);
		goto free_svc_hotplug;
	}

	intf->unipro_mfg_id = le32_to_cpu(hotplug->data.unipro_mfg_id);
	intf->unipro_prod_id = le32_to_cpu(hotplug->data.unipro_prod_id);
	intf->ara_vend_id = le32_to_cpu(hotplug->data.ara_vend_id);
	intf->ara_prod_id = le32_to_cpu(hotplug->data.ara_prod_id);

	/*
	 * Create a device id for the interface:
	 * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
	 * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
	 *
	 * XXX Do we need to allocate device IDs for the SVC or the AP here?
	 * XXX And what about an AP with multiple interface blocks?
	 */
	device_id = ida_simple_get(&svc->device_id_map,
				   GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
	if (device_id < 0) {
		ret = device_id;
		dev_err(dev, "%s: Failed to allocate device id for interface with id %hhu (%d)\n",
			__func__, intf_id, ret);
		goto destroy_interface;
	}

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(dev, "%s: Device id operation failed, interface %hhu device_id %d (%d)\n",
			__func__, intf_id, device_id, ret);
		goto ida_put;
	}

	/*
	 * Create a two-way route between the AP and the new interface.
	 */
	ret = gb_svc_route_create(svc, hd->endo->ap_intf_id, GB_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(dev, "%s: Route create operation failed, interface %hhu device_id %d (%d)\n",
			__func__, intf_id, device_id, ret);
		goto svc_id_free;
	}

	ret = gb_interface_init(intf, device_id);
	if (ret) {
		dev_err(dev, "%s: Failed to initialize interface, interface %hhu device_id %d (%d)\n",
			__func__, intf_id, device_id, ret);
		goto destroy_route;
	}

	goto free_svc_hotplug;

destroy_route:
	gb_svc_route_destroy(svc, hd->endo->ap_intf_id, intf_id);
svc_id_free:
	/*
	 * XXX Should we tell the SVC that this device id no longer belongs to
	 * XXX the interface?
	 */
ida_put:
	ida_simple_remove(&svc->device_id_map, device_id);
destroy_interface:
	gb_interface_remove(hd, intf_id);
free_svc_hotplug:
	kfree(svc_hotplug);
}

/*
 * Bringing up a module can be time consuming, as it may require lots of
 * initialization on the module side. On top of that, we may also need to
 * download the firmware first and flash it on the module.
 *
 * So that other hotplug events don't have to wait for all this to finish,
 * handle most of the module hotplug work outside of the hotplug callback,
 * with the help of a workqueue.
 */
static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
{
	struct gb_message *request = op->request;
	struct svc_hotplug *svc_hotplug;

	if (request->payload_size < sizeof(svc_hotplug->data)) {
		dev_err(&op->connection->dev,
			"%s: short hotplug request received (%zu < %zu)\n",
			__func__, request->payload_size,
			sizeof(svc_hotplug->data));
		return -EINVAL;
	}

	svc_hotplug = kmalloc(sizeof(*svc_hotplug), GFP_KERNEL);
	if (!svc_hotplug)
		return -ENOMEM;

	svc_hotplug->connection = op->connection;
	memcpy(&svc_hotplug->data, op->request->payload, sizeof(svc_hotplug->data));

	INIT_WORK(&svc_hotplug->work, svc_process_hotplug);
	queue_work(system_unbound_wq, &svc_hotplug->work);

	return 0;
}

static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
{
	struct gb_message *request = op->request;
	struct gb_svc_intf_hot_unplug_request *hot_unplug = request->payload;
	struct greybus_host_device *hd = op->connection->hd;
	struct device *dev = &op->connection->dev;
	struct gb_svc *svc = op->connection->private;
	u8 device_id;
	struct gb_interface *intf;
	u8 intf_id;

	if (request->payload_size < sizeof(*hot_unplug)) {
		dev_err(&op->connection->dev,
			"short hot unplug request received (%zu < %zu)\n",
			request->payload_size, sizeof(*hot_unplug));
		return -EINVAL;
	}

	intf_id = hot_unplug->intf_id;

	intf = gb_interface_find(hd, intf_id);
	if (!intf) {
		dev_err(dev, "%s: Couldn't find interface for id %hhu\n",
			__func__, intf_id);
		return -EINVAL;
	}

	device_id = intf->device_id;
	gb_interface_remove(hd, intf_id);

	/*
	 * Destroy the two-way route between the AP and the interface.
	 */
	gb_svc_route_destroy(svc, hd->endo->ap_intf_id, intf_id);

	ida_simple_remove(&svc->device_id_map, device_id);

	return 0;
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_err(&op->connection->dev,
			"short reset request received (%zu < %zu)\n",
			request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}

static int gb_svc_request_recv(u8 type, struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially),
	 * and the code below enforces that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, once the two above have been handled.
	 *
	 * Incoming requests are guaranteed to be serialized, so we don't need
	 * to protect 'state' against races.
	 */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&connection->dev,
			 "unexpected SVC request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	default:
		dev_err(&op->connection->dev,
			"unsupported request: %hhu\n", type);
		return -EINVAL;
	}
}

static int gb_svc_connection_init(struct gb_connection *connection)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return -ENOMEM;

	connection->hd->svc = svc;
	svc->state = GB_SVC_STATE_RESET;
	svc->connection = connection;
	connection->private = svc;

	WARN_ON(connection->hd->initial_svc_connection);
	connection->hd->initial_svc_connection = connection;

	ida_init(&svc->device_id_map);

	return 0;
}

static void gb_svc_connection_exit(struct gb_connection *connection)
{
	struct gb_svc *svc = connection->private;

	ida_destroy(&svc->device_id_map);
	connection->hd->svc = NULL;
	connection->private = NULL;
	kfree(svc);
}

static struct gb_protocol svc_protocol = {
	.name			= "svc",
	.id			= GREYBUS_PROTOCOL_SVC,
	.major			= GB_SVC_VERSION_MAJOR,
	.minor			= GB_SVC_VERSION_MINOR,
	.connection_init	= gb_svc_connection_init,
	.connection_exit	= gb_svc_connection_exit,
	.request_recv		= gb_svc_request_recv,
	.flags			= GB_PROTOCOL_SKIP_CONTROL_CONNECTED |
				  GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED |
				  GB_PROTOCOL_NO_BUNDLE |
				  GB_PROTOCOL_SKIP_VERSION |
				  GB_PROTOCOL_SKIP_SVC_CONNECTION,
};
gb_builtin_protocol_driver(svc_protocol);