greybus: svc: Read and clear module's boot status
[linux-2.6-block.git] drivers/staging/greybus/svc.c
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"

#define CPORT_FLAGS_E2EFC	(1)
#define CPORT_FLAGS_CSD_N	(2)
#define CPORT_FLAGS_CSV_N	(4)

enum gb_svc_state {
	GB_SVC_STATE_RESET,
	GB_SVC_STATE_PROTOCOL_VERSION,
	GB_SVC_STATE_SVC_HELLO,
};

struct gb_svc {
	struct gb_connection	*connection;
	enum gb_svc_state	state;
	struct ida		device_id_map;
};

struct svc_hotplug {
	struct work_struct			work;
	struct gb_connection			*connection;
	struct gb_svc_intf_hotplug_request	data;
};

/*
 * The AP's SVC cport is required early to get messages from the SVC. This
 * happens even before the Endo is created, and hence before any modules or
 * interfaces exist.
 *
 * This is a temporary connection, used only at initial bootup.
 */
struct gb_connection *
gb_ap_svc_connection_create(struct greybus_host_device *hd)
{
	struct gb_connection *connection;

	connection = gb_connection_create_range(hd, NULL, hd->parent,
						GB_SVC_CPORT_ID,
						GREYBUS_PROTOCOL_SVC,
						GB_SVC_CPORT_ID,
						GB_SVC_CPORT_ID + 1);

	return connection;
}

/*
 * We know the endo-type and the AP's interface id now, so let's create a
 * proper svc connection (and its interface/bundle) and get rid of the
 * initial, partially initialized svc connection.
 */
static struct gb_interface *
gb_ap_interface_create(struct greybus_host_device *hd,
		       struct gb_connection *connection, u8 interface_id)
{
	struct gb_interface *intf;
	struct device *dev = &hd->endo->dev;

	intf = gb_interface_create(hd, interface_id);
	if (!intf) {
		dev_err(dev, "%s: Failed to create interface with id %hhu\n",
			__func__, interface_id);
		return NULL;
	}

	intf->device_id = GB_DEVICE_ID_AP;
	svc_update_connection(intf, connection);

	/* It's no longer a partially initialized connection */
	hd->initial_svc_connection = NULL;

	return intf;
}
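
/* Inform the SVC of the device id that has been assigned to an interface. */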
static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	struct gb_svc_intf_device_id_request request;

	request.intf_id = intf_id;
	request.device_id = device_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
				 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_reset_request request;

	request.intf_id = intf_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
				 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_reset);
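
/*
 * Read a DME attribute of the UniPro peer on the given interface, via the
 * SVC. On success the attribute value is returned through @value; a non-zero
 * UniPro result code is logged and translated to -EINVAL.
 */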
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->connection->dev,
			"failed to get DME attribute (%hhu %hx %hu) %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->connection->dev,
			"Unipro error %hu while getting DME attribute (%hhu %hx %hu)\n",
			result, intf_id, attr, selector);
		return -EINVAL;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
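
/*
 * Write a DME attribute of the UniPro peer on the given interface, via the
 * SVC. A non-zero UniPro result code is logged and translated to -EINVAL.
 */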
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->connection->dev,
			"failed to set DME attribute (%hhu %hx %hu %u) %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->connection->dev,
			"Unipro error %hu while setting DME attribute (%hhu %hx %hu %u)\n",
			result, intf_id, attr, selector, value);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);

/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * boot status attribute. The AP needs to clear it after reading a non-zero
 * value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
{
	struct greybus_host_device *hd = intf->hd;
	int ret;
	u32 value;

	/* Read and clear boot status in T_TstSrcIncrement */
	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id,
				  DME_ATTR_T_TST_SRC_INCREMENT,
				  DME_ATTR_SELECTOR_INDEX, &value);
	if (ret)
		return ret;

	/*
	 * A non-zero boot status indicates the module has finished booting;
	 * only then is it safe to clear the attribute.
	 */
	if (!value) {
		dev_err(&intf->dev, "Module not ready yet\n");
		return -ENODEV;
	}

	return gb_svc_dme_peer_set(hd->svc, intf->interface_id,
				   DME_ATTR_T_TST_SRC_INCREMENT,
				   DME_ATTR_SELECTOR_INDEX, 0);
}
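
/*
 * Ask the SVC to set up a connection between cport1 on interface intf1 and
 * cport2 on interface intf2. Traffic class and CPort flags are currently
 * hard-coded (see the XXX note below).
 */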
int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	/*
	 * XXX: fix the connection parameters to TC0 and a fixed set of CPort
	 * flags for now.
	 */
	request.tc = 0;
	request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&connection->dev,
			"failed to destroy connection (%hhx:%hx %hhx:%hx) %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
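
/*
 * Illustrative sketch only (not taken from an in-tree caller): a driver
 * holding a struct gb_svc pointer and the two (interface id, cport id)
 * endpoints would normally pair the two exported calls above, roughly as:
 *
 *	ret = gb_svc_connection_create(svc, intf1_id, cport1_id,
 *				       intf2_id, cport2_id);
 *	if (ret)
 *		return ret;
 *	...
 *	gb_svc_connection_destroy(svc, intf1_id, cport1_id,
 *				  intf2_id, cport2_id);
 */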

/* Creates bi-directional routes between the devices */
static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			       u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->connection->dev,
			"failed to destroy route (%hhx %hhx) %d\n",
			intf1_id, intf2_id, ret);
	}
}
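
/*
 * Handle the incoming protocol-version request from the SVC: reject requests
 * whose major version is newer than what we support, otherwise record the
 * negotiated version and echo it back in the response.
 */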
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_protocol_version_request *request;
	struct gb_protocol_version_response *response;
	struct device *dev = &connection->dev;

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_err(&connection->dev,
			"unsupported major version (%hhu > %hhu)\n",
			request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	connection->module_major = request->major;
	connection->module_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) {
		dev_err(dev, "%s: error allocating response\n", __func__);
		return -ENOMEM;
	}

	response = op->response->payload;
	response->major = connection->module_major;
	response->minor = connection->module_minor;

	return 0;
}

static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct greybus_host_device *hd = connection->hd;
	struct gb_svc_hello_request *hello_request;
	struct device *dev = &connection->dev;
	struct gb_interface *intf;
	u16 endo_id;
	u8 interface_id;
	int ret;

	/*
	 * The SVC sends information about the endo and the AP's interface-id
	 * in the hello request; use that to create the endo.
	 */
	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_err(dev, "%s: Illegal size of hello request (%zu < %zu)\n",
			__func__, op->request->payload_size,
			sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	endo_id = le16_to_cpu(hello_request->endo_id);
	interface_id = hello_request->interface_id;

	/* Setup Endo */
	ret = greybus_endo_setup(hd, endo_id, interface_id);
	if (ret)
		return ret;

	/*
	 * The endo and its modules are ready now; fix up the AP's partially
	 * initialized svc protocol and its connection.
	 */
	intf = gb_ap_interface_create(hd, connection, interface_id);
	if (!intf) {
		gb_endo_remove(hd->endo);
		return -EINVAL;
	}

	return 0;
}
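
/*
 * Remove an interface on hot-unplug (or before re-adding it): take the
 * interface down, destroy its route to the AP and release its device id.
 */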
static void svc_intf_remove(struct gb_connection *connection,
			    struct gb_interface *intf)
{
	struct greybus_host_device *hd = connection->hd;
	struct gb_svc *svc = connection->private;
	u8 intf_id = intf->interface_id;
	u8 device_id;

	device_id = intf->device_id;
	gb_interface_remove(hd, intf_id);

	/*
	 * Destroy the two-way route between the AP and the interface.
	 */
	gb_svc_route_destroy(svc, hd->endo->ap_intf_id, intf_id);

	ida_simple_remove(&svc->device_id_map, device_id);
}

/*
 * 'struct svc_hotplug' should be freed by svc_process_hotplug() before it
 * returns, irrespective of success or failure in bringing up the module.
 */
static void svc_process_hotplug(struct work_struct *work)
{
	struct svc_hotplug *svc_hotplug = container_of(work, struct svc_hotplug,
						       work);
	struct gb_svc_intf_hotplug_request *hotplug = &svc_hotplug->data;
	struct gb_connection *connection = svc_hotplug->connection;
	struct gb_svc *svc = connection->private;
	struct greybus_host_device *hd = connection->hd;
	struct device *dev = &connection->dev;
	struct gb_interface *intf;
	u8 intf_id, device_id;
	int ret;

	/*
	 * Grab the information we need.
	 */
	intf_id = hotplug->intf_id;

	intf = gb_interface_find(hd, intf_id);
	if (intf) {
		/*
		 * We have received a hotplug request for an interface that
		 * already exists.
		 *
		 * This can happen in cases like:
		 * - bootrom loading the firmware image and booting into that,
		 *   which only generates a hotplug event, i.e. no hot-unplug
		 *   event.
		 * - Or the firmware on the module crashed and sent a hotplug
		 *   request again to the SVC, which got propagated to the AP.
		 *
		 * Remove the interface and add it again, and let the user
		 * know about this with a log message.
		 */
		dev_info(dev, "Removed interface (%hhu) to add it again\n",
			 intf_id);
		svc_intf_remove(connection, intf);
	}

	intf = gb_interface_create(hd, intf_id);
	if (!intf) {
		dev_err(dev, "%s: Failed to create interface with id %hhu\n",
			__func__, intf_id);
		goto free_svc_hotplug;
	}

	ret = gb_svc_read_and_clear_module_boot_status(intf);
	if (ret)
		goto destroy_interface;

	intf->unipro_mfg_id = le32_to_cpu(hotplug->data.unipro_mfg_id);
	intf->unipro_prod_id = le32_to_cpu(hotplug->data.unipro_prod_id);
	intf->ara_vend_id = le32_to_cpu(hotplug->data.ara_vend_id);
	intf->ara_prod_id = le32_to_cpu(hotplug->data.ara_prod_id);

	/*
	 * Create a device id for the interface:
	 * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
	 * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
	 *
	 * XXX Do we need to allocate device ID for SVC or the AP here? And
	 * XXX what about an AP with multiple interface blocks?
	 */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "%s: Failed to allocate device id for interface with id %hhu (%d)\n",
			__func__, intf_id, ret);
		goto destroy_interface;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(dev, "%s: Device id operation failed, interface %hhu device_id %hhu (%d)\n",
			__func__, intf_id, device_id, ret);
		goto ida_put;
	}

	/*
	 * Create a two-way route between the AP and the new interface
	 */
	ret = gb_svc_route_create(svc, hd->endo->ap_intf_id, GB_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(dev, "%s: Route create operation failed, interface %hhu device_id %hhu (%d)\n",
			__func__, intf_id, device_id, ret);
		goto svc_id_free;
	}

	ret = gb_interface_init(intf, device_id);
	if (ret) {
		dev_err(dev, "%s: Failed to initialize interface, interface %hhu device_id %hhu (%d)\n",
			__func__, intf_id, device_id, ret);
		goto destroy_route;
	}

	goto free_svc_hotplug;

destroy_route:
	gb_svc_route_destroy(svc, hd->endo->ap_intf_id, intf_id);
svc_id_free:
	/*
	 * XXX Should we tell the SVC that this id doesn't belong to the
	 * XXX interface anymore?
	 */
ida_put:
	ida_simple_remove(&svc->device_id_map, device_id);
destroy_interface:
	gb_interface_remove(hd, intf_id);
free_svc_hotplug:
	kfree(svc_hotplug);
}

/*
 * Bringing up a module can be time consuming, as it may require a lot of
 * initialization on the module side. On top of that, we may also need to
 * download the firmware first and flash it on the module.
 *
 * So that other hotplug events don't have to wait for all of this to finish,
 * handle most of the module hotplug work outside of the hotplug callback,
 * with the help of a workqueue.
 */
static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
{
	struct gb_message *request = op->request;
	struct svc_hotplug *svc_hotplug;

	if (request->payload_size < sizeof(svc_hotplug->data)) {
		dev_err(&op->connection->dev,
			"%s: short hotplug request received (%zu < %zu)\n",
			__func__, request->payload_size,
			sizeof(svc_hotplug->data));
		return -EINVAL;
	}

	svc_hotplug = kmalloc(sizeof(*svc_hotplug), GFP_KERNEL);
	if (!svc_hotplug)
		return -ENOMEM;

	svc_hotplug->connection = op->connection;
	memcpy(&svc_hotplug->data, op->request->payload,
	       sizeof(svc_hotplug->data));

	INIT_WORK(&svc_hotplug->work, svc_process_hotplug);
	queue_work(system_unbound_wq, &svc_hotplug->work);

	return 0;
}

static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
{
	struct gb_message *request = op->request;
	struct gb_svc_intf_hot_unplug_request *hot_unplug = request->payload;
	struct greybus_host_device *hd = op->connection->hd;
	struct device *dev = &op->connection->dev;
	struct gb_interface *intf;
	u8 intf_id;

	if (request->payload_size < sizeof(*hot_unplug)) {
		dev_err(dev, "short hot unplug request received (%zu < %zu)\n",
			request->payload_size, sizeof(*hot_unplug));
		return -EINVAL;
	}

	intf_id = hot_unplug->intf_id;

	intf = gb_interface_find(hd, intf_id);
	if (!intf) {
		dev_err(dev, "%s: Couldn't find interface for id %hhu\n",
			__func__, intf_id);
		return -EINVAL;
	}

	svc_intf_remove(op->connection, intf);

	return 0;
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_err(&op->connection->dev,
			"short reset request received (%zu < %zu)\n",
			request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}

static int gb_svc_request_recv(u8 type, struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially)
	 * and the code below takes care of enforcing that. The expected order
	 * is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but only after the two above.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' against any races.
	 */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&connection->dev,
			 "unexpected SVC request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	default:
		dev_err(&op->connection->dev,
			"unsupported request: %hhu\n", type);
		return -EINVAL;
	}
}

static int gb_svc_connection_init(struct gb_connection *connection)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return -ENOMEM;

	connection->hd->svc = svc;
	svc->state = GB_SVC_STATE_RESET;
	svc->connection = connection;
	connection->private = svc;

	WARN_ON(connection->hd->initial_svc_connection);
	connection->hd->initial_svc_connection = connection;

	ida_init(&svc->device_id_map);

	return 0;
}

static void gb_svc_connection_exit(struct gb_connection *connection)
{
	struct gb_svc *svc = connection->private;

	ida_destroy(&svc->device_id_map);
	connection->hd->svc = NULL;
	connection->private = NULL;
	kfree(svc);
}

static struct gb_protocol svc_protocol = {
	.name			= "svc",
	.id			= GREYBUS_PROTOCOL_SVC,
	.major			= GB_SVC_VERSION_MAJOR,
	.minor			= GB_SVC_VERSION_MINOR,
	.connection_init	= gb_svc_connection_init,
	.connection_exit	= gb_svc_connection_exit,
	.request_recv		= gb_svc_request_recv,
	.flags			= GB_PROTOCOL_SKIP_CONTROL_CONNECTED |
				  GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED |
				  GB_PROTOCOL_NO_BUNDLE |
				  GB_PROTOCOL_SKIP_VERSION |
				  GB_PROTOCOL_SKIP_SVC_CONNECTION,
};
gb_builtin_protocol_driver(svc_protocol);