/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
9 | ||
067906f6 | 10 | #include <linux/workqueue.h> |
30c6d9d7 | 11 | |
f66427ad VK |
12 | #include "greybus.h" |
13 | ||
f6c6c138 JH |
14 | #define CPORT_FLAGS_E2EFC BIT(0) |
15 | #define CPORT_FLAGS_CSD_N BIT(1) | |
16 | #define CPORT_FLAGS_CSV_N BIT(2) | |
0b226497 | 17 | |
b45864d4 | 18 | |
9ae4109e | 19 | struct gb_svc_deferred_request { |
067906f6 | 20 | struct work_struct work; |
9ae4109e | 21 | struct gb_operation *operation; |
067906f6 VK |
22 | }; |
23 | ||
ead35460 | 24 | |
/* sysfs: expose the id of the Endo this SVC belongs to (hex, read-only). */
static ssize_t endo_id_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);
33 | ||
/* sysfs: expose the AP's interface id as reported in the SVC hello request. */
static ssize_t ap_intf_id_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);
42 | ||
/* Attribute group attached to the SVC device (svc->dev.groups). */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
49 | ||
505f16cc | 50 | static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id) |
30c6d9d7 AE |
51 | { |
52 | struct gb_svc_intf_device_id_request request; | |
53 | ||
54 | request.intf_id = intf_id; | |
55 | request.device_id = device_id; | |
56 | ||
57 | return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID, | |
58 | &request, sizeof(request), NULL, 0); | |
59 | } | |
60 | ||
3f0e9183 | 61 | int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id) |
30c6d9d7 AE |
62 | { |
63 | struct gb_svc_intf_reset_request request; | |
64 | ||
65 | request.intf_id = intf_id; | |
66 | ||
67 | return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET, | |
68 | &request, sizeof(request), NULL, 0); | |
69 | } | |
3f0e9183 | 70 | EXPORT_SYMBOL_GPL(gb_svc_intf_reset); |
30c6d9d7 | 71 | |
/*
 * Read a DME attribute from the peer on interface @intf_id via the SVC.
 * @attr and @selector identify the attribute; the value read is stored in
 * *@value when @value is non-NULL.
 *
 * Returns 0 on success, a negative errno if the operation failed, or -EIO
 * if the SVC reported a non-zero UniPro result code.
 */
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	/* Transport succeeded; check the UniPro-level result code. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
106 | ||
/*
 * Write @value to a DME attribute on the peer on interface @intf_id via
 * the SVC.  @attr and @selector identify the attribute.
 *
 * Returns 0 on success, a negative errno if the operation failed, or -EIO
 * if the SVC reported a non-zero UniPro result code.
 */
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	/* Transport succeeded; check the UniPro-level result code. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
139 | ||
/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for boot
 * status attribute ES3_INIT_STATUS. AP needs to read and clear it, after
 * reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * Check if the module is ES2 or ES3, and choose attr number
	 * appropriately.
	 * FIXME: Remove ES2 support from the kernel entirely.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
			intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
		attr = DME_ATTR_T_TST_SRC_INCREMENT;
	else
		attr = DME_ATTR_ES3_INIT_STATUS;

	/* Read and clear boot status in ES3_INIT_STATUS */
	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_ATTR_SELECTOR_INDEX, &value);

	if (ret)
		return ret;

	/*
	 * A nonzero boot status indicates the module has finished
	 * booting. Clear it.
	 */
	if (!value) {
		dev_err(&intf->dev, "Module not ready yet\n");
		return -ENODEV;
	}

	/*
	 * Check if the module needs to boot from UniPro.
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 * FIXME: Remove ES2 support from the kernel entirely.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
			intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
		init_status = value;
	else
		init_status = value >> 24;

	if (init_status == DME_DIS_UNIPRO_BOOT_STARTED ||
			init_status == DME_DIS_FALLBACK_UNIPRO_BOOT_STARTED)
		intf->boot_over_unipro = true;

	/* Write zero back to acknowledge (clear) the boot-status attribute. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_ATTR_SELECTOR_INDEX, 0);
}
202 | ||
/*
 * Ask the SVC to create a connection between cport @cport1_id on interface
 * @intf1_id and cport @cport2_id on interface @intf2_id.
 *
 * Returns 0 on success or a negative errno from the synchronous operation.
 */
int gb_svc_connection_create(struct gb_svc *svc,
				u8 intf1_id, u16 cport1_id,
				u8 intf2_id, u16 cport2_id,
				bool boot_over_unipro)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	/*
	 * XXX: fix connections paramaters to TC0 and all CPort flags
	 * for now.
	 */
	request.tc = 0;

	/*
	 * We need to skip setting E2EFC and other flags to the connection
	 * create request, for all cports, on an interface that need to boot
	 * over unipro, i.e. interfaces required to download firmware.
	 */
	if (boot_over_unipro)
		request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_CSD_N;
	else
		request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);
30c6d9d7 | 234 | |
/*
 * Ask the SVC to tear down the connection between cport @cport1_id on
 * interface @intf1_id and cport @cport2_id on interface @intf2_id.
 * Failures are logged but not propagated (best-effort teardown).
 */
void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
30c6d9d7 | 255 | |
bb106852 | 256 | /* Creates bi-directional routes between the devices */ |
505f16cc VK |
257 | static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, |
258 | u8 intf2_id, u8 dev2_id) | |
e08aaa49 PH |
259 | { |
260 | struct gb_svc_route_create_request request; | |
261 | ||
262 | request.intf1_id = intf1_id; | |
263 | request.dev1_id = dev1_id; | |
264 | request.intf2_id = intf2_id; | |
265 | request.dev2_id = dev2_id; | |
266 | ||
267 | return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, | |
268 | &request, sizeof(request), NULL, 0); | |
269 | } | |
e08aaa49 | 270 | |
/* Destroys bi-directional routes between the devices */
static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	/* Best-effort: failures are only logged, never propagated. */
	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}
287 | ||
/*
 * Request a UniPro power-mode change on interface @intf_id.  TX and RX
 * mode/gear/lane-count are passed through unchanged; @quirks is
 * byte-swapped to little-endian for the wire.
 *
 * Returns a negative errno if the operation itself failed, otherwise the
 * (non-negative) result code reported by the SVC.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
784f8761 | 317 | |
/*
 * Handle the SVC-initiated protocol-version request: validate the request
 * size, reject unsupported major versions, record the SVC's version on the
 * connection and respond with the version the AP will speak.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_protocol_version_request *request;
	struct gb_protocol_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	connection->module_major = request->major;
	connection->module_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	/* Echo back the (accepted) version we will be using. */
	response = op->response->payload;
	response->major = connection->module_major;
	response->minor = connection->module_minor;

	return 0;
}
352 | ||
/*
 * Handle the SVC hello request: record the Endo id and AP interface id
 * it carries, then register the svc device (which was initialized but
 * not added in gb_svc_create()).
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	/* Device registration is deferred to here so sysfs shows valid ids. */
	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	return 0;
}
379 | ||
/*
 * Tear down an interface: mark it disconnected, remove it, destroy the
 * AP<->interface route and release its device id.
 */
static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
	u8 intf_id = intf->interface_id;
	u8 device_id = intf->device_id;

	/* Flag the interface as gone before removal starts. */
	intf->disconnected = true;

	gb_interface_remove(intf);

	/*
	 * Destroy the two-way route between the AP and the interface.
	 */
	gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);

	ida_simple_remove(&svc->device_id_map, device_id);
}
396 | ||
9ae4109e | 397 | static void gb_svc_process_intf_hotplug(struct gb_operation *operation) |
30c6d9d7 | 398 | { |
24456a09 | 399 | struct gb_svc_intf_hotplug_request *request; |
9ae4109e | 400 | struct gb_connection *connection = operation->connection; |
067906f6 | 401 | struct gb_svc *svc = connection->private; |
2537636a | 402 | struct gb_host_device *hd = connection->hd; |
ead35460 VK |
403 | struct gb_interface *intf; |
404 | u8 intf_id, device_id; | |
ead35460 | 405 | int ret; |
30c6d9d7 | 406 | |
24456a09 | 407 | /* The request message size has already been verified. */ |
9ae4109e | 408 | request = operation->request->payload; |
24456a09 JH |
409 | intf_id = request->intf_id; |
410 | ||
411 | dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id); | |
30c6d9d7 | 412 | |
bbaca711 VK |
413 | intf = gb_interface_find(hd, intf_id); |
414 | if (intf) { | |
415 | /* | |
416 | * We have received a hotplug request for an interface that | |
417 | * already exists. | |
418 | * | |
419 | * This can happen in cases like: | |
420 | * - bootrom loading the firmware image and booting into that, | |
421 | * which only generates a hotplug event. i.e. no hot-unplug | |
422 | * event. | |
423 | * - Or the firmware on the module crashed and sent hotplug | |
424 | * request again to the SVC, which got propagated to AP. | |
425 | * | |
426 | * Remove the interface and add it again, and let user know | |
427 | * about this with a print message. | |
428 | */ | |
2f3db927 | 429 | dev_info(&svc->dev, "removing interface %u to add it again\n", |
684156a9 | 430 | intf_id); |
b4ee82ec | 431 | gb_svc_intf_remove(svc, intf); |
bbaca711 VK |
432 | } |
433 | ||
ead35460 VK |
434 | intf = gb_interface_create(hd, intf_id); |
435 | if (!intf) { | |
2f3db927 | 436 | dev_err(&svc->dev, "failed to create interface %u\n", |
684156a9 | 437 | intf_id); |
9ae4109e | 438 | return; |
ead35460 VK |
439 | } |
440 | ||
63d742b6 VK |
441 | intf->ddbl1_manufacturer_id = le32_to_cpu(request->data.ddbl1_mfr_id); |
442 | intf->ddbl1_product_id = le32_to_cpu(request->data.ddbl1_prod_id); | |
443 | intf->vendor_id = le32_to_cpu(request->data.ara_vend_id); | |
444 | intf->product_id = le32_to_cpu(request->data.ara_prod_id); | |
57c6bcc6 | 445 | intf->serial_number = le64_to_cpu(request->data.serial_number); |
63d742b6 | 446 | |
6bec5c78 | 447 | ret = gb_svc_read_and_clear_module_boot_status(intf); |
b395754a JH |
448 | if (ret) { |
449 | dev_err(&svc->dev, "failed to clear boot status of interface %u: %d\n", | |
450 | intf_id, ret); | |
6bec5c78 | 451 | goto destroy_interface; |
b395754a | 452 | } |
6bec5c78 | 453 | |
ead35460 VK |
454 | /* |
455 | * Create a device id for the interface: | |
456 | * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC | |
457 | * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP | |
458 | * | |
459 | * XXX Do we need to allocate device ID for SVC or the AP here? And what | |
460 | * XXX about an AP with multiple interface blocks? | |
461 | */ | |
c09db182 | 462 | device_id = ida_simple_get(&svc->device_id_map, |
89f637f7 | 463 | GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL); |
ead35460 VK |
464 | if (device_id < 0) { |
465 | ret = device_id; | |
2f3db927 | 466 | dev_err(&svc->dev, "failed to allocate device id for interface %u: %d\n", |
684156a9 | 467 | intf_id, ret); |
ead35460 VK |
468 | goto destroy_interface; |
469 | } | |
470 | ||
3f0e9183 | 471 | ret = gb_svc_intf_device_id(svc, intf_id, device_id); |
ead35460 | 472 | if (ret) { |
2f3db927 | 473 | dev_err(&svc->dev, "failed to set device id %u for interface %u: %d\n", |
684156a9 | 474 | device_id, intf_id, ret); |
ead35460 VK |
475 | goto ida_put; |
476 | } | |
477 | ||
7e275465 PH |
478 | /* |
479 | * Create a two-way route between the AP and the new interface | |
480 | */ | |
66069fb0 | 481 | ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP, |
3f0e9183 | 482 | intf_id, device_id); |
7e275465 | 483 | if (ret) { |
2f3db927 | 484 | dev_err(&svc->dev, "failed to create route to interface %u (device id %u): %d\n", |
684156a9 | 485 | intf_id, device_id, ret); |
0a020570 | 486 | goto svc_id_free; |
7e275465 PH |
487 | } |
488 | ||
ead35460 VK |
489 | ret = gb_interface_init(intf, device_id); |
490 | if (ret) { | |
2f3db927 | 491 | dev_err(&svc->dev, "failed to initialize interface %u (device id %u): %d\n", |
684156a9 | 492 | intf_id, device_id, ret); |
0a020570 | 493 | goto destroy_route; |
ead35460 | 494 | } |
30c6d9d7 | 495 | |
9ae4109e | 496 | return; |
ead35460 | 497 | |
0a020570 | 498 | destroy_route: |
66069fb0 | 499 | gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id); |
ead35460 VK |
500 | svc_id_free: |
501 | /* | |
502 | * XXX Should we tell SVC that this id doesn't belong to interface | |
503 | * XXX anymore. | |
504 | */ | |
505 | ida_put: | |
c09db182 | 506 | ida_simple_remove(&svc->device_id_map, device_id); |
ead35460 | 507 | destroy_interface: |
80d1ede8 | 508 | gb_interface_remove(intf); |
9ae4109e JH |
509 | } |
510 | ||
/*
 * Handle a deferred interface hot-unplug request: look the interface up
 * and remove it.  Runs from the svc workqueue; the request size has
 * already been verified by gb_svc_intf_hot_unplug_recv().
 */
static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
{
	struct gb_svc *svc = operation->connection->private;
	struct gb_svc_intf_hot_unplug_request *request;
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_interface *intf;
	u8 intf_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

	intf = gb_interface_find(hd, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
			 intf_id);
		return;
	}

	gb_svc_intf_remove(svc, intf);
}
534 | ||
/*
 * Workqueue handler for deferred SVC requests: dispatch on the original
 * request type, then drop the operation reference taken when the request
 * was queued and free the deferred-request wrapper.
 */
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = operation->connection->private;
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_INTF_HOTPLUG:
		gb_svc_process_intf_hotplug(operation);
		break;
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		gb_svc_process_intf_hot_unplug(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	/* Balance the gb_operation_get() in gb_svc_queue_deferred_request(). */
	gb_operation_put(operation);
	kfree(dr);
}
561 | ||
/*
 * Queue @operation for deferred processing on the svc workqueue.  Takes
 * an extra reference on the operation; it is dropped (and the wrapper
 * freed) by gb_svc_process_deferred_request().
 *
 * Returns 0 on success or -ENOMEM.
 */
static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = operation->connection->private;
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	/* Keep the operation alive until the worker has processed it. */
	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);

	queue_work(svc->wq, &dr->work);

	return 0;
}
ead35460 | 580 | |
/*
 * Bringing up a module can be time consuming, as that may require lots of
 * initialization on the module side. Over that, we may also need to download
 * the firmware first and flash that on the module.
 *
 * In order not to make other svc events wait for all this to finish,
 * handle most of module hotplug stuff outside of the hotplug callback, with
 * help of a workqueue.
 */
static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
{
	struct gb_svc *svc = op->connection->private;
	struct gb_svc_intf_hotplug_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	/* Actual processing happens in gb_svc_process_intf_hotplug(). */
	return gb_svc_queue_deferred_request(op);
}
607 | ||
/*
 * Receive handler for interface hot-unplug: validate the request size and
 * defer the actual removal to the svc workqueue.
 */
static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
{
	struct gb_svc *svc = op->connection->private;
	struct gb_svc_intf_hot_unplug_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	/* Actual processing happens in gb_svc_process_intf_hot_unplug(). */
	return gb_svc_queue_deferred_request(op);
}
625 | ||
/*
 * Receive handler for interface reset requests.  Currently only validates
 * the request; the reset itself is not implemented yet (see FIXME).
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = op->connection->private;
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
646 | ||
/*
 * Top-level request dispatcher for the SVC connection.  Enforces the
 * startup ordering (version, then hello, then everything else) before
 * routing each request type to its handler.
 */
static int gb_svc_request_recv(u8 type, struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	/* Ordering is satisfied; dispatch and advance the state machine. */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
706 | ||
/*
 * Device release callback: frees everything allocated by gb_svc_create()
 * once the last reference to the svc device is dropped.
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}

struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
722 | ||
/*
 * Allocate and initialize an SVC structure for host device @hd: its
 * deferred-work queue, device-id ida, state machine and the static SVC
 * connection on GB_SVC_CPORT_ID.  The embedded struct device is
 * initialized but not added; registration happens later from the hello
 * request handler.  Returns NULL on failure.
 */
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	/* max_active = 1: deferred requests are processed one at a time. */
	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      GREYBUS_PROTOCOL_SVC);
	if (!svc->connection) {
		dev_err(&svc->dev, "failed to create connection\n");
		/* gb_svc_release() cleans up the wq, ida and allocation. */
		put_device(&svc->dev);
		return NULL;
	}

	svc->connection->private = svc;

	return svc;
}
762 | ||
/*
 * Activate the SVC by initializing its connection.  Returns 0 on success
 * or a negative errno.
 */
int gb_svc_add(struct gb_svc *svc)
{
	int ret;

	/*
	 * The SVC protocol is currently driven by the SVC, so the SVC device
	 * is added from the connection request handler when enough
	 * information has been received.
	 */
	ret = gb_connection_init(svc->connection);
	if (ret)
		return ret;

	return 0;
}
778 | ||
/*
 * Deactivate the SVC: unregister the device (if the hello handler got far
 * enough to register it), shut down the connection and drain any deferred
 * work still queued.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev))
		device_del(&svc->dev);

	gb_connection_exit(svc->connection);

	flush_workqueue(svc->wq);
}
efe6ef76 | 791 | |
/* Drop a reference on the svc device; gb_svc_release() runs on the last. */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}
796 | ||
/* Protocol connection-init hook: nothing to do beyond a debug trace. */
static int gb_svc_connection_init(struct gb_connection *connection)
{
	struct gb_svc *svc = connection->private;

	dev_dbg(&svc->dev, "%s\n", __func__);

	return 0;
}

/* Protocol connection-exit hook: nothing to do beyond a debug trace. */
static void gb_svc_connection_exit(struct gb_connection *connection)
{
	struct gb_svc *svc = connection->private;

	dev_dbg(&svc->dev, "%s\n", __func__);
}
812 | ||
/*
 * SVC protocol descriptor.  The skip flags opt out of the generic
 * control-connection notifications and version handshake, since the SVC
 * drives its own version exchange via gb_svc_request_recv().
 */
static struct gb_protocol svc_protocol = {
	.name			= "svc",
	.id			= GREYBUS_PROTOCOL_SVC,
	.major			= GB_SVC_VERSION_MAJOR,
	.minor			= GB_SVC_VERSION_MINOR,
	.connection_init	= gb_svc_connection_init,
	.connection_exit	= gb_svc_connection_exit,
	.request_recv		= gb_svc_request_recv,
	.flags			= GB_PROTOCOL_SKIP_CONTROL_CONNECTED |
				  GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED |
				  GB_PROTOCOL_SKIP_VERSION,
};
gb_builtin_protocol_driver(svc_protocol);