Commit | Line | Data |
---|---|---|
543be3d8 WH |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Driver for FPGA Device Feature List (DFL) Support | |
4 | * | |
5 | * Copyright (C) 2017-2018 Intel Corporation, Inc. | |
6 | * | |
7 | * Authors: | |
8 | * Kang Luwei <luwei.kang@intel.com> | |
9 | * Zhang Yi <yi.z.zhang@intel.com> | |
10 | * Wu Hao <hao.wu@intel.com> | |
11 | * Xiao Guangrong <guangrong.xiao@linux.intel.com> | |
12 | */ | |
322b598b | 13 | #include <linux/fpga-dfl.h> |
543be3d8 | 14 | #include <linux/module.h> |
322b598b | 15 | #include <linux/uaccess.h> |
543be3d8 WH |
16 | |
17 | #include "dfl.h" | |
18 | ||
/* serializes id allocation/free across all feature dev types */
static DEFINE_MUTEX(dfl_id_mutex);

/*
 * when adding a new feature dev support in DFL framework, it's required to
 * add a new item in enum dfl_id_type and provide related information in below
 * dfl_devs table which is indexed by dfl_id_type, e.g. name string used for
 * platform device creation (define name strings in dfl.h, as they could be
 * reused by platform device drivers).
 *
 * if the new feature dev needs chardev support, then it's required to add
 * a new item in dfl_chardevs table and configure dfl_devs[i].devt_type as
 * index to dfl_chardevs table. If no chardev support just set devt_type
 * as one invalid index (DFL_FPGA_DEVT_MAX).
 */
enum dfl_id_type {
	FME_ID,		/* fme id allocation and mapping */
	PORT_ID,	/* port id allocation and mapping */
	DFL_ID_MAX,
};

enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};

/*
 * Per-type lockdep classes/names for pdata->lock (see
 * lockdep_set_class_and_name() in build_info_commit_dev()); presumably FME
 * and port pdata locks can be held nested — confirm against callers.
 */
static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};
51 | ||
543be3d8 WH |
/**
 * dfl_dev_info - dfl feature device information.
 * @name: name string of the feature platform device.
 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
 * @id: idr id of the feature dev.
 * @devt_type: index to dfl_chrdevs[].
 */
struct dfl_dev_info {
	const char *name;
	u16 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};

/* it is indexed by dfl_id_type (entry order must match the enum) */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};
73 | ||
/**
 * dfl_chardev_info - chardev information of dfl feature device
 * @name: name string of the char device.
 * @devt: devt of the char device, 0 until dfl_chardev_init() allocates it.
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};

/* indexed by enum dfl_fpga_devt_type (entry order must match the enum) */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
89 | ||
90 | static void dfl_ids_init(void) | |
91 | { | |
92 | int i; | |
93 | ||
94 | for (i = 0; i < ARRAY_SIZE(dfl_devs); i++) | |
95 | idr_init(&dfl_devs[i].id); | |
96 | } | |
97 | ||
98 | static void dfl_ids_destroy(void) | |
99 | { | |
100 | int i; | |
101 | ||
102 | for (i = 0; i < ARRAY_SIZE(dfl_devs); i++) | |
103 | idr_destroy(&dfl_devs[i].id); | |
104 | } | |
105 | ||
106 | static int dfl_id_alloc(enum dfl_id_type type, struct device *dev) | |
107 | { | |
108 | int id; | |
109 | ||
110 | WARN_ON(type >= DFL_ID_MAX); | |
111 | mutex_lock(&dfl_id_mutex); | |
112 | id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL); | |
113 | mutex_unlock(&dfl_id_mutex); | |
114 | ||
115 | return id; | |
116 | } | |
117 | ||
118 | static void dfl_id_free(enum dfl_id_type type, int id) | |
119 | { | |
120 | WARN_ON(type >= DFL_ID_MAX); | |
121 | mutex_lock(&dfl_id_mutex); | |
122 | idr_remove(&dfl_devs[type].id, id); | |
123 | mutex_unlock(&dfl_id_mutex); | |
124 | } | |
125 | ||
126 | static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev) | |
127 | { | |
128 | int i; | |
129 | ||
130 | for (i = 0; i < ARRAY_SIZE(dfl_devs); i++) | |
131 | if (!strcmp(dfl_devs[i].name, pdev->name)) | |
132 | return i; | |
133 | ||
134 | return DFL_ID_MAX; | |
135 | } | |
136 | ||
8a5de2de | 137 | static enum dfl_id_type dfh_id_to_type(u16 id) |
543be3d8 WH |
138 | { |
139 | int i; | |
140 | ||
141 | for (i = 0; i < ARRAY_SIZE(dfl_devs); i++) | |
142 | if (dfl_devs[i].dfh_id == id) | |
143 | return i; | |
144 | ||
145 | return DFL_ID_MAX; | |
146 | } | |
147 | ||
6e8fd6e4 WH |
148 | /* |
149 | * introduce a global port_ops list, it allows port drivers to register ops | |
150 | * in such list, then other feature devices (e.g. FME), could use the port | |
151 | * functions even related port platform device is hidden. Below is one example, | |
152 | * in virtualization case of PCIe-based FPGA DFL device, when SRIOV is | |
153 | * enabled, port (and it's AFU) is turned into VF and port platform device | |
154 | * is hidden from system but it's still required to access port to finish FPGA | |
155 | * reconfiguration function in FME. | |
156 | */ | |
157 | ||
158 | static DEFINE_MUTEX(dfl_port_ops_mutex); | |
159 | static LIST_HEAD(dfl_port_ops_list); | |
160 | ||
161 | /** | |
162 | * dfl_fpga_port_ops_get - get matched port ops from the global list | |
163 | * @pdev: platform device to match with associated port ops. | |
164 | * Return: matched port ops on success, NULL otherwise. | |
165 | * | |
166 | * Please note that must dfl_fpga_port_ops_put after use the port_ops. | |
167 | */ | |
168 | struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev) | |
169 | { | |
170 | struct dfl_fpga_port_ops *ops = NULL; | |
171 | ||
172 | mutex_lock(&dfl_port_ops_mutex); | |
173 | if (list_empty(&dfl_port_ops_list)) | |
174 | goto done; | |
175 | ||
176 | list_for_each_entry(ops, &dfl_port_ops_list, node) { | |
177 | /* match port_ops using the name of platform device */ | |
178 | if (!strcmp(pdev->name, ops->name)) { | |
179 | if (!try_module_get(ops->owner)) | |
180 | ops = NULL; | |
181 | goto done; | |
182 | } | |
183 | } | |
184 | ||
185 | ops = NULL; | |
186 | done: | |
187 | mutex_unlock(&dfl_port_ops_mutex); | |
188 | return ops; | |
189 | } | |
190 | EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get); | |
191 | ||
192 | /** | |
193 | * dfl_fpga_port_ops_put - put port ops | |
194 | * @ops: port ops. | |
195 | */ | |
196 | void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops) | |
197 | { | |
198 | if (ops && ops->owner) | |
199 | module_put(ops->owner); | |
200 | } | |
201 | EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put); | |
202 | ||
/**
 * dfl_fpga_port_ops_add - add port_ops to global list
 * @ops: port ops to add.
 *
 * @ops is linked into the global list, so it must remain valid until it is
 * removed again via dfl_fpga_port_ops_del().
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
214 | ||
/**
 * dfl_fpga_port_ops_del - remove port_ops from global list
 * @ops: port ops to del.
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
226 | ||
d06b004b WH |
227 | /** |
228 | * dfl_fpga_check_port_id - check the port id | |
229 | * @pdev: port platform device. | |
230 | * @pport_id: port id to compare. | |
231 | * | |
232 | * Return: 1 if port device matches with given port id, otherwise 0. | |
233 | */ | |
234 | int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id) | |
235 | { | |
69bb18dd WH |
236 | struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); |
237 | struct dfl_fpga_port_ops *port_ops; | |
238 | ||
239 | if (pdata->id != FEATURE_DEV_ID_UNUSED) | |
240 | return pdata->id == *(int *)pport_id; | |
d06b004b | 241 | |
69bb18dd | 242 | port_ops = dfl_fpga_port_ops_get(pdev); |
d06b004b WH |
243 | if (!port_ops || !port_ops->get_id) |
244 | return 0; | |
245 | ||
69bb18dd | 246 | pdata->id = port_ops->get_id(pdev); |
d06b004b WH |
247 | dfl_fpga_port_ops_put(port_ops); |
248 | ||
69bb18dd | 249 | return pdata->id == *(int *)pport_id; |
d06b004b WH |
250 | } |
251 | EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id); | |
252 | ||
89eb35e8 XY |
253 | #define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER) |
254 | ||
5b57d02a XG |
255 | /** |
256 | * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device | |
257 | * @pdev: feature device. | |
258 | */ | |
259 | void dfl_fpga_dev_feature_uinit(struct platform_device *pdev) | |
260 | { | |
261 | struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); | |
262 | struct dfl_feature *feature; | |
263 | ||
264 | dfl_fpga_dev_for_each_feature(pdata, feature) | |
265 | if (feature->ops) { | |
3c51ff77 WH |
266 | if (feature->ops->uinit) |
267 | feature->ops->uinit(pdev, feature); | |
5b57d02a XG |
268 | feature->ops = NULL; |
269 | } | |
270 | } | |
271 | EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit); | |
272 | ||
/*
 * Init one sub feature instance with its matched driver: map the feature's
 * mmio region (except for the header feature, whose region is owned by the
 * DFL bus device), run the driver's init callback, then publish drv->ops.
 *
 * @pdata is currently unused here — kept to mirror the caller's context.
 * Returns 0 on success, negative error code otherwise.
 */
static int dfl_feature_instance_init(struct platform_device *pdev,
				     struct dfl_feature_platform_data *pdata,
				     struct dfl_feature *feature,
				     struct dfl_feature_driver *drv)
{
	void __iomem *base;
	int ret = 0;

	if (!is_header_feature(feature)) {
		base = devm_platform_ioremap_resource(pdev,
						      feature->resource_index);
		if (IS_ERR(base)) {
			dev_err(&pdev->dev,
				"ioremap failed for feature 0x%x!\n",
				feature->id);
			return PTR_ERR(base);
		}

		feature->ioaddr = base;
	}

	if (drv->ops->init) {
		ret = drv->ops->init(pdev, feature);
		if (ret)
			return ret;
	}

	/* ops set only after successful init, so uinit never sees half-init */
	feature->ops = drv->ops;

	return ret;
}
304 | ||
15bbb300 WH |
305 | static bool dfl_feature_drv_match(struct dfl_feature *feature, |
306 | struct dfl_feature_driver *driver) | |
307 | { | |
308 | const struct dfl_feature_id *ids = driver->id_table; | |
309 | ||
310 | if (ids) { | |
311 | while (ids->id) { | |
312 | if (ids->id == feature->id) | |
313 | return true; | |
314 | ids++; | |
315 | } | |
316 | } | |
317 | return false; | |
318 | } | |
319 | ||
5b57d02a XG |
/**
 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
 * @pdev: feature device.
 * @feature_drvs: drvs for sub features.
 *
 * This function will match sub features with given feature drvs list and
 * use matched drv to init related sub feature.
 *
 * On any failure, every already-initialized sub feature is torn down again
 * via dfl_fpga_dev_feature_uinit().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
			      struct dfl_feature_driver *feature_drvs)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature_driver *drv = feature_drvs;
	struct dfl_feature *feature;
	int ret;

	/* drv table is terminated by an entry with NULL ops */
	while (drv->ops) {
		dfl_fpga_dev_for_each_feature(pdata, feature) {
			if (dfl_feature_drv_match(feature, drv)) {
				ret = dfl_feature_instance_init(pdev, pdata,
								feature, drv);
				if (ret)
					goto exit;
			}
		}
		drv++;
	}

	return 0;
exit:
	dfl_fpga_dev_feature_uinit(pdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
356 | ||
b16c5147 WH |
357 | static void dfl_chardev_uinit(void) |
358 | { | |
359 | int i; | |
360 | ||
361 | for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) | |
362 | if (MAJOR(dfl_chrdevs[i].devt)) { | |
363 | unregister_chrdev_region(dfl_chrdevs[i].devt, | |
de9a7f6f | 364 | MINORMASK + 1); |
b16c5147 WH |
365 | dfl_chrdevs[i].devt = MKDEV(0, 0); |
366 | } | |
367 | } | |
368 | ||
/*
 * Allocate a char device region (MINORMASK + 1 minors) for each devt type.
 * On failure, regions allocated so far are released again.
 */
static int dfl_chardev_init(void)
{
	int i, ret;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
		ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
					  MINORMASK + 1, dfl_chrdevs[i].name);
		if (ret)
			goto exit;
	}

	return 0;

exit:
	dfl_chardev_uinit();
	return ret;
}
386 | ||
387 | static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id) | |
388 | { | |
389 | if (type >= DFL_FPGA_DEVT_MAX) | |
390 | return 0; | |
391 | ||
392 | return MKDEV(MAJOR(dfl_chrdevs[type].devt), id); | |
393 | } | |
394 | ||
/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 *
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * set parent to the feature device so that its refcount is
	 * decreased after the last refcount of cdev is gone, that
	 * makes sure the feature device is valid during device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	/* single minor per feature dev; devt was assigned at dev creation */
	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
424 | ||
/**
 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
 * @pdev: feature dev.
 */
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_del(&pdata->cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
436 | ||
543be3d8 WH |
/**
 * struct build_feature_devs_info - info collected during feature dev build.
 *
 * @dev: device to enumerate.
 * @cdev: the container device for all feature devices.
 * @nr_irqs: number of irqs for all feature devices.
 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
 *	       this device.
 * @feature_dev: current feature device.
 * @ioaddr: header register region address of current FIU in enumeration.
 * @start: register resource start of current FIU.
 * @len: max register resource length of current FIU.
 * @sub_features: a sub features linked list for feature device in enumeration.
 * @feature_num: number of sub features for feature device in enumeration.
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	unsigned int nr_irqs;
	int *irq_table;

	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	resource_size_t start;
	resource_size_t len;
	struct list_head sub_features;
	int feature_num;
};

/**
 * struct dfl_feature_info - sub feature info collected during feature dev build
 *
 * @fid: id of this sub feature.
 * @mmio_res: mmio resource of this sub feature.
 * @ioaddr: mapped base address of mmio resource.
 *	    NOTE(review): not assigned in the enumeration paths visible in
 *	    this file section — confirm whether it is still used.
 * @node: node in sub_features linked list.
 * @irq_base: start of irq index in this sub feature.
 * @nr_irqs: number of irqs of this sub feature.
 */
struct dfl_feature_info {
	u16 fid;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
	unsigned int irq_base;
	unsigned int nr_irqs;
};
484 | ||
/*
 * Link a registered port device into the container device's port list and
 * take a reference on the port's feature device.
 */
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
				       struct platform_device *port)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);

	mutex_lock(&cdev->lock);
	list_add(&pdata->node, &cdev->port_dev_list);
	get_device(&pdata->dev->dev);
	mutex_unlock(&cdev->lock);
}
495 | ||
/*
 * register current feature device, it is called when we need to switch to
 * another feature parsing or we have parsed all features on given device
 * feature list.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	enum dfl_id_type type;
	int ret, index = 0, res_idx = 0;

	type = feature_dev_id_type(fdev);
	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
		return -EINVAL;

	/*
	 * we do not need to care for the memory which is associated with
	 * the platform device. After calling platform_device_unregister(),
	 * it will be automatically freed by device's release() callback,
	 * platform_device_release().
	 */
	pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	/* port id is looked up lazily, see dfl_fpga_check_port_id() */
	pdata->id = FEATURE_DEV_ID_UNUSED;
	mutex_init(&pdata->lock);
	lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
				   dfl_pdata_key_strings[type]);

	/*
	 * the count should be initialized to 0 to make sure
	 *__fpga_port_enable() following __fpga_port_disable()
	 * works properly for port device.
	 * and it should always be 0 for fme device.
	 */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature has one MMIO resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* fill features and resource information for feature dev */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index++];
		struct dfl_feature_irq_ctx *ctx;
		unsigned int i;

		/* save resource information for each feature */
		feature->dev = fdev;
		feature->id = finfo->fid;

		/*
		 * the FIU header feature has some fundamental functions (sriov
		 * set, port enable/disable) needed for the dfl bus device and
		 * other sub features. So its mmio resource should be mapped by
		 * DFL bus device. And we should not assign it to feature
		 * devices (dfl-fme/afu) again.
		 */
		if (is_header_feature(feature)) {
			feature->resource_index = -1;
			feature->ioaddr =
				devm_ioremap_resource(binfo->dev,
						      &finfo->mmio_res);
			if (IS_ERR(feature->ioaddr))
				return PTR_ERR(feature->ioaddr);
		} else {
			feature->resource_index = res_idx;
			fdev->resource[res_idx++] = finfo->mmio_res;
		}

		/* translate local irq indexes into Linux IRQ numbers */
		if (finfo->nr_irqs) {
			ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
					   sizeof(*ctx), GFP_KERNEL);
			if (!ctx)
				return -ENOMEM;

			for (i = 0; i < finfo->nr_irqs; i++)
				ctx[i].irq =
					binfo->irq_table[finfo->irq_base + i];

			feature->irq_ctx = ctx;
			feature->nr_irqs = finfo->nr_irqs;
		}

		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		if (type == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
				get_device(&binfo->feature_dev->dev);
		/*
		 * reset it to avoid build_info_free() freeing their resource.
		 *
		 * The resource of successfully registered feature devices
		 * will be freed by platform_device_unregister(). See the
		 * comments in build_info_create_dev().
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}
615 | ||
/*
 * Allocate the platform device for the FIU being enumerated and assign its
 * id and devt; the device is only added later by build_info_commit_dev().
 */
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type)
{
	struct platform_device *fdev;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/*
	 * we use -ENODEV as the initialization indicator which indicates
	 * whether the id need to be reclaimed
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	binfo->feature_dev = fdev;
	binfo->feature_num = 0;

	INIT_LIST_HEAD(&binfo->sub_features);

	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
647 | ||
/*
 * Free enumeration state for a feature device that was never committed:
 * reclaim its id, drop any pending sub feature infos and release the
 * platform device reference.
 */
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * it is a valid id, free it. See comments in
	 * build_info_create_dev()
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	/* no-op when feature_dev was already committed (set to NULL) */
	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
670 | ||
671 | static inline u32 feature_size(void __iomem *start) | |
672 | { | |
673 | u64 v = readq(start + DFH); | |
674 | u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v); | |
675 | /* workaround for private features with invalid size, use 4K instead */ | |
676 | return ofst ? ofst : 4096; | |
677 | } | |
678 | ||
8a5de2de | 679 | static u16 feature_id(void __iomem *start) |
543be3d8 WH |
680 | { |
681 | u64 v = readq(start + DFH); | |
682 | u16 id = FIELD_GET(DFH_ID, v); | |
683 | u8 type = FIELD_GET(DFH_TYPE, v); | |
684 | ||
685 | if (type == DFH_TYPE_FIU) | |
686 | return FEATURE_ID_FIU_HEADER; | |
687 | else if (type == DFH_TYPE_PRIVATE) | |
688 | return id; | |
689 | else if (type == DFH_TYPE_AFU) | |
690 | return FEATURE_ID_AFU; | |
691 | ||
692 | WARN_ON(1); | |
693 | return 0; | |
694 | } | |
695 | ||
/*
 * Discover the irq range of one sub feature and validate it against the
 * device-wide irq table. On success *irq_base/*nr_irqs describe the local
 * irq window (both 0 when the feature has no interrupts).
 */
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
			      resource_size_t ofst, u16 fid,
			      unsigned int *irq_base, unsigned int *nr_irqs)
{
	void __iomem *base = binfo->ioaddr + ofst;
	unsigned int i, ibase, inr = 0;
	int virq;
	u64 v;

	/*
	 * Ideally DFL framework should only read info from DFL header, but
	 * current version DFL only provides mmio resources information for
	 * each feature in DFL Header, no field for interrupt resources.
	 * Interrupt resource information is provided by specific mmio
	 * registers of each private feature which supports interrupt. So in
	 * order to parse and assign irq resources, DFL framework has to look
	 * into specific capability registers of these private features.
	 *
	 * Once future DFL version supports generic interrupt resource
	 * information in common DFL headers, the generic interrupt parsing
	 * code will be added. But in order to be compatible to old version
	 * DFL, the driver may still fall back to these quirks.
	 */
	switch (fid) {
	case PORT_FEATURE_ID_UINT:
		v = readq(base + PORT_UINT_CAP);
		ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
		inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
		break;
	case PORT_FEATURE_ID_ERROR:
		v = readq(base + PORT_ERROR_CAP);
		ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
		inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
		break;
	case FME_FEATURE_ID_GLOBAL_ERR:
		v = readq(base + FME_ERROR_CAP);
		ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
		inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
		break;
	}

	/* feature without interrupt support (or unknown fid) */
	if (!inr) {
		*irq_base = 0;
		*nr_irqs = 0;
		return 0;
	}

	dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
		fid, ibase, inr);

	/* the claimed window must fit inside the device's irq table */
	if (ibase + inr > binfo->nr_irqs) {
		dev_err(binfo->dev,
			"Invalid interrupt number in feature 0x%x\n", fid);
		return -EINVAL;
	}

	for (i = 0; i < inr; i++) {
		virq = binfo->irq_table[ibase + i];
		/*
		 * NOTE(review): bound check uses "> NR_IRQS"; whether the
		 * inclusive bound is intended should be confirmed.
		 */
		if (virq < 0 || virq > NR_IRQS) {
			dev_err(binfo->dev,
				"Invalid irq table entry for feature 0x%x\n",
				fid);
			return -EINVAL;
		}
	}

	*irq_base = ibase;
	*nr_irqs = inr;

	return 0;
}
767 | ||
543be3d8 WH |
/*
 * when create sub feature instances, for private features, it doesn't need
 * to provide resource size and feature id as they could be read from DFH
 * register. For afu sub feature, its register region only contains user
 * defined registers, so never trust any information from it, just use the
 * resource size information provided by its parent FIU.
 */
static int
create_feature_instance(struct build_feature_devs_info *binfo,
			resource_size_t ofst, resource_size_t size, u16 fid)
{
	unsigned int irq_base, nr_irqs;
	struct dfl_feature_info *finfo;
	int ret;

	/* read feature size and id if inputs are invalid */
	size = size ? size : feature_size(binfo->ioaddr + ofst);
	fid = fid ? fid : feature_id(binfo->ioaddr + ofst);

	/* the feature region must lie within the current FIU region */
	if (binfo->len - ofst < size)
		return -EINVAL;

	ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
	if (ret)
		return ret;

	/* freed either in build_info_commit_dev() or build_info_free() */
	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
	if (!finfo)
		return -ENOMEM;

	finfo->fid = fid;
	finfo->mmio_res.start = binfo->start + ofst;
	finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
	finfo->mmio_res.flags = IORESOURCE_MEM;
	finfo->irq_base = irq_base;
	finfo->nr_irqs = nr_irqs;

	list_add_tail(&finfo->node, &binfo->sub_features);
	binfo->feature_num++;

	return 0;
}
810 | ||
811 | static int parse_feature_port_afu(struct build_feature_devs_info *binfo, | |
543be3d8 WH |
812 | resource_size_t ofst) |
813 | { | |
814 | u64 v = readq(binfo->ioaddr + PORT_HDR_CAP); | |
815 | u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10; | |
816 | ||
817 | WARN_ON(!size); | |
818 | ||
89eb35e8 | 819 | return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU); |
543be3d8 WH |
820 | } |
821 | ||
89eb35e8 XY |
822 | #define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev) |
823 | ||
543be3d8 | 824 | static int parse_feature_afu(struct build_feature_devs_info *binfo, |
543be3d8 WH |
825 | resource_size_t ofst) |
826 | { | |
89eb35e8 | 827 | if (!is_feature_dev_detected(binfo)) { |
543be3d8 WH |
828 | dev_err(binfo->dev, "this AFU does not belong to any FIU.\n"); |
829 | return -EINVAL; | |
830 | } | |
831 | ||
832 | switch (feature_dev_id_type(binfo->feature_dev)) { | |
833 | case PORT_ID: | |
89eb35e8 | 834 | return parse_feature_port_afu(binfo, ofst); |
543be3d8 WH |
835 | default: |
836 | dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n", | |
837 | binfo->feature_dev->name); | |
838 | } | |
839 | ||
840 | return 0; | |
841 | } | |
842 | ||
89eb35e8 XY |
/*
 * Reserve and map the register region of the FIU about to be enumerated,
 * recording start/len/ioaddr in @binfo. Undone by build_info_complete().
 */
static int build_info_prepare(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	struct device *dev = binfo->dev;
	void __iomem *ioaddr;

	if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
		dev_err(dev, "request region fail, start:%pa, len:%pa\n",
			&start, &len);
		return -EBUSY;
	}

	ioaddr = devm_ioremap(dev, start, len);
	if (!ioaddr) {
		dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
			&start, &len);
		return -ENOMEM;
	}

	binfo->start = start;
	binfo->len = len;
	binfo->ioaddr = ioaddr;

	return 0;
}
868 | ||
/* Undo build_info_prepare(): unmap and release the current FIU region. */
static void build_info_complete(struct build_feature_devs_info *binfo)
{
	devm_iounmap(binfo->dev, binfo->ioaddr);
	devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
}
874 | ||
543be3d8 | 875 | static int parse_feature_fiu(struct build_feature_devs_info *binfo, |
543be3d8 WH |
876 | resource_size_t ofst) |
877 | { | |
543be3d8 | 878 | int ret = 0; |
8a5de2de XY |
879 | u32 offset; |
880 | u16 id; | |
881 | u64 v; | |
543be3d8 | 882 | |
89eb35e8 XY |
883 | if (is_feature_dev_detected(binfo)) { |
884 | build_info_complete(binfo); | |
885 | ||
886 | ret = build_info_commit_dev(binfo); | |
887 | if (ret) | |
888 | return ret; | |
889 | ||
890 | ret = build_info_prepare(binfo, binfo->start + ofst, | |
891 | binfo->len - ofst); | |
892 | if (ret) | |
893 | return ret; | |
894 | } | |
895 | ||
896 | v = readq(binfo->ioaddr + DFH); | |
543be3d8 WH |
897 | id = FIELD_GET(DFH_ID, v); |
898 | ||
899 | /* create platform device for dfl feature dev */ | |
89eb35e8 | 900 | ret = build_info_create_dev(binfo, dfh_id_to_type(id)); |
543be3d8 WH |
901 | if (ret) |
902 | return ret; | |
903 | ||
89eb35e8 | 904 | ret = create_feature_instance(binfo, 0, 0, 0); |
543be3d8 WH |
905 | if (ret) |
906 | return ret; | |
907 | /* | |
908 | * find and parse FIU's child AFU via its NEXT_AFU register. | |
909 | * please note that only Port has valid NEXT_AFU pointer per spec. | |
910 | */ | |
89eb35e8 | 911 | v = readq(binfo->ioaddr + NEXT_AFU); |
543be3d8 WH |
912 | |
913 | offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v); | |
914 | if (offset) | |
89eb35e8 | 915 | return parse_feature_afu(binfo, offset); |
543be3d8 WH |
916 | |
917 | dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id); | |
918 | ||
919 | return ret; | |
920 | } | |
921 | ||
/*
 * parse_feature_private - parse a private feature header in the DFL
 * @binfo: enumeration build info
 * @ofst:  offset of the private feature header from the window start
 *
 * Private features extend the feature device (FIU/AFU) currently being
 * built; one must already have been detected, otherwise the list is
 * malformed.
 *
 * Return: 0 on success, -EINVAL if no parent feature device exists.
 */
static int parse_feature_private(struct build_feature_devs_info *binfo,
				 resource_size_t ofst)
{
	if (!is_feature_dev_detected(binfo)) {
		dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
			feature_id(binfo->ioaddr + ofst));
		return -EINVAL;
	}

	return create_feature_instance(binfo, ofst, 0, 0);
}
933 | ||
934 | /** | |
935 | * parse_feature - parse a feature on given device feature list | |
936 | * | |
937 | * @binfo: build feature devices information. | |
89eb35e8 | 938 | * @ofst: offset to current FIU header |
543be3d8 WH |
939 | */ |
940 | static int parse_feature(struct build_feature_devs_info *binfo, | |
89eb35e8 | 941 | resource_size_t ofst) |
543be3d8 WH |
942 | { |
943 | u64 v; | |
944 | u32 type; | |
945 | ||
89eb35e8 | 946 | v = readq(binfo->ioaddr + ofst + DFH); |
543be3d8 WH |
947 | type = FIELD_GET(DFH_TYPE, v); |
948 | ||
949 | switch (type) { | |
950 | case DFH_TYPE_AFU: | |
89eb35e8 | 951 | return parse_feature_afu(binfo, ofst); |
543be3d8 | 952 | case DFH_TYPE_PRIVATE: |
89eb35e8 | 953 | return parse_feature_private(binfo, ofst); |
543be3d8 | 954 | case DFH_TYPE_FIU: |
89eb35e8 | 955 | return parse_feature_fiu(binfo, ofst); |
543be3d8 WH |
956 | default: |
957 | dev_info(binfo->dev, | |
958 | "Feature Type %x is not supported.\n", type); | |
959 | } | |
960 | ||
961 | return 0; | |
962 | } | |
963 | ||
/*
 * parse_feature_list - walk one Device Feature List and build feature devices
 * @binfo: enumeration build info
 * @start: physical start address of the DFL
 * @len:   length of the DFL in bytes
 *
 * Prepares the MMIO window, then follows each DFH's next-header offset until
 * the EOL bit is set or the offset is zero. Note binfo->start may advance as
 * parse_feature_fiu() re-prepares the window, so reads below always go
 * through "start - binfo->start" relative addressing.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	resource_size_t end = start + len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	ret = build_info_prepare(binfo, start, len);
	if (ret)
		return ret;

	/* walk through the device feature list via DFH's next DFH pointer. */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		ret = parse_feature(binfo, start - binfo->start);
		if (ret)
			return ret;

		v = readq(binfo->ioaddr + start - binfo->start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	build_info_complete(binfo);

	if (is_feature_dev_detected(binfo))
		ret = build_info_commit_dev(binfo);

	return ret;
}
1003 | ||
/*
 * dfl_fpga_enum_info_alloc - allocate an enumeration info structure
 * @dev: parent device; a reference is taken and held until
 *       dfl_fpga_enum_info_free() releases it.
 *
 * Return: pointer to the new dfl_fpga_enum_info, or NULL on allocation
 * failure (in which case the device reference is dropped again).
 */
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
{
	struct dfl_fpga_enum_info *info;

	get_device(dev);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		/* balance the get_device() above on failure */
		put_device(dev);
		return NULL;
	}

	info->dev = dev;
	INIT_LIST_HEAD(&info->dfls);

	return info;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
1022 | ||
/*
 * dfl_fpga_enum_info_free - free an enumeration info structure
 * @info: info to free; NULL is a no-op.
 *
 * Frees all added DFL entries, the optional irq table and the info itself,
 * then drops the device reference taken in dfl_fpga_enum_info_alloc().
 */
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
{
	struct dfl_fpga_enum_dfl *tmp, *dfl;
	struct device *dev;

	if (!info)
		return;

	dev = info->dev;

	/* remove all device feature lists in the list. */
	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
		list_del(&dfl->node);
		devm_kfree(dev, dfl);
	}

	/* remove irq table */
	if (info->irq_table)
		devm_kfree(dev, info->irq_table);

	devm_kfree(dev, info);
	/* balances get_device() in dfl_fpga_enum_info_alloc() */
	put_device(dev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
1047 | ||
/**
 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @start: mmio resource address of the device feature list.
 * @len: mmio resource length of the device feature list.
 *
 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
 * function to add information of each DFL to common data structure for next
 * step enumeration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
			       resource_size_t start, resource_size_t len)
{
	struct dfl_fpga_enum_dfl *dfl;

	dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
	if (!dfl)
		return -ENOMEM;

	dfl->start = start;
	dfl->len = len;

	/* preserve caller order; DFLs are enumerated in insertion order */
	list_add_tail(&dfl->node, &info->dfls);

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
1078 | ||
8d021039 XY |
/**
 * dfl_fpga_enum_info_add_irq - add irq table to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
 *	       this device.
 *
 * One FPGA device may have several interrupts. This function adds irq
 * information of the DFL fpga device to enum info for next step enumeration.
 * This function should be called before dfl_fpga_feature_devs_enumerate().
 * As we only support one irq domain for all DFLs in the same enum info, adding
 * irq table a second time for the same enum info will return error.
 *
 * If we need to enumerate DFLs which belong to different irq domains, we
 * should fill more enum info and enumerate them one by one.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
			       unsigned int nr_irqs, int *irq_table)
{
	if (!nr_irqs || !irq_table)
		return -EINVAL;

	/* only a single irq domain is supported per enum info */
	if (info->irq_table)
		return -EEXIST;

	/* copy the table so the caller's buffer need not outlive us */
	info->irq_table = devm_kmemdup(info->dev, irq_table,
				       sizeof(int) * nr_irqs, GFP_KERNEL);
	if (!info->irq_table)
		return -ENOMEM;

	info->nr_irqs = nr_irqs;

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
1117 | ||
543be3d8 WH |
1118 | static int remove_feature_dev(struct device *dev, void *data) |
1119 | { | |
1120 | struct platform_device *pdev = to_platform_device(dev); | |
1121 | enum dfl_id_type type = feature_dev_id_type(pdev); | |
1122 | int id = pdev->id; | |
1123 | ||
1124 | platform_device_unregister(pdev); | |
1125 | ||
1126 | dfl_id_free(type, id); | |
1127 | ||
1128 | return 0; | |
1129 | } | |
1130 | ||
/* remove_feature_devs - unregister every feature device under the region */
static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
1135 | ||
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
 * @info: information for enumeration.
 *
 * This function creates a container device (base FPGA region), enumerates
 * feature devices based on the enumeration info and creates platform devices
 * under the container device.
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	/* region itself is devm-managed; only registration needs manual undo */
	cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
	if (!cdev->region) {
		ret = -ENOMEM;
		goto free_cdev_exit;
	}

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	ret = fpga_region_register(cdev->region);
	if (ret)
		goto free_cdev_exit;

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	binfo->nr_irqs = info->nr_irqs;
	if (info->nr_irqs)
		binfo->irq_table = info->irq_table;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl->start, dfl->len);
		if (ret) {
			/* tear down devices created by earlier DFLs too */
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
1213 | ||
1214 | /** | |
1215 | * dfl_fpga_feature_devs_remove - remove all feature devices | |
1216 | * @cdev: fpga container device. | |
1217 | * | |
1218 | * Remove the container device and all feature devices under given container | |
1219 | * devices. | |
1220 | */ | |
1221 | void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev) | |
1222 | { | |
1223 | struct dfl_feature_platform_data *pdata, *ptmp; | |
1224 | ||
543be3d8 | 1225 | mutex_lock(&cdev->lock); |
69bb18dd | 1226 | if (cdev->fme_dev) |
543be3d8 | 1227 | put_device(cdev->fme_dev); |
543be3d8 WH |
1228 | |
1229 | list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) { | |
1230 | struct platform_device *port_dev = pdata->dev; | |
1231 | ||
69bb18dd WH |
1232 | /* remove released ports */ |
1233 | if (!device_is_registered(&port_dev->dev)) { | |
1234 | dfl_id_free(feature_dev_id_type(port_dev), | |
1235 | port_dev->id); | |
1236 | platform_device_put(port_dev); | |
1237 | } | |
1238 | ||
543be3d8 WH |
1239 | list_del(&pdata->node); |
1240 | put_device(&port_dev->dev); | |
1241 | } | |
1242 | mutex_unlock(&cdev->lock); | |
1243 | ||
69bb18dd WH |
1244 | remove_feature_devs(cdev); |
1245 | ||
543be3d8 WH |
1246 | fpga_region_unregister(cdev->region); |
1247 | devm_kfree(cdev->parent, cdev); | |
1248 | } | |
1249 | EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove); | |
1250 | ||
5d56e117 WH |
/**
 * __dfl_fpga_cdev_find_port - find a port under given container device
 *
 * @cdev: container device
 * @data: data passed to match function
 * @match: match function used to find specific port from the port device list
 *
 * Find a port device under container device. This function needs to be
 * invoked with lock held.
 *
 * Return: pointer to port's platform device if successful, NULL otherwise.
 *
 * NOTE: you will need to drop the device reference with put_device() after use.
 */
struct platform_device *
__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
			  int (*match)(struct platform_device *, void *))
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_dev;

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		port_dev = pdata->dev;

		/* take a reference only when the match succeeds */
		if (match(port_dev, data) && get_device(&port_dev->dev))
			return port_dev;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
1282 | ||
543be3d8 WH |
/*
 * dfl_fpga_init - module init: set up id allocators and char device region
 *
 * Return: 0 on success, negative error code from dfl_chardev_init()
 * otherwise (id allocators are torn down again on failure).
 */
static int __init dfl_fpga_init(void)
{
	int ret;

	dfl_ids_init();

	ret = dfl_chardev_init();
	if (ret)
		dfl_ids_destroy();

	return ret;
}
1295 | ||
69bb18dd WH |
/**
 * dfl_fpga_cdev_release_port - release a port platform device
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to release a port platform device. This is a
 * mandatory step before turn a port from PF into VF for SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_pdev;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (!port_pdev)
		goto unlock_exit;

	/* already released ports cannot be released again */
	if (!device_is_registered(&port_pdev->dev)) {
		ret = -EBUSY;
		goto put_dev_exit;
	}

	pdata = dev_get_platdata(&port_pdev->dev);

	/* claim exclusive use so no userspace fd can be open on the port */
	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, true);
	mutex_unlock(&pdata->lock);
	if (ret)
		goto put_dev_exit;

	platform_device_del(port_pdev);
	cdev->released_port_num++;
put_dev_exit:
	put_device(&port_pdev->dev);
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
1341 | ||
/**
 * dfl_fpga_cdev_assign_port - assign a port platform device back
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to assign a port platform device back. This is
 * a mandatory step after disable SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_pdev;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (!port_pdev)
		goto unlock_exit;

	/* only previously released (unregistered) ports can be assigned back */
	if (device_is_registered(&port_pdev->dev)) {
		ret = -EBUSY;
		goto put_dev_exit;
	}

	ret = platform_device_add(port_pdev);
	if (ret)
		goto put_dev_exit;

	pdata = dev_get_platdata(&port_pdev->dev);

	/* drop the exclusive-use claim taken at release time */
	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);
	mutex_unlock(&pdata->lock);

	cdev->released_port_num--;
put_dev_exit:
	put_device(&port_pdev->dev);
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
1388 | ||
bdd4f307 WH |
/*
 * config_port_access_mode - switch a port between PF and VF access mode
 * @fme_dev: the FME feature device owning the port offset registers
 * @port_id: index of the port to reconfigure
 * @is_vf:   true to route the port to VF access, false for PF
 *
 * Performs a read-modify-write of the access-control field in the FME
 * header's per-port offset register.
 */
static void config_port_access_mode(struct device *fme_dev, int port_id,
				    bool is_vf)
{
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);

	v = readq(base + FME_HDR_PORT_OFST(port_id));

	v &= ~FME_PORT_OFST_ACC_CTRL;
	v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
			is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);

	writeq(v, base + FME_HDR_PORT_OFST(port_id));
}

/* convenience wrappers naming the two access modes */
#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1408 | ||
/**
 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
 *
 * @cdev: parent container device.
 *
 * This function is needed in sriov configuration routine. It could be used to
 * configure the all released ports from VF access mode to PF.
 */
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata;

	mutex_lock(&cdev->lock);
	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		/* only released (unregistered) ports are reconfigured */
		if (device_is_registered(&pdata->dev->dev))
			continue;

		config_port_pf_mode(cdev->fme_dev, pdata->id);
	}
	mutex_unlock(&cdev->lock);
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1431 | ||
/**
 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
 *
 * @cdev: parent container device.
 * @num_vfs: VF device number.
 *
 * This function is needed in sriov configuration routine. It could be used to
 * configure the released ports from PF access mode to VF.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
	struct dfl_feature_platform_data *pdata;
	int ret = 0;

	mutex_lock(&cdev->lock);
	/*
	 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
	 * device, so if released port number doesn't match VF device number,
	 * then reject the request with -EINVAL error code.
	 */
	if (cdev->released_port_num != num_vfs) {
		ret = -EINVAL;
		goto done;
	}

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		/* only released (unregistered) ports are reconfigured */
		if (device_is_registered(&pdata->dev->dev))
			continue;

		config_port_vf_mode(cdev->fme_dev, pdata->id);
	}
done:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
1470 | ||
322b598b XY |
/*
 * dfl_irq_handler - interrupt handler forwarding the event to userspace
 * @irq: interrupt number (unused here)
 * @arg: eventfd context bound to this irq by do_set_irq_trigger()
 */
static irqreturn_t dfl_irq_handler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
1478 | ||
/*
 * do_set_irq_trigger - (re)bind one feature irq to an eventfd
 * @feature: dfl sub feature owning the irq context array
 * @idx:     local irq index within the feature (caller validates bounds)
 * @fd:      eventfd file descriptor; negative means unbind only
 *
 * Any existing binding is torn down first (free_irq before eventfd_ctx_put
 * so the handler can't fire on a stale context). With fd < 0 the function
 * stops there; otherwise it allocates an irq name, resolves the eventfd and
 * requests the irq with the eventfd context as handler data.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
			      int fd)
{
	struct platform_device *pdev = feature->dev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	irq = feature->irq_ctx[idx].irq;

	if (feature->irq_ctx[idx].trigger) {
		free_irq(irq, feature->irq_ctx[idx].trigger);
		kfree(feature->irq_ctx[idx].name);
		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
		feature->irq_ctx[idx].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	feature->irq_ctx[idx].name =
		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
			  dev_name(&pdev->dev), feature->id);
	if (!feature->irq_ctx[idx].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto free_name;
	}

	ret = request_irq(irq, dfl_irq_handler, 0,
			  feature->irq_ctx[idx].name, trigger);
	if (!ret) {
		feature->irq_ctx[idx].trigger = trigger;
		return ret;
	}

	/* request_irq failed: unwind the eventfd ref and the name */
	eventfd_ctx_put(trigger);
free_name:
	kfree(feature->irq_ctx[idx].name);

	return ret;
}
1523 | ||
/**
 * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
 *
 * @feature: dfl sub feature.
 * @start: start of irq index in this dfl sub feature.
 * @count: number of irqs.
 * @fds: eventfds to bind with irqs. unbind related irq if fds[n] is negative.
 *	 unbind "count" specified number of irqs if fds ptr is NULL.
 *
 * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if
 * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is
 * NULL. On a mid-loop failure, irqs already bound by this call are unbound
 * again so the operation is all-or-nothing.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
			      unsigned int count, int32_t *fds)
{
	unsigned int i;
	int ret = 0;

	/* overflow */
	if (unlikely(start + count < start))
		return -EINVAL;

	/* exceeds nr_irqs */
	if (start + count > feature->nr_irqs)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		int fd = fds ? fds[i] : -1;

		ret = do_set_irq_trigger(feature, start + i, fd);
		if (ret) {
			/* roll back bindings made earlier in this call */
			while (i--)
				do_set_irq_trigger(feature, start + i, -1);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
1567 | ||
/**
 * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
 * @pdev: the feature device which has the sub feature
 * @feature: the dfl sub feature
 * @arg: ioctl argument (userspace pointer to a __u32 receiving the count)
 *
 * Return: 0 on success, negative error code otherwise.
 */
long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
				    struct dfl_feature *feature,
				    unsigned long arg)
{
	return put_user(feature->nr_irqs, (__u32 __user *)arg);
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
1583 | ||
/**
 * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
 * @pdev: the feature device which has the sub feature
 * @feature: the dfl sub feature
 * @arg: ioctl argument (userspace dfl_fpga_irq_set header followed by
 *	 hdr.count eventfd descriptors)
 *
 * Return: 0 on success, negative error code otherwise.
 */
long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
			       struct dfl_feature *feature,
			       unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fpga_irq_set hdr;
	s32 *fds;
	long ret;

	if (!feature->nr_irqs)
		return -ENOENT;

	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
		return -EFAULT;

	/* reject empty, out-of-range and u32-wrapping start/count pairs */
	if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
	    (hdr.start + hdr.count < hdr.start))
		return -EINVAL;

	/* fd array follows the fixed header in the user buffer */
	fds = memdup_user((void __user *)(arg + sizeof(hdr)),
			  hdr.count * sizeof(s32));
	if (IS_ERR(fds))
		return PTR_ERR(fds);

	mutex_lock(&pdata->lock);
	ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
	mutex_unlock(&pdata->lock);

	kfree(fds);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
1624 | ||
543be3d8 WH |
/* dfl_fpga_exit - module exit: undo dfl_fpga_init() in reverse order */
static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
}
1630 | ||
1631 | module_init(dfl_fpga_init); | |
1632 | module_exit(dfl_fpga_exit); | |
1633 | ||
1634 | MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support"); | |
1635 | MODULE_AUTHOR("Intel Corporation"); | |
1636 | MODULE_LICENSE("GPL v2"); |