// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;
	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

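/* Single-threaded workqueue used to run RDMA event handlers; events are
 * queued by qede_rdma_add_event() and processed by qede_rdma_handle_event().
 */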
static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
	edev->rdma_info.rdma_wq = NULL;
}

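/* Register the qede device with the RDMA subsystem: create the event
 * workqueue, add the device to the global list and, if a qedr driver is
 * already registered, create its qedr instance. Skipped during error
 * recovery, when qedr was never fully stopped.
 */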
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}

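/* Tear down the RDMA state of the device. On a regular removal the
 * workqueue is destroyed and the device is dropped from the global list;
 * during error recovery the qedr instance is removed but the device stays
 * on the list and exp_recovery is set so it can be re-added later.
 */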
void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

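/* Called by the qedr driver when it loads. Registers the driver callbacks
 * and immediately creates a qedr instance for every RDMA-capable qede
 * device already probed, opening the ones whose net_device is up.
 */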
int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

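/* Called by the qedr driver when it unloads. Removes the qedr instance of
 * every device that has not already been torn down by error recovery, then
 * clears the driver pointer.
 */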
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

static void qede_rdma_change_mtu(struct qede_dev *edev)
{
	if (qede_rdma_supported(edev)) {
		if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
			qedr_drv->notify(edev->rdma_info.qedr_dev,
					 QEDE_CHANGE_MTU);
	}
}

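/* Reuse an idle event node from the per-device list if one exists;
 * otherwise allocate a new one (GFP_ATOMIC) and add it to the list so it
 * can be reused later.
 */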
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	bool found = false;

	list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
			    list) {
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

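/* Workqueue handler: dispatch a queued event to the matching notification
 * helper for the qedr instance.
 */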
static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	case QEDE_CHANGE_MTU:
		qede_rdma_change_mtu(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

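/* Queue an event for deferred handling on the RDMA workqueue. The kref
 * taken here prevents qede_rdma_destroy_wq() from cleaning up the event
 * list while the node is being allocated and scheduled.
 */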
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}

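/* Wrappers called from the rest of the qede driver to post device state
 * changes (link up/down, MAC address and MTU changes) to the RDMA
 * workqueue.
 */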
void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}

void qede_rdma_event_change_mtu(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
}