/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
#include <linux/idr.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)

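/* Build an ib_event for the given port and deliver it to every IB client
 * registered against this device.
 */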
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
{
	struct qedr_dev *qdev;

	qdev = get_qedr_dev(dev);
	dev_hold(qdev->ndev);

	/* The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	return qdev->ndev;
}

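/* Fill in the port attributes the IB core caches for a RoCE port. */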
static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

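/* iWARP ports expose a single GID/P_Key entry and do not use MADs. */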
static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = 1;
	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};

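/* Verbs overrides and iw_cm callbacks used when the device runs in
 * iWARP mode.
 */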
static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.query_gid = qedr_iw_query_gid,
};

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = qedr_iw_connect;
	dev->ibdev.iwcm->accept = qedr_iw_accept;
	dev->ibdev.iwcm->reject = qedr_iw_reject;
	dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
	dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
	dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;

	memcpy(dev->ibdev.iwcm->ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iwcm->ifname));

	return 0;
}

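/* RoCE mode only needs the RoCE-specific port_immutable callback. */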
static const struct ib_device_ops qedr_roce_dev_ops = {
	.get_port_immutable = qedr_roce_port_immutable,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}

static const struct ib_device_ops qedr_dev_ops = {
	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.get_netdev = qedr_get_netdev,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap = qedr_mmap,
	.modify_port = qedr_modify_port,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_pkey = qedr_query_pkey,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,
	.resize_cq = qedr_resize_cq,
};

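/* Populate the ib_device fields, set the verbs ops and register the
 * device with the RDMA core under the "qedr%d" name.
 */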
static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(CREATE_SRQ) |
				     QEDR_UVERBS(DESTROY_SRQ) |
				     QEDR_UVERBS(QUERY_SRQ) |
				     QEDR_UVERBS(MODIFY_SRQ) |
				     QEDR_UVERBS(POST_SRQ_RECV) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
	return ib_register_device(&dev->ibdev, "qedr%d");
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

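/* Allocate the SGID table, the per-CNQ status blocks and the CNQ PBL
 * chains; for iWARP also set up the QP idr and the iwarp workqueue.
 */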
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	if (IS_IWARP(dev)) {
		spin_lock_init(&dev->qpidr.idr_lock);
		idr_init(&dev->qpidr.idr);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl, NULL);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

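/* CNQ interrupt handler: acknowledge the status block, then walk the
 * completion notification queue and invoke the completion handler of
 * every CQ referenced by a consumed CNQ element.
 */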
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			vector = dev->int_info.msix[i * dev->num_hwfns].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

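/* Request one MSI-X vector per CNQ; on failure release whatever was
 * already requested.
 */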
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

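/* Query the device capabilities from the qed core, validate them against
 * the kernel configuration and cache them in dev->attr.
 */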
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~dev->attr.page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_fmr = qed_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

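/* Translate a firmware affiliated asynchronous event into an ib_event and
 * deliver it to the event handler of the CQ, QP or SRQ it refers to.
 */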
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}
	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		spin_lock_irqsave(&dev->srqidr.idr_lock, flags);
		srq = idr_find(&dev->srqidr.idr, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		spin_unlock_irqrestore(&dev->srqidr.idr_lock, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
	default:
		break;
	}
}

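/* Start the RDMA engine in the qed core: pass the CNQ PBL addresses and
 * event callbacks, add a user context and record the returned doorbell
 * (DPI) parameters.
 */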
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

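/* Probe entry called by the qede Ethernet driver for each RDMA-capable
 * port: allocate the ib_device, query the qed RDMA ops, allocate
 * resources, initialize the hardware, request interrupts and register
 * with the RDMA core.
 */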
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;
	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);
	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

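/* The port MAC changed: rebuild the default (index 0) SGID from the new
 * MAC, update the LL2 MAC filter and report a GID change event.
 */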
static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the stack of
 * the event.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);