Commit | Line | Data |
---|---|---|
915cc7ac MI |
1 | // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB |
2 | /* Copyright (c) 2015 - 2021 Intel Corporation */ | |
3 | #include "main.h" | |
4 | ||
5 | /** | |
6 | * irdma_arp_table - manage arp table | |
7 | * @rf: RDMA PCI function | |
8 | * @ip_addr: ip address for device | |
9 | * @ipv4: IPv4 flag | |
10 | * @mac_addr: mac address ptr | |
11 | * @action: add, resolve or delete | |
12 | */ | |
13 | int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4, | |
fd92213e | 14 | const u8 *mac_addr, u32 action) |
915cc7ac MI |
15 | { |
16 | unsigned long flags; | |
17 | int arp_index; | |
18 | u32 ip[4] = {}; | |
19 | ||
20 | if (ipv4) | |
21 | ip[0] = *ip_addr; | |
22 | else | |
23 | memcpy(ip, ip_addr, sizeof(ip)); | |
24 | ||
25 | spin_lock_irqsave(&rf->arp_lock, flags); | |
26 | for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) { | |
27 | if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) | |
28 | break; | |
29 | } | |
30 | ||
31 | switch (action) { | |
32 | case IRDMA_ARP_ADD: | |
33 | if (arp_index != rf->arp_table_size) { | |
34 | arp_index = -1; | |
35 | break; | |
36 | } | |
37 | ||
38 | arp_index = 0; | |
39 | if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size, | |
40 | (u32 *)&arp_index, &rf->next_arp_index)) { | |
41 | arp_index = -1; | |
42 | break; | |
43 | } | |
44 | ||
45 | memcpy(rf->arp_table[arp_index].ip_addr, ip, | |
46 | sizeof(rf->arp_table[arp_index].ip_addr)); | |
47 | ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr); | |
48 | break; | |
49 | case IRDMA_ARP_RESOLVE: | |
50 | if (arp_index == rf->arp_table_size) | |
51 | arp_index = -1; | |
52 | break; | |
53 | case IRDMA_ARP_DELETE: | |
54 | if (arp_index == rf->arp_table_size) { | |
55 | arp_index = -1; | |
56 | break; | |
57 | } | |
58 | ||
59 | memset(rf->arp_table[arp_index].ip_addr, 0, | |
60 | sizeof(rf->arp_table[arp_index].ip_addr)); | |
61 | eth_zero_addr(rf->arp_table[arp_index].mac_addr); | |
62 | irdma_free_rsrc(rf, rf->allocated_arps, arp_index); | |
63 | break; | |
64 | default: | |
65 | arp_index = -1; | |
66 | break; | |
67 | } | |
68 | ||
69 | spin_unlock_irqrestore(&rf->arp_lock, flags); | |
70 | return arp_index; | |
71 | } | |
72 | ||
73 | /** | |
74 | * irdma_add_arp - add a new arp entry if needed | |
75 | * @rf: RDMA function | |
76 | * @ip: IP address | |
77 | * @ipv4: IPv4 flag | |
78 | * @mac: MAC address | |
79 | */ | |
fd92213e | 80 | int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac) |
915cc7ac MI |
81 | { |
82 | int arpidx; | |
83 | ||
84 | arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE); | |
85 | if (arpidx >= 0) { | |
86 | if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac)) | |
87 | return arpidx; | |
88 | ||
89 | irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip, | |
90 | ipv4, IRDMA_ARP_DELETE); | |
91 | } | |
92 | ||
93 | irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD); | |
94 | ||
95 | return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE); | |
96 | } | |
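/*
 * Illustrative sketch, not part of the driver: how a caller typically uses
 * the two ARP helpers above. irdma_add_arp() inserts the entry (and pushes
 * it to the HW ARP cache) if needed and returns the table index, or -1 on
 * failure; that index is what later lands in fields such as
 * irdma_ah_info.dst_arpindex. The example_ name below is hypothetical.
 */
static int example_resolve_dest_mac(struct irdma_pci_f *rf, u32 *dest_ip,
                                    bool ipv4, const u8 *dest_mac)
{
        int arp_idx;

        arp_idx = irdma_add_arp(rf, dest_ip, ipv4, dest_mac);
        if (arp_idx < 0)
                return -ENOENT;

        return arp_idx; /* usable as a destination ARP index */
}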
97 | ||
98 | /** | |
99 | * wr32 - write 32 bits to hw register | |
100 | * @hw: hardware information including registers | |
101 | * @reg: register offset | |
102 | * @val: value to write to register | |
103 | */ | |
104 | inline void wr32(struct irdma_hw *hw, u32 reg, u32 val) | |
105 | { | |
106 | writel(val, hw->hw_addr + reg); | |
107 | } | |
108 | ||
109 | /** | |
110 | * rd32 - read a 32 bit hw register | |
111 | * @hw: hardware information including registers | |
112 | * @reg: register offset | |
113 | * | |
114 | * Return value of register content | |
115 | */ | |
116 | inline u32 rd32(struct irdma_hw *hw, u32 reg) | |
117 | { | |
118 | return readl(hw->hw_addr + reg); | |
119 | } | |
120 | ||
121 | /** | |
122 | * rd64 - read a 64 bit hw register | |
123 | * @hw: hardware information including registers | |
124 | * @reg: register offset | |
125 | * | |
126 | * Return value of register content | |
127 | */ | |
128 | inline u64 rd64(struct irdma_hw *hw, u32 reg) | |
129 | { | |
130 | return readq(hw->hw_addr + reg); | |
131 | } | |
132 | ||
133 | static void irdma_gid_change_event(struct ib_device *ibdev) | |
134 | { | |
135 | struct ib_event ib_event; | |
136 | ||
137 | ib_event.event = IB_EVENT_GID_CHANGE; | |
138 | ib_event.device = ibdev; | |
139 | ib_event.element.port_num = 1; | |
140 | ib_dispatch_event(&ib_event); | |
141 | } | |
142 | ||
143 | /** | |
144 | * irdma_inetaddr_event - system notifier for ipv4 addr events | |
145 | * @notifier: not used | |
146 | * @event: event for notifier | |
147 | * @ptr: interface address | |
148 | */ | |
149 | int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event, | |
150 | void *ptr) | |
151 | { | |
152 | struct in_ifaddr *ifa = ptr; | |
6702bc14 | 153 | struct net_device *real_dev, *netdev = ifa->ifa_dev->dev; |
915cc7ac MI |
154 | struct irdma_device *iwdev; |
155 | struct ib_device *ibdev; | |
156 | u32 local_ipaddr; | |
157 | ||
6702bc14 MI |
158 | real_dev = rdma_vlan_dev_real_dev(netdev); |
159 | if (!real_dev) | |
160 | real_dev = netdev; | |
161 | ||
162 | ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); | |
915cc7ac MI |
163 | if (!ibdev) |
164 | return NOTIFY_DONE; | |
165 | ||
166 | iwdev = to_iwdev(ibdev); | |
167 | local_ipaddr = ntohl(ifa->ifa_address); | |
168 | ibdev_dbg(&iwdev->ibdev, | |
6702bc14 MI |
169 | "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev, |
170 | event, &local_ipaddr, real_dev->dev_addr); | |
915cc7ac MI |
171 | switch (event) { |
172 | case NETDEV_DOWN: | |
6702bc14 | 173 | irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr, |
915cc7ac | 174 | &local_ipaddr, true, IRDMA_ARP_DELETE); |
6702bc14 | 175 | irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false); |
915cc7ac MI |
176 | irdma_gid_change_event(&iwdev->ibdev); |
177 | break; | |
178 | case NETDEV_UP: | |
179 | case NETDEV_CHANGEADDR: | |
6702bc14 MI |
180 | irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr); |
181 | irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true); | |
915cc7ac MI |
182 | irdma_gid_change_event(&iwdev->ibdev); |
183 | break; | |
184 | default: | |
185 | break; | |
186 | } | |
187 | ||
188 | ib_device_put(ibdev); | |
189 | ||
190 | return NOTIFY_DONE; | |
191 | } | |
192 | ||
193 | /** | |
194 | * irdma_inet6addr_event - system notifier for ipv6 addr events | |
195 | * @notifier: not used | |
196 | * @event: event for notifier | |
197 | * @ptr: interface address | |
198 | */ | |
199 | int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, | |
200 | void *ptr) | |
201 | { | |
202 | struct inet6_ifaddr *ifa = ptr; | |
6702bc14 | 203 | struct net_device *real_dev, *netdev = ifa->idev->dev; |
915cc7ac MI |
204 | struct irdma_device *iwdev; |
205 | struct ib_device *ibdev; | |
206 | u32 local_ipaddr6[4]; | |
207 | ||
6702bc14 MI |
208 | real_dev = rdma_vlan_dev_real_dev(netdev); |
209 | if (!real_dev) | |
210 | real_dev = netdev; | |
211 | ||
212 | ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); | |
915cc7ac MI |
213 | if (!ibdev) |
214 | return NOTIFY_DONE; | |
215 | ||
216 | iwdev = to_iwdev(ibdev); | |
217 | irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32); | |
218 | ibdev_dbg(&iwdev->ibdev, | |
6702bc14 MI |
219 | "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev, |
220 | event, local_ipaddr6, real_dev->dev_addr); | |
915cc7ac MI |
221 | switch (event) { |
222 | case NETDEV_DOWN: | |
6702bc14 | 223 | irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr, |
915cc7ac | 224 | local_ipaddr6, false, IRDMA_ARP_DELETE); |
6702bc14 | 225 | irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false); |
915cc7ac MI |
226 | irdma_gid_change_event(&iwdev->ibdev); |
227 | break; | |
228 | case NETDEV_UP: | |
229 | case NETDEV_CHANGEADDR: | |
230 | irdma_add_arp(iwdev->rf, local_ipaddr6, false, | |
6702bc14 MI |
231 | real_dev->dev_addr); |
232 | irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true); | |
915cc7ac MI |
233 | irdma_gid_change_event(&iwdev->ibdev); |
234 | break; | |
235 | default: | |
236 | break; | |
237 | } | |
238 | ||
239 | ib_device_put(ibdev); | |
240 | ||
241 | return NOTIFY_DONE; | |
242 | } | |
243 | ||
244 | /** | |
245 | * irdma_net_event - system notifier for net events | |
246 | * @notifier: not used | |
247 | * @event: event for notifier | |
248 | * @ptr: neighbor | |
249 | */ | |
250 | int irdma_net_event(struct notifier_block *notifier, unsigned long event, | |
251 | void *ptr) | |
252 | { | |
253 | struct neighbour *neigh = ptr; | |
6702bc14 | 254 | struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev; |
915cc7ac MI |
255 | struct irdma_device *iwdev; |
256 | struct ib_device *ibdev; | |
257 | __be32 *p; | |
258 | u32 local_ipaddr[4] = {}; | |
259 | bool ipv4 = true; | |
260 | ||
915cc7ac MI |
261 | switch (event) { |
262 | case NETEVENT_NEIGH_UPDATE: | |
1c9043ae MI |
263 | real_dev = rdma_vlan_dev_real_dev(netdev); |
264 | if (!real_dev) | |
265 | real_dev = netdev; | |
266 | ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); | |
267 | if (!ibdev) | |
268 | return NOTIFY_DONE; | |
269 | ||
270 | iwdev = to_iwdev(ibdev); | |
915cc7ac MI |
271 | p = (__be32 *)neigh->primary_key; |
272 | if (neigh->tbl->family == AF_INET6) { | |
273 | ipv4 = false; | |
274 | irdma_copy_ip_ntohl(local_ipaddr, p); | |
275 | } else { | |
276 | local_ipaddr[0] = ntohl(*p); | |
277 | } | |
278 | ||
279 | ibdev_dbg(&iwdev->ibdev, | |
280 | "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n", | |
281 | iwdev->netdev, neigh->nud_state, local_ipaddr, | |
282 | neigh->ha); | |
283 | ||
284 | if (neigh->nud_state & NUD_VALID) | |
285 | irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha); | |
286 | ||
287 | else | |
288 | irdma_manage_arp_cache(iwdev->rf, neigh->ha, | |
289 | local_ipaddr, ipv4, | |
290 | IRDMA_ARP_DELETE); | |
1c9043ae | 291 | ib_device_put(ibdev); |
915cc7ac MI |
292 | break; |
293 | default: | |
294 | break; | |
295 | } | |
296 | ||
915cc7ac MI |
297 | return NOTIFY_DONE; |
298 | } | |
299 | ||
300 | /** | |
301 | * irdma_netdevice_event - system notifier for netdev events | |
302 | * @notifier: not used | |
303 | * @event: event for notifier | |
304 | * @ptr: netdev | |
305 | */ | |
306 | int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event, | |
307 | void *ptr) | |
308 | { | |
309 | struct irdma_device *iwdev; | |
310 | struct ib_device *ibdev; | |
311 | struct net_device *netdev = netdev_notifier_info_to_dev(ptr); | |
312 | ||
313 | ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA); | |
314 | if (!ibdev) | |
315 | return NOTIFY_DONE; | |
316 | ||
317 | iwdev = to_iwdev(ibdev); | |
318 | iwdev->iw_status = 1; | |
319 | switch (event) { | |
320 | case NETDEV_DOWN: | |
321 | iwdev->iw_status = 0; | |
322 | fallthrough; | |
323 | case NETDEV_UP: | |
324 | irdma_port_ibevent(iwdev); | |
325 | break; | |
326 | default: | |
327 | break; | |
328 | } | |
329 | ib_device_put(ibdev); | |
330 | ||
331 | return NOTIFY_DONE; | |
332 | } | |
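/*
 * Illustrative sketch, not part of the driver: how the four notifier
 * callbacks above get wired into the kernel. The driver registers its own
 * notifier_block instances elsewhere; the example_ blocks and helper below
 * are hypothetical.
 */
static struct notifier_block example_inetaddr_nb = {
        .notifier_call = irdma_inetaddr_event
};

static struct notifier_block example_inet6addr_nb = {
        .notifier_call = irdma_inet6addr_event
};

static struct notifier_block example_net_nb = {
        .notifier_call = irdma_net_event
};

static struct notifier_block example_netdev_nb = {
        .notifier_call = irdma_netdevice_event
};

static void example_register_notifiers(void)
{
        register_inetaddr_notifier(&example_inetaddr_nb);
        register_inet6addr_notifier(&example_inet6addr_nb);
        register_netevent_notifier(&example_net_nb);
        register_netdevice_notifier(&example_netdev_nb);
}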
333 | ||
334 | /** | |
335 | * irdma_add_ipv6_addr - add ipv6 address to the hw arp table | |
336 | * @iwdev: irdma device | |
337 | */ | |
338 | static void irdma_add_ipv6_addr(struct irdma_device *iwdev) | |
339 | { | |
340 | struct net_device *ip_dev; | |
341 | struct inet6_dev *idev; | |
342 | struct inet6_ifaddr *ifp, *tmp; | |
343 | u32 local_ipaddr6[4]; | |
344 | ||
345 | rcu_read_lock(); | |
346 | for_each_netdev_rcu (&init_net, ip_dev) { | |
347 | if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF && | |
348 | rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) || | |
349 | ip_dev == iwdev->netdev) && | |
350 | (READ_ONCE(ip_dev->flags) & IFF_UP)) { | |
351 | idev = __in6_dev_get(ip_dev); | |
352 | if (!idev) { | |
353 | ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n"); | |
354 | break; | |
355 | } | |
356 | list_for_each_entry_safe (ifp, tmp, &idev->addr_list, | |
357 | if_list) { | |
358 | ibdev_dbg(&iwdev->ibdev, | |
359 | "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n", | |
360 | &ifp->addr, | |
361 | rdma_vlan_dev_vlan_id(ip_dev), | |
362 | ip_dev->dev_addr); | |
363 | ||
364 | irdma_copy_ip_ntohl(local_ipaddr6, | |
365 | ifp->addr.in6_u.u6_addr32); | |
366 | irdma_manage_arp_cache(iwdev->rf, | |
367 | ip_dev->dev_addr, | |
368 | local_ipaddr6, false, | |
369 | IRDMA_ARP_ADD); | |
370 | } | |
371 | } | |
372 | } | |
373 | rcu_read_unlock(); | |
374 | } | |
375 | ||
376 | /** | |
377 | * irdma_add_ipv4_addr - add ipv4 address to the hw arp table | |
378 | * @iwdev: irdma device | |
379 | */ | |
380 | static void irdma_add_ipv4_addr(struct irdma_device *iwdev) | |
381 | { | |
382 | struct net_device *dev; | |
383 | struct in_device *idev; | |
384 | u32 ip_addr; | |
385 | ||
386 | rcu_read_lock(); | |
387 | for_each_netdev_rcu (&init_net, dev) { | |
388 | if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF && | |
389 | rdma_vlan_dev_real_dev(dev) == iwdev->netdev) || | |
390 | dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) { | |
391 | const struct in_ifaddr *ifa; | |
392 | ||
393 | idev = __in_dev_get_rcu(dev); | |
394 | if (!idev) | |
395 | continue; | |
396 | ||
397 | in_dev_for_each_ifa_rcu(ifa, idev) { | |
398 | ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n", | |
399 | &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev), | |
400 | dev->dev_addr); | |
401 | ||
402 | ip_addr = ntohl(ifa->ifa_address); | |
403 | irdma_manage_arp_cache(iwdev->rf, dev->dev_addr, | |
404 | &ip_addr, true, | |
405 | IRDMA_ARP_ADD); | |
406 | } | |
407 | } | |
408 | } | |
409 | rcu_read_unlock(); | |
410 | } | |
411 | ||
412 | /** | |
413 | * irdma_add_ip - add ip addresses | |
414 | * @iwdev: irdma device | |
415 | * | |
416 | * Add ipv4/ipv6 addresses to the arp cache | |
417 | */ | |
418 | void irdma_add_ip(struct irdma_device *iwdev) | |
419 | { | |
420 | irdma_add_ipv4_addr(iwdev); | |
421 | irdma_add_ipv6_addr(iwdev); | |
422 | } | |
423 | ||
424 | /** | |
425 | * irdma_alloc_and_get_cqp_request - get a cqp request struct | |
426 | * @cqp: device cqp ptr | |
427 | * @wait: cqp to be used in wait mode | |
428 | */ | |
429 | struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp, | |
430 | bool wait) | |
431 | { | |
432 | struct irdma_cqp_request *cqp_request = NULL; | |
433 | unsigned long flags; | |
434 | ||
435 | spin_lock_irqsave(&cqp->req_lock, flags); | |
436 | if (!list_empty(&cqp->cqp_avail_reqs)) { | |
6246f1cc SS |
437 | cqp_request = list_first_entry(&cqp->cqp_avail_reqs, |
438 | struct irdma_cqp_request, list); | |
915cc7ac MI |
439 | list_del_init(&cqp_request->list); |
440 | } | |
441 | spin_unlock_irqrestore(&cqp->req_lock, flags); | |
442 | if (!cqp_request) { | |
443 | cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC); | |
444 | if (cqp_request) { | |
445 | cqp_request->dynamic = true; | |
446 | if (wait) | |
447 | init_waitqueue_head(&cqp_request->waitq); | |
448 | } | |
449 | } | |
450 | if (!cqp_request) { | |
451 | ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory"); | |
452 | return NULL; | |
453 | } | |
454 | ||
455 | cqp_request->waiting = wait; | |
456 | refcount_set(&cqp_request->refcnt, 1); | |
457 | memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info)); | |
458 | ||
459 | return cqp_request; | |
460 | } | |
461 | ||
462 | /** | |
463 | * irdma_get_cqp_request - increase refcount for cqp_request | |
464 | * @cqp_request: pointer to cqp_request instance | |
465 | */ | |
466 | static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request) | |
467 | { | |
468 | refcount_inc(&cqp_request->refcnt); | |
469 | } | |
470 | ||
471 | /** | |
472 | * irdma_free_cqp_request - free cqp request | |
473 | * @cqp: cqp ptr | |
474 | * @cqp_request: to be put back in cqp list | |
475 | */ | |
476 | void irdma_free_cqp_request(struct irdma_cqp *cqp, | |
477 | struct irdma_cqp_request *cqp_request) | |
478 | { | |
479 | unsigned long flags; | |
480 | ||
481 | if (cqp_request->dynamic) { | |
482 | kfree(cqp_request); | |
483 | } else { | |
f0842bb3 | 484 | WRITE_ONCE(cqp_request->request_done, false); |
915cc7ac MI |
485 | cqp_request->callback_fcn = NULL; |
486 | cqp_request->waiting = false; | |
487 | ||
488 | spin_lock_irqsave(&cqp->req_lock, flags); | |
489 | list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); | |
490 | spin_unlock_irqrestore(&cqp->req_lock, flags); | |
491 | } | |
492 | wake_up(&cqp->remove_wq); | |
493 | } | |
494 | ||
495 | /** | |
496 | * irdma_put_cqp_request - dec ref count and free if 0 | |
497 | * @cqp: cqp ptr | |
498 | * @cqp_request: to be put back in cqp list | |
499 | */ | |
500 | void irdma_put_cqp_request(struct irdma_cqp *cqp, | |
501 | struct irdma_cqp_request *cqp_request) | |
502 | { | |
503 | if (refcount_dec_and_test(&cqp_request->refcnt)) | |
504 | irdma_free_cqp_request(cqp, cqp_request); | |
505 | } | |
506 | ||
507 | /** | |
508 | * irdma_free_pending_cqp_request - free pending cqp request objs | |
509 | * @cqp: cqp ptr | |
510 | * @cqp_request: to be put back in cqp list | |
511 | */ | |
512 | static void | |
513 | irdma_free_pending_cqp_request(struct irdma_cqp *cqp, | |
514 | struct irdma_cqp_request *cqp_request) | |
515 | { | |
516 | if (cqp_request->waiting) { | |
517 | cqp_request->compl_info.error = true; | |
f0842bb3 | 518 | WRITE_ONCE(cqp_request->request_done, true); |
915cc7ac MI |
519 | wake_up(&cqp_request->waitq); |
520 | } | |
521 | wait_event_timeout(cqp->remove_wq, | |
522 | refcount_read(&cqp_request->refcnt) == 1, 1000); | |
523 | irdma_put_cqp_request(cqp, cqp_request); | |
524 | } | |
525 | ||
526 | /** | |
527 | * irdma_cleanup_pending_cqp_op - clean-up cqp with no | |
528 | * completions | |
529 | * @rf: RDMA PCI function | |
530 | */ | |
531 | void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf) | |
532 | { | |
533 | struct irdma_sc_dev *dev = &rf->sc_dev; | |
534 | struct irdma_cqp *cqp = &rf->cqp; | |
535 | struct irdma_cqp_request *cqp_request = NULL; | |
536 | struct cqp_cmds_info *pcmdinfo = NULL; | |
537 | u32 i, pending_work, wqe_idx; | |
538 | ||
539 | pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring); | |
540 | wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring); | |
541 | for (i = 0; i < pending_work; i++) { | |
542 | cqp_request = (struct irdma_cqp_request *)(unsigned long) | |
543 | cqp->scratch_array[wqe_idx]; | |
544 | if (cqp_request) | |
545 | irdma_free_pending_cqp_request(cqp, cqp_request); | |
546 | wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring); | |
547 | } | |
548 | ||
549 | while (!list_empty(&dev->cqp_cmd_head)) { | |
550 | pcmdinfo = irdma_remove_cqp_head(dev); | |
551 | cqp_request = | |
552 | container_of(pcmdinfo, struct irdma_cqp_request, info); | |
553 | if (cqp_request) | |
554 | irdma_free_pending_cqp_request(cqp, cqp_request); | |
555 | } | |
556 | } | |
557 | ||
558 | /** | |
559 | * irdma_wait_event - wait for completion | |
560 | * @rf: RDMA PCI function | |
561 | * @cqp_request: cqp request to wait | |
562 | */ | |
2c4b14ea SS |
563 | static int irdma_wait_event(struct irdma_pci_f *rf, |
564 | struct irdma_cqp_request *cqp_request) | |
915cc7ac MI |
565 | { |
566 | struct irdma_cqp_timeout cqp_timeout = {}; | |
567 | bool cqp_error = false; | |
2c4b14ea | 568 | int err_code = 0; |
915cc7ac | 569 | |
f2c30378 | 570 | cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops); |
915cc7ac MI |
571 | do { |
572 | irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); | |
573 | if (wait_event_timeout(cqp_request->waitq, | |
f0842bb3 | 574 | READ_ONCE(cqp_request->request_done), |
915cc7ac MI |
575 | msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS))) |
576 | break; | |
577 | ||
578 | irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev); | |
579 | ||
580 | if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD) | |
581 | continue; | |
582 | ||
583 | if (!rf->reset) { | |
584 | rf->reset = true; | |
585 | rf->gen_ops.request_reset(rf); | |
586 | } | |
2c4b14ea | 587 | return -ETIMEDOUT; |
915cc7ac MI |
588 | } while (1); |
589 | ||
590 | cqp_error = cqp_request->compl_info.error; | |
591 | if (cqp_error) { | |
2c4b14ea | 592 | err_code = -EIO; |
6b227bd3 SD |
593 | if (cqp_request->compl_info.maj_err_code == 0xFFFF) { |
594 | if (cqp_request->compl_info.min_err_code == 0x8002) | |
595 | err_code = -EBUSY; | |
596 | else if (cqp_request->compl_info.min_err_code == 0x8029) { | |
597 | if (!rf->reset) { | |
598 | rf->reset = true; | |
599 | rf->gen_ops.request_reset(rf); | |
600 | } | |
915cc7ac MI |
601 | } |
602 | } | |
603 | } | |
604 | ||
605 | return err_code; | |
606 | } | |
607 | ||
608 | static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = { | |
609 | [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd", | |
610 | [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd", | |
611 | [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd", | |
612 | [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd", | |
613 | [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd", | |
614 | [IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd", | |
615 | [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd", | |
616 | [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd", | |
617 | [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd", | |
618 | [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd", | |
619 | [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd", | |
620 | [IRDMA_OP_QP_CREATE] = "Create QP Cmd", | |
621 | [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd", | |
622 | [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd", | |
623 | [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd", | |
624 | [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd", | |
625 | [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd", | |
626 | [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd", | |
627 | [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd", | |
628 | [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd", | |
629 | [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd", | |
630 | [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd", | |
631 | [IRDMA_OP_SUSPEND] = "Suspend QP Cmd", | |
632 | [IRDMA_OP_RESUME] = "Resume QP Cmd", | |
633 | [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd", | |
634 | [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd", | |
635 | [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd", | |
636 | [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd", | |
637 | [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd", | |
638 | [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd", | |
639 | [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd", | |
640 | [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd", | |
641 | [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd", | |
642 | [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd", | |
643 | [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd", | |
644 | [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd", | |
645 | [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd", | |
646 | [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd", | |
647 | [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd", | |
648 | [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd", | |
649 | [IRDMA_OP_GEN_AE] = "Generate AE Cmd", | |
650 | [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd", | |
205be5dc | 651 | [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd", |
915cc7ac MI |
652 | [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd", |
653 | [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd", | |
654 | [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd", | |
655 | }; | |
656 | ||
657 | static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = { | |
36a26d12 | 658 | {0xffff, 0x8002, "Invalid State"}, |
915cc7ac MI |
659 | {0xffff, 0x8006, "Flush No Wqe Pending"}, |
660 | {0xffff, 0x8007, "Modify QP Bad Close"}, | |
661 | {0xffff, 0x8009, "LLP Closed"}, | |
662 | {0xffff, 0x800a, "Reset Not Sent"} | |
663 | }; | |
664 | ||
665 | /** | |
666 | * irdma_cqp_crit_err - check if CQP error is critical | |
667 | * @dev: pointer to dev structure | |
668 | * @cqp_cmd: code for last CQP operation | |
669 | * @maj_err_code: major error code | |
670 | * @min_err_code: minor error code | |
671 | */ | |
672 | bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd, | |
673 | u16 maj_err_code, u16 min_err_code) | |
674 | { | |
675 | int i; | |
676 | ||
677 | for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) { | |
678 | if (maj_err_code == irdma_noncrit_err_list[i].maj && | |
679 | min_err_code == irdma_noncrit_err_list[i].min) { | |
680 | ibdev_dbg(to_ibdev(dev), | |
681 | "CQP: [%s Error][%s] maj=0x%x min=0x%x\n", | |
682 | irdma_noncrit_err_list[i].desc, | |
683 | irdma_cqp_cmd_names[cqp_cmd], maj_err_code, | |
684 | min_err_code); | |
685 | return false; | |
686 | } | |
687 | } | |
688 | return true; | |
689 | } | |
690 | ||
691 | /** | |
692 | * irdma_handle_cqp_op - process cqp command | |
693 | * @rf: RDMA PCI function | |
694 | * @cqp_request: cqp request to process | |
695 | */ | |
2c4b14ea SS |
696 | int irdma_handle_cqp_op(struct irdma_pci_f *rf, |
697 | struct irdma_cqp_request *cqp_request) | |
915cc7ac MI |
698 | { |
699 | struct irdma_sc_dev *dev = &rf->sc_dev; | |
700 | struct cqp_cmds_info *info = &cqp_request->info; | |
2c4b14ea | 701 | int status; |
915cc7ac MI |
702 | bool put_cqp_request = true; |
703 | ||
704 | if (rf->reset) | |
2c4b14ea | 705 | return -EBUSY; |
915cc7ac MI |
706 | |
707 | irdma_get_cqp_request(cqp_request); | |
708 | status = irdma_process_cqp_cmd(dev, info); | |
709 | if (status) | |
710 | goto err; | |
711 | ||
712 | if (cqp_request->waiting) { | |
713 | put_cqp_request = false; | |
714 | status = irdma_wait_event(rf, cqp_request); | |
715 | if (status) | |
716 | goto err; | |
717 | } | |
718 | ||
719 | return 0; | |
720 | ||
721 | err: | |
722 | if (irdma_cqp_crit_err(dev, info->cqp_cmd, | |
723 | cqp_request->compl_info.maj_err_code, | |
724 | cqp_request->compl_info.min_err_code)) | |
725 | ibdev_err(&rf->iwdev->ibdev, | |
726 | "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n", | |
727 | irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting, | |
728 | cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code, | |
729 | cqp_request->compl_info.min_err_code); | |
730 | ||
731 | if (put_cqp_request) | |
732 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
733 | ||
734 | return status; | |
735 | } | |
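/*
 * Illustrative sketch, not part of the driver: the request life cycle the
 * helpers above imply, which every CQP wrapper later in this file follows.
 * irdma_alloc_and_get_cqp_request() returns a request with refcnt == 1, and
 * irdma_handle_cqp_op() manages its own internal reference, so the issuing
 * caller always pairs the alloc with exactly one irdma_put_cqp_request(),
 * on both the success and error paths. The example_ name and the opcode
 * choice below are for illustration only.
 */
static int example_issue_cqp_cmd(struct irdma_pci_f *rf)
{
        struct irdma_cqp_request *cqp_request;
        struct cqp_cmds_info *cqp_info;
        int status;

        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
        if (!cqp_request)
                return -ENOMEM;

        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
        cqp_info->post_sq = 1;
        /* command-specific fields in cqp_info->in.u.* would be filled here */

        status = irdma_handle_cqp_op(rf, cqp_request);
        irdma_put_cqp_request(&rf->cqp, cqp_request);

        return status;
}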
736 | ||
737 | void irdma_qp_add_ref(struct ib_qp *ibqp) | |
738 | { | |
739 | struct irdma_qp *iwqp = (struct irdma_qp *)ibqp; | |
740 | ||
741 | refcount_inc(&iwqp->refcnt); | |
742 | } | |
743 | ||
744 | void irdma_qp_rem_ref(struct ib_qp *ibqp) | |
745 | { | |
746 | struct irdma_qp *iwqp = to_iwqp(ibqp); | |
747 | struct irdma_device *iwdev = iwqp->iwdev; | |
748 | u32 qp_num; | |
749 | unsigned long flags; | |
750 | ||
751 | spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); | |
752 | if (!refcount_dec_and_test(&iwqp->refcnt)) { | |
753 | spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); | |
754 | return; | |
755 | } | |
756 | ||
757 | qp_num = iwqp->ibqp.qp_num; | |
758 | iwdev->rf->qp_table[qp_num] = NULL; | |
759 | spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); | |
760 | complete(&iwqp->free_qp); | |
761 | } | |
762 | ||
763 | struct ib_device *to_ibdev(struct irdma_sc_dev *dev) | |
764 | { | |
765 | return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev; | |
766 | } | |
767 | ||
768 | /** | |
769 | * irdma_get_qp - get qp address | |
770 | * @device: iwarp device | |
771 | * @qpn: qp number | |
772 | */ | |
773 | struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn) | |
774 | { | |
775 | struct irdma_device *iwdev = to_iwdev(device); | |
776 | ||
777 | if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp) | |
778 | return NULL; | |
779 | ||
780 | return &iwdev->rf->qp_table[qpn]->ibqp; | |
781 | } | |
782 | ||
915cc7ac MI |
783 | /** |
784 | * irdma_remove_cqp_head - return head entry and remove | |
785 | * @dev: device | |
786 | */ | |
787 | void *irdma_remove_cqp_head(struct irdma_sc_dev *dev) | |
788 | { | |
789 | struct list_head *entry; | |
790 | struct list_head *list = &dev->cqp_cmd_head; | |
791 | ||
792 | if (list_empty(list)) | |
793 | return NULL; | |
794 | ||
795 | entry = list->next; | |
796 | list_del(entry); | |
797 | ||
798 | return entry; | |
799 | } | |
800 | ||
801 | /** | |
802 | * irdma_cqp_sds_cmd - create cqp command for sd | |
803 | * @dev: hardware control device structure | |
804 | * @sdinfo: information for sd cqp | |
805 | * | |
806 | */ | |
2c4b14ea SS |
807 | int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, |
808 | struct irdma_update_sds_info *sdinfo) | |
915cc7ac MI |
809 | { |
810 | struct irdma_cqp_request *cqp_request; | |
811 | struct cqp_cmds_info *cqp_info; | |
812 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2c4b14ea | 813 | int status; |
915cc7ac MI |
814 | |
815 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); | |
816 | if (!cqp_request) | |
2c4b14ea | 817 | return -ENOMEM; |
915cc7ac MI |
818 | |
819 | cqp_info = &cqp_request->info; | |
820 | memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo, | |
821 | sizeof(cqp_info->in.u.update_pe_sds.info)); | |
822 | cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS; | |
823 | cqp_info->post_sq = 1; | |
824 | cqp_info->in.u.update_pe_sds.dev = dev; | |
825 | cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request; | |
826 | ||
827 | status = irdma_handle_cqp_op(rf, cqp_request); | |
828 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
829 | ||
830 | return status; | |
831 | } | |
832 | ||
833 | /** | |
834 | * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume | |
835 | * @qp: hardware control qp | |
836 | * @op: suspend or resume | |
837 | */ | |
2c4b14ea | 838 | int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op) |
915cc7ac MI |
839 | { |
840 | struct irdma_sc_dev *dev = qp->dev; | |
841 | struct irdma_cqp_request *cqp_request; | |
842 | struct irdma_sc_cqp *cqp = dev->cqp; | |
843 | struct cqp_cmds_info *cqp_info; | |
844 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2c4b14ea | 845 | int status; |
915cc7ac MI |
846 | |
847 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); | |
848 | if (!cqp_request) | |
2c4b14ea | 849 | return -ENOMEM; |
915cc7ac MI |
850 | |
851 | cqp_info = &cqp_request->info; | |
852 | cqp_info->cqp_cmd = op; | |
853 | cqp_info->in.u.suspend_resume.cqp = cqp; | |
854 | cqp_info->in.u.suspend_resume.qp = qp; | |
855 | cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request; | |
856 | ||
857 | status = irdma_handle_cqp_op(rf, cqp_request); | |
858 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
859 | ||
860 | return status; | |
861 | } | |
862 | ||
863 | /** | |
864 | * irdma_term_modify_qp - modify qp for term message | |
865 | * @qp: hardware control qp | |
866 | * @next_state: qp's next state | |
867 | * @term: terminate code | |
868 | * @term_len: length | |
869 | */ | |
870 | void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term, | |
871 | u8 term_len) | |
872 | { | |
873 | struct irdma_qp *iwqp; | |
874 | ||
875 | iwqp = qp->qp_uk.back_qp; | |
876 | irdma_next_iw_state(iwqp, next_state, 0, term, term_len); | |
877 | }; | |
878 | ||
879 | /** | |
880 | * irdma_terminate_done - after terminate is completed | |
881 | * @qp: hardware control qp | |
882 | * @timeout_occurred: indicates if terminate timer expired | |
883 | */ | |
884 | void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred) | |
885 | { | |
886 | struct irdma_qp *iwqp; | |
887 | u8 hte = 0; | |
888 | bool first_time; | |
889 | unsigned long flags; | |
890 | ||
891 | iwqp = qp->qp_uk.back_qp; | |
892 | spin_lock_irqsave(&iwqp->lock, flags); | |
893 | if (iwqp->hte_added) { | |
894 | iwqp->hte_added = 0; | |
895 | hte = 1; | |
896 | } | |
897 | first_time = !(qp->term_flags & IRDMA_TERM_DONE); | |
898 | qp->term_flags |= IRDMA_TERM_DONE; | |
899 | spin_unlock_irqrestore(&iwqp->lock, flags); | |
900 | if (first_time) { | |
901 | if (!timeout_occurred) | |
902 | irdma_terminate_del_timer(qp); | |
903 | ||
904 | irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0); | |
905 | irdma_cm_disconn(iwqp); | |
906 | } | |
907 | } | |
908 | ||
909 | static void irdma_terminate_timeout(struct timer_list *t) | |
910 | { | |
911 | struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer); | |
912 | struct irdma_sc_qp *qp = &iwqp->sc_qp; | |
913 | ||
914 | irdma_terminate_done(qp, 1); | |
915 | irdma_qp_rem_ref(&iwqp->ibqp); | |
916 | } | |
917 | ||
918 | /** | |
919 | * irdma_terminate_start_timer - start terminate timeout | |
920 | * @qp: hardware control qp | |
921 | */ | |
922 | void irdma_terminate_start_timer(struct irdma_sc_qp *qp) | |
923 | { | |
924 | struct irdma_qp *iwqp; | |
925 | ||
926 | iwqp = qp->qp_uk.back_qp; | |
927 | irdma_qp_add_ref(&iwqp->ibqp); | |
928 | timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0); | |
929 | iwqp->terminate_timer.expires = jiffies + HZ; | |
930 | ||
931 | add_timer(&iwqp->terminate_timer); | |
932 | } | |
933 | ||
934 | /** | |
935 | * irdma_terminate_del_timer - delete terminate timeout | |
936 | * @qp: hardware control qp | |
937 | */ | |
938 | void irdma_terminate_del_timer(struct irdma_sc_qp *qp) | |
939 | { | |
940 | struct irdma_qp *iwqp; | |
941 | int ret; | |
942 | ||
943 | iwqp = qp->qp_uk.back_qp; | |
944 | ret = del_timer(&iwqp->terminate_timer); | |
945 | if (ret) | |
946 | irdma_qp_rem_ref(&iwqp->ibqp); | |
947 | } | |
948 | ||
949 | /** | |
950 | * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm | |
951 | * @dev: function device struct | |
952 | * @val_mem: buffer for fpm | |
953 | * @hmc_fn_id: function id for fpm | |
954 | */ | |
2c4b14ea SS |
955 | int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, |
956 | struct irdma_dma_mem *val_mem, u8 hmc_fn_id) | |
915cc7ac MI |
957 | { |
958 | struct irdma_cqp_request *cqp_request; | |
959 | struct cqp_cmds_info *cqp_info; | |
960 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2c4b14ea | 961 | int status; |
915cc7ac MI |
962 | |
963 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); | |
964 | if (!cqp_request) | |
2c4b14ea | 965 | return -ENOMEM; |
915cc7ac MI |
966 | |
967 | cqp_info = &cqp_request->info; | |
968 | cqp_request->param = NULL; | |
969 | cqp_info->in.u.query_fpm_val.cqp = dev->cqp; | |
970 | cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa; | |
971 | cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va; | |
972 | cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id; | |
973 | cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL; | |
974 | cqp_info->post_sq = 1; | |
975 | cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request; | |
976 | ||
977 | status = irdma_handle_cqp_op(rf, cqp_request); | |
978 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
979 | ||
980 | return status; | |
981 | } | |
982 | ||
983 | /** | |
984 | * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw | |
985 | * @dev: hardware control device structure | |
986 | * @val_mem: buffer with fpm values | |
987 | * @hmc_fn_id: function id for fpm | |
988 | */ | |
2c4b14ea SS |
989 | int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, |
990 | struct irdma_dma_mem *val_mem, u8 hmc_fn_id) | |
915cc7ac MI |
991 | { |
992 | struct irdma_cqp_request *cqp_request; | |
993 | struct cqp_cmds_info *cqp_info; | |
994 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2c4b14ea | 995 | int status; |
915cc7ac MI |
996 | |
997 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); | |
998 | if (!cqp_request) | |
2c4b14ea | 999 | return -ENOMEM; |
915cc7ac MI |
1000 | |
1001 | cqp_info = &cqp_request->info; | |
1002 | cqp_request->param = NULL; | |
1003 | cqp_info->in.u.commit_fpm_val.cqp = dev->cqp; | |
1004 | cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa; | |
1005 | cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va; | |
1006 | cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id; | |
1007 | cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL; | |
1008 | cqp_info->post_sq = 1; | |
1009 | cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request; | |
1010 | ||
1011 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1012 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1013 | ||
1014 | return status; | |
1015 | } | |
1016 | ||
1017 | /** | |
1018 | * irdma_cqp_cq_create_cmd - create a cq for the cqp | |
1019 | * @dev: device pointer | |
1020 | * @cq: pointer to created cq | |
1021 | */ | |
2c4b14ea | 1022 | int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) |
915cc7ac MI |
1023 | { |
1024 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
1025 | struct irdma_cqp *iwcqp = &rf->cqp; | |
1026 | struct irdma_cqp_request *cqp_request; | |
1027 | struct cqp_cmds_info *cqp_info; | |
2c4b14ea | 1028 | int status; |
915cc7ac MI |
1029 | |
1030 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); | |
1031 | if (!cqp_request) | |
2c4b14ea | 1032 | return -ENOMEM; |
915cc7ac MI |
1033 | |
1034 | cqp_info = &cqp_request->info; | |
1035 | cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; | |
1036 | cqp_info->post_sq = 1; | |
1037 | cqp_info->in.u.cq_create.cq = cq; | |
1038 | cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; | |
1039 | ||
1040 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1041 | irdma_put_cqp_request(iwcqp, cqp_request); | |
1042 | ||
1043 | return status; | |
1044 | } | |
1045 | ||
1046 | /** | |
1047 | * irdma_cqp_qp_create_cmd - create a qp for the cqp | |
1048 | * @dev: device pointer | |
1049 | * @qp: pointer to created qp | |
1050 | */ | |
2c4b14ea | 1051 | int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) |
915cc7ac MI |
1052 | { |
1053 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
1054 | struct irdma_cqp *iwcqp = &rf->cqp; | |
1055 | struct irdma_cqp_request *cqp_request; | |
1056 | struct cqp_cmds_info *cqp_info; | |
1057 | struct irdma_create_qp_info *qp_info; | |
2c4b14ea | 1058 | int status; |
915cc7ac MI |
1059 | |
1060 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); | |
1061 | if (!cqp_request) | |
2c4b14ea | 1062 | return -ENOMEM; |
915cc7ac MI |
1063 | |
1064 | cqp_info = &cqp_request->info; | |
1065 | qp_info = &cqp_request->info.in.u.qp_create.info; | |
1066 | memset(qp_info, 0, sizeof(*qp_info)); | |
1067 | qp_info->cq_num_valid = true; | |
1068 | qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS; | |
1069 | cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE; | |
1070 | cqp_info->post_sq = 1; | |
1071 | cqp_info->in.u.qp_create.qp = qp; | |
1072 | cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; | |
1073 | ||
1074 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1075 | irdma_put_cqp_request(iwcqp, cqp_request); | |
1076 | ||
1077 | return status; | |
1078 | } | |
1079 | ||
1080 | /** | |
1081 | * irdma_dealloc_push_page - free a push page for qp | |
1082 | * @rf: RDMA PCI function | |
1083 | * @qp: hardware control qp | |
1084 | */ | |
1085 | static void irdma_dealloc_push_page(struct irdma_pci_f *rf, | |
1086 | struct irdma_sc_qp *qp) | |
1087 | { | |
1088 | struct irdma_cqp_request *cqp_request; | |
1089 | struct cqp_cmds_info *cqp_info; | |
2c4b14ea | 1090 | int status; |
915cc7ac MI |
1091 | |
1092 | if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) | |
1093 | return; | |
1094 | ||
1095 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); | |
1096 | if (!cqp_request) | |
1097 | return; | |
1098 | ||
1099 | cqp_info = &cqp_request->info; | |
1100 | cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; | |
1101 | cqp_info->post_sq = 1; | |
1102 | cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx; | |
1103 | cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle; | |
1104 | cqp_info->in.u.manage_push_page.info.free_page = 1; | |
1105 | cqp_info->in.u.manage_push_page.info.push_page_type = 0; | |
1106 | cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp; | |
1107 | cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; | |
1108 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1109 | if (!status) | |
1110 | qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; | |
1111 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1112 | } | |
1113 | ||
1114 | /** | |
1115 | * irdma_free_qp_rsrc - free up memory resources for qp | |
1116 | * @iwqp: qp ptr (user or kernel) | |
1117 | */ | |
1118 | void irdma_free_qp_rsrc(struct irdma_qp *iwqp) | |
1119 | { | |
1120 | struct irdma_device *iwdev = iwqp->iwdev; | |
1121 | struct irdma_pci_f *rf = iwdev->rf; | |
1122 | u32 qp_num = iwqp->ibqp.qp_num; | |
1123 | ||
1124 | irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp); | |
1125 | irdma_dealloc_push_page(rf, &iwqp->sc_qp); | |
1126 | if (iwqp->sc_qp.vsi) { | |
1127 | irdma_qp_rem_qos(&iwqp->sc_qp); | |
1128 | iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi, | |
1129 | iwqp->sc_qp.user_pri); | |
1130 | } | |
1131 | ||
1132 | if (qp_num > 2) | |
1133 | irdma_free_rsrc(rf, rf->allocated_qps, qp_num); | |
1134 | dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size, | |
1135 | iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa); | |
1136 | iwqp->q2_ctx_mem.va = NULL; | |
1137 | dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size, | |
1138 | iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa); | |
1139 | iwqp->kqp.dma_mem.va = NULL; | |
1140 | kfree(iwqp->kqp.sq_wrid_mem); | |
915cc7ac | 1141 | kfree(iwqp->kqp.rq_wrid_mem); |
915cc7ac MI |
1142 | } |
1143 | ||
1144 | /** | |
1145 | * irdma_cq_wq_destroy - send cq destroy cqp | |
1146 | * @rf: RDMA PCI function | |
1147 | * @cq: hardware control cq | |
1148 | */ | |
1149 | void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) | |
1150 | { | |
1151 | struct irdma_cqp_request *cqp_request; | |
1152 | struct cqp_cmds_info *cqp_info; | |
1153 | ||
1154 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); | |
1155 | if (!cqp_request) | |
1156 | return; | |
1157 | ||
1158 | cqp_info = &cqp_request->info; | |
1159 | cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY; | |
1160 | cqp_info->post_sq = 1; | |
1161 | cqp_info->in.u.cq_destroy.cq = cq; | |
1162 | cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request; | |
1163 | ||
1164 | irdma_handle_cqp_op(rf, cqp_request); | |
1165 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1166 | } | |
1167 | ||
1168 | /** | |
1169 | * irdma_hw_modify_qp_callback - handle state for modifyQPs that don't wait | |
1170 | * @cqp_request: modify QP completion | |
1171 | */ | |
1172 | static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request) | |
1173 | { | |
1174 | struct cqp_cmds_info *cqp_info; | |
1175 | struct irdma_qp *iwqp; | |
1176 | ||
1177 | cqp_info = &cqp_request->info; | |
1178 | iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp; | |
1179 | atomic_dec(&iwqp->hw_mod_qp_pend); | |
1180 | wake_up(&iwqp->mod_qp_waitq); | |
1181 | } | |
1182 | ||
1183 | /** | |
1184 | * irdma_hw_modify_qp - setup cqp for modify qp | |
1185 | * @iwdev: RDMA device | |
1186 | * @iwqp: qp ptr (user or kernel) | |
1187 | * @info: info for modify qp | |
1188 | * @wait: flag to wait or not for modify qp completion | |
1189 | */ | |
2c4b14ea SS |
1190 | int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, |
1191 | struct irdma_modify_qp_info *info, bool wait) | |
915cc7ac | 1192 | { |
2c4b14ea | 1193 | int status; |
915cc7ac MI |
1194 | struct irdma_pci_f *rf = iwdev->rf; |
1195 | struct irdma_cqp_request *cqp_request; | |
1196 | struct cqp_cmds_info *cqp_info; | |
1197 | struct irdma_modify_qp_info *m_info; | |
1198 | ||
1199 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); | |
1200 | if (!cqp_request) | |
2c4b14ea | 1201 | return -ENOMEM; |
915cc7ac MI |
1202 | |
1203 | if (!wait) { | |
1204 | cqp_request->callback_fcn = irdma_hw_modify_qp_callback; | |
1205 | atomic_inc(&iwqp->hw_mod_qp_pend); | |
1206 | } | |
1207 | cqp_info = &cqp_request->info; | |
1208 | m_info = &cqp_info->in.u.qp_modify.info; | |
1209 | memcpy(m_info, info, sizeof(*m_info)); | |
1210 | cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY; | |
1211 | cqp_info->post_sq = 1; | |
1212 | cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; | |
1213 | cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; | |
1214 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1215 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1216 | if (status) { | |
1217 | if (rdma_protocol_roce(&iwdev->ibdev, 1)) | |
1218 | return status; | |
1219 | ||
1220 | switch (m_info->next_iwarp_state) { | |
1221 | struct irdma_gen_ae_info ae_info; | |
1222 | ||
1223 | case IRDMA_QP_STATE_RTS: | |
1224 | case IRDMA_QP_STATE_IDLE: | |
1225 | case IRDMA_QP_STATE_TERMINATE: | |
1226 | case IRDMA_QP_STATE_CLOSING: | |
1227 | if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE) | |
1228 | irdma_send_reset(iwqp->cm_node); | |
1229 | else | |
1230 | iwqp->sc_qp.term_flags = IRDMA_TERM_DONE; | |
1231 | if (!wait) { | |
1232 | ae_info.ae_code = IRDMA_AE_BAD_CLOSE; | |
1233 | ae_info.ae_src = 0; | |
1234 | irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false); | |
1235 | } else { | |
1236 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, | |
1237 | wait); | |
1238 | if (!cqp_request) | |
2c4b14ea | 1239 | return -ENOMEM; |
915cc7ac MI |
1240 | |
1241 | cqp_info = &cqp_request->info; | |
1242 | m_info = &cqp_info->in.u.qp_modify.info; | |
1243 | memcpy(m_info, info, sizeof(*m_info)); | |
1244 | cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY; | |
1245 | cqp_info->post_sq = 1; | |
1246 | cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; | |
1247 | cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; | |
1248 | m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR; | |
1249 | m_info->reset_tcp_conn = true; | |
1250 | irdma_handle_cqp_op(rf, cqp_request); | |
1251 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1252 | } | |
1253 | break; | |
1254 | case IRDMA_QP_STATE_ERROR: | |
1255 | default: | |
1256 | break; | |
1257 | } | |
1258 | } | |
1259 | ||
1260 | return status; | |
1261 | } | |
1262 | ||
1263 | /** | |
1264 | * irdma_cqp_cq_destroy_cmd - destroy the cqp cq | |
1265 | * @dev: device pointer | |
1266 | * @cq: pointer to cq | |
1267 | */ | |
1268 | void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) | |
1269 | { | |
1270 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
1271 | ||
1272 | irdma_cq_wq_destroy(rf, cq); | |
1273 | } | |
1274 | ||
1275 | /** | |
1276 | * irdma_cqp_qp_destroy_cmd - destroy a qp via cqp | |
1277 | * @dev: device pointer | |
1278 | * @qp: pointer to qp | |
1279 | */ | |
2c4b14ea | 1280 | int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) |
915cc7ac MI |
1281 | { |
1282 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
1283 | struct irdma_cqp *iwcqp = &rf->cqp; | |
1284 | struct irdma_cqp_request *cqp_request; | |
1285 | struct cqp_cmds_info *cqp_info; | |
2c4b14ea | 1286 | int status; |
915cc7ac MI |
1287 | |
1288 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); | |
1289 | if (!cqp_request) | |
2c4b14ea | 1290 | return -ENOMEM; |
915cc7ac MI |
1291 | |
1292 | cqp_info = &cqp_request->info; | |
1293 | memset(cqp_info, 0, sizeof(*cqp_info)); | |
1294 | cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY; | |
1295 | cqp_info->post_sq = 1; | |
1296 | cqp_info->in.u.qp_destroy.qp = qp; | |
1297 | cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; | |
1298 | cqp_info->in.u.qp_destroy.remove_hash_idx = true; | |
1299 | ||
1300 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1301 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1302 | ||
1303 | return status; | |
1304 | } | |
1305 | ||
1306 | /** | |
1307 | * irdma_ieq_mpa_crc_ae - generate AE for crc error | |
1308 | * @dev: hardware control device structure | |
1309 | * @qp: hardware control qp | |
1310 | */ | |
1311 | void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) | |
1312 | { | |
1313 | struct irdma_gen_ae_info info = {}; | |
1314 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
1315 | ||
1316 | ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n"); | |
1317 | info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR; | |
1318 | info.ae_src = IRDMA_AE_SOURCE_RQ; | |
1319 | irdma_gen_ae(rf, qp, &info, false); | |
1320 | } | |
1321 | ||
1322 | /** | |
1323 | * irdma_init_hash_desc - initialize hash for crc calculation | |
1324 | * @desc: double pointer to return the allocated hash descriptor | |
1325 | */ | |
2c4b14ea | 1326 | int irdma_init_hash_desc(struct shash_desc **desc) |
915cc7ac MI |
1327 | { |
1328 | struct crypto_shash *tfm; | |
1329 | struct shash_desc *tdesc; | |
1330 | ||
1331 | tfm = crypto_alloc_shash("crc32c", 0, 0); | |
1332 | if (IS_ERR(tfm)) | |
2c4b14ea | 1333 | return -EINVAL; |
915cc7ac MI |
1334 | |
1335 | tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm), | |
1336 | GFP_KERNEL); | |
1337 | if (!tdesc) { | |
1338 | crypto_free_shash(tfm); | |
2c4b14ea | 1339 | return -EINVAL; |
915cc7ac MI |
1340 | } |
1341 | ||
1342 | tdesc->tfm = tfm; | |
1343 | *desc = tdesc; | |
1344 | ||
1345 | return 0; | |
1346 | } | |
1347 | ||
1348 | /** | |
1349 | * irdma_free_hash_desc - free hash desc | |
1350 | * @desc: to be freed | |
1351 | */ | |
1352 | void irdma_free_hash_desc(struct shash_desc *desc) | |
1353 | { | |
1354 | if (desc) { | |
1355 | crypto_free_shash(desc->tfm); | |
1356 | kfree(desc); | |
1357 | } | |
1358 | } | |
1359 | ||
1360 | /** | |
1361 | * irdma_ieq_check_mpacrc - check if mpa crc is OK | |
1362 | * @desc: desc for hash | |
1363 | * @addr: address of buffer for crc | |
1364 | * @len: length of buffer | |
1365 | * @val: value to be compared | |
1366 | */ | |
2c4b14ea SS |
1367 | int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, |
1368 | u32 val) | |
915cc7ac MI |
1369 | { |
1370 | u32 crc = 0; | |
1371 | int ret; | |
2c4b14ea | 1372 | int ret_code = 0; |
915cc7ac MI |
1373 | |
1374 | crypto_shash_init(desc); | |
1375 | ret = crypto_shash_update(desc, addr, len); | |
1376 | if (!ret) | |
1377 | crypto_shash_final(desc, (u8 *)&crc); | |
1378 | if (crc != val) | |
2c4b14ea | 1379 | ret_code = -EINVAL; |
915cc7ac MI |
1380 | |
1381 | return ret_code; | |
1382 | } | |
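/*
 * Illustrative sketch, not part of the driver: composing the three crc32c
 * helpers above to validate the CRC carried in an MPA FPDU trailer. A
 * one-shot allocation is shown for brevity; the example_ name and buffer
 * parameters are hypothetical.
 */
static int example_verify_fpdu_crc(void *fpdu, u32 len, u32 rcvd_crc)
{
        struct shash_desc *desc;
        int ret;

        ret = irdma_init_hash_desc(&desc);
        if (ret)
                return ret;

        ret = irdma_ieq_check_mpacrc(desc, fpdu, len, rcvd_crc);
        irdma_free_hash_desc(desc);

        return ret; /* 0 when the computed CRC matches rcvd_crc, -EINVAL otherwise */
}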
1383 | ||
1384 | /** | |
1385 | * irdma_ieq_get_qp - get qp based on quad in puda buffer | |
1386 | * @dev: hardware control device structure | |
1387 | * @buf: receive puda buffer on exception q | |
1388 | */ | |
1389 | struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev, | |
1390 | struct irdma_puda_buf *buf) | |
1391 | { | |
1392 | struct irdma_qp *iwqp; | |
1393 | struct irdma_cm_node *cm_node; | |
1394 | struct irdma_device *iwdev = buf->vsi->back_vsi; | |
1395 | u32 loc_addr[4] = {}; | |
1396 | u32 rem_addr[4] = {}; | |
1397 | u16 loc_port, rem_port; | |
1398 | struct ipv6hdr *ip6h; | |
1399 | struct iphdr *iph = (struct iphdr *)buf->iph; | |
1400 | struct tcphdr *tcph = (struct tcphdr *)buf->tcph; | |
1401 | ||
1402 | if (iph->version == 4) { | |
1403 | loc_addr[0] = ntohl(iph->daddr); | |
1404 | rem_addr[0] = ntohl(iph->saddr); | |
1405 | } else { | |
1406 | ip6h = (struct ipv6hdr *)buf->iph; | |
1407 | irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32); | |
1408 | irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32); | |
1409 | } | |
1410 | loc_port = ntohs(tcph->dest); | |
1411 | rem_port = ntohs(tcph->source); | |
1412 | cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port, | |
1413 | loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF); | |
1414 | if (!cm_node) | |
1415 | return NULL; | |
1416 | ||
1417 | iwqp = cm_node->iwqp; | |
1418 | irdma_rem_ref_cm_node(cm_node); | |
1419 | ||
1420 | return &iwqp->sc_qp; | |
1421 | } | |
1422 | ||
1423 | /** | |
1424 | * irdma_send_ieq_ack - ACKs for duplicate or OOO partial FPDUs | |
1425 | * @qp: qp ptr | |
1426 | */ | |
1427 | void irdma_send_ieq_ack(struct irdma_sc_qp *qp) | |
1428 | { | |
1429 | struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node; | |
1430 | struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf; | |
1431 | struct tcphdr *tcph = (struct tcphdr *)buf->tcph; | |
1432 | ||
1433 | cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum; | |
1434 | cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); | |
1435 | ||
1436 | irdma_send_ack(cm_node); | |
1437 | } | |
1438 | ||
1439 | /** | |
1440 | * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer | |
1441 | * @qp: qp pointer | |
1442 | * @ah_info: AH info pointer | |
1443 | */ | |
1444 | void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp, | |
1445 | struct irdma_ah_info *ah_info) | |
1446 | { | |
1447 | struct irdma_puda_buf *buf = qp->pfpdu.ah_buf; | |
1448 | struct iphdr *iph; | |
1449 | struct ipv6hdr *ip6h; | |
1450 | ||
1451 | memset(ah_info, 0, sizeof(*ah_info)); | |
1452 | ah_info->do_lpbk = true; | |
1453 | ah_info->vlan_tag = buf->vlan_id; | |
1454 | ah_info->insert_vlan_tag = buf->vlan_valid; | |
1455 | ah_info->ipv4_valid = buf->ipv4; | |
1456 | ah_info->vsi = qp->vsi; | |
1457 | ||
1458 | if (buf->smac_valid) | |
1459 | ether_addr_copy(ah_info->mac_addr, buf->smac); | |
1460 | ||
1461 | if (buf->ipv4) { | |
1462 | ah_info->ipv4_valid = true; | |
1463 | iph = (struct iphdr *)buf->iph; | |
1464 | ah_info->hop_ttl = iph->ttl; | |
1465 | ah_info->tc_tos = iph->tos; | |
1466 | ah_info->dest_ip_addr[0] = ntohl(iph->daddr); | |
1467 | ah_info->src_ip_addr[0] = ntohl(iph->saddr); | |
1468 | } else { | |
1469 | ip6h = (struct ipv6hdr *)buf->iph; | |
1470 | ah_info->hop_ttl = ip6h->hop_limit; | |
1471 | ah_info->tc_tos = ip6h->priority; | |
1472 | irdma_copy_ip_ntohl(ah_info->dest_ip_addr, | |
1473 | ip6h->daddr.in6_u.u6_addr32); | |
1474 | irdma_copy_ip_ntohl(ah_info->src_ip_addr, | |
1475 | ip6h->saddr.in6_u.u6_addr32); | |
1476 | } | |
1477 | ||
1478 | ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev), | |
1479 | ah_info->dest_ip_addr, | |
1480 | ah_info->ipv4_valid, | |
1481 | NULL, IRDMA_ARP_RESOLVE); | |
1482 | } | |
1483 | ||
1484 | /** | |
1485 | * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer | |
1486 | * @buf: puda to update | |
1487 | * @len: length of buffer | |
1488 | * @seqnum: seq number for tcp | |
1489 | */ | |
1490 | static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf, | |
1491 | u16 len, u32 seqnum) | |
1492 | { | |
1493 | struct tcphdr *tcph; | |
1494 | struct iphdr *iph; | |
1495 | u16 iphlen; | |
1496 | u16 pktsize; | |
1497 | u8 *addr = buf->mem.va; | |
1498 | ||
1499 | iphlen = (buf->ipv4) ? 20 : 40; | |
1500 | iph = (struct iphdr *)(addr + buf->maclen); | |
1501 | tcph = (struct tcphdr *)(addr + buf->maclen + iphlen); | |
1502 | pktsize = len + buf->tcphlen + iphlen; | |
1503 | iph->tot_len = htons(pktsize); | |
1504 | tcph->seq = htonl(seqnum); | |
1505 | } | |
1506 | ||
1507 | /** | |
1508 | * irdma_ieq_update_tcpip_info - update tcpip in the buffer | |
1509 | * @buf: puda to update | |
1510 | * @len: length of buffer | |
1511 | * @seqnum: seq number for tcp | |
1512 | */ | |
1513 | void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, | |
1514 | u32 seqnum) | |
1515 | { | |
1516 | struct tcphdr *tcph; | |
1517 | u8 *addr; | |
1518 | ||
1519 | if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) | |
1520 | return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum); | |
1521 | ||
1522 | addr = buf->mem.va; | |
1523 | tcph = (struct tcphdr *)addr; | |
1524 | tcph->seq = htonl(seqnum); | |
1525 | } | |
1526 | ||
1527 | /** | |
1528 | * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda | |
1529 | * buffer | |
1530 | * @info: to get information | |
1531 | * @buf: puda buffer | |
1532 | */ | |
2c4b14ea SS |
1533 | static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, |
1534 | struct irdma_puda_buf *buf) | |
915cc7ac MI |
1535 | { |
1536 | struct iphdr *iph; | |
1537 | struct ipv6hdr *ip6h; | |
1538 | struct tcphdr *tcph; | |
1539 | u16 iphlen; | |
1540 | u16 pkt_len; | |
1541 | u8 *mem = buf->mem.va; | |
1542 | struct ethhdr *ethh = buf->mem.va; | |
1543 | ||
1544 | if (ethh->h_proto == htons(0x8100)) { | |
1545 | info->vlan_valid = true; | |
1546 | buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & | |
1547 | VLAN_VID_MASK; | |
1548 | } | |
1549 | ||
1550 | buf->maclen = (info->vlan_valid) ? 18 : 14; | |
1551 | iphlen = (info->l3proto) ? 40 : 20; | |
1552 | buf->ipv4 = (info->l3proto) ? false : true; | |
1553 | buf->iph = mem + buf->maclen; | |
1554 | iph = (struct iphdr *)buf->iph; | |
1555 | buf->tcph = buf->iph + iphlen; | |
1556 | tcph = (struct tcphdr *)buf->tcph; | |
1557 | ||
1558 | if (buf->ipv4) { | |
1559 | pkt_len = ntohs(iph->tot_len); | |
1560 | } else { | |
1561 | ip6h = (struct ipv6hdr *)buf->iph; | |
1562 | pkt_len = ntohs(ip6h->payload_len) + iphlen; | |
1563 | } | |
1564 | ||
1565 | buf->totallen = pkt_len + buf->maclen; | |
1566 | ||
1567 | if (info->payload_len < buf->totallen) { | |
1568 | ibdev_dbg(to_ibdev(buf->vsi->dev), | |
1569 | "ERR: payload_len = 0x%x totallen expected0x%x\n", | |
1570 | info->payload_len, buf->totallen); | |
2c4b14ea | 1571 | return -EINVAL; |
915cc7ac MI |
1572 | } |
1573 | ||
1574 | buf->tcphlen = tcph->doff << 2; | |
1575 | buf->datalen = pkt_len - iphlen - buf->tcphlen; | |
1576 | buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL; | |
1577 | buf->hdrlen = buf->maclen + iphlen + buf->tcphlen; | |
1578 | buf->seqnum = ntohl(tcph->seq); | |
1579 | ||
1580 | return 0; | |
1581 | } | |
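The gen1 path above derives every length field from fixed header sizes: a 14- or 18-byte MAC header depending on the VLAN tag, a 20- or 40-byte L3 header for IPv4/IPv6, and a TCP header of doff * 4 bytes. A minimal userspace model of that arithmetic, with made-up packet values rather than driver data, might look like this:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the gen1 length bookkeeping in irdma_gen1_puda_get_tcpip_info(). */
struct pkt_lens {
	uint16_t maclen;   /* 14, or 18 with a VLAN tag  */
	uint16_t iphlen;   /* 20 for IPv4, 40 for IPv6   */
	uint16_t tcphlen;  /* tcph->doff << 2            */
	uint16_t hdrlen;   /* total header bytes         */
	uint16_t datalen;  /* TCP payload bytes          */
	uint32_t totallen; /* MAC header plus IP length  */
};

static struct pkt_lens gen1_lens(bool vlan, bool ipv4, uint8_t tcp_doff,
				 uint16_t ip_pkt_len)
{
	struct pkt_lens l;

	l.maclen = vlan ? 18 : 14;
	l.iphlen = ipv4 ? 20 : 40;
	l.tcphlen = tcp_doff << 2;
	l.hdrlen = l.maclen + l.iphlen + l.tcphlen;
	l.datalen = ip_pkt_len - l.iphlen - l.tcphlen;
	l.totallen = ip_pkt_len + l.maclen;
	return l;
}

int main(void)
{
	/* Hypothetical IPv4 packet: no VLAN, 20-byte TCP header, 100-byte IP length. */
	struct pkt_lens l = gen1_lens(false, true, 5, 100);

	printf("hdrlen=%u datalen=%u totallen=%u\n", l.hdrlen, l.datalen, l.totallen);
	return 0;
}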
1582 | ||
1583 | /** | |
1584 | * irdma_puda_get_tcpip_info - get tcpip info from puda buffer | |
1585 | * @info: completion info to get information from | |
1586 | * @buf: puda buffer | |
1587 | */ | |
2c4b14ea SS |
1588 | int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, |
1589 | struct irdma_puda_buf *buf) | |
915cc7ac MI |
1590 | { |
1591 | struct tcphdr *tcph; | |
1592 | u32 pkt_len; | |
1593 | u8 *mem; | |
1594 | ||
1595 | if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) | |
1596 | return irdma_gen1_puda_get_tcpip_info(info, buf); | |
1597 | ||
1598 | mem = buf->mem.va; | |
1599 | buf->vlan_valid = info->vlan_valid; | |
1600 | if (info->vlan_valid) | |
1601 | buf->vlan_id = info->vlan; | |
1602 | ||
1603 | buf->ipv4 = info->ipv4; | |
1604 | if (buf->ipv4) | |
1605 | buf->iph = mem + IRDMA_IPV4_PAD; | |
1606 | else | |
1607 | buf->iph = mem; | |
1608 | ||
1609 | buf->tcph = mem + IRDMA_TCP_OFFSET; | |
1610 | tcph = (struct tcphdr *)buf->tcph; | |
1611 | pkt_len = info->payload_len; | |
1612 | buf->totallen = pkt_len; | |
1613 | buf->tcphlen = tcph->doff << 2; | |
1614 | buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen; | |
1615 | buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL; | |
1616 | buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen; | |
1617 | buf->seqnum = ntohl(tcph->seq); | |
1618 | ||
1619 | if (info->smac_valid) { | |
1620 | ether_addr_copy(buf->smac, info->smac); | |
1621 | buf->smac_valid = true; | |
1622 | } | |
1623 | ||
1624 | return 0; | |
1625 | } | |
1626 | ||
1627 | /** | |
1628 | * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats | |
1629 | * @t: timer_list pointer | |
1630 | */ | |
1631 | static void irdma_hw_stats_timeout(struct timer_list *t) | |
1632 | { | |
1633 | struct irdma_vsi_pestat *pf_devstat = | |
1634 | from_timer(pf_devstat, t, stats_timer); | |
1635 | struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi; | |
1636 | ||
5a711e58 | 1637 | if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) |
915cc7ac | 1638 | irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false); |
5a711e58 KC |
1639 | else |
1640 | irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat); | |
915cc7ac MI |
1641 | |
1642 | mod_timer(&pf_devstat->stats_timer, | |
1643 | jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); | |
1644 | } | |
1645 | ||
1646 | /** | |
1647 | * irdma_hw_stats_start_timer - Start periodic stats timer | |
1648 | * @vsi: vsi structure pointer | |
1649 | */ | |
1650 | void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi) | |
1651 | { | |
1652 | struct irdma_vsi_pestat *devstat = vsi->pestat; | |
1653 | ||
1654 | timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0); | |
1655 | mod_timer(&devstat->stats_timer, | |
1656 | jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); | |
1657 | } | |
1658 | ||
1659 | /** | |
1660 | * irdma_hw_stats_stop_timer - Delete periodic stats timer | |
1661 | * @vsi: pointer to vsi structure | |
1662 | */ | |
1663 | void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi) | |
1664 | { | |
1665 | struct irdma_vsi_pestat *devstat = vsi->pestat; | |
1666 | ||
1667 | del_timer_sync(&devstat->stats_timer); | |
1668 | } | |
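Taken together, the three routines above implement the usual self-rearming kernel timer pattern: timer_setup() binds the handler, mod_timer() schedules the first expiry, the handler re-arms itself with another mod_timer(), and del_timer_sync() on teardown guarantees the handler has finished before the structure goes away. A minimal sketch of the same pattern outside this driver (my_stats and STATS_PERIOD_MS are illustrative names, not irdma symbols):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

#define STATS_PERIOD_MS 1000	/* illustrative period */

struct my_stats {
	struct timer_list timer;
	u64 samples;
};

static void my_stats_timeout(struct timer_list *t)
{
	struct my_stats *s = from_timer(s, t, timer);

	s->samples++;		/* gather work would go here */
	mod_timer(&s->timer, jiffies + msecs_to_jiffies(STATS_PERIOD_MS));
}

static void my_stats_start(struct my_stats *s)
{
	timer_setup(&s->timer, my_stats_timeout, 0);
	mod_timer(&s->timer, jiffies + msecs_to_jiffies(STATS_PERIOD_MS));
}

static void my_stats_stop(struct my_stats *s)
{
	del_timer_sync(&s->timer);	/* waits for a running handler to finish */
}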
1669 | ||
1670 | /** | |
1671 | * irdma_process_stats - Checking for wrap and update stats | |
1672 | * @pestat: stats structure pointer | |
1673 | */ | |
1674 | static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat) | |
1675 | { | |
1676 | sc_vsi_update_stats(pestat->vsi); | |
1677 | } | |
1678 | ||
1679 | /** | |
1680 | * irdma_cqp_gather_stats_gen1 - Gather stats | |
1681 | * @dev: pointer to device structure | |
1682 | * @pestat: statistics structure | |
1683 | */ | |
1684 | void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev, | |
1685 | struct irdma_vsi_pestat *pestat) | |
1686 | { | |
1687 | struct irdma_gather_stats *gather_stats = | |
1688 | pestat->gather_info.gather_stats_va; | |
5a711e58 KC |
1689 | const struct irdma_hw_stat_map *map = dev->hw_stats_map; |
1690 | u16 max_stats_idx = dev->hw_attrs.max_stat_idx; | |
915cc7ac MI |
1691 | u32 stats_inst_offset_32; |
1692 | u32 stats_inst_offset_64; | |
5a711e58 KC |
1693 | u64 new_val; |
1694 | u16 i; | |
915cc7ac MI |
1695 | |
1696 | stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ? | |
5a711e58 KC |
1697 | pestat->gather_info.stats_inst_index : |
1698 | pestat->hw->hmc.hmc_fn_id; | |
915cc7ac MI |
1699 | stats_inst_offset_32 *= 4; |
1700 | stats_inst_offset_64 = stats_inst_offset_32 * 2; | |
1701 | ||
5a711e58 KC |
1702 | for (i = 0; i < max_stats_idx; i++) { |
1703 | if (map[i].bitmask <= IRDMA_MAX_STATS_32) | |
1704 | new_val = rd32(dev->hw, | |
1705 | dev->hw_stats_regs[i] + stats_inst_offset_32); | |
1706 | else | |
1707 | new_val = rd64(dev->hw, | |
1708 | dev->hw_stats_regs[i] + stats_inst_offset_64); | |
1709 | gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val; | |
1710 | } | |
915cc7ac MI |
1711 | |
1712 | irdma_process_stats(pestat); | |
1713 | } | |
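The gen1 gather walks the hardware stat map and reads each counter at its register plus a per-instance offset: 32-bit counters are spaced 4 bytes per instance and 64-bit counters 8 bytes per instance, which is why stats_inst_offset_64 is simply stats_inst_offset_32 * 2. A small sketch of that address math, with made-up register bases and instance index:

#include <stdint.h>
#include <stdio.h>

/* Model of the per-instance register offset math in irdma_cqp_gather_stats_gen1().
 * Register bases and the instance index are illustrative values only. */
int main(void)
{
	uint32_t stats_inst = 3;		/* stats instance (or HMC function id) */
	uint32_t off32 = stats_inst * 4;	/* 4 bytes per instance, 32-bit counters */
	uint32_t off64 = off32 * 2;		/* 8 bytes per instance, 64-bit counters */
	uint32_t reg32_base = 0x30000;		/* hypothetical 32-bit counter register  */
	uint32_t reg64_base = 0x40000;		/* hypothetical 64-bit counter register  */

	printf("32-bit read at 0x%x, 64-bit read at 0x%x\n",
	       reg32_base + off32, reg64_base + off64);
	return 0;
}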
1714 | ||
1715 | /** | |
1716 | * irdma_process_cqp_stats - Checking for wrap and update stats | |
1717 | * @cqp_request: cqp_request structure pointer | |
1718 | */ | |
1719 | static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request) | |
1720 | { | |
1721 | struct irdma_vsi_pestat *pestat = cqp_request->param; | |
1722 | ||
1723 | irdma_process_stats(pestat); | |
1724 | } | |
1725 | ||
1726 | /** | |
1727 | * irdma_cqp_gather_stats_cmd - Gather stats | |
1728 | * @dev: pointer to device structure | |
1729 | * @pestat: pointer to stats info | |
1730 | * @wait: flag to wait or not wait for stats | |
1731 | */ | |
2c4b14ea SS |
1732 | int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, |
1733 | struct irdma_vsi_pestat *pestat, bool wait) | |
915cc7ac MI |
1734 | |
1735 | { | |
1736 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
1737 | struct irdma_cqp *iwcqp = &rf->cqp; | |
1738 | struct irdma_cqp_request *cqp_request; | |
1739 | struct cqp_cmds_info *cqp_info; | |
2c4b14ea | 1740 | int status; |
915cc7ac MI |
1741 | |
1742 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); | |
1743 | if (!cqp_request) | |
2c4b14ea | 1744 | return -ENOMEM; |
915cc7ac MI |
1745 | |
1746 | cqp_info = &cqp_request->info; | |
1747 | memset(cqp_info, 0, sizeof(*cqp_info)); | |
1748 | cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER; | |
1749 | cqp_info->post_sq = 1; | |
1750 | cqp_info->in.u.stats_gather.info = pestat->gather_info; | |
1751 | cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request; | |
1752 | cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp; | |
1753 | cqp_request->param = pestat; | |
1754 | if (!wait) | |
1755 | cqp_request->callback_fcn = irdma_process_cqp_stats; | |
1756 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1757 | if (wait) | |
1758 | irdma_process_stats(pestat); | |
1759 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1760 | ||
1761 | return status; | |
1762 | } | |
1763 | ||
1764 | /** | |
1765 | * irdma_cqp_stats_inst_cmd - Allocate/free stats instance | |
1766 | * @vsi: pointer to vsi structure | |
1767 | * @cmd: command to allocate or free | |
1768 | * @stats_info: pointer to stats instance info | |
1769 | */ | |
2c4b14ea SS |
1770 | int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd, |
1771 | struct irdma_stats_inst_info *stats_info) | |
915cc7ac MI |
1772 | { |
1773 | struct irdma_pci_f *rf = dev_to_rf(vsi->dev); | |
1774 | struct irdma_cqp *iwcqp = &rf->cqp; | |
1775 | struct irdma_cqp_request *cqp_request; | |
1776 | struct cqp_cmds_info *cqp_info; | |
2c4b14ea | 1777 | int status; |
915cc7ac MI |
1778 | bool wait = false; |
1779 | ||
1780 | if (cmd == IRDMA_OP_STATS_ALLOCATE) | |
1781 | wait = true; | |
1782 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); | |
1783 | if (!cqp_request) | |
2c4b14ea | 1784 | return -ENOMEM; |
915cc7ac MI |
1785 | |
1786 | cqp_info = &cqp_request->info; | |
1787 | memset(cqp_info, 0, sizeof(*cqp_info)); | |
1788 | cqp_info->cqp_cmd = cmd; | |
1789 | cqp_info->post_sq = 1; | |
1790 | cqp_info->in.u.stats_manage.info = *stats_info; | |
1791 | cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request; | |
1792 | cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp; | |
1793 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1794 | if (wait) | |
1795 | stats_info->stats_idx = cqp_request->compl_info.op_ret_val; | |
1796 | irdma_put_cqp_request(iwcqp, cqp_request); | |
1797 | ||
1798 | return status; | |
1799 | } | |
1800 | ||
1801 | /** | |
1802 | * irdma_cqp_ceq_cmd - Create/Destroy CEQs after CEQ 0 | |
1803 | * @dev: pointer to device info | |
1804 | * @sc_ceq: pointer to ceq structure | |
1805 | * @op: Create or Destroy | |
1806 | */ | |
2c4b14ea SS |
1807 | int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq, |
1808 | u8 op) | |
915cc7ac MI |
1809 | { |
1810 | struct irdma_cqp_request *cqp_request; | |
1811 | struct cqp_cmds_info *cqp_info; | |
1812 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2c4b14ea | 1813 | int status; |
915cc7ac MI |
1814 | |
1815 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); | |
1816 | if (!cqp_request) | |
2c4b14ea | 1817 | return -ENOMEM; |
915cc7ac MI |
1818 | |
1819 | cqp_info = &cqp_request->info; | |
1820 | cqp_info->post_sq = 1; | |
1821 | cqp_info->cqp_cmd = op; | |
1822 | cqp_info->in.u.ceq_create.ceq = sc_ceq; | |
1823 | cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request; | |
1824 | ||
1825 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1826 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1827 | ||
1828 | return status; | |
1829 | } | |
1830 | ||
1831 | /** | |
1832 | * irdma_cqp_aeq_cmd - Create/Destroy AEQ | |
1833 | * @dev: pointer to device info | |
1834 | * @sc_aeq: pointer to aeq structure | |
1835 | * @op: Create or Destroy | |
1836 | */ | |
2c4b14ea SS |
1837 | int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq, |
1838 | u8 op) | |
915cc7ac MI |
1839 | { |
1840 | struct irdma_cqp_request *cqp_request; | |
1841 | struct cqp_cmds_info *cqp_info; | |
1842 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2c4b14ea | 1843 | int status; |
915cc7ac MI |
1844 | |
1845 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); | |
1846 | if (!cqp_request) | |
2c4b14ea | 1847 | return -ENOMEM; |
915cc7ac MI |
1848 | |
1849 | cqp_info = &cqp_request->info; | |
1850 | cqp_info->post_sq = 1; | |
1851 | cqp_info->cqp_cmd = op; | |
1852 | cqp_info->in.u.aeq_create.aeq = sc_aeq; | |
1853 | cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request; | |
1854 | ||
1855 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1856 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1857 | ||
1858 | return status; | |
1859 | } | |
1860 | ||
1861 | /** | |
1862 | * irdma_cqp_ws_node_cmd - Add/modify/delete ws node | |
1863 | * @dev: pointer to device structure | |
1864 | * @cmd: Add, modify or delete | |
1865 | * @node_info: pointer to ws node info | |
1866 | */ | |
2c4b14ea SS |
1867 | int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, |
1868 | struct irdma_ws_node_info *node_info) | |
915cc7ac MI |
1869 | { |
1870 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
1871 | struct irdma_cqp *iwcqp = &rf->cqp; | |
1872 | struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp; | |
1873 | struct irdma_cqp_request *cqp_request; | |
1874 | struct cqp_cmds_info *cqp_info; | |
2c4b14ea | 1875 | int status; |
915cc7ac MI |
1876 | bool poll; |
1877 | ||
1878 | if (!rf->sc_dev.ceq_valid) | |
1879 | poll = true; | |
1880 | else | |
1881 | poll = false; | |
1882 | ||
1883 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll); | |
1884 | if (!cqp_request) | |
2c4b14ea | 1885 | return -ENOMEM; |
915cc7ac MI |
1886 | |
1887 | cqp_info = &cqp_request->info; | |
1888 | memset(cqp_info, 0, sizeof(*cqp_info)); | |
1889 | cqp_info->cqp_cmd = cmd; | |
1890 | cqp_info->post_sq = 1; | |
1891 | cqp_info->in.u.ws_node.info = *node_info; | |
1892 | cqp_info->in.u.ws_node.cqp = cqp; | |
1893 | cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request; | |
1894 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1895 | if (status) | |
1896 | goto exit; | |
1897 | ||
1898 | if (poll) { | |
1899 | struct irdma_ccq_cqe_info compl_info; | |
1900 | ||
1901 | status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE, | |
1902 | &compl_info); | |
1903 | node_info->qs_handle = compl_info.op_ret_val; | |
1904 | ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n", | |
1905 | compl_info.op_code, compl_info.op_ret_val); | |
1906 | } else { | |
1907 | node_info->qs_handle = cqp_request->compl_info.op_ret_val; | |
1908 | } | |
1909 | ||
1910 | exit: | |
1911 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
915cc7ac MI |
1912 | |
1913 | return status; | |
1914 | } | |
1915 | ||
1916 | /** | |
1917 | * irdma_ah_cqp_op - perform an AH cqp operation | |
1918 | * @rf: RDMA PCI function | |
1919 | * @sc_ah: address handle | |
1920 | * @cmd: AH operation | |
1921 | * @wait: wait if true | |
1922 | * @callback_fcn: Callback function on CQP op completion | |
1923 | * @cb_param: parameter for callback function | |
1924 | * | |
1925 | * returns errno | |
1926 | */ | |
1927 | int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd, | |
1928 | bool wait, | |
1929 | void (*callback_fcn)(struct irdma_cqp_request *), | |
1930 | void *cb_param) | |
1931 | { | |
1932 | struct irdma_cqp_request *cqp_request; | |
1933 | struct cqp_cmds_info *cqp_info; | |
2c4b14ea | 1934 | int status; |
915cc7ac MI |
1935 | |
1936 | if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY) | |
1937 | return -EINVAL; | |
1938 | ||
1939 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); | |
1940 | if (!cqp_request) | |
1941 | return -ENOMEM; | |
1942 | ||
1943 | cqp_info = &cqp_request->info; | |
1944 | cqp_info->cqp_cmd = cmd; | |
1945 | cqp_info->post_sq = 1; | |
1946 | if (cmd == IRDMA_OP_AH_CREATE) { | |
1947 | cqp_info->in.u.ah_create.info = sc_ah->ah_info; | |
1948 | cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request; | |
1949 | cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp; | |
1950 | } else if (cmd == IRDMA_OP_AH_DESTROY) { | |
1951 | cqp_info->in.u.ah_destroy.info = sc_ah->ah_info; | |
1952 | cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request; | |
1953 | cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp; | |
1954 | } | |
1955 | ||
1956 | if (!wait) { | |
1957 | cqp_request->callback_fcn = callback_fcn; | |
1958 | cqp_request->param = cb_param; | |
1959 | } | |
1960 | status = irdma_handle_cqp_op(rf, cqp_request); | |
1961 | irdma_put_cqp_request(&rf->cqp, cqp_request); | |
1962 | ||
1963 | if (status) | |
1964 | return -ENOMEM; | |
1965 | ||
1966 | if (wait) | |
1967 | sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE); | |
1968 | ||
1969 | return 0; | |
1970 | } | |
1971 | ||
1972 | /** | |
1973 | * irdma_ieq_ah_cb - callback after creation of AH for IEQ | |
1974 | * @cqp_request: pointer to cqp_request of create AH | |
1975 | */ | |
1976 | static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request) | |
1977 | { | |
1978 | struct irdma_sc_qp *qp = cqp_request->param; | |
1979 | struct irdma_sc_ah *sc_ah = qp->pfpdu.ah; | |
1980 | unsigned long flags; | |
1981 | ||
1982 | spin_lock_irqsave(&qp->pfpdu.lock, flags); | |
1983 | if (!cqp_request->compl_info.op_ret_val) { | |
1984 | sc_ah->ah_info.ah_valid = true; | |
1985 | irdma_ieq_process_fpdus(qp, qp->vsi->ieq); | |
1986 | } else { | |
1987 | sc_ah->ah_info.ah_valid = false; | |
1988 | irdma_ieq_cleanup_qp(qp->vsi->ieq, qp); | |
1989 | } | |
1990 | spin_unlock_irqrestore(&qp->pfpdu.lock, flags); | |
1991 | } | |
1992 | ||
1993 | /** | |
1994 | * irdma_ilq_ah_cb - callback after creation of AH for ILQ | |
1995 | * @cqp_request: pointer to cqp_request of create AH | |
1996 | */ | |
1997 | static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request) | |
1998 | { | |
1999 | struct irdma_cm_node *cm_node = cqp_request->param; | |
2000 | struct irdma_sc_ah *sc_ah = cm_node->ah; | |
2001 | ||
2002 | sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val; | |
2003 | irdma_add_conn_est_qh(cm_node); | |
2004 | } | |
2005 | ||
2006 | /** | |
2007 | * irdma_puda_create_ah - create AH for ILQ/IEQ qp's | |
2008 | * @dev: device pointer | |
2009 | * @ah_info: Address handle info | |
2010 | * @wait: When true will wait for operation to complete | |
2011 | * @type: ILQ/IEQ | |
2012 | * @cb_param: Callback param when not waiting | |
2013 | * @ah_ret: Returned pointer to address handle if created | |
2014 | * | |
2015 | */ | |
2c4b14ea SS |
2016 | int irdma_puda_create_ah(struct irdma_sc_dev *dev, |
2017 | struct irdma_ah_info *ah_info, bool wait, | |
2018 | enum puda_rsrc_type type, void *cb_param, | |
2019 | struct irdma_sc_ah **ah_ret) | |
915cc7ac MI |
2020 | { |
2021 | struct irdma_sc_ah *ah; | |
2022 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2023 | int err; | |
2024 | ||
2025 | ah = kzalloc(sizeof(*ah), GFP_ATOMIC); | |
2026 | *ah_ret = ah; | |
2027 | if (!ah) | |
2c4b14ea | 2028 | return -ENOMEM; |
915cc7ac MI |
2029 | |
2030 | err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, | |
2031 | &ah_info->ah_idx, &rf->next_ah); | |
2032 | if (err) | |
2033 | goto err_free; | |
2034 | ||
2035 | ah->dev = dev; | |
2036 | ah->ah_info = *ah_info; | |
2037 | ||
2038 | if (type == IRDMA_PUDA_RSRC_TYPE_ILQ) | |
2039 | err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait, | |
2040 | irdma_ilq_ah_cb, cb_param); | |
2041 | else | |
2042 | err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait, | |
2043 | irdma_ieq_ah_cb, cb_param); | |
2044 | ||
2045 | if (err) | |
2046 | goto error; | |
2047 | return 0; | |
2048 | ||
2049 | error: | |
2050 | irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx); | |
2051 | err_free: | |
2052 | kfree(ah); | |
2053 | *ah_ret = NULL; | |
2c4b14ea | 2054 | return -ENOMEM; |
915cc7ac MI |
2055 | } |
2056 | ||
2057 | /** | |
2058 | * irdma_puda_free_ah - free a puda address handle | |
2059 | * @dev: device pointer | |
2060 | * @ah: The address handle to free | |
2061 | */ | |
2062 | void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah) | |
2063 | { | |
2064 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2065 | ||
2066 | if (!ah) | |
2067 | return; | |
2068 | ||
2069 | if (ah->ah_info.ah_valid) { | |
2070 | irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL); | |
2071 | irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx); | |
2072 | } | |
2073 | ||
2074 | kfree(ah); | |
2075 | } | |
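irdma_puda_create_ah() and irdma_puda_free_ah() bracket the lifetime of a PUDA address handle: create allocates an AH index and issues IRDMA_OP_AH_CREATE through the CQP (with irdma_ilq_ah_cb or irdma_ieq_ah_cb completing it when the caller does not wait), and free issues IRDMA_OP_AH_DESTROY and releases the index. A hedged sketch of a caller, using only functions shown in this file; the ah_info contents are assumed to be filled in elsewhere, and this will not build outside the driver:

/* Sketch only: assumes a populated struct irdma_ah_info and a valid dev. */
static int example_ieq_ah(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp,
			  struct irdma_ah_info *ah_info)
{
	struct irdma_sc_ah *ah;
	int err;

	/* Asynchronous create: irdma_ieq_ah_cb() runs on CQP completion
	 * with qp as its cqp_request->param.
	 */
	err = irdma_puda_create_ah(dev, ah_info, false,
				   IRDMA_PUDA_RSRC_TYPE_IEQ, qp, &ah);
	if (err)
		return err;

	/* ... use the AH once ah_info.ah_valid is set by the callback ... */

	irdma_puda_free_ah(dev, ah);	/* destroys the AH and frees its index */
	return 0;
}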
2076 | ||
2077 | /** | |
2078 | * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP | |
2079 | * @cqp_request: pointer to cqp_request of create AH | |
2080 | */ | |
2081 | void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request) | |
2082 | { | |
2083 | struct irdma_sc_ah *sc_ah = cqp_request->param; | |
2084 | ||
2085 | if (!cqp_request->compl_info.op_ret_val) | |
2086 | sc_ah->ah_info.ah_valid = true; | |
2087 | else | |
2088 | sc_ah->ah_info.ah_valid = false; | |
2089 | } | |
2090 | ||
2091 | /** | |
2092 | * irdma_prm_add_pble_mem - add memory to pble resources | |
2093 | * @pprm: pble resource manager | |
2094 | * @pchunk: chunk of memory to add | |
2095 | */ | |
2c4b14ea SS |
2096 | int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, |
2097 | struct irdma_chunk *pchunk) | |
915cc7ac MI |
2098 | { |
2099 | u64 sizeofbitmap; | |
2100 | ||
2101 | if (pchunk->size & 0xfff) | |
2c4b14ea | 2102 | return -EINVAL; |
915cc7ac MI |
2103 | |
2104 | sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift; | |
2105 | ||
117697cc CJ |
2106 | pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL); |
2107 | if (!pchunk->bitmapbuf) | |
2c4b14ea | 2108 | return -ENOMEM; |
915cc7ac | 2109 | |
915cc7ac MI |
2110 | pchunk->sizeofbitmap = sizeofbitmap; |
2111 | /* each pble is 8 bytes hence shift by 3 */ | |
2112 | pprm->total_pble_alloc += pchunk->size >> 3; | |
2113 | pprm->free_pble_cnt += pchunk->size >> 3; | |
2114 | ||
2115 | return 0; | |
2116 | } | |
2117 | ||
2118 | /** | |
2119 | * irdma_prm_get_pbles - get pble's from prm | |
2120 | * @pprm: pble resource manager | |
2121 | * @chunkinfo: information about chunk where pble's were acquired | |
2122 | * @mem_size: size of pble memory needed | |
2123 | * @vaddr: returns virtual address of pble memory | |
2124 | * @fpm_addr: returns fpm address of pble memory | |
2125 | */ | |
2c4b14ea SS |
2126 | int irdma_prm_get_pbles(struct irdma_pble_prm *pprm, |
2127 | struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size, | |
2128 | u64 **vaddr, u64 *fpm_addr) | |
915cc7ac MI |
2129 | { |
2130 | u64 bits_needed; | |
2131 | u64 bit_idx = PBLE_INVALID_IDX; | |
2132 | struct irdma_chunk *pchunk = NULL; | |
2133 | struct list_head *chunk_entry = pprm->clist.next; | |
2134 | u32 offset; | |
2135 | unsigned long flags; | |
2db7b2ea | 2136 | *vaddr = NULL; |
915cc7ac MI |
2137 | *fpm_addr = 0; |
2138 | ||
1f700757 | 2139 | bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift)); |
915cc7ac MI |
2140 | |
2141 | spin_lock_irqsave(&pprm->prm_lock, flags); | |
2142 | while (chunk_entry != &pprm->clist) { | |
2143 | pchunk = (struct irdma_chunk *)chunk_entry; | |
2144 | bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf, | |
2145 | pchunk->sizeofbitmap, 0, | |
2146 | bits_needed, 0); | |
2147 | if (bit_idx < pchunk->sizeofbitmap) | |
2148 | break; | |
2149 | ||
2150 | /* move to the next chunk in the list */ | |
2151 | chunk_entry = pchunk->list.next; | |
2152 | } | |
2153 | ||
2154 | if (!pchunk || bit_idx >= pchunk->sizeofbitmap) { | |
2155 | spin_unlock_irqrestore(&pprm->prm_lock, flags); | |
2c4b14ea | 2156 | return -ENOMEM; |
915cc7ac MI |
2157 | } |
2158 | ||
2159 | bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed); | |
2160 | offset = bit_idx << pprm->pble_shift; | |
2161 | *vaddr = pchunk->vaddr + offset; | |
2162 | *fpm_addr = pchunk->fpm_addr + offset; | |
2163 | ||
2164 | chunkinfo->pchunk = pchunk; | |
2165 | chunkinfo->bit_idx = bit_idx; | |
2166 | chunkinfo->bits_used = bits_needed; | |
2167 | /* each pble is 8 bytes, hence shift by 3 */ | |
2168 | pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3); | |
2169 | spin_unlock_irqrestore(&pprm->prm_lock, flags); | |
2170 | ||
2171 | return 0; | |
2172 | } | |
2173 | ||
2174 | /** | |
2175 | * irdma_prm_return_pbles - return pbles back to prm | |
2176 | * @pprm: pble resource manager | |
2177 | * @chunkinfo: chunk where pble's were acquired and to be freed | |
2178 | */ | |
2179 | void irdma_prm_return_pbles(struct irdma_pble_prm *pprm, | |
2180 | struct irdma_pble_chunkinfo *chunkinfo) | |
2181 | { | |
2182 | unsigned long flags; | |
2183 | ||
2184 | spin_lock_irqsave(&pprm->prm_lock, flags); | |
2185 | pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3); | |
2186 | bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx, | |
2187 | chunkinfo->bits_used); | |
2188 | spin_unlock_irqrestore(&pprm->prm_lock, flags); | |
2189 | } | |
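The three prm helpers above form a simple first-fit allocator: each bit of a chunk's bitmap covers 1 << pble_shift bytes, a request needs DIV_ROUND_UP(mem_size, 1 << pble_shift) bits, the winning bit index is turned back into a byte offset with bit_idx << pble_shift, and the free-PBLE counter moves by bits_used << (pble_shift - 3) because a PBLE is 8 bytes. A small userspace model of that arithmetic, with made-up chunk size and shift:

#include <stdint.h>
#include <stdio.h>

#define PBLE_SHIFT 12		/* illustrative: one bit covers 4 KiB */

/* One chunk modelled as a plain byte-per-bit map for clarity. */
static uint8_t bitmap[1u << 8];	/* 256 bits -> covers 256 * 4 KiB = 1 MiB */

/* First-fit search mirroring the bitmap_find_next_zero_area() usage above. */
static long prm_get(uint64_t mem_size, uint64_t *offset)
{
	uint64_t bits_needed = (mem_size + (1u << PBLE_SHIFT) - 1) >> PBLE_SHIFT;
	uint64_t run = 0, start = 0, i;

	for (i = 0; i < sizeof(bitmap); i++) {
		if (bitmap[i]) {
			run = 0;
			start = i + 1;
			continue;
		}
		if (++run == bits_needed) {
			for (i = start; i < start + bits_needed; i++)
				bitmap[i] = 1;
			*offset = start << PBLE_SHIFT;	/* byte offset in chunk */
			return (long)bits_needed;
		}
	}
	return -1;			/* no room, like the -ENOMEM path */
}

int main(void)
{
	uint64_t off;
	long bits = prm_get(3 * 4096 + 1, &off);	/* rounds up to 4 bits */

	if (bits > 0)
		printf("allocated %ld bits at byte offset %llu (%ld pbles)\n",
		       bits, (unsigned long long)off,
		       bits << (PBLE_SHIFT - 3));
	return 0;
}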
2190 | ||
2c4b14ea SS |
2191 | int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma, |
2192 | u32 pg_cnt) | |
915cc7ac MI |
2193 | { |
2194 | struct page *vm_page; | |
2195 | int i; | |
2196 | u8 *addr; | |
2197 | ||
2198 | addr = (u8 *)(uintptr_t)va; | |
2199 | for (i = 0; i < pg_cnt; i++) { | |
2200 | vm_page = vmalloc_to_page(addr); | |
2201 | if (!vm_page) | |
2202 | goto err; | |
2203 | ||
2204 | pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE, | |
2205 | DMA_BIDIRECTIONAL); | |
2206 | if (dma_mapping_error(hw->device, pg_dma[i])) | |
2207 | goto err; | |
2208 | ||
2209 | addr += PAGE_SIZE; | |
2210 | } | |
2211 | ||
2212 | return 0; | |
2213 | ||
2214 | err: | |
2215 | irdma_unmap_vm_page_list(hw, pg_dma, i); | |
2c4b14ea | 2216 | return -ENOMEM; |
915cc7ac MI |
2217 | } |
2218 | ||
2219 | void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt) | |
2220 | { | |
2221 | int i; | |
2222 | ||
2223 | for (i = 0; i < pg_cnt; i++) | |
2224 | dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL); | |
2225 | } | |
2226 | ||
2227 | /** | |
2228 | * irdma_pble_free_paged_mem - free virtual paged memory | |
2229 | * @chunk: chunk to free with paged memory | |
2230 | */ | |
2231 | void irdma_pble_free_paged_mem(struct irdma_chunk *chunk) | |
2232 | { | |
2233 | if (!chunk->pg_cnt) | |
2234 | goto done; | |
2235 | ||
2236 | irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs, | |
2237 | chunk->pg_cnt); | |
2238 | ||
2239 | done: | |
2240 | kfree(chunk->dmainfo.dmaaddrs); | |
2241 | chunk->dmainfo.dmaaddrs = NULL; | |
2db7b2ea SS |
2242 | vfree(chunk->vaddr); |
2243 | chunk->vaddr = NULL; | |
915cc7ac MI |
2244 | chunk->type = 0; |
2245 | } | |
2246 | ||
2247 | /** | |
2248 | * irdma_pble_get_paged_mem -allocate paged memory for pbles | |
2249 | * @chunk: chunk to add for paged memory | |
2250 | * @pg_cnt: number of pages needed | |
2251 | */ | |
2c4b14ea | 2252 | int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt) |
915cc7ac MI |
2253 | { |
2254 | u32 size; | |
2255 | void *va; | |
2256 | ||
2257 | chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL); | |
2258 | if (!chunk->dmainfo.dmaaddrs) | |
2c4b14ea | 2259 | return -ENOMEM; |
915cc7ac MI |
2260 | |
2261 | size = PAGE_SIZE * pg_cnt; | |
2262 | va = vmalloc(size); | |
2263 | if (!va) | |
2264 | goto err; | |
2265 | ||
2266 | if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs, | |
2267 | pg_cnt)) { | |
2268 | vfree(va); | |
2269 | goto err; | |
2270 | } | |
2db7b2ea | 2271 | chunk->vaddr = va; |
915cc7ac MI |
2272 | chunk->size = size; |
2273 | chunk->pg_cnt = pg_cnt; | |
2274 | chunk->type = PBLE_SD_PAGED; | |
2275 | ||
2276 | return 0; | |
2277 | err: | |
2278 | kfree(chunk->dmainfo.dmaaddrs); | |
2279 | chunk->dmainfo.dmaaddrs = NULL; | |
2280 | ||
2c4b14ea | 2281 | return -ENOMEM; |
915cc7ac MI |
2282 | } |
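irdma_pble_get_paged_mem() ties the previous helpers together: vmalloc() provides a virtually contiguous buffer, irdma_map_vm_page_list() DMA-maps it page by page, and the filled-in chunk can then be handed to irdma_prm_add_pble_mem(); irdma_pble_free_paged_mem() undoes both steps. A hedged sketch of that flow using only the functions above; the chunk and pprm setup is assumed to happen elsewhere, and this is not buildable outside the driver:

/* Sketch only: chunk->dev and pprm are assumed to be initialized already. */
static int example_grow_pble_pool(struct irdma_pble_prm *pprm,
				  struct irdma_chunk *chunk, u32 pg_cnt)
{
	int err;

	err = irdma_pble_get_paged_mem(chunk, pg_cnt);	/* vmalloc + DMA map */
	if (err)
		return err;

	err = irdma_prm_add_pble_mem(pprm, chunk);	/* set up the chunk bitmap */
	if (err) {
		irdma_pble_free_paged_mem(chunk);	/* unmap + vfree on failure */
		return err;
	}

	/* The caller is also expected to link the chunk onto pprm->clist
	 * before irdma_prm_get_pbles() can find it (not shown here).
	 */
	return 0;
}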
2283 | ||
2284 | /** | |
2285 | * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID | |
2286 | * @dev: device pointer | |
2287 | */ | |
2288 | u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev) | |
2289 | { | |
2290 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2291 | u32 next = 1; | |
2292 | u32 node_id; | |
2293 | ||
2294 | if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id, | |
2295 | &node_id, &next)) | |
2296 | return IRDMA_WS_NODE_INVALID; | |
2297 | ||
2298 | return (u16)node_id; | |
2299 | } | |
2300 | ||
2301 | /** | |
2302 | * irdma_free_ws_node_id - Free a tx scheduler node ID | |
2303 | * @dev: device pointer | |
2304 | * @node_id: Work scheduler node ID | |
2305 | */ | |
2306 | void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id) | |
2307 | { | |
2308 | struct irdma_pci_f *rf = dev_to_rf(dev); | |
2309 | ||
2310 | irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id); | |
2311 | } | |
2312 | ||
2313 | /** | |
2314 | * irdma_modify_qp_to_err - Modify a QP to error | |
2315 | * @sc_qp: qp structure | |
2316 | */ | |
2317 | void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp) | |
2318 | { | |
2319 | struct irdma_qp *qp = sc_qp->qp_uk.back_qp; | |
2320 | struct ib_qp_attr attr; | |
2321 | ||
5b1e985f | 2322 | if (qp->iwdev->rf->reset) |
915cc7ac MI |
2323 | return; |
2324 | attr.qp_state = IB_QPS_ERR; | |
2325 | ||
2326 | if (rdma_protocol_roce(qp->ibqp.device, 1)) | |
2327 | irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL); | |
2328 | else | |
2329 | irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL); | |
2330 | } | |
2331 | ||
2332 | void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event) | |
2333 | { | |
2334 | struct ib_event ibevent; | |
2335 | ||
2336 | if (!iwqp->ibqp.event_handler) | |
2337 | return; | |
2338 | ||
2339 | switch (event) { | |
2340 | case IRDMA_QP_EVENT_CATASTROPHIC: | |
2341 | ibevent.event = IB_EVENT_QP_FATAL; | |
2342 | break; | |
2343 | case IRDMA_QP_EVENT_ACCESS_ERR: | |
2344 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | |
2345 | break; | |
7f51a961 SD |
2346 | case IRDMA_QP_EVENT_REQ_ERR: |
2347 | ibevent.event = IB_EVENT_QP_REQ_ERR; | |
2348 | break; | |
915cc7ac MI |
2349 | } |
2350 | ibevent.device = iwqp->ibqp.device; | |
2351 | ibevent.element.qp = &iwqp->ibqp; | |
2352 | iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context); | |
2353 | } | |
10467ce0 TN |
2354 | |
2355 | bool irdma_cq_empty(struct irdma_cq *iwcq) | |
2356 | { | |
2357 | struct irdma_cq_uk *ukcq; | |
2358 | u64 qword3; | |
2359 | __le64 *cqe; | |
2360 | u8 polarity; | |
2361 | ||
2362 | ukcq = &iwcq->sc_cq.cq_uk; | |
2363 | cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq); | |
2364 | get_64bit_val(cqe, 24, &qword3); | |
2365 | polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); | |
2366 | ||
2367 | return polarity != ukcq->polarity; | |
2368 | } | |
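irdma_cq_empty() relies on the standard valid-bit (polarity) scheme for hardware rings: each CQE carries a valid bit that hardware writes as the ring's current phase, the consumer tracks the phase it expects and flips it each time it wraps, and a mismatch at the current position means the entry has not been written yet, so the CQ is empty. A small userspace model of that check, with an illustrative ring size and fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4		/* illustrative ring size */

struct cqe { uint8_t valid; };	/* stand-in for the IRDMA_CQ_VALID bit in qword3 */

struct cq {
	struct cqe ring[RING_SIZE];
	uint32_t head;		/* next entry the consumer will look at          */
	uint8_t polarity;	/* phase the consumer expects, flips on each wrap */
};

static bool cq_empty(const struct cq *cq)
{
	/* Mirrors irdma_cq_empty(): the entry is unconsumed only if its
	 * valid bit matches the phase we expect.
	 */
	return cq->ring[cq->head].valid != cq->polarity;
}

static void cq_pop(struct cq *cq)
{
	if (++cq->head == RING_SIZE) {
		cq->head = 0;
		cq->polarity ^= 1;	/* wrapped: expect the opposite phase */
	}
}

int main(void)
{
	struct cq cq = { .polarity = 1 };

	printf("empty before producer writes: %d\n", cq_empty(&cq));
	cq.ring[0].valid = 1;		/* producer posts one CQE in phase 1 */
	printf("empty after one CQE: %d\n", cq_empty(&cq));
	cq_pop(&cq);
	printf("empty after consuming it: %d\n", cq_empty(&cq));
	return 0;
}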
81091d76 MI |
2369 | |
2370 | void irdma_remove_cmpls_list(struct irdma_cq *iwcq) | |
2371 | { | |
2372 | struct irdma_cmpl_gen *cmpl_node; | |
2373 | struct list_head *tmp_node, *list_node; | |
2374 | ||
2375 | list_for_each_safe (list_node, tmp_node, &iwcq->cmpl_generated) { | |
2376 | cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list); | |
2377 | list_del(&cmpl_node->list); | |
2378 | kfree(cmpl_node); | |
2379 | } | |
2380 | } | |
2381 | ||
2382 | int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info) | |
2383 | { | |
2384 | struct irdma_cmpl_gen *cmpl; | |
2385 | ||
2386 | if (list_empty(&iwcq->cmpl_generated)) | |
2387 | return -ENOENT; | |
2388 | cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list); | |
2389 | list_del(&cmpl->list); | |
2390 | memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info)); | |
2391 | kfree(cmpl); | |
2392 | ||
2393 | ibdev_dbg(iwcq->ibcq.device, | |
2394 | "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n", | |
2395 | __func__, cq_poll_info->qp_id, cq_poll_info->op_type, | |
2396 | cq_poll_info->wr_id); | |
2397 | ||
2398 | return 0; | |
2399 | } | |
2400 | ||
2401 | /** | |
2402 | * irdma_set_cpi_common_values - fill in values for polling info struct | |
2403 | * @cpi: resulting structure of cq_poll_info type | |
2404 | * @qp: QPair | |
2405 | * @qp_num: id of the QP | |
2406 | */ | |
2407 | static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi, | |
2408 | struct irdma_qp_uk *qp, u32 qp_num) | |
2409 | { | |
2410 | cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED; | |
2411 | cpi->error = true; | |
2412 | cpi->major_err = IRDMA_FLUSH_MAJOR_ERR; | |
2413 | cpi->minor_err = FLUSH_GENERAL_ERR; | |
2414 | cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp; | |
2415 | cpi->qp_id = qp_num; | |
2416 | } | |
2417 | ||
2418 | static inline void irdma_comp_handler(struct irdma_cq *cq) | |
2419 | { | |
2420 | if (!cq->ibcq.comp_handler) | |
2421 | return; | |
2422 | if (atomic_cmpxchg(&cq->armed, 1, 0)) | |
2423 | cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); | |
2424 | } | |
2425 | ||
2426 | void irdma_generate_flush_completions(struct irdma_qp *iwqp) | |
2427 | { | |
2428 | struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk; | |
2429 | struct irdma_ring *sq_ring = &qp->sq_ring; | |
2430 | struct irdma_ring *rq_ring = &qp->rq_ring; | |
2431 | struct irdma_cmpl_gen *cmpl; | |
2432 | __le64 *sw_wqe; | |
2433 | u64 wqe_qword; | |
2434 | u32 wqe_idx; | |
2435 | bool compl_generated = false; | |
2436 | unsigned long flags1; | |
2437 | ||
2438 | spin_lock_irqsave(&iwqp->iwscq->lock, flags1); | |
2439 | if (irdma_cq_empty(iwqp->iwscq)) { | |
2440 | unsigned long flags2; | |
2441 | ||
2442 | spin_lock_irqsave(&iwqp->lock, flags2); | |
2443 | while (IRDMA_RING_MORE_WORK(*sq_ring)) { | |
2444 | cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC); | |
2445 | if (!cmpl) { | |
2446 | spin_unlock_irqrestore(&iwqp->lock, flags2); | |
2447 | spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); | |
2448 | return; | |
2449 | } | |
2450 | ||
2451 | wqe_idx = sq_ring->tail; | |
2452 | irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); | |
2453 | ||
2454 | cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; | |
2455 | sw_wqe = qp->sq_base[wqe_idx].elem; | |
2456 | get_64bit_val(sw_wqe, 24, &wqe_qword); | |
2457 | cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword); |
24419777 | 2458 | cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ; |
81091d76 MI |
2459 | /* remove the SQ WR by moving SQ tail*/ |
2460 | IRDMA_RING_SET_TAIL(*sq_ring, | |
2461 | sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta); | |
30ed9ee9 MI |
2462 | if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) { |
2463 | kfree(cmpl); | |
2464 | continue; | |
2465 | } | |
81091d76 MI |
2466 | ibdev_dbg(iwqp->iwscq->ibcq.device, |
2467 | "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n", | |
2468 | __func__, cmpl->cpi.wr_id, qp->qp_id); | |
2469 | list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated); | |
2470 | compl_generated = true; | |
2471 | } | |
2472 | spin_unlock_irqrestore(&iwqp->lock, flags2); | |
2473 | spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); | |
2474 | if (compl_generated) | |
ead54ced | 2475 | irdma_comp_handler(iwqp->iwscq); |
81091d76 MI |
2476 | } else { |
2477 | spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); | |
2478 | mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, | |
2479 | msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); | |
2480 | } | |
2481 | ||
2482 | spin_lock_irqsave(&iwqp->iwrcq->lock, flags1); | |
2483 | if (irdma_cq_empty(iwqp->iwrcq)) { | |
2484 | unsigned long flags2; | |
2485 | ||
2486 | spin_lock_irqsave(&iwqp->lock, flags2); | |
2487 | while (IRDMA_RING_MORE_WORK(*rq_ring)) { | |
2488 | cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC); | |
2489 | if (!cmpl) { | |
2490 | spin_unlock_irqrestore(&iwqp->lock, flags2); | |
2491 | spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); | |
2492 | return; | |
2493 | } | |
2494 | ||
2495 | wqe_idx = rq_ring->tail; | |
2496 | irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); | |
2497 | ||
2498 | cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx]; | |
2499 | cmpl->cpi.op_type = IRDMA_OP_TYPE_REC; | |
24419777 | 2500 | cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ; |
81091d76 MI |
2501 | /* remove the RQ WR by moving RQ tail */ |
2502 | IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1); | |
2503 | ibdev_dbg(iwqp->iwrcq->ibcq.device, | |
2504 | "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n", | |
2505 | __func__, cmpl->cpi.wr_id, qp->qp_id, | |
2506 | wqe_idx); | |
2507 | list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated); | |
2508 | ||
2509 | compl_generated = true; | |
2510 | } | |
2511 | spin_unlock_irqrestore(&iwqp->lock, flags2); | |
2512 | spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); | |
2513 | if (compl_generated) | |
2514 | irdma_comp_handler(iwqp->iwrcq); | |
2515 | } else { | |
2516 | spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); | |
2517 | mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, | |
2518 | msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); | |
2519 | } | |
2520 | } |
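The artificial completions queued above are consumed later, when the ULP polls the CQ: irdma_generated_cmpls(), shown earlier in this section, dequeues one entry from cmpl_generated and copies it into the caller's irdma_cq_poll_info, returning -ENOENT once the list is drained so the poller can fall back to real CQEs. A hedged sketch of that consumer side, using only functions shown above; the surrounding loop and names are illustrative and not the driver's actual poll routine:

/* Sketch only: drain software-generated flush completions before looking at
 * hardware CQEs. Locking of iwcq is omitted; not the driver's real poll path.
 */
static int example_drain_generated(struct irdma_cq *iwcq,
				   struct irdma_cq_poll_info *info)
{
	int npolled = 0;

	while (!irdma_generated_cmpls(iwcq, info)) {
		/* info now holds one flushed WR: comp_status is
		 * IRDMA_COMPL_STATUS_FLUSHED with the original wr_id.
		 * A real poller would convert it to a work completion
		 * before overwriting info on the next iteration.
		 */
		npolled++;
	}

	/* ... continue with normal hardware CQE polling here ... */
	return npolled;
}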