RDMA/i40iw: Convert timers to use timer_setup() (part 2)
drivers/infiniband/hw/i40iw/i40iw_utils.c
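This change moves the driver's timers off the deprecated API that passed an unsigned long through timer->data and onto timer_setup(), where the callback receives the timer_list pointer and recovers its container with from_timer(). A minimal sketch of the pattern, with a hypothetical struct my_obj standing in for the driver structures converted below (i40iw_qp and i40iw_vsi_pestat):

	struct my_obj {
		struct timer_list my_timer;
		/* ... */
	};

	static void my_timeout(struct timer_list *t)
	{
		/* map the expired timer back to its containing object */
		struct my_obj *obj = from_timer(obj, t, my_timer);

		/* ... handle expiry for obj ... */
	}

	static void my_start(struct my_obj *obj)
	{
		timer_setup(&obj->my_timer, my_timeout, 0);
		mod_timer(&obj->my_timer, jiffies + HZ);
	}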
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
*   copyright notice, this list of conditions and the following
*   disclaimer.
*
* - Redistributions in binary form must reproduce the above
*   copyright notice, this list of conditions and the following
*   disclaimer in the documentation and/or other materials
*   provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include "i40iw.h"

/**
 * i40iw_arp_table - manage arp table
 * @iwdev: iwarp device
 * @ip_addr: ip address for device
 * @ipv4: flag indicating IPv4 when true
 * @mac_addr: mac address ptr
 * @action: modify, delete or add
 */
int i40iw_arp_table(struct i40iw_device *iwdev,
		    u32 *ip_addr,
		    bool ipv4,
		    u8 *mac_addr,
		    u32 action)
{
	int arp_index;
	int err;
	u32 ip[4];

	if (ipv4) {
		memset(ip, 0, sizeof(ip));
		ip[0] = *ip_addr;
	} else {
		memcpy(ip, ip_addr, sizeof(ip));
	}

	for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
		if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
			break;
	switch (action) {
	case I40IW_ARP_ADD:
		if (arp_index != iwdev->arp_table_size)
			return -1;

		arp_index = 0;
		err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
					   iwdev->arp_table_size,
					   (u32 *)&arp_index,
					   &iwdev->next_arp_index);

		if (err)
			return err;

		memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
		ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
		break;
	case I40IW_ARP_RESOLVE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		break;
	case I40IW_ARP_DELETE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		memset(iwdev->arp_table[arp_index].ip_addr, 0,
		       sizeof(iwdev->arp_table[arp_index].ip_addr));
		eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
		i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
		break;
	default:
		return -1;
	}
	return arp_index;
}

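/*
 * Note: callers normally reach i40iw_arp_table() through
 * i40iw_manage_arp_cache() (see the notifier handlers below); the
 * return value is the arp table index on success and -1 when an add
 * finds a duplicate or a resolve/delete finds no entry.
 */
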
/**
 * i40iw_wr32 - write 32 bits to hw register
 * @hw: hardware information including registers
 * @reg: register offset
 * @value: value to write to register
 */
inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
{
	writel(value, hw->hw_addr + reg);
}

/**
 * i40iw_rd32 - read a 32 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
{
	return readl(hw->hw_addr + reg);
}

/**
 * i40iw_inetaddr_event - system notifier for ipv4 netdev events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: if address
 */
int i40iw_inetaddr_event(struct notifier_block *notifier,
			 unsigned long event,
			 void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *event_netdev = ifa->ifa_dev->dev;
	struct net_device *netdev;
	struct net_device *upper_dev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr;
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	upper_dev = netdev_master_upper_dev_get(netdev);
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	if (upper_dev)
		local_ipaddr = ntohl(
			((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
	else
		local_ipaddr = ntohl(ifa->ifa_address);
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:
		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       &local_ipaddr,
				       true,
				       action);
		i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
				(action == I40IW_ARP_ADD) ? true : false);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_inet6addr_event - system notifier for ipv6 netdev events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: if address
 */
int i40iw_inet6addr_event(struct notifier_block *notifier,
			  unsigned long event,
			  void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *event_netdev = ifa->idev->dev;
	struct net_device *netdev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr6[4];
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:
		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       local_ipaddr6,
				       false,
				       action);
		i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
				(action == I40IW_ARP_ADD) ? true : false);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_net_event - system notifier for net events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: neighbor
 */
int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
{
	struct neighbour *neigh = ptr;
	struct i40iw_device *iwdev;
	struct i40iw_handler *iwhdl;
	__be32 *p;
	u32 local_ipaddr[4];

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);
		if (!iwhdl)
			return NOTIFY_DONE;
		iwdev = &iwhdl->device;
		if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
			return NOTIFY_DONE;
		p = (__be32 *)neigh->primary_key;
		i40iw_copy_ip_ntohl(local_ipaddr, p);
		if (neigh->nud_state & NUD_VALID) {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_ADD);

		} else {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_DELETE);
		}
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 */
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
{
	struct i40iw_cqp_request *cqp_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
					 struct i40iw_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);
	if (!cqp_request) {
		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = true;
			INIT_LIST_HEAD(&cqp_request->list);
			init_waitqueue_head(&cqp_request->waitq);
		}
	}
	if (!cqp_request) {
		i40iw_pr_err("CQP Request Fail: No Memory");
		return NULL;
	}

	if (wait) {
		atomic_set(&cqp_request->refcount, 2);
		cqp_request->waiting = true;
	} else {
		atomic_set(&cqp_request->refcount, 1);
	}
	return cqp_request;
}

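/*
 * Typical cqp request lifecycle (i40iw_cqp_sds_cmd() below is a
 * concrete example): i40iw_get_cqp_request() hands out a pooled or,
 * under memory pressure, dynamically allocated request; the caller
 * fills cqp_request->info and posts it with i40iw_handle_cqp_op().
 * Waiting requests start with a refcount of 2 (one reference for the
 * waiter, one for the completion path) and each side drops its
 * reference via i40iw_put_cqp_request().
 */
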
/**
 * i40iw_free_cqp_request - free cqp request
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
	unsigned long flags;

	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		cqp_request->request_done = false;
		cqp_request->callback_fcn = NULL;
		cqp_request->waiting = false;

		spin_lock_irqsave(&cqp->req_lock, flags);
		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
		spin_unlock_irqrestore(&cqp->req_lock, flags);
	}
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
			   struct i40iw_cqp_request *cqp_request)
{
	if (atomic_dec_and_test(&cqp_request->refcount))
		i40iw_free_cqp_request(cqp, cqp_request);
}

/**
 * i40iw_free_pending_cqp_request - free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
					   struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);

	if (cqp_request->waiting) {
		cqp_request->compl_info.error = true;
		cqp_request->request_done = true;
		wake_up(&cqp_request->waitq);
	}
	i40iw_put_cqp_request(cqp, cqp_request);
	wait_event_timeout(iwdev->close_wq,
			   !atomic_read(&cqp_request->refcount),
			   1000);
}

/**
 * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
 * @iwdev: iwarp device
 */
void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request = NULL;
	struct cqp_commands_info *pcmdinfo = NULL;
	u32 i, pending_work, wqe_idx;

	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
	for (i = 0; i < pending_work; i++) {
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
	}

	while (!list_empty(&dev->cqp_cmd_head)) {
		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
	}
}

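/*
 * Pending work lives in two places: scratch entries still on the
 * hardware CQP SQ ring and commands backlogged on the software
 * cqp_cmd_head list; i40iw_cleanup_pending_cqp_op() drains both so no
 * waiter is left sleeping once the device is torn down.
 */
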
/**
 * i40iw_free_qp - callback after destroy cqp completes
 * @cqp_request: cqp request for destroy qp
 * @num: not used
 */
static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
{
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
	struct i40iw_device *iwdev;
	u32 qp_num = iwqp->ibqp.qp_num;

	iwdev = iwqp->iwdev;

	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_wait_event - wait for completion
 * @iwdev: iwarp device
 * @cqp_request: cqp request to wait
 */
static int i40iw_wait_event(struct i40iw_device *iwdev,
			    struct i40iw_cqp_request *cqp_request)
{
	struct cqp_commands_info *info = &cqp_request->info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_timeout cqp_timeout;
	bool cqp_error = false;
	int err_code = 0;

	memset(&cqp_timeout, 0, sizeof(cqp_timeout));
	cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];
	do {
		if (wait_event_timeout(cqp_request->waitq,
				       cqp_request->request_done, CQP_COMPL_WAIT_TIME))
			break;

		/* only declare a timeout once the CQP has stopped making progress */
		i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);

		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
			continue;

		i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd);
		err_code = -ETIME;
		if (!iwdev->reset) {
			iwdev->reset = true;
			i40iw_request_reset(iwdev);
		}
		goto done;
	} while (1);
	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
			     info->cqp_cmd, cqp_request->compl_info.maj_err_code,
			     cqp_request->compl_info.min_err_code);
		err_code = -EPROTO;
		goto done;
	}
done:
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return err_code;
}

/**
 * i40iw_handle_cqp_op - process cqp command
 * @iwdev: iwarp device
 * @cqp_request: cqp request to process
 */
enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
					   struct i40iw_cqp_request
					   *cqp_request)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;
	struct cqp_commands_info *info = &cqp_request->info;
	int err_code = 0;

	if (iwdev->reset) {
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return I40IW_ERR_CQP_COMPL_ERROR;
	}

	status = i40iw_process_cqp_cmd(dev, info);
	if (status) {
		i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return status;
	}
	if (cqp_request->waiting)
		err_code = i40iw_wait_event(iwdev, cqp_request);
	if (err_code)
		status = I40IW_ERR_CQP_COMPL_ERROR;
	return status;
}

/**
 * i40iw_add_devusecount - add dev refcount
 * @iwdev: dev for refcount
 */
void i40iw_add_devusecount(struct i40iw_device *iwdev)
{
	atomic64_inc(&iwdev->use_count);
}

/**
 * i40iw_rem_devusecount - decrement refcount for dev
 * @iwdev: device
 */
void i40iw_rem_devusecount(struct i40iw_device *iwdev)
{
	if (!atomic64_dec_and_test(&iwdev->use_count))
		return;
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_add_pdusecount - add pd refcount
 * @iwpd: pd for refcount
 */
void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
{
	atomic_inc(&iwpd->usecount);
}

/**
 * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
 * @iwpd: pd for refcount
 * @iwdev: iwarp device
 */
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
{
	if (!atomic_dec_and_test(&iwpd->usecount))
		return;
	i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
	kfree(iwpd);
}

/**
 * i40iw_add_ref - add refcount for qp
 * @ibqp: iwarp qp
 */
void i40iw_add_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;

	atomic_inc(&iwqp->refcount);
}

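/*
 * Each i40iw_add_ref() must be balanced by an i40iw_rem_ref(); the
 * final reference drop (below) unlinks the qp from qp_table and posts
 * an asynchronous CQP destroy whose completion callback,
 * i40iw_free_qp(), releases the qp resources.
 */
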
/**
 * i40iw_rem_ref - rem refcount for qp and free if 0
 * @ibqp: iwarp qp
 */
void i40iw_rem_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev;
	u32 qp_num;
	unsigned long flags;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!atomic_dec_and_test(&iwqp->refcount)) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		return;
	}

	qp_num = iwqp->ibqp.qp_num;
	iwdev->qp_table[qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_request->callback_fcn = i40iw_free_qp;
	cqp_request->param = (void *)&iwqp->sc_qp;
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		return;

	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_get_qp - get qp address
 * @device: iwarp device
 * @qpn: qp number
 */
struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
{
	struct i40iw_device *iwdev = to_iwdev(device);

	if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
		return NULL;

	return &iwdev->qp_table[qpn]->ibqp;
}

/**
 * i40iw_debug_buf - print debug msg and buffer if mask is set
 * @dev: hardware control device structure
 * @mask: mask to compare if to print debug buffer
 * @desc: description text printed ahead of the buffer
 * @buf: points buffer addr
 * @size: size of buffer to print
 */
void i40iw_debug_buf(struct i40iw_sc_dev *dev,
		     enum i40iw_debug_flag mask,
		     char *desc,
		     u64 *buf,
		     u32 size)
{
	u32 i;

	if (!(dev->debug_mask & mask))
		return;
	i40iw_debug(dev, mask, "%s\n", desc);
	i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
		    (unsigned long long)virt_to_phys(buf));

	for (i = 0; i < size; i += 8)
		i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
}

/**
 * i40iw_get_hw_addr - return hw addr
 * @par: points to shared dev
 */
u8 __iomem *i40iw_get_hw_addr(void *par)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;

	return dev->hw->hw_addr;
}

/**
 * i40iw_remove_head - return head entry and remove from list
 * @list: list for entry
 */
void *i40iw_remove_head(struct list_head *list)
{
	struct list_head *entry;

	if (list_empty(list))
		return NULL;

	entry = (void *)list->next;
	list_del(entry);
	return (void *)entry;
}

/**
 * i40iw_allocate_dma_mem - Memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
					      struct i40iw_dma_mem *mem,
					      u64 size,
					      u32 alignment)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

	if (!mem)
		return I40IW_ERR_PARAM;
	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
				      (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (!mem->va)
		return I40IW_ERR_NO_MEMORY;
	return 0;
}

/**
 * i40iw_free_dma_mem - Memory free helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 */
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

	if (!mem || !mem->va)
		return;

	dma_free_coherent(&pcidev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	mem->va = NULL;
}

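/*
 * A minimal usage sketch for the two DMA helpers above (error handling
 * elided; the 256-byte alignment is illustrative only):
 *
 *	struct i40iw_dma_mem mem;
 *
 *	if (!i40iw_allocate_dma_mem(hw, &mem, size, 256)) {
 *		... use mem.va / mem.pa ...
 *		i40iw_free_dma_mem(hw, &mem);
 *	}
 */
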
/**
 * i40iw_allocate_virt_mem - virtual memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 */
enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
					       struct i40iw_virt_mem *mem,
					       u32 size)
{
	if (!mem)
		return I40IW_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40IW_ERR_NO_MEMORY;
}

/**
 * i40iw_free_virt_mem - virtual memory free helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 */
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
					   struct i40iw_virt_mem *mem)
{
	if (!mem)
		return I40IW_ERR_PARAM;
	/*
	 * mem->va points to the parent of mem, so both mem and mem->va
	 * can not be touched once mem->va is freed
	 */
	kfree(mem->va);
	return 0;
}

/**
 * i40iw_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 */
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
					 struct i40iw_update_sds_info *sdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
	       sizeof(cqp_info->in.u.update_pe_sds.info));
	cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
	cqp_info->post_sq = 1;
	cqp_info->in.u.update_pe_sds.dev = dev;
	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Update SD's fail");
	return status;
}

/**
 * i40iw_qp_suspend_resume - cqp command for suspend/resume
 * @dev: hardware control device structure
 * @qp: hardware control qp
 * @suspend: flag if suspend or resume
 */
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_cqp *cqp = dev->cqp;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
	cqp_info->in.u.suspend_resume.cqp = cqp;
	cqp_info->in.u.suspend_resume.qp = qp;
	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
}

/**
 * i40iw_term_modify_qp - modify qp for term message
 * @qp: hardware control qp
 * @next_state: qp's next state
 * @term: terminate code
 * @term_len: length
 */
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
}

/**
 * i40iw_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 */
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
{
	struct i40iw_qp *iwqp;
	u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
	u8 hte = 0;
	bool first_time;
	unsigned long flags;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->hte_added) {
		iwqp->hte_added = 0;
		hte = 1;
	}
	first_time = !(qp->term_flags & I40IW_TERM_DONE);
	qp->term_flags |= I40IW_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		if (!timeout_occurred)
			i40iw_terminate_del_timer(qp);
		else
			next_iwarp_state = I40IW_QP_STATE_CLOSING;

		i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
		i40iw_cm_disconn(iwqp);
	}
}

/**
 * i40iw_terminate_timeout - timeout happened
 * @t: timer_list pointer embedded in the iwarp qp
 */
static void i40iw_terminate_timeout(struct timer_list *t)
{
	struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

	i40iw_terminate_done(qp, 1);
	i40iw_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_add_ref(&iwqp->ibqp);
	timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
	iwqp->terminate_timer.expires = jiffies + HZ;
	add_timer(&iwqp->terminate_timer);
}

/**
 * i40iw_terminate_del_timer - delete terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	if (del_timer(&iwqp->terminate_timer))
		i40iw_rem_ref(&iwqp->ibqp);
}

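/*
 * The terminate timer pins the qp with an extra reference while armed:
 * i40iw_terminate_start_timer() takes the reference before add_timer(),
 * and it is dropped exactly once, either by the expiry handler or, when
 * del_timer() reports the timer was still pending, by
 * i40iw_terminate_del_timer().
 */
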
/**
 * i40iw_cqp_generic_worker - generic worker for cqp
 * @work: work pointer
 */
static void i40iw_cqp_generic_worker(struct work_struct *work)
{
	struct i40iw_virtchnl_work_info *work_info =
	    &((struct virtchnl_work *)work)->work_info;

	if (work_info->worker_vf_dev)
		work_info->callback_fcn(work_info->worker_vf_dev);
}

/**
 * i40iw_cqp_spawn_worker - spawn worker thread
 * @dev: device struct pointer
 * @work_info: work request info
 * @iw_vf_idx: virtual function index
 */
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
			    struct i40iw_virtchnl_work_info *work_info,
			    u32 iw_vf_idx)
{
	struct virtchnl_work *work;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	work = &iwdev->virtchnl_w[iw_vf_idx];
	memcpy(&work->work_info, work_info, sizeof(*work_info));
	INIT_WORK(&work->work, i40iw_cqp_generic_worker);
	queue_work(iwdev->virtchnl_wq, &work->work);
}

/**
 * i40iw_cqp_manage_hmc_fcn_worker - worker to process hmc info
 * @work: work pointer for hmc info
 */
static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
{
	struct i40iw_cqp_request *cqp_request =
	    ((struct virtchnl_work *)work)->cqp_request;
	struct i40iw_ccq_cqe_info ccq_cqe_info;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;

	ccq_cqe_info.cqp = NULL;
	ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
	ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
	ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
	ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
	ccq_cqe_info.scratch = 0;
	ccq_cqe_info.error = cqp_request->compl_info.error;
	hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
				 hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
 * @cqp_request: cqp request info struct for hmc fun
 * @unused: unused param of callback
 */
static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
					      u32 unused)
{
	struct virtchnl_work *work;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->
	    back_dev;

	if (hmcfcninfo && hmcfcninfo->callback_fcn) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
		atomic_inc(&cqp_request->refcount);
		work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
		work->cqp_request = cqp_request;
		INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
		queue_work(iwdev->virtchnl_wq, &work->work);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
	} else {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
	}
}

/**
 * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
 * @dev: hardware control device structure
 * @hmcfcninfo: info for hmc
 */
enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
						    struct i40iw_hmc_fcn_info *hmcfcninfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
	cqp_request->param = hmcfcninfo;
	memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
	       sizeof(*hmcfcninfo));
	cqp_info->in.u.manage_hmc_pm.dev = dev;
	cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage HMC fail");
	return status;
}

/**
 * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
 * @dev: hardware control device structure
 * @values_mem: buffer for fpm
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
						      struct i40iw_dma_mem *values_mem,
						      u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Query FPM fail");
	return status;
}

/**
 * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
 * @dev: hardware control device structure
 * @values_mem: buffer with fpm values
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
						       struct i40iw_dma_mem *values_mem,
						       u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Commit FPM fail");
	return status;
}

/**
 * i40iw_vf_wait_vchnl_resp - wait for channel msg
 * @dev: function's device struct
 */
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = dev->back_dev;
	int timeout_ret;

	i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
		    __func__, __LINE__, dev, iwdev);

	atomic_set(&iwdev->vchnl_msgs, 2);
	timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
					 (atomic_read(&iwdev->vchnl_msgs) == 1),
					 I40IW_VCHNL_EVENT_TIMEOUT);
	atomic_dec(&iwdev->vchnl_msgs);
	if (!timeout_ret) {
		i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
		atomic_set(&iwdev->vchnl_msgs, 0);
		dev->vchnl_up = false;
		return I40IW_ERR_TIMEOUT;
	}
	wake_up(&dev->vf_reqs);
	return 0;
}

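/*
 * The handshake above counts on vchnl_msgs: it is set to 2 before the
 * wait, the virtual channel receive path is expected to decrement it to
 * 1 (satisfying wait_event_timeout()), and the waiter's atomic_dec()
 * returns it to 0 for the next exchange.
 */
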
/**
 * i40iw_cqp_cq_create_cmd - create a cq for the cqp
 * @dev: device pointer
 * @cq: pointer to created cq
 */
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Create CQ fail");

	return status;
}

/**
 * i40iw_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 */
enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_create_qp_info *qp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP create fail");
	return status;
}

/**
 * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 */
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_cq_wq_destroy(iwdev, cq);
}

/**
 * i40iw_cqp_qp_destroy_cmd - destroy the cqp
 * @dev: device pointer
 * @qp: pointer to qp
 */
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));

	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP QP_DESTROY fail");
}

/**
 * i40iw_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 */
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_qp_flush_info info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
	memset(&info, 0, sizeof(info));
	info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
	info.generate_ae = true;
	info.ae_source = 0x3;
	(void)i40iw_hw_flush_wqes(iwdev, qp, &info, false);
}

/**
 * i40iw_init_hash_desc - initialize hash for crc calculation
 * @desc: double pointer filled with the allocated crc32c shash descriptor
 */
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
{
	struct crypto_shash *tfm;
	struct shash_desc *tdesc;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return I40IW_ERR_MPA_CRC;

	tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!tdesc) {
		crypto_free_shash(tfm);
		return I40IW_ERR_MPA_CRC;
	}
	tdesc->tfm = tfm;
	*desc = tdesc;

	return 0;
}

/**
 * i40iw_free_hash_desc - free hash desc
 * @desc: to be freed
 */
void i40iw_free_hash_desc(struct shash_desc *desc)
{
	if (desc) {
		crypto_free_shash(desc->tfm);
		kfree(desc);
	}
}

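/*
 * The crc32c descriptor allocated by i40iw_init_hash_desc() above is
 * consumed by i40iw_ieq_check_mpacrc() (below) and released with
 * i40iw_free_hash_desc(); a minimal sketch, error handling elided:
 *
 *	struct shash_desc *desc;
 *
 *	if (!i40iw_init_hash_desc(&desc)) {
 *		... i40iw_ieq_check_mpacrc(desc, addr, len, crc) ...
 *		i40iw_free_hash_desc(desc);
 *	}
 */
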
/**
 * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
 * @dev: hardware control device structure
 * @mem: buffer ptr for fpm to be allocated
 * @return: memory allocation status
 */
enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
						 struct i40iw_dma_mem *mem)
{
	enum i40iw_status_code status;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	return status;
}

/**
 * i40iw_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: desc for hash
 * @addr: address of buffer for crc
 * @length: length of buffer
 * @value: value to be compared
 */
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
					      void *addr,
					      u32 length,
					      u32 value)
{
	u32 crc = 0;
	int ret;
	enum i40iw_status_code ret_code = 0;

	crypto_shash_init(desc);
	ret = crypto_shash_update(desc, addr, length);
	if (!ret)
		crypto_shash_final(desc, (u8 *)&crc);
	if (crc != value) {
		i40iw_pr_err("mpa crc check fail\n");
		ret_code = I40IW_ERR_MPA_CRC;
	}
	return ret_code;
}

/**
 * i40iw_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 */
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
				     struct i40iw_puda_buf *buf)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_node *cm_node;
	u32 loc_addr[4], rem_addr[4];
	u16 loc_port, rem_port;
	struct ipv6hdr *ip6h;
	struct iphdr *iph = (struct iphdr *)buf->iph;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	if (iph->version == 4) {
		memset(loc_addr, 0, sizeof(loc_addr));
		loc_addr[0] = ntohl(iph->daddr);
		memset(rem_addr, 0, sizeof(rem_addr));
		rem_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
	}
	loc_port = ntohs(tcph->dest);
	rem_port = ntohs(tcph->source);

	cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
				  loc_addr, false);
	if (!cm_node)
		return NULL;
	iwqp = cm_node->iwqp;
	return &iwqp->sc_qp;
}

/**
 * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @length: length of buffer
 * @seqnum: seq number for tcp
 */
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	u16 iphlen;
	u16 packetsize;
	u8 *addr = (u8 *)buf->mem.va;

	iphlen = (buf->ipv4) ? 20 : 40;
	iph = (struct iphdr *)(addr + buf->maclen);
	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
	packetsize = length + buf->tcphlen + iphlen;

	iph->tot_len = htons(packetsize);
	tcph->seq = htonl(seqnum);
}

/**
 * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information
 * @buf: puda buffer
 */
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
						 struct i40iw_puda_buf *buf)
{
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	u16 iphlen;
	u16 pkt_len;
	u8 *mem = (u8 *)buf->mem.va;
	struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;

	if (ethh->h_proto == htons(0x8100)) {	/* 802.1Q VLAN tagged frame */
		info->vlan_valid = true;
		buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
	}
	buf->maclen = (info->vlan_valid) ? 18 : 14;
	iphlen = (info->l3proto) ? 40 : 20;
	buf->ipv4 = (info->l3proto) ? false : true;
	buf->iph = mem + buf->maclen;
	iph = (struct iphdr *)buf->iph;

	buf->tcph = buf->iph + iphlen;
	tcph = (struct tcphdr *)buf->tcph;

	if (buf->ipv4) {
		pkt_len = ntohs(iph->tot_len);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		pkt_len = ntohs(ip6h->payload_len) + iphlen;
	}

	buf->totallen = pkt_len + buf->maclen;

	if (info->payload_len < buf->totallen) {
		i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
			     info->payload_len, buf->totallen);
		return I40IW_ERR_INVALID_SIZE;
	}

	buf->tcphlen = (tcph->doff) << 2;
	buf->datalen = pkt_len - iphlen - buf->tcphlen;
	buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);
	return 0;
}

/**
 * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @t: timer_list pointer embedded in the vsi pestat structure
 */
static void i40iw_hw_stats_timeout(struct timer_list *t)
{
	struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
							 stats_timer);
	struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
	struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
	struct i40iw_vsi_pestat *vf_devstat = NULL;
	u16 iw_vf_idx;
	unsigned long flags;

	/* PF */
	i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);

	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
		spin_lock_irqsave(&pf_devstat->lock, flags);
		if (pf_dev->vf_dev[iw_vf_idx]) {
			if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
				i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
			}
		}
		spin_unlock_irqrestore(&pf_devstat->lock, flags);
	}

	mod_timer(&pf_devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_start_timer - Start periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
	mod_timer(&devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_stop_timer - Delete periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	del_timer_sync(&devstat->stats_timer);
}
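
/*
 * Note: i40iw_hw_stats_stop_timer() uses del_timer_sync(), so the stats
 * timer must be stopped before the pestat structure embedding it can be
 * freed.
 */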