/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

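/* Claim the first free MSI-X index from the ULD interrupt bitmap.
 * Returns the index on success or -ENOSPC if all vectors are in use.
 */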
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

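/* Return a previously claimed MSI-X index to the ULD interrupt bitmap. */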
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD; this function just maintains statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

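/* Allocate the ULD ingress queues: rxq_info->nrxq offload rx queues followed
 * by rxq_info->nciq concentrator queues, spread evenly across the ports.
 * With MSI-X, each queue claims a vector index from the ULD bitmap; on
 * failure, all queues allocated so far are freed again.
 */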
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	struct sge *s = &adap->sge;
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	unsigned int bmap_idx = 0;
	unsigned int per_chan;
	int i, err, msi_idx, que_idx = 0;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}

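/* Allocate the MSI-X index table and the rx queues for a ULD type. Note
 * that alloc_uld_rxqs()'s return value is collapsed to 0/1 here, so callers
 * learn only whether allocation failed, not the original error code. For
 * RDMA, the firmware is additionally told to route control queue
 * completions to the rdma response queues.
 */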
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & CXGB4_USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

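/* Free the first n response queues (and their free lists) in the array q. */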
static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

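/* Undo setup_sge_queues_uld(): for RDMA, first detach the control queue
 * completions from the rdma response queues, then free the concentrator
 * and rx queues and, under MSI-X, the index table.
 */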
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & CXGB4_USING_MSIX)
		kfree(rxq_info->msix_tbl);
}

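/* Size and allocate the per-ULD rx queue bookkeeping. The rx and
 * concentrator queue counts are derived from the ULD's request, the number
 * of online CPUs and the per-ULD quota, rounded to a multiple of the port
 * count so the queues spread evenly across ports.
 */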
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

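/* Release the bookkeeping allocated by cfg_queues_uld(). */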
static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

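/* Request an MSI-X interrupt for every ULD rx queue, unwinding any
 * already-requested vectors (and their bitmap entries) on failure.
 */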
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

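/* Free the MSI-X interrupts of all ULD rx queues and return their
 * indices to the bitmap.
 */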
static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

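/* Build the per-vector interrupt names ("%s-%s%d" from the first port's
 * name, the ULD name and the queue index); these are the desc strings
 * later passed to request_irq().
 */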
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

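/* Arm a response queue: enable NAPI if the queue has a handler and write a
 * zero-increment GTS update to start its interrupt timer.
 */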
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

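/* Tear down all ULD tx queues: kill the restart tasklets, free the hardware
 * egress queues, the descriptors and any skbs still queued in software.
 */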
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

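/* Create the tx queue set for a ULD type. Offload tx queues are shared and
 * reference counted; crypto gets its own set sized from the adapter's
 * ncrypto_fc resource (rounded down to the port count), while all other
 * types are sized from the ULD's request (rounded up).
 */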
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}
	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

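/* Fill in the queue-related fields of the lld_info handed to a ULD. */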
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}

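/* Allocate the adapter-wide ULD state: the uld ops array and the per-type
 * rx/tx queue info pointer tables. Freed again by t4_uld_mem_free().
 */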
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

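/* Populate the adapter-wide (queue-independent) part of the lld_info that
 * is passed to a ULD's add() callback.
 */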
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

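/* Hand a new adapter to a registered ULD via its add() callback and, if the
 * adapter is already fully initialized, signal CXGB4_STATE_UP.
 */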
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type. Since the
 * function returns void, per-adapter registration failures are reported
 * with a warning rather than an error code.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & CXGB4_USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & CXGB4_FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add)
			goto free_irq;
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		uld_attach(adap, type);
		continue;
free_irq:
		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
free_rxq:
		free_sge_queues_uld(adap, type);
free_queues:
		free_queues_uld(adap, type);
out:
		dev_warn(adap->pdev_dev,
			 "ULD registration failed for uld type %d\n", type);
	}
	mutex_unlock(&uld_mutex);
	return;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);