net: hns3: add querying speed and duplex support to VF
[linux-2.6-block.git] drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME "hclgevf"

static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
        struct hnae3_handle *handle)
{
        return container_of(handle, struct hclgevf_dev, nic);
}

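/* Poll the per-TQP packet counters from firmware, one query descriptor
 * for RX and one for TX per queue, and accumulate the results into the
 * shadow tqp_stats used by the ethtool statistics callbacks below.
 */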
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_queue *queue;
        struct hclgevf_desc desc;
        struct hclgevf_tqp *tqp;
        int status;
        int i;

        for (i = 0; i < hdev->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclgevf_tqp, q);
                hclgevf_cmd_setup_basic_desc(&desc,
                                             HCLGEVF_OPC_QUERY_RX_STATUS,
                                             true);

                desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                status, i);
                        return status;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc.data[1]);

                hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
                                             true);

                desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                status, i);
                        return status;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc.data[1]);
        }

        return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }
        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < hdev->num_tqps; i++) {
                struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
                                                       struct hclgevf_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
                         tqp->index);
                buff += ETH_GSTRING_LEN;
        }

        for (i = 0; i < hdev->num_tqps; i++) {
                struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
                                                       struct hclgevf_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
                         tqp->index);
                buff += ETH_GSTRING_LEN;
        }

        return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
                                 struct net_device_stats *net_stats)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int status;

        status = hclgevf_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF update of TQPS stats fail, status = %d.\n",
                        status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
        if (strset == ETH_SS_TEST)
                return -EOPNOTSUPP;
        else if (strset == ETH_SS_STATS)
                return hclgevf_tqps_get_sset_count(handle, strset);

        return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
                                u8 *data)
{
        u8 *p = (u8 *)data;

        if (strset == ETH_SS_STATS)
                p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
        hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
        u8 resp_msg;
        int status;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
                                      true, &resp_msg, sizeof(u8));
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get TC info from PF failed %d\n",
                        status);
                return status;
        }

        hdev->hw_tc_map = resp_msg;

        return 0;
}

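/* Ask the PF for this VF's queue configuration. The 8-byte mailbox
 * response packs four u16 fields (num_tqps, rss_size_max, num_desc and
 * rx_buf_len), copied out field by field below.
 */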
static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 8
        u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
        int status;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
                                      true, resp_msg,
                                      HCLGEVF_TQPS_RSS_INFO_LEN);
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get tqp info from PF failed %d\n",
                        status);
                return status;
        }

        memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
        memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
        memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
        memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

        return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
        struct hclgevf_tqp *tqp;
        int i;

        hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                                  sizeof(struct hclgevf_tqp), GFP_KERNEL);
        if (!hdev->htqp)
                return -ENOMEM;

        tqp = hdev->htqp;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp->dev = &hdev->pdev->dev;
                tqp->index = i;

                tqp->q.ae_algo = &ae_algovf;
                tqp->q.buf_size = hdev->rx_buf_len;
                tqp->q.desc_num = hdev->num_desc;
                tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
                        i * HCLGEVF_TQP_REG_SIZE;

                tqp++;
        }

        return 0;
}

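/* Derive the kNIC private info from the PF-provided configuration:
 * count the TCs set in hw_tc_map, size the RSS groups so every TC gets
 * the same number of queues, and point kinfo->tqp[] at the TQPs
 * allocated in hclgevf_alloc_tqps().
 */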
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        struct hnae3_knic_private_info *kinfo;
        u16 new_tqps = hdev->num_tqps;
        int i;

        kinfo = &nic->kinfo;
        kinfo->num_tc = 0;
        kinfo->num_desc = hdev->num_desc;
        kinfo->rx_buf_len = hdev->rx_buf_len;
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
                if (hdev->hw_tc_map & BIT(i))
                        kinfo->num_tc++;

        kinfo->rss_size
                = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
        new_tqps = kinfo->rss_size * kinfo->num_tc;
        kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
                                  sizeof(struct hnae3_queue *), GFP_KERNEL);
        if (!kinfo->tqp)
                return -ENOMEM;

        for (i = 0; i < kinfo->num_tqps; i++) {
                hdev->htqp[i].q.handle = &hdev->nic;
                hdev->htqp[i].q.tqp_index = i;
                kinfo->tqp[i] = &hdev->htqp[i].q;
        }

        return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
        int status;
        u8 resp_msg;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
                                      0, false, &resp_msg, sizeof(u8));
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF failed to fetch link status(%d) from PF\n", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
        struct hnae3_handle *handle = &hdev->nic;
        struct hnae3_client *client;

        client = handle->client;

        if (link_state != hdev->hw.mac.link) {
                client->ops->link_status_change(handle, !!link_state);
                hdev->hw.mac.link = link_state;
        }
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        int ret;

        nic->ae_algo = &ae_algovf;
        nic->pdev = hdev->pdev;
        nic->numa_node_mask = hdev->numa_node_mask;
        nic->flags |= HNAE3_SUPPORT_VF;

        if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
                dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
                        hdev->ae_dev->dev_type);
                return -EINVAL;
        }

        ret = hclgevf_knic_setup(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
                        ret);
        return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
        hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
        hdev->num_msi_left += 1;
        hdev->num_msi_used -= 1;
}

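/* Hand out up to vector_num free MSI/MSI-X vectors to the caller. The
 * scan starts past the misc vector (vector 0); a vector_status[] entry
 * of HCLGEVF_INVALID_VPORT marks a free slot. Returns how many vectors
 * were actually allocated, which may be fewer than requested.
 */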
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
                              struct hnae3_vector_info *vector_info)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_vector_info *vector = vector_info;
        int alloc = 0;
        int i, j;

        vector_num = min(hdev->num_msi_left, vector_num);

        for (j = 0; j < vector_num; j++) {
                for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
                        if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
                                vector->vector = pci_irq_vector(hdev->pdev, i);
                                vector->io_addr = hdev->hw.io_base +
                                        HCLGEVF_VECTOR_REG_BASE +
                                        (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
                                hdev->vector_status[i] = 0;
                                hdev->vector_irq[i] = vector->vector;

                                vector++;
                                alloc++;

                                break;
                        }
                }
        }
        hdev->num_msi_left -= alloc;
        hdev->num_msi_used += alloc;

        return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
        int i;

        for (i = 0; i < hdev->num_msi; i++)
                if (vector == hdev->vector_irq[i])
                        return i;

        return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
        return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
        return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
        const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
        struct hclgevf_rss_indirection_table_cmd *req;
        struct hclgevf_desc desc;
        int status;
        int i, j;

        req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

        for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
                hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
                                             false);
                req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
                req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
                for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
                        req->rss_result[j] =
                                indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "VF failed(=%d) to set RSS indirection table\n",
                                status);
                        return status;
                }
        }

        return 0;
}

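/* Program the per-TC RSS mode: each TC gets a validity bit, a queue
 * offset, and a size expressed as log2 of the rounded-up rss_size, all
 * packed into one command descriptor.
 */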
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
        struct hclgevf_rss_tc_mode_cmd *req;
        u16 tc_offset[HCLGEVF_MAX_TC_NUM];
        u16 tc_valid[HCLGEVF_MAX_TC_NUM];
        u16 tc_size[HCLGEVF_MAX_TC_NUM];
        struct hclgevf_desc desc;
        u16 roundup_size;
        int status;
        int i;

        req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

        roundup_size = roundup_pow_of_two(rss_size);
        roundup_size = ilog2(roundup_size);

        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
                tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
                tc_size[i] = roundup_size;
                tc_offset[i] = rss_size * i;
        }

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
                hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
                             (tc_valid[i] & 0x1));
                hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
                               HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
                hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
                               HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
        }
        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF failed(=%d) to set rss tc mode\n", status);

        return status;
}

static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
                                  u8 *key)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_config_cmd *req;
        int lkup_times = key ? 3 : 1;
        struct hclgevf_desc desc;
        int key_offset;
        int key_size;
        int status;

        req = (struct hclgevf_rss_config_cmd *)desc.data;
        lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

        for (key_offset = 0; key_offset < lkup_times; key_offset++) {
                hclgevf_cmd_setup_basic_desc(&desc,
                                             HCLGEVF_OPC_RSS_GENERIC_CONFIG,
                                             true);
                req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "failed to get hardware RSS cfg, status = %d\n",
                                status);
                        return status;
                }

                if (key_offset == 2)
                        key_size =
                        HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
                else
                        key_size = HCLGEVF_RSS_HASH_KEY_NUM;

                if (key)
                        memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
                               req->hash_key,
                               key_size);
        }

        if (hash) {
                if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
                        *hash = ETH_RSS_HASH_TOP;
                else
                        *hash = ETH_RSS_HASH_UNKNOWN;
        }

        return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
                           u8 *hfunc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int i;

        if (indir)
                for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                        indir[i] = rss_cfg->rss_indirection_tbl[i];

        return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
                           const u8 *key, const u8 hfunc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int i;

        /* update the shadow RSS table with user specified qids */
        for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                rss_cfg->rss_indirection_tbl[i] = indir[i];

        /* update the hardware */
        return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

        return rss_cfg->rss_size;
}

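/* (Un)map a chain of rings onto an interrupt vector by sending
 * VF-to-PF mailbox messages. Ring entries are packed into req->msg[]
 * in groups of HCLGE_MBX_RING_NODE_VARIABLE_NUM fields; the message is
 * flushed to the PF whenever it fills up or the chain ends.
 */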
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
                                       int vector,
                                       struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_ring_chain_node *node;
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
        int i = 0, vector_id;
        int status;
        u8 type;

        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "Get vector index fail. ret = %d\n", vector_id);
                return vector_id;
        }

        for (node = ring_chain; node; node = node->next) {
                int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
                                HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

                if (i == 0) {
                        hclgevf_cmd_setup_basic_desc(&desc,
                                                     HCLGEVF_OPC_MBX_VF_TO_PF,
                                                     false);
                        type = en ?
                                HCLGE_MBX_MAP_RING_TO_VECTOR :
                                HCLGE_MBX_UNMAP_RING_TO_VECTOR;
                        req->msg[0] = type;
                        req->msg[1] = vector_id;
                }

                req->msg[idx_offset] =
                                hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
                req->msg[idx_offset + 1] = node->tqp_index;
                req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
                                                          HNAE3_RING_GL_IDX_M,
                                                          HNAE3_RING_GL_IDX_S);

                i++;
                if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
                    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
                    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
                    !node->next) {
                        req->msg[2] = i;

                        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                        if (status) {
                                dev_err(&hdev->pdev->dev,
                                        "Map TQP fail, status is %d.\n",
                                        status);
                                return status;
                        }
                        i = 0;
                        hclgevf_cmd_setup_basic_desc(&desc,
                                                     HCLGEVF_OPC_MBX_VF_TO_PF,
                                                     false);
                        req->msg[0] = type;
                        req->msg[1] = vector_id;
                }
        }

        return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
                                      struct hnae3_ring_chain_node *ring_chain)
{
        return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
                                          int vector,
                                          struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int ret, vector_id;

        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "Get vector index fail. ret = %d\n", vector_id);
                return vector_id;
        }

        ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
        if (ret)
                dev_err(&handle->pdev->dev,
                        "Unmap ring from vector fail. vector=%d, ret = %d\n",
                        vector_id,
                        ret);

        return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        hclgevf_free_vector(hdev, vector);

        return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
{
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
        int status;

        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
        req->msg[1] = en;

        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Set promisc mode fail, status is %d.\n", status);

        return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        hclgevf_cmd_set_promisc_mode(hdev, en);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
                              int stream_id, bool enable)
{
        struct hclgevf_cfg_com_tqp_queue_cmd *req;
        struct hclgevf_desc desc;
        int status;

        req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
                                     false);
        req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
        req->stream_id = cpu_to_le16(stream_id);
        req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "TQP enable fail, status = %d.\n", status);

        return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
        struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

        return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_queue *queue;
        struct hclgevf_tqp *tqp;
        int i;

        for (i = 0; i < hdev->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclgevf_tqp, q);
                memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
        }
}

static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg[2] = {0};

        msg[0] = en;
        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
                                    msg, 1, false, NULL, 0);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
                                bool is_first)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
        u8 *new_mac_addr = (u8 *)p;
        u8 msg_data[ETH_ALEN * 2];
        u16 subcode;
        int status;

        ether_addr_copy(msg_data, new_mac_addr);
        ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

        subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
                        HCLGE_MBX_MAC_VLAN_UC_MODIFY;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                      subcode, msg_data, ETH_ALEN * 2,
                                      true, NULL, 0);
        if (!status)
                ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

        return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
                               const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                    HCLGE_MBX_MAC_VLAN_UC_ADD,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
                               const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_ADD,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
                                   __be16 proto, u16 vlan_id,
                                   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

        if (vlan_id > 4095)
                return -EINVAL;

        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        msg_data[0] = is_kill;
        memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
        memcpy(&msg_data[3], &proto, sizeof(proto));
        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
                                    HCLGE_MBX_VLAN_FILTER, msg_data,
                                    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

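/* Reset a single TQP: the queue is disabled locally first so no new
 * traffic hits it, then the PF is asked over the mailbox to perform
 * the actual queue reset.
 */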
static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[2];
        int ret;

        memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

        /* disable vf queue before send queue reset msg to PF */
        ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
        if (ret)
                return;

        hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
                             2, true, NULL, 0);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
        struct hclgevf_misc_vector *vector = &hdev->misc_vector;

        vector->vector_irq = pci_irq_vector(hdev->pdev,
                                            HCLGEVF_MISC_VECTOR_NUM);
        vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
        /* vector status always valid for Vector 0 */
        hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
        hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

        hdev->num_msi_left -= 1;
        hdev->num_msi_used += 1;
}

static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
        if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
                schedule_work(&hdev->mbx_service_task);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
        if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
            !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
                schedule_work(&hdev->service_task);
}

static void hclgevf_service_timer(struct timer_list *t)
{
        struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

        mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

        hclgevf_task_schedule(hdev);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev;

        hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

        if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
                return;

        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

        hclgevf_mbx_handler(hdev);

        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev;

        hdev = container_of(work, struct hclgevf_dev, service_task);

        /* request the link status from the PF. PF would be able to tell VF
         * about such updates in future so we might remove this later
         */
        hclgevf_request_link_info(hdev);

        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
        hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

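/* Decode the vector-0 interrupt source register. Only the mailbox
 * (CMDQ RX) event is recognized; *clearval gets the bits the handler
 * must write back to acknowledge the event.
 */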
static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
        u32 cmdq_src_reg;

        /* fetch the events from their corresponding regs */
        cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
                                        HCLGEVF_VECTOR0_CMDQ_SRC_REG);

        /* check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
                cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
                *clearval = cmdq_src_reg;
                return true;
        }

        dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

        return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
        writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
        struct hclgevf_dev *hdev = data;
        u32 clearval;

        hclgevf_enable_vector(&hdev->misc_vector, false);
        if (!hclgevf_check_event_cause(hdev, &clearval))
                goto skip_sched;

        /* schedule the VF mailbox service task, if not already scheduled */
        hclgevf_mbx_task_schedule(hdev);

        hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
        hclgevf_enable_vector(&hdev->misc_vector, true);

        return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
        int ret;

        /* get queue configuration from PF */
        ret = hclge_get_queue_info(hdev);
        if (ret)
                return ret;
        /* get tc configuration from PF */
        return hclgevf_get_tc_info(hdev);
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *roce = &hdev->roce;
        struct hnae3_handle *nic = &hdev->nic;

        roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

        if (hdev->num_msi_left < roce->rinfo.num_vectors ||
            hdev->num_msi_left == 0)
                return -EINVAL;

        roce->rinfo.base_vector =
                hdev->vector_status[hdev->num_msi_used];

        roce->rinfo.netdev = nic->kinfo.netdev;
        roce->rinfo.roce_io_base = hdev->hw.io_base;

        roce->pdev = nic->pdev;
        roce->ae_algo = nic->ae_algo;
        roce->numa_node_mask = nic->numa_node_mask;

        return 0;
}

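/* Seed the RSS state: spread the indirection table round-robin across
 * rss_size_max queues, then push the table and the per-TC mode to
 * hardware.
 */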
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int i, ret;

        rss_cfg->rss_size = hdev->rss_size_max;

        /* Initialize RSS indirect table for each vport */
        for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

        ret = hclgevf_set_rss_indir_table(hdev);
        if (ret)
                return ret;

        return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
        /* other vlan config(like, VLAN TX/RX offload) would also be added
         * here later
         */
        return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
                                       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int i, queue_id;

        for (i = 0; i < handle->kinfo.num_tqps; i++) {
                /* ring enable */
                queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
                if (queue_id < 0) {
                        dev_warn(&hdev->pdev->dev,
                                 "Get invalid queue id, ignore it\n");
                        continue;
                }

                hclgevf_tqp_enable(hdev, queue_id, 0, true);
        }

        /* reset tqp stats */
        hclgevf_reset_tqp_stats(handle);

        hclgevf_request_link_info(hdev);

        clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
        mod_timer(&hdev->service_timer, jiffies + HZ);

        return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int i, queue_id;

        for (i = 0; i < hdev->num_tqps; i++) {
                /* Ring disable */
                queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
                if (queue_id < 0) {
                        dev_warn(&hdev->pdev->dev,
                                 "Get invalid queue id, ignore it\n");
                        continue;
                }

                hclgevf_tqp_enable(hdev, queue_id, 0, false);
        }

        /* reset tqp stats */
        hclgevf_reset_tqp_stats(handle);
        del_timer_sync(&hdev->service_timer);
        cancel_work_sync(&hdev->service_task);
        hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
        /* setup tasks for the MBX */
        INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

        /* setup tasks for service timer */
        timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

        INIT_WORK(&hdev->service_task, hclgevf_service_task);
        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

        mutex_init(&hdev->mbx_resp.mbx_mutex);

        /* bring the device down */
        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

        if (hdev->service_timer.function)
                del_timer_sync(&hdev->service_timer);
        if (hdev->service_task.func)
                cancel_work_sync(&hdev->service_task);
        if (hdev->mbx_service_task.func)
                cancel_work_sync(&hdev->mbx_service_task);

        mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        int vectors;
        int i;

        hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

        vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
                                        PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (vectors < 0) {
                dev_err(&pdev->dev,
                        "failed(%d) to allocate MSI/MSI-X vectors\n",
                        vectors);
                return vectors;
        }
        if (vectors < hdev->num_msi)
                dev_warn(&hdev->pdev->dev,
                         "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
                         hdev->num_msi, vectors);

        hdev->num_msi = vectors;
        hdev->num_msi_left = vectors;
        hdev->base_msi_vector = pdev->irq;

        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
                                           sizeof(u16), GFP_KERNEL);
        if (!hdev->vector_status) {
                pci_free_irq_vectors(pdev);
                return -ENOMEM;
        }

        for (i = 0; i < hdev->num_msi; i++)
                hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

        hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
                                        sizeof(int), GFP_KERNEL);
        if (!hdev->vector_irq) {
                pci_free_irq_vectors(pdev);
                return -ENOMEM;
        }

        return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;

        pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
        int ret = 0;

        hclgevf_get_misc_vector(hdev);

        ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
                          0, "hclgevf_cmd", hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
                        hdev->misc_vector.vector_irq);
                return ret;
        }

        /* enable misc. vector(vector 0) */
        hclgevf_enable_vector(&hdev->misc_vector, true);

        return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
        /* disable misc vector(vector 0) */
        hclgevf_enable_vector(&hdev->misc_vector, false);
        free_irq(hdev->misc_vector.vector_irq, hdev);
        hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_instance(struct hclgevf_dev *hdev,
                                 struct hnae3_client *client)
{
        int ret;

        switch (client->type) {
        case HNAE3_CLIENT_KNIC:
                hdev->nic_client = client;
                hdev->nic.client = client;

                ret = client->ops->init_instance(&hdev->nic);
                if (ret)
                        return ret;

                if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
                        struct hnae3_client *rc = hdev->roce_client;

                        ret = hclgevf_init_roce_base_info(hdev);
                        if (ret)
                                return ret;
                        ret = rc->ops->init_instance(&hdev->roce);
                        if (ret)
                                return ret;
                }
                break;
        case HNAE3_CLIENT_UNIC:
                hdev->nic_client = client;
                hdev->nic.client = client;

                ret = client->ops->init_instance(&hdev->nic);
                if (ret)
                        return ret;
                break;
        case HNAE3_CLIENT_ROCE:
                hdev->roce_client = client;
                hdev->roce.client = client;

                if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
                        ret = hclgevf_init_roce_base_info(hdev);
                        if (ret)
                                return ret;

                        ret = client->ops->init_instance(&hdev->roce);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
                                    struct hnae3_client *client)
{
        /* un-init roce, if it exists */
        if (hdev->roce_client)
                hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

        /* un-init nic/unic, if this was not called by roce client */
        if ((client->ops->uninit_instance) &&
            (client->type != HNAE3_CLIENT_ROCE))
                client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_register_client(struct hnae3_client *client,
                                   struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;

        return hclgevf_init_instance(hdev, client);
}

static void hclgevf_unregister_client(struct hnae3_client *client,
                                      struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;

        hclgevf_uninit_instance(hdev, client);
}

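/* Bring up PCI plumbing for the VF: enable the device, require a
 * 64-bit DMA mask, claim the regions and map BAR2 into hw->io_base,
 * the register space used throughout this file.
 */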
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        struct hclgevf_hw *hw;
        int ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable PCI device\n");
                goto err_no_drvdata;
        }

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
                goto err_disable_device;
        }

        ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
        if (ret) {
                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
                goto err_disable_device;
        }

        pci_set_master(pdev);
        hw = &hdev->hw;
        hw->hdev = hdev;
        hw->io_base = pci_iomap(pdev, 2, 0);
        if (!hw->io_base) {
                dev_err(&pdev->dev, "can't map configuration register space\n");
                ret = -ENOMEM;
                goto err_clr_master;
        }

        return 0;

err_clr_master:
        pci_clear_master(pdev);
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
err_no_drvdata:
        pci_set_drvdata(pdev, NULL);
        return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;

        pci_iounmap(pdev, hdev->hw.io_base);
        pci_clear_master(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

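/* Top-level VF init: PCI, MSI/MSI-X, state machinery, misc IRQ and the
 * command queue come up first, then the configuration is fetched from
 * the PF and TQPs, MTA, RSS and VLAN filtering are set up. The labeled
 * error path unwinds in strict reverse order.
 */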
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct pci_dev *pdev = ae_dev->pdev;
        struct hclgevf_dev *hdev;
        int ret;

        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return -ENOMEM;

        hdev->pdev = pdev;
        hdev->ae_dev = ae_dev;
        ae_dev->priv = hdev;

        ret = hclgevf_pci_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "PCI initialization failed\n");
                return ret;
        }

        ret = hclgevf_init_msi(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
                goto err_irq_init;
        }

        hclgevf_state_init(hdev);

        ret = hclgevf_misc_irq_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
                        ret);
                goto err_misc_irq_init;
        }

        ret = hclgevf_cmd_init(hdev);
        if (ret)
                goto err_cmd_init;

        ret = hclgevf_configure(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
                goto err_config;
        }

        ret = hclgevf_alloc_tqps(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
                goto err_config;
        }

        ret = hclgevf_set_handle_info(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
                goto err_config;
        }

        /* Initialize VF's MTA */
        hdev->accept_mta_mc = true;
        ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to set mta filter mode\n", ret);
                goto err_config;
        }

        /* Initialize RSS for this VF */
        ret = hclgevf_rss_init_hw(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize RSS\n", ret);
                goto err_config;
        }

        ret = hclgevf_init_vlan_config(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize VLAN config\n", ret);
                goto err_config;
        }

        pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

        return 0;

err_config:
        hclgevf_cmd_uninit(hdev);
err_cmd_init:
        hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
        hclgevf_state_uninit(hdev);
        hclgevf_uninit_msi(hdev);
err_irq_init:
        hclgevf_pci_uninit(hdev);
        return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;

        hclgevf_cmd_uninit(hdev);
        hclgevf_misc_irq_uninit(hdev);
        hclgevf_state_uninit(hdev);
        hclgevf_uninit_msi(hdev);
        hclgevf_pci_uninit(hdev);
        ae_dev->priv = NULL;
}

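/* Upper bound on combined channels reported to ethtool: every TC at
 * the maximum RSS width, capped by the number of TQPs granted by the
 * PF.
 */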
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        struct hnae3_knic_private_info *kinfo = &nic->kinfo;

        return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
                                 struct ethtool_channels *ch)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        ch->max_combined = hclgevf_get_max_channels(hdev);
        ch->other_count = 0;
        ch->max_other = 0;
        ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
                                          u16 *free_tqps, u16 *max_rss_size)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        *free_tqps = 0;
        *max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
                                            u8 *auto_neg, u32 *speed,
                                            u8 *duplex)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        if (speed)
                *speed = hdev->hw.mac.speed;
        if (duplex)
                *duplex = hdev->hw.mac.duplex;
        if (auto_neg)
                *auto_neg = AUTONEG_DISABLE;
}

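/* Cache the speed and duplex values reported by the PF so that ethtool
 * queries via hclgevf_get_ksettings_an_result() above can be answered
 * without another mailbox round trip; this is the querying support
 * named in the commit subject.
 */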
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
                                 u8 duplex)
{
        hdev->hw.mac.speed = speed;
        hdev->hw.mac.duplex = duplex;
}

static const struct hnae3_ae_ops hclgevf_ops = {
        .init_ae_dev = hclgevf_init_ae_dev,
        .uninit_ae_dev = hclgevf_uninit_ae_dev,
        .init_client_instance = hclgevf_register_client,
        .uninit_client_instance = hclgevf_unregister_client,
        .start = hclgevf_ae_start,
        .stop = hclgevf_ae_stop,
        .map_ring_to_vector = hclgevf_map_ring_to_vector,
        .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
        .get_vector = hclgevf_get_vector,
        .put_vector = hclgevf_put_vector,
        .reset_queue = hclgevf_reset_tqp,
        .set_promisc_mode = hclgevf_set_promisc_mode,
        .get_mac_addr = hclgevf_get_mac_addr,
        .set_mac_addr = hclgevf_set_mac_addr,
        .add_uc_addr = hclgevf_add_uc_addr,
        .rm_uc_addr = hclgevf_rm_uc_addr,
        .add_mc_addr = hclgevf_add_mc_addr,
        .rm_mc_addr = hclgevf_rm_mc_addr,
        .get_stats = hclgevf_get_stats,
        .update_stats = hclgevf_update_stats,
        .get_strings = hclgevf_get_strings,
        .get_sset_count = hclgevf_get_sset_count,
        .get_rss_key_size = hclgevf_get_rss_key_size,
        .get_rss_indir_size = hclgevf_get_rss_indir_size,
        .get_rss = hclgevf_get_rss,
        .set_rss = hclgevf_set_rss,
        .get_tc_size = hclgevf_get_tc_size,
        .get_fw_version = hclgevf_get_fw_version,
        .set_vlan_filter = hclgevf_set_vlan_filter,
        .get_channels = hclgevf_get_channels,
        .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
        .get_status = hclgevf_get_status,
        .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
};

static struct hnae3_ae_algo ae_algovf = {
        .ops = &hclgevf_ops,
        .name = HCLGEVF_NAME,
        .pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
        pr_info("%s is initializing\n", HCLGEVF_NAME);

        return hnae3_register_ae_algo(&ae_algovf);
}

static void hclgevf_exit(void)
{
        hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);