/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
/** Default configuration
 * for CN66XX OCTEON Models.
 */
static struct octeon_config default_cn66xx_conf = {
	.card_type                              = LIO_210SV,
	.card_name                              = LIO_210SV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs		= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size	=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type		= OCTEON_64BYTE_INSTR,
		.db_min			= CN6XXX_DB_MIN,
		.db_timeout		= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs		= CN6XXX_CFG_IO_QUEUES,
		.info_ptr		= OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold	= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt		= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time		= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr		= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports		= DEFAULT_NUM_NIC_PORTS_66XX,
	.num_def_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs	= MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs	= DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs	= MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs	= DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
		/* The rx buffer size need not change even for jumbo frames:
		 * Octeon can send a jumbo frame in 4 consecutive descriptors.
		 */
		.rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

		.base_queue	= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 1: Port cfg Attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs	= MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs	= DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs	= MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs	= DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
		/* The rx buffer size need not change even for jumbo frames:
		 * Octeon can send a jumbo frame in 4 consecutive descriptors.
		 */
		.rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

		.base_queue	= BASE_QUEUE_NOT_REQUESTED,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
/** Default configuration
 * for CN68XX OCTEON Model.
 */
static struct octeon_config default_cn68xx_conf = {
	.card_type                              = LIO_410NV,
	.card_name                              = LIO_410NV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs		= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size	=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type		= OCTEON_64BYTE_INSTR,
		.db_min			= CN6XXX_DB_MIN,
		.db_timeout		= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs		= CN6XXX_CFG_IO_QUEUES,
		.info_ptr		= OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold	= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt		= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time		= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr		= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports		= DEFAULT_NUM_NIC_PORTS_68XX,
	.num_def_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs	= MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs	= DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs	= MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs	= DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
		/* The rx buffer size need not change even for jumbo frames:
		 * Octeon can send a jumbo frame in 4 consecutive descriptors.
		 */
		.rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

		.base_queue	= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 1: Port cfg Attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs	= MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs	= DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs	= MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs	= DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
		/* The rx buffer size need not change even for jumbo frames:
		 * Octeon can send a jumbo frame in 4 consecutive descriptors.
		 */
		.rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

		.base_queue	= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 2: Port cfg Attributes */
	.nic_if_cfg[2] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs	= MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs	= DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs	= MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs	= DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
		/* The rx buffer size need not change even for jumbo frames:
		 * Octeon can send a jumbo frame in 4 consecutive descriptors.
		 */
		.rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

		.base_queue	= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 3: Port cfg Attributes */
	.nic_if_cfg[3] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs	= MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs	= DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs	= MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs	= DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
		/* The rx buffer size need not change even for jumbo frames:
		 * Octeon can send a jumbo frame in 4 consecutive descriptors.
		 */
		.rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

		.base_queue	= BASE_QUEUE_NOT_REQUESTED,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
/** Default configuration
 * for the CN68XX OCTEON Model (210NV).
 */
static struct octeon_config default_cn68xx_210nv_conf = {
	.card_type                              = LIO_210NV,
	.card_name                              = LIO_210NV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs		= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size	=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type		= OCTEON_64BYTE_INSTR,
		.db_min			= CN6XXX_DB_MIN,
		.db_timeout		= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs		= CN6XXX_CFG_IO_QUEUES,
		.info_ptr		= OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold	= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt		= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time		= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr		= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports		= DEFAULT_NUM_NIC_PORTS_68XX_210NV,
	.num_def_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs	= MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs	= DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs	= MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs	= DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
		/* The rx buffer size need not change even for jumbo frames:
		 * Octeon can send a jumbo frame in 4 consecutive descriptors.
		 */
		.rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

		.base_queue	= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 1: Port cfg Attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs	= MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs	= DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs	= MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs	= DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs	= CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs	= CN6XXX_MAX_IQ_DESCRIPTORS,
		/* The rx buffer size need not change even for jumbo frames:
		 * Octeon can send a jumbo frame in 4 consecutive descriptors.
		 */
		.rx_buf_size	= CN6XXX_OQ_BUF_SIZE,

		.base_queue	= BASE_QUEUE_NOT_REQUESTED,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
enum {
	OCTEON_CONFIG_TYPE_DEFAULT = 0,
	NUM_OCTEON_CONFS,
};

static struct octeon_config_ptr {
	u32 conf_type;
} oct_conf_info[MAX_OCTEON_DEVICES] = {
	{
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	},
};
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
	"BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
	"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
	"INVALID"
};

static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
	"BASE", "NIC", "UNKNOWN"};
static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;

static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
void oct_set_config_info(int oct_id, int conf_type)
{
	if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
		conf_type = OCTEON_CONFIG_TYPE_DEFAULT;

	oct_conf_info[oct_id].conf_type = conf_type;
}

void octeon_init_device_list(int conf_type)
{
	int i;

	memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		oct_set_config_info(i, conf_type);
}
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
					   u16 card_type)
{
	u32 oct_id = oct->octeon_id;
	void *ret = NULL;

	switch (oct_conf_info[oct_id].conf_type) {
	case OCTEON_CONFIG_TYPE_DEFAULT:
		if (oct->chip_id == OCTEON_CN66XX) {
			ret = (void *)&default_cn66xx_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_210NV)) {
			ret = (void *)&default_cn68xx_210nv_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_410NV)) {
			ret = (void *)&default_cn68xx_conf;
		}
		break;
	default:
		break;
	}

	return ret;
}
static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX:
		return lio_validate_cn6xxx_config_info(oct, conf);
	default:
		break;
	}

	return 1;
}

void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
{
	void *conf = NULL;

	conf = __retrieve_octeon_config_info(oct, card_type);
	if (!conf)
		return NULL;

	if (__verify_octeon_config_info(oct, conf)) {
		dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
		return NULL;
	}

	return conf;
}
char *lio_get_state_string(atomic_t *state_ptr)
{
	s32 istate = (s32)atomic_read(state_ptr);

	if (istate > OCT_DEV_STATES || istate < 0)
		return oct_dev_state_str[OCT_DEV_STATE_INVALID];
	return oct_dev_state_str[istate];
}

static char *get_oct_app_string(u32 app_mode)
{
	if (app_mode <= CVM_DRV_APP_END)
		return oct_dev_app_str[app_mode - CVM_DRV_APP_START];

	return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
			     size_t size)
{
	int ret = 0;
	u8 *p, *buffer;
	u32 crc32_result, image_len, i;
	u64 load_addr;
	struct octeon_firmware_file_header *h;

	if (size < sizeof(struct octeon_firmware_file_header)) {
		dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
			(u32)size,
			(u32)sizeof(struct octeon_firmware_file_header));
		return -EINVAL;
	}

	h = (struct octeon_firmware_file_header *)data;

	if (h->magic != be32_to_cpu(LIO_NIC_MAGIC)) {
		dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
		return -EINVAL;
	}

	crc32_result = crc32(~0, data,
			     sizeof(struct octeon_firmware_file_header) -
			     sizeof(u32)) ^ ~0U;
	if (crc32_result != be32_to_cpu(h->crc32)) {
		dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
			crc32_result, be32_to_cpu(h->crc32));
		return -EINVAL;
	}

	if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
		dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
			LIQUIDIO_VERSION, h->version);
		return -EINVAL;
	}

	if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
		dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
			be32_to_cpu(h->num_images));
		return -EINVAL;
	}

	dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
	snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
		 h->version);

	buffer = kmalloc(size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	memcpy(buffer, data, size);

	p = buffer + sizeof(struct octeon_firmware_file_header);

	/* load all images */
	for (i = 0; i < be32_to_cpu(h->num_images); i++) {
		load_addr = be64_to_cpu(h->desc[i].addr);
		image_len = be32_to_cpu(h->desc[i].len);

		/* validate the image */
		crc32_result = crc32(~0, p, image_len) ^ ~0U;
		if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
			dev_err(&oct->pci_dev->dev,
				"Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
				i, crc32_result,
				be32_to_cpu(h->desc[i].crc32));
			ret = -EINVAL;
			goto done_downloading;
		}

		/* download the image */
		octeon_pci_write_core_mem(oct, load_addr, p, image_len);

		p += image_len;
		dev_dbg(&oct->pci_dev->dev,
			"Downloaded image %d (%d bytes) to address 0x%016llx\n",
			i, image_len, load_addr);
	}

	/* Invoke the bootcmd */
	ret = octeon_console_send_cmd(oct, h->bootcmd, 50);

done_downloading:
	kfree(buffer);

	return ret;
}
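
/* Illustrative sketch (not built into the driver): one way a caller could
 * hand a firmware image to octeon_download_firmware(). The firmware file
 * name "liquidio/lio_fw.bin" is a placeholder, not necessarily the name the
 * driver actually requests; request_firmware()/release_firmware() are the
 * standard kernel firmware-loader calls.
 */
#if 0
static int example_load_firmware(struct octeon_device *oct)
{
	const struct firmware *fw;
	int ret;

	/* Fetch the image from /lib/firmware via the firmware loader. */
	ret = request_firmware(&fw, "liquidio/lio_fw.bin",
			       &oct->pci_dev->dev);
	if (ret)
		return ret;

	/* Validate the header, copy each image to core memory, then boot. */
	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);
	return ret;
}
#endif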
void octeon_free_device_mem(struct octeon_device *oct)
{
	u32 i;

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
		/* could check mask as well */
		if (oct->droq[i])
			vfree(oct->droq[i]);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
		/* could check mask as well */
		if (oct->instr_queue[i])
			vfree(oct->instr_queue[i]);
	}

	i = oct->octeon_id;
	vfree(oct);

	octeon_device[i] = NULL;
	octeon_device_count--;
}

static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
							u32 priv_size)
{
	struct octeon_device *oct;
	u8 *buf = NULL;
	u32 octdevsize = 0, configsize = 0, size;

	switch (pci_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		configsize = sizeof(struct octeon_cn6xxx);
		break;
	default:
		pr_err("%s: Unknown PCI Device: 0x%x\n",
		       __func__, pci_id);
		return NULL;
	}

	if (configsize & 0x7)
		configsize += (8 - (configsize & 0x7));

	octdevsize = sizeof(struct octeon_device);
	if (octdevsize & 0x7)
		octdevsize += (8 - (octdevsize & 0x7));

	if (priv_size & 0x7)
		priv_size += (8 - (priv_size & 0x7));

	size = octdevsize + priv_size + configsize +
		(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);

	buf = vmalloc(size);
	if (!buf)
		return NULL;

	memset(buf, 0, size);

	oct = (struct octeon_device *)buf;
	oct->priv = (void *)(buf + octdevsize);
	oct->chip = (void *)(buf + octdevsize + priv_size);
	oct->dispatch.dlist = (struct octeon_dispatch *)
		(buf + octdevsize + priv_size + configsize);

	return oct;
}
struct octeon_device *octeon_allocate_device(u32 pci_id,
					     u32 priv_size)
{
	u32 oct_idx = 0;
	struct octeon_device *oct = NULL;

	for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
		if (!octeon_device[oct_idx])
			break;

	if (oct_idx == MAX_OCTEON_DEVICES)
		return NULL;

	oct = octeon_allocate_device_mem(pci_id, priv_size);
	if (!oct)
		return NULL;

	spin_lock_init(&oct->pci_win_lock);
	spin_lock_init(&oct->mem_access_lock);

	octeon_device_count++;
	octeon_device[oct_idx] = oct;

	oct->octeon_id = oct_idx;
	snprintf((oct->device_name), sizeof(oct->device_name),
		 "LiquidIO%d", (oct->octeon_id));

	return oct;
}
int octeon_setup_instr_queues(struct octeon_device *oct)
{
	u32 i, num_iqs = 0;
	u32 num_descs = 0;

	/* this causes queue 0 to be default queue */
	if (OCTEON_CN6XXX(oct)) {
		num_iqs = 1;
		num_descs =
			CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
	}

	oct->num_iqs = 0;

	for (i = 0; i < num_iqs; i++) {
		oct->instr_queue[i] =
			vmalloc(sizeof(struct octeon_instr_queue));
		if (!oct->instr_queue[i])
			return 1;

		memset(oct->instr_queue[i], 0,
		       sizeof(struct octeon_instr_queue));

		oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
		if (octeon_init_instr_queue(oct, i, num_descs))
			return 1;

		oct->num_iqs++;
	}

	return 0;
}
int octeon_setup_output_queues(struct octeon_device *oct)
{
	u32 i, num_oqs = 0;
	u32 num_descs = 0;
	u32 desc_size = 0;

	/* this causes queue 0 to be default queue */
	if (OCTEON_CN6XXX(oct)) {
		/* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
		num_oqs = 1;
		num_descs =
			CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
		desc_size =
			CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
	}

	oct->num_oqs = 0;

	for (i = 0; i < num_oqs; i++) {
		oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
		if (!oct->droq[i])
			return 1;

		memset(oct->droq[i], 0, sizeof(struct octeon_droq));

		if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
			return 1;

		oct->num_oqs++;
	}

	return 0;
}
void octeon_set_io_queues_off(struct octeon_device *oct)
{
	/* Disable the i/p and o/p queues for this Octeon. */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
}

void octeon_set_droq_pkt_op(struct octeon_device *oct,
			    u32 q_no,
			    u32 enable)
{
	u32 reg_val = 0;

	/* Enable or disable output queue processing for this queue. */
	reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);

	if (enable)
		reg_val = reg_val | (1 << q_no);
	else
		reg_val = reg_val & (~(1 << q_no));

	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
}
int octeon_init_dispatch_list(struct octeon_device *oct)
{
	u32 i;

	oct->dispatch.count = 0;

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		oct->dispatch.dlist[i].opcode = 0;
		INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
	}

	for (i = 0; i <= REQTYPE_LAST; i++)
		octeon_register_reqtype_free_fn(oct, i, NULL);

	spin_lock_init(&oct->dispatch.lock);

	return 0;
}
void octeon_delete_dispatch_list(struct octeon_device *oct)
{
	u32 i;
	struct list_head freelist, *temp, *tmp2;

	INIT_LIST_HEAD(&freelist);

	spin_lock_bh(&oct->dispatch.lock);

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		struct list_head *dispatch;

		dispatch = &oct->dispatch.dlist[i].list;
		while (dispatch->next != dispatch) {
			temp = dispatch->next;
			list_del(temp);
			list_add_tail(temp, &freelist);
		}

		oct->dispatch.dlist[i].opcode = 0;
	}

	oct->dispatch.count = 0;

	spin_unlock_bh(&oct->dispatch.lock);

	list_for_each_safe(temp, tmp2, &freelist) {
		list_del(temp);
		vfree(temp);
	}
}
octeon_dispatch_fn_t
octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
		    u16 subcode)
{
	u32 idx;
	struct list_head *dispatch;
	octeon_dispatch_fn_t fn = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn = ((struct octeon_dispatch *)
				      dispatch)->dispatch_fn;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);

	return fn;
}
/* octeon_register_dispatch_fn
 * Parameters:
 *   oct     - the octeon device.
 *   opcode  - opcode for which driver should call the registered function
 *   subcode - subcode for which driver should call the registered function
 *   fn      - The function to call when a packet with "opcode" arrives in
 *             octeon output queues.
 *   fn_arg  - The argument to be passed when calling function "fn".
 * Description:
 *   Registers a function and its argument to be called when a packet
 *   arrives in Octeon output queues with "opcode".
 */
int
octeon_register_dispatch_fn(struct octeon_device *oct,
			    u16 opcode,
			    u16 subcode,
			    octeon_dispatch_fn_t fn, void *fn_arg)
{
	u32 idx;
	octeon_dispatch_fn_t pfn;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&oct->dispatch.lock);
	/* Add dispatch function to first level of lookup table */
	if (oct->dispatch.dlist[idx].opcode == 0) {
		oct->dispatch.dlist[idx].opcode = combined_opcode;
		oct->dispatch.dlist[idx].dispatch_fn = fn;
		oct->dispatch.dlist[idx].arg = fn_arg;
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);
		return 0;
	}

	spin_unlock_bh(&oct->dispatch.lock);

	/* Check if there was a function already registered for this
	 * opcode/subcode.
	 */
	pfn = octeon_get_dispatch(oct, opcode, subcode);
	if (!pfn) {
		struct octeon_dispatch *dispatch;

		dev_dbg(&oct->pci_dev->dev,
			"Adding opcode to dispatch list linked list\n");
		dispatch = (struct octeon_dispatch *)
			   vmalloc(sizeof(struct octeon_dispatch));
		if (!dispatch) {
			dev_err(&oct->pci_dev->dev,
				"No memory to add dispatch function\n");
			return 1;
		}
		dispatch->opcode = combined_opcode;
		dispatch->dispatch_fn = fn;
		dispatch->arg = fn_arg;

		/* Add dispatch function to linked list of fn ptrs
		 * at the hashed index.
		 */
		spin_lock_bh(&oct->dispatch.lock);
		list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);
	} else {
		dev_err(&oct->pci_dev->dev,
			"Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
			opcode, subcode);
		return 1;
	}

	return 0;
}
/* octeon_unregister_dispatch_fn
 * Parameters:
 *   oct     - octeon device
 *   opcode  - driver should unregister the function for this opcode
 *   subcode - driver should unregister the function for this subcode
 * Description:
 *   Unregister the function set for this opcode+subcode.
 * Locks:
 *   No locks are held.
 */
int
octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
			      u16 subcode)
{
	int retval = 0;
	u32 idx;
	struct list_head *dispatch, *dfree = NULL, *tmp2;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&oct->dispatch.lock);

	if (oct->dispatch.count == 0) {
		spin_unlock_bh(&oct->dispatch.lock);
		dev_err(&oct->pci_dev->dev,
			"No dispatch functions registered for this device\n");
		return 1;
	}

	if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
		dispatch = &oct->dispatch.dlist[idx].list;
		if (dispatch->next != dispatch) {
			dispatch = dispatch->next;
			oct->dispatch.dlist[idx].opcode =
				((struct octeon_dispatch *)dispatch)->opcode;
			oct->dispatch.dlist[idx].dispatch_fn =
				((struct octeon_dispatch *)
				 dispatch)->dispatch_fn;
			oct->dispatch.dlist[idx].arg =
				((struct octeon_dispatch *)dispatch)->arg;
			list_del(dispatch);
			dfree = dispatch;
		} else {
			oct->dispatch.dlist[idx].opcode = 0;
			oct->dispatch.dlist[idx].dispatch_fn = NULL;
			oct->dispatch.dlist[idx].arg = NULL;
		}
	} else {
		retval = 1;
		list_for_each_safe(dispatch, tmp2,
				   &(oct->dispatch.dlist[idx].
				     list)) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				list_del(dispatch);
				dfree = dispatch;
				retval = 0;
				break;
			}
		}
	}

	if (!retval)
		oct->dispatch.count--;

	spin_unlock_bh(&oct->dispatch.lock);

	if (dfree)
		vfree(dfree);

	return retval;
}
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
	u32 i;
	char app_name[16];
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct octeon_core_setup *cs = NULL;
	u32 num_nic_ports = 0;

	if (OCTEON_CN6XXX(oct))
		num_nic_ports =
			CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));

	if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
		dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
			atomic_read(&oct->status));
		goto core_drv_init_err;
	}

	strncpy(app_name,
		get_oct_app_string(
			(u32)recv_pkt->rh.r_core_drv_init.app_mode),
		sizeof(app_name) - 1);
	oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
	if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP)
		oct->fw_info.max_nic_ports =
			(u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
	oct->fw_info.num_gmx_ports =
		(u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;

	if (oct->fw_info.max_nic_ports < num_nic_ports) {
		dev_err(&oct->pci_dev->dev,
			"Config has more ports than firmware allows (%d > %d).\n",
			num_nic_ports, oct->fw_info.max_nic_ports);
		goto core_drv_init_err;
	}
	oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
	oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	cs = &core_setup[oct->octeon_id];

	if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
		dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
			(u32)sizeof(*cs),
			recv_pkt->buffer_size[0]);
	}

	memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
	strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
	strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
		OCT_SERIAL_LEN);

	octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));

	oct->boardinfo.major = cs->board_rev_major;
	oct->boardinfo.minor = cs->board_rev_minor;

	dev_info(&oct->pci_dev->dev,
		 "Running %s (%llu Hz)\n",
		 app_name, CVM_CAST64(cs->corefreq));

core_drv_init_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}
int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
	    (oct->io_qmask.iq & (1UL << q_no)))
		return oct->instr_queue[q_no]->max_count;

	return -1;
}

int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
	    (oct->io_qmask.oq & (1UL << q_no)))
		return oct->droq[q_no]->max_count;

	return -1;
}
/* Returns the host/firmware-handshake OCTEON-specific configuration */
struct octeon_config *octeon_get_conf(struct octeon_device *oct)
{
	struct octeon_config *default_oct_conf = NULL;

	/* check the OCTEON Device model & return the corresponding octeon
	 * configuration
	 */
	if (OCTEON_CN6XXX(oct)) {
		default_oct_conf =
			(struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
	}

	return default_oct_conf;
}
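
/* Illustrative sketch (not built into the driver): reading values back out
 * of the active configuration with the CFG_GET_* accessors used elsewhere
 * in this file. The two fields queried here are just examples.
 */
#if 0
static void example_dump_conf(struct octeon_device *oct)
{
	struct octeon_config *conf = octeon_get_conf(oct);

	if (!conf)
		return;

	dev_info(&oct->pci_dev->dev, "nic ports: %d, def rx descs: %d\n",
		 CFG_GET_NUM_NIC_PORTS(conf),
		 CFG_GET_NUM_DEF_RX_DESCS(conf));
}
#endif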
/* scratch register address is same in all the OCT-II and CN70XX models */
#define CNXX_SLI_SCRATCH1 0x3C0

/** Get the octeon device pointer.
 * @param octeon_id - The id for which the octeon device pointer is required.
 * @return Success: Octeon device pointer.
 * @return Failure: NULL.
 */
struct octeon_device *lio_get_device(u32 octeon_id)
{
	if (octeon_id >= MAX_OCTEON_DEVICES)
		return NULL;
	else
		return octeon_device[octeon_id];
}
u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
	u64 val64;
	unsigned long flags;
	u32 val32, addrhi;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	/* The windowed read happens when the LSB of the addr is written.
	 * So write MSB first
	 */
	addrhi = (addr >> 32);
	if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
		addrhi |= 0x00060000;
	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);

	/* Read back to preserve ordering of writes */
	val32 = readl(oct->reg_list.pci_win_rd_addr_hi);

	writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
	val32 = readl(oct->reg_list.pci_win_rd_addr_lo);

	val64 = readq(oct->reg_list.pci_win_rd_data);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);

	return val64;
}
void lio_pci_writeq(struct octeon_device *oct,
		    u64 val,
		    u64 addr)
{
	u32 val32;
	unsigned long flags;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	writeq(addr, oct->reg_list.pci_win_wr_addr);

	/* The write happens when the LSB is written. So write MSB first. */
	writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
	/* Read the MSB to ensure ordering of writes. */
	val32 = readl(oct->reg_list.pci_win_wr_data_hi);

	writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}
int octeon_mem_access_ok(struct octeon_device *oct)
{
	u64 access_okay = 0;

	/* Check to make sure a DDR interface is enabled */
	u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);

	access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);

	return access_okay ? 0 : 1;
}
int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
{
	int ret = 1;
	u32 ms;

	if (!timeout)
		return ret;

	while (*timeout == 0)
		schedule_timeout_uninterruptible(HZ / 10);

	for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
	     ms += HZ / 10) {
		ret = octeon_mem_access_ok(oct);

		/* wait 100 ms */
		if (ret)
			schedule_timeout_uninterruptible(HZ / 10);
	}

	return ret;
}
/** Get the octeon id assigned to the octeon device passed as argument.
 * This function is exported to other modules.
 * @param dev - octeon device pointer passed as a void *.
 * @return octeon device id
 */
int lio_get_device_id(void *dev)
{
	struct octeon_device *octeon_dev = (struct octeon_device *)dev;
	u32 i;

	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		if (octeon_device[i] == octeon_dev)
			return octeon_dev->octeon_id;

	return -1;
}