/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/delay.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "regs.h"
#include "mcdi_pcol.h"
#include "phy.h"
/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */
25 /* Software-defined structure to the shared-memory */
26 #define CMD_NOTIFY_PORT0 0
27 #define CMD_NOTIFY_PORT1 4
28 #define CMD_PDU_PORT0 0x008
29 #define CMD_PDU_PORT1 0x108
30 #define REBOOT_FLAG_PORT0 0x3f8
31 #define REBOOT_FLAG_PORT1 0x3fc
33 #define MCDI_RPC_TIMEOUT 10 /*seconds */
35 #define MCDI_PDU(efx) \
36 (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
37 #define MCDI_DOORBELL(efx) \
38 (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0)
39 #define MCDI_REBOOT_FLAG(efx) \
40 (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0)
43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
45 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
47 struct siena_nic_data *nic_data;
48 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
49 nic_data = efx->nic_data;
50 return &nic_data->mcdi;
53 void efx_mcdi_init(struct efx_nic *efx)
55 struct efx_mcdi_iface *mcdi;
57 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
61 init_waitqueue_head(&mcdi->wq);
62 spin_lock_init(&mcdi->iface_lock);
63 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
64 mcdi->mode = MCDI_MODE_POLL;
66 (void) efx_mcdi_poll_reboot(efx);
69 static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
70 const u8 *inbuf, size_t inlen)
72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
80 BUG_ON(inlen & 3 || inlen >= 0x100);
82 seqno = mcdi->seqno & SEQ_MASK;
84 if (mcdi->mode == MCDI_MODE_EVENTS)
85 xflags |= MCDI_HEADER_XFLAGS_EVREQ;
87 EFX_POPULATE_DWORD_6(hdr,
88 MCDI_HEADER_RESPONSE, 0,
89 MCDI_HEADER_RESYNC, 1,
90 MCDI_HEADER_CODE, cmd,
91 MCDI_HEADER_DATALEN, inlen,
92 MCDI_HEADER_SEQ, seqno,
93 MCDI_HEADER_XFLAGS, xflags);
95 efx_writed(efx, &hdr, pdu);
97 for (i = 0; i < inlen; i += 4)
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
100 /* Ensure the payload is written out before the header */
103 /* ring the doorbell with a distinctive value */
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
107 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
114 BUG_ON(outlen & 3 || outlen >= 0x100);
116 for (i = 0; i < outlen; i += 4)
117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
120 static int efx_mcdi_poll(struct efx_nic *efx)
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned int time, finish;
124 unsigned int respseq, respcmd, error;
125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
126 unsigned int rc, spins;
129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
130 rc = efx_mcdi_poll_reboot(efx);
134 /* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
135 * because generally mcdi responses are fast. After that, back off
136 * and poll once a jiffy (approximately)
139 finish = get_seconds() + MCDI_RPC_TIMEOUT;
148 time = get_seconds();
151 efx_readd(efx, ®, pdu);
153 /* All 1's indicates that shared memory is in reset (and is
154 * not a valid header). Wait for it to come out reset before
155 * completing the command */
156 if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
157 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
164 mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
165 respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
166 respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
167 error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
169 if (error && mcdi->resplen == 0) {
170 EFX_ERR(efx, "MC rebooted\n");
172 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
173 EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
174 respseq, mcdi->seqno);
177 efx_readd(efx, ®, pdu + 4);
178 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
179 #define TRANSLATE_ERROR(name) \
180 case MC_CMD_ERR_ ## name: \
183 TRANSLATE_ERROR(ENOENT);
184 TRANSLATE_ERROR(EINTR);
185 TRANSLATE_ERROR(EACCES);
186 TRANSLATE_ERROR(EBUSY);
187 TRANSLATE_ERROR(EINVAL);
188 TRANSLATE_ERROR(EDEADLK);
189 TRANSLATE_ERROR(ENOSYS);
190 TRANSLATE_ERROR(ETIME);
191 #undef TRANSLATE_ERROR
204 /* Return rc=0 like wait_event_timeout() */
208 /* Test and clear MC-rebooted flag for this port/function */
209 int efx_mcdi_poll_reboot(struct efx_nic *efx)
211 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
215 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
218 efx_readd(efx, ®, addr);
219 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
225 efx_writed(efx, ®, addr);
227 if (value == MC_STATUS_DWORD_ASSERT)
233 static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
235 /* Wait until the interface becomes QUIESCENT and we win the race
236 * to mark it RUNNING. */
238 atomic_cmpxchg(&mcdi->state,
239 MCDI_STATE_QUIESCENT,
241 == MCDI_STATE_QUIESCENT);
244 static int efx_mcdi_await_completion(struct efx_nic *efx)
246 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
248 if (wait_event_timeout(
250 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
251 msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
254 /* Check if efx_mcdi_set_mode() switched us back to polled completions.
255 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
256 * completed the request first, then we'll just end up completing the
257 * request again, which is safe.
259 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
260 * wait_event_timeout() implicitly provides.
262 if (mcdi->mode == MCDI_MODE_POLL)
263 return efx_mcdi_poll(efx);
268 static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
270 /* If the interface is RUNNING, then move to COMPLETED and wake any
271 * waiters. If the interface isn't in RUNNING then we've received a
272 * duplicate completion after we've already transitioned back to
273 * QUIESCENT. [A subsequent invocation would increment seqno, so would
274 * have failed the seqno check].
276 if (atomic_cmpxchg(&mcdi->state,
278 MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
286 static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
288 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
292 static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
293 unsigned int datalen, unsigned int errno)
295 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
298 spin_lock(&mcdi->iface_lock);
300 if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
302 /* The request has been cancelled */
305 EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx "
306 "seq 0x%x\n", seqno, mcdi->seqno);
308 mcdi->resprc = errno;
309 mcdi->resplen = datalen;
314 spin_unlock(&mcdi->iface_lock);
317 efx_mcdi_complete(mcdi);
320 /* Issue the given command by writing the data into the shared memory PDU,
321 * ring the doorbell and wait for completion. Copyout the result. */
322 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
323 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
324 size_t *outlen_actual)
326 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
328 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
330 efx_mcdi_acquire(mcdi);
332 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
333 spin_lock_bh(&mcdi->iface_lock);
335 spin_unlock_bh(&mcdi->iface_lock);
337 efx_mcdi_copyin(efx, cmd, inbuf, inlen);
339 if (mcdi->mode == MCDI_MODE_POLL)
340 rc = efx_mcdi_poll(efx);
342 rc = efx_mcdi_await_completion(efx);
345 /* Close the race with efx_mcdi_ev_cpl() executing just too late
346 * and completing a request we've just cancelled, by ensuring
347 * that the seqno check therein fails.
349 spin_lock_bh(&mcdi->iface_lock);
352 spin_unlock_bh(&mcdi->iface_lock);
354 EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n",
355 cmd, (int)inlen, mcdi->mode);
359 /* At the very least we need a memory barrier here to ensure
360 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
361 * a spurious efx_mcdi_ev_cpl() running concurrently by
362 * acquiring the iface_lock. */
363 spin_lock_bh(&mcdi->iface_lock);
365 resplen = mcdi->resplen;
366 spin_unlock_bh(&mcdi->iface_lock);
369 efx_mcdi_copyout(efx, outbuf,
370 min(outlen, mcdi->resplen + 3) & ~0x3);
371 if (outlen_actual != NULL)
372 *outlen_actual = resplen;
373 } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
374 ; /* Don't reset if MC_CMD_REBOOT returns EIO */
375 else if (rc == -EIO || rc == -EINTR) {
376 EFX_ERR(efx, "MC fatal error %d\n", -rc);
377 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
379 EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n",
380 cmd, (int)inlen, -rc);
383 efx_mcdi_release(mcdi);
387 void efx_mcdi_mode_poll(struct efx_nic *efx)
389 struct efx_mcdi_iface *mcdi;
391 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
394 mcdi = efx_mcdi(efx);
395 if (mcdi->mode == MCDI_MODE_POLL)
398 /* We can switch from event completion to polled completion, because
399 * mcdi requests are always completed in shared memory. We do this by
400 * switching the mode to POLL'd then completing the request.
401 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
403 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
404 * which efx_mcdi_complete() provides for us.
406 mcdi->mode = MCDI_MODE_POLL;
408 efx_mcdi_complete(mcdi);
411 void efx_mcdi_mode_event(struct efx_nic *efx)
413 struct efx_mcdi_iface *mcdi;
415 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
418 mcdi = efx_mcdi(efx);
420 if (mcdi->mode == MCDI_MODE_EVENTS)
423 /* We can't switch from polled to event completion in the middle of a
424 * request, because the completion method is specified in the request.
425 * So acquire the interface to serialise the requestors. We don't need
426 * to acquire the iface_lock to change the mode here, but we do need a
427 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
428 * efx_mcdi_acquire() provides.
430 efx_mcdi_acquire(mcdi);
431 mcdi->mode = MCDI_MODE_EVENTS;
432 efx_mcdi_release(mcdi);
435 static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
437 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
439 /* If there is an outstanding MCDI request, it has been terminated
440 * either by a BADASSERT or REBOOT event. If the mcdi interface is
441 * in polled mode, then do nothing because the MC reboot handler will
442 * set the header correctly. However, if the mcdi interface is waiting
443 * for a CMDDONE event it won't receive it [and since all MCDI events
444 * are sent to the same queue, we can't be racing with
447 * There's a race here with efx_mcdi_rpc(), because we might receive
448 * a REBOOT event *before* the request has been copied out. In polled
449 * mode (during startup) this is irrelevent, because efx_mcdi_complete()
450 * is ignored. In event mode, this condition is just an edge-case of
451 * receiving a REBOOT event after posting the MCDI request. Did the mc
452 * reboot before or after the copyout? The best we can do always is
453 * just return failure.
455 spin_lock(&mcdi->iface_lock);
456 if (efx_mcdi_complete(mcdi)) {
457 if (mcdi->mode == MCDI_MODE_EVENTS) {
462 /* Nobody was waiting for an MCDI request, so trigger a reset */
463 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
465 spin_unlock(&mcdi->iface_lock);
468 static unsigned int efx_mcdi_event_link_speed[] = {
469 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
470 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
471 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
475 static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
477 u32 flags, fcntl, speed, lpa;
479 speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
480 EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
481 speed = efx_mcdi_event_link_speed[speed];
483 flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
484 fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
485 lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
487 /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
488 * which is only run after flushing the event queues. Therefore, it
489 * is safe to modify the link state outside of the mac_lock here.
491 efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
493 efx_mcdi_phy_check_fcntl(efx, lpa);
495 efx_link_status_changed(efx);
498 static const char *sensor_names[] = {
499 [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
500 [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
501 [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
502 [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
503 [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
504 [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
505 [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
506 [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
507 [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
508 [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
509 [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
510 [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
511 [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
514 static const char *sensor_status_names[] = {
515 [MC_CMD_SENSOR_STATE_OK] = "OK",
516 [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
517 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
518 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
521 static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
523 unsigned int monitor, state, value;
524 const char *name, *state_txt;
525 monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
526 state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
527 value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
528 /* Deal gracefully with the board having more drivers than we
529 * know about, but do not expect new sensor states. */
530 name = (monitor >= ARRAY_SIZE(sensor_names))
531 ? "No sensor name available" :
532 sensor_names[monitor];
533 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
534 state_txt = sensor_status_names[state];
536 EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n",
537 monitor, name, state_txt, value);
540 /* Called from falcon_process_eventq for MCDI events */
541 void efx_mcdi_process_event(struct efx_channel *channel,
544 struct efx_nic *efx = channel->efx;
545 int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
546 u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
549 case MCDI_EVENT_CODE_BADSSERT:
550 EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data);
551 efx_mcdi_ev_death(efx, EINTR);
554 case MCDI_EVENT_CODE_PMNOTICE:
555 EFX_INFO(efx, "MCDI PM event.\n");
558 case MCDI_EVENT_CODE_CMDDONE:
560 MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
561 MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
562 MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
565 case MCDI_EVENT_CODE_LINKCHANGE:
566 efx_mcdi_process_link_change(efx, event);
568 case MCDI_EVENT_CODE_SENSOREVT:
569 efx_mcdi_sensor_event(efx, event);
571 case MCDI_EVENT_CODE_SCHEDERR:
572 EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data);
574 case MCDI_EVENT_CODE_REBOOT:
575 EFX_INFO(efx, "MC Reboot\n");
576 efx_mcdi_ev_death(efx, EIO);
578 case MCDI_EVENT_CODE_MAC_STATS_DMA:
579 /* MAC stats are gather lazily. We can ignore this. */
583 EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code);
/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */
594 int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
596 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
598 const __le16 *ver_words;
601 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
603 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
604 outbuf, sizeof(outbuf), &outlength);
608 if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
610 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
614 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
619 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
620 *version = (((u64)le16_to_cpu(ver_words[0]) << 48) |
621 ((u64)le16_to_cpu(ver_words[1]) << 32) |
622 ((u64)le16_to_cpu(ver_words[2]) << 16) |
623 le16_to_cpu(ver_words[3]));
624 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
629 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
633 int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
636 u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
637 u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
641 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
642 driver_operating ? 1 : 0);
643 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
645 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
646 outbuf, sizeof(outbuf), &outlen);
649 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
652 if (was_attached != NULL)
653 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
657 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
661 int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
662 u16 *fw_subtype_list)
664 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
666 int port_num = efx_port_num(efx);
670 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
672 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
673 outbuf, sizeof(outbuf), &outlen);
677 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
683 ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
684 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
686 memcpy(mac_address, outbuf + offset, ETH_ALEN);
688 memcpy(fw_subtype_list,
689 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
690 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
695 EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen);
700 int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
702 u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
707 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
709 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
711 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
712 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
714 BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
716 rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
724 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
728 int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
730 u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
734 BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
736 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
737 outbuf, sizeof(outbuf), &outlen);
740 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
743 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
747 EFX_ERR(efx, "%s: failed rc=%d\n",
752 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
753 size_t *size_out, size_t *erase_size_out,
756 u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
757 u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
761 MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
763 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
764 outbuf, sizeof(outbuf), &outlen);
767 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
770 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
771 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
772 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
773 (1 << MC_CMD_NVRAM_PROTECTED_LBN));
777 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
781 int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
783 u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
786 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
788 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
790 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
798 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
802 int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
803 loff_t offset, u8 *buffer, size_t length)
805 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
806 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
810 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
811 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
812 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
814 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
815 outbuf, sizeof(outbuf), &outlen);
819 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
823 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
827 int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
828 loff_t offset, const u8 *buffer, size_t length)
830 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
833 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
834 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
835 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
836 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
838 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
840 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
848 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
852 int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
853 loff_t offset, size_t length)
855 u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
858 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
859 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
860 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
862 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
864 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
872 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
876 int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
878 u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
881 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
883 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
885 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
893 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
897 int efx_mcdi_handle_assertion(struct efx_nic *efx)
900 u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN];
901 u8 reboot[MC_CMD_REBOOT_IN_LEN];
903 u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
904 unsigned int flags, index, ofst;
910 /* Check if the MC is in the assertion handler, retrying twice. Once
911 * because a boot-time assertion might cause this command to fail
912 * with EINTR. And once again because GET_ASSERTS can race with
913 * MC_CMD_REBOOT running on the other port. */
916 MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0);
917 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
918 inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN,
919 assertion, sizeof(assertion), &outlen);
920 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
924 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
927 flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS);
928 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
931 /* Reset the hardware atomically such that only one port with succeed.
932 * This command will succeed if a reboot is no longer required (because
933 * the other port did it first), but fail with EIO if it succeeds.
935 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
936 MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
937 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
938 efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
941 /* Print out the assertion */
942 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
943 ? "system-level assertion"
944 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
945 ? "thread-level assertion"
946 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
948 : "unknown assertion";
949 EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
950 MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS),
951 MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS));
953 /* Print out the registers */
954 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
955 for (index = 1; index < 32; index++) {
956 EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
957 MCDI_DWORD2(assertion, ofst));
958 ofst += sizeof(efx_dword_t);
964 void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
966 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
969 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
970 BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
971 BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
973 BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
975 MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
977 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
980 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
983 int efx_mcdi_reset_port(struct efx_nic *efx)
985 int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
987 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
991 int efx_mcdi_reset_mc(struct efx_nic *efx)
993 u8 inbuf[MC_CMD_REBOOT_IN_LEN];
996 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
997 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
998 rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
1000 /* White is black, and up is down */
1005 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1009 int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1010 const u8 *mac, int *id_out)
1012 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
1013 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
1017 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1018 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1019 MC_CMD_FILTER_MODE_SIMPLE);
1020 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
1022 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1023 outbuf, sizeof(outbuf), &outlen);
1027 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1032 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
1038 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1045 efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
1047 return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
1051 int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1053 u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
1057 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
1058 outbuf, sizeof(outbuf), &outlen);
1062 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1067 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
1073 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1078 int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1080 u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
1083 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
1085 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
1093 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1098 int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1102 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
1109 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);