net: wwan: t7xx: Infrastructure for early port configuration
author Jinjian Song <jinjian.song@fibocom.com>
Mon, 5 Feb 2024 10:22:29 +0000 (18:22 +0800)
committer David S. Miller <davem@davemloft.net>
Fri, 9 Feb 2024 12:07:48 +0000 (12:07 +0000)
To support cases such as FW update or core dump, the t7xx
device is capable of signaling the host that a special port
needs to be created before the handshake phase.

Add the infrastructure required to create the early ports,
which also requires a different configuration of the CLDMA queues.

Based on v5 of the following series:
'net: wwan: t7xx: fw flashing & coredump support'
(https://patchwork.kernel.org/project/netdevbpf/patch/3777bb382f4b0395cb594a602c5c79dbab86c9e0.1674307425.git.m.chetan.kumar@linux.intel.com/)
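
For illustration, a condensed sketch of how the new hooks fit
together (based on t7xx_lk_stage_event_handling() and
t7xx_proxy_alloc() in the hunks below; the function name is
hypothetical, this is not a verbatim call path, and error
handling is omitted):

    /* Sketch: reconfigure the AP CLDMA for the early (LK stage) port.
     * The dump queue pair gets dedicated buffers and its own per-queue
     * RX callback instead of the shared t7xx_port_proxy_recv_skb().
     */
    static void example_enter_early_mode(struct t7xx_modem *md)
    {
            struct cldma_ctrl *md_ctrl = md->md_ctrl[CLDMA_ID_AP];

            t7xx_cldma_hif_hw_init(md_ctrl);
            t7xx_cldma_stop(md_ctrl);
            /* Resizes the rings and installs per-queue recv_skb callbacks */
            t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);
            t7xx_cldma_start(md_ctrl);

            /* Expose only the single early port until the handshake
             * phase installs the normal port table.
             */
            t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
    }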

Signed-off-by: Jinjian Song <jinjian.song@fibocom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
drivers/net/wwan/t7xx/t7xx_hif_cldma.h
drivers/net/wwan/t7xx/t7xx_modem_ops.c
drivers/net/wwan/t7xx/t7xx_pci.c
drivers/net/wwan/t7xx/t7xx_port.h
drivers/net/wwan/t7xx/t7xx_port_proxy.c
drivers/net/wwan/t7xx/t7xx_port_proxy.h
drivers/net/wwan/t7xx/t7xx_port_wwan.c
drivers/net/wwan/t7xx/t7xx_reg.h
drivers/net/wwan/t7xx/t7xx_state_monitor.c
drivers/net/wwan/t7xx/t7xx_state_monitor.h

drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index cc70360364b7d62c2c3d66ff50c65b9f8da11a22..abc41a7089fa4f9e1efb1a51e3375d57321b51c6 100644
@@ -57,8 +57,6 @@
 #define CHECK_Q_STOP_TIMEOUT_US                1000000
 #define CHECK_Q_STOP_STEP_US           10000
 
-#define CLDMA_JUMBO_BUFF_SZ            (63 * 1024 + sizeof(struct ccci_header))
-
 static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
                                     enum mtk_txrx tx_rx, unsigned int index)
 {
@@ -161,7 +159,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
                skb_reset_tail_pointer(skb);
                skb_put(skb, le16_to_cpu(gpd->data_buff_len));
 
-               ret = md_ctrl->recv_skb(queue, skb);
+               ret = queue->recv_skb(queue, skb);
                /* Break processing, will try again later */
                if (ret < 0)
                        return ret;
@@ -897,13 +895,13 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
 
 /**
  * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
- * @md_ctrl: CLDMA context structure.
+ * @queue: CLDMA queue.
  * @recv_skb: Receiving skb callback.
  */
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
                             int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
 {
-       md_ctrl->recv_skb = recv_skb;
+       queue->recv_skb = recv_skb;
 }
 
 /**
@@ -993,6 +991,28 @@ allow_sleep:
        return ret;
 }
 
+static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
+{
+       int qno;
+
+       for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
+               md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+               t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
+       }
+
+       md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+       for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
+               md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+
+       if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
+               md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+               md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+               t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
+                                       t7xx_port_proxy_recv_skb_from_dedicated_queue);
+       }
+}
+
 static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
 {
        char dma_pool_name[32];
@@ -1018,16 +1038,9 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
                        dev_err(md_ctrl->dev, "control TX ring init fail\n");
                        goto err_free_tx_ring;
                }
-
-               md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
        }
 
        for (j = 0; j < CLDMA_RXQ_NUM; j++) {
-               md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-
-               if (j == CLDMA_RXQ_NUM - 1)
-                       md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-
                ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
                if (ret) {
                        dev_err(md_ctrl->dev, "Control RX ring init fail\n");
@@ -1094,6 +1107,7 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
 {
        struct device *dev = &t7xx_dev->pdev->dev;
        struct cldma_ctrl *md_ctrl;
+       int qno;
 
        md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
        if (!md_ctrl)
@@ -1102,7 +1116,9 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
        md_ctrl->t7xx_dev = t7xx_dev;
        md_ctrl->dev = dev;
        md_ctrl->hif_id = hif_id;
-       md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+       for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
+               md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
+
        t7xx_hw_info_init(md_ctrl);
        t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
        return 0;
@@ -1332,9 +1348,10 @@ err_workqueue:
        return -ENOMEM;
 }
 
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
 {
        t7xx_cldma_late_release(md_ctrl);
+       t7xx_cldma_adjust_config(md_ctrl, cfg_id);
        t7xx_cldma_late_init(md_ctrl);
 }
 
drivers/net/wwan/t7xx/t7xx_hif_cldma.h
index 4410bac6993aef8e8c80870cab4cac5394d8a0e8..f2d9941be9c8312034d0ad3501d803ceab357b68 100644
 #include "t7xx_cldma.h"
 #include "t7xx_pci.h"
 
+#define CLDMA_JUMBO_BUFF_SZ            (63 * 1024 + sizeof(struct ccci_header))
+#define CLDMA_SHARED_Q_BUFF_SZ         3584
+#define CLDMA_DEDICATED_Q_BUFF_SZ      2048
+
 /**
  * enum cldma_id - Identifiers for CLDMA HW units.
  * @CLDMA_ID_MD: Modem control channel.
@@ -55,6 +59,11 @@ struct cldma_gpd {
        __le16 not_used2;
 };
 
+enum cldma_cfg {
+       CLDMA_SHARED_Q_CFG,
+       CLDMA_DEDICATED_Q_CFG,
+};
+
 struct cldma_request {
        struct cldma_gpd *gpd;  /* Virtual address for CPU */
        dma_addr_t gpd_addr;    /* Physical address for DMA */
@@ -82,6 +91,7 @@ struct cldma_queue {
        wait_queue_head_t req_wq;       /* Only for TX */
        struct workqueue_struct *worker;
        struct work_struct cldma_work;
+       int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
 };
 
 struct cldma_ctrl {
@@ -101,24 +111,22 @@ struct cldma_ctrl {
        struct md_pm_entity *pm_entity;
        struct t7xx_cldma_hw hw_info;
        bool is_late_init;
-       int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
 };
 
+#define CLDMA_Q_IDX_DUMP       1
 #define GPD_FLAGS_HWO          BIT(0)
 #define GPD_FLAGS_IOC          BIT(7)
 #define GPD_DMAPOOL_ALIGN      16
 
-#define CLDMA_MTU              3584    /* 3.5kB */
-
 int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
 void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
 int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
 void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
 void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
 int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
 void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
                             int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
 void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
drivers/net/wwan/t7xx/t7xx_modem_ops.c
index ca262d2961ed7f8d65f276c9a6bdccb01b0a65f9..8d864d4ed77f5eb1cc807c137e857849e61c5b22 100644
@@ -535,7 +535,7 @@ static void t7xx_md_hk_wq(struct work_struct *work)
 
        /* Clear the HS2 EXIT event appended in core_reset() */
        t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
-       t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
+       t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
        t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
        t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
        md->core_md.handshake_ongoing = true;
@@ -550,7 +550,7 @@ static void t7xx_ap_hk_wq(struct work_struct *work)
         /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
        t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
        t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
-       t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+       t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
        t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
        md->core_ap.handshake_ongoing = true;
        t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
@@ -764,6 +764,7 @@ err_destroy_hswq:
 
 void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
 {
+       enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
        struct t7xx_modem *md = t7xx_dev->md;
 
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
@@ -771,7 +772,8 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
        if (!md->md_init_finish)
                return;
 
-       t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+       if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
+               t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
        t7xx_port_proxy_uninit(md->port_prox);
        t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
        t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
drivers/net/wwan/t7xx/t7xx_pci.c
index f99eb21cb8ccb0b710521f1a32b292f6c511eaef..e0b1e7a616cae530627a7692babd5d09b335c36f 100644
@@ -183,7 +183,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
        pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
        pm_runtime_use_autosuspend(&pdev->dev);
 
-       return t7xx_wait_pm_config(t7xx_dev);
+       return 0;
 }
 
 void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
drivers/net/wwan/t7xx/t7xx_port.h
index 4ae8a00a85322e428eae9f05b355c56638c91a93..f74d3bab810d849b236e58e1fd3f47a357396d60 100644
@@ -75,6 +75,8 @@ enum port_ch {
        PORT_CH_DSS6_TX = 0x20df,
        PORT_CH_DSS7_RX = 0x20e0,
        PORT_CH_DSS7_TX = 0x20e1,
+
+       PORT_CH_UNIMPORTANT = 0xffff,
 };
 
 struct t7xx_port;
@@ -135,11 +137,13 @@ struct t7xx_port {
        };
 };
 
+int t7xx_get_port_mtu(struct t7xx_port *port);
 struct sk_buff *t7xx_port_alloc_skb(int payload);
 struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
 int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
 int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
                       unsigned int ex_msg);
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb);
 int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
                           unsigned int ex_msg);
 
drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 274846d39fbf3f67ddca2ffea3277bf8213ff792..e53a152faee4086224a1abd55aa951e0c1ae7454 100644
@@ -48,6 +48,9 @@
             i < (proxy)->port_count;           \
             i++, (p) = &(proxy)->ports[i])
 
+#define T7XX_MAX_POSSIBLE_PORTS_NUM    \
+       (max(ARRAY_SIZE(t7xx_port_conf), ARRAY_SIZE(t7xx_early_port_conf)))
+
 static const struct t7xx_port_conf t7xx_port_conf[] = {
        {
                .tx_ch = PORT_CH_UART2_TX,
@@ -100,6 +103,18 @@ static const struct t7xx_port_conf t7xx_port_conf[] = {
        },
 };
 
+static const struct t7xx_port_conf t7xx_early_port_conf[] = {
+       {
+               .tx_ch = PORT_CH_UNIMPORTANT,
+               .rx_ch = PORT_CH_UNIMPORTANT,
+               .txq_index = CLDMA_Q_IDX_DUMP,
+               .rxq_index = CLDMA_Q_IDX_DUMP,
+               .txq_exp_index = CLDMA_Q_IDX_DUMP,
+               .rxq_exp_index = CLDMA_Q_IDX_DUMP,
+               .path_id = CLDMA_ID_AP,
+       },
+};
+
 static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
 {
        const struct t7xx_port_conf *port_conf;
@@ -214,7 +229,17 @@ int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
        return 0;
 }
 
-static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+int t7xx_get_port_mtu(struct t7xx_port *port)
+{
+       enum cldma_id path_id = port->port_conf->path_id;
+       int tx_qno = t7xx_port_get_queue_no(port);
+       struct cldma_ctrl *md_ctrl;
+
+       md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+       return md_ctrl->tx_ring[tx_qno].pkt_size;
+}
+
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
 {
        enum cldma_id path_id = port->port_conf->path_id;
        struct cldma_ctrl *md_ctrl;
@@ -329,6 +354,39 @@ static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
        }
 }
 
+/**
+ * t7xx_port_proxy_recv_skb_from_dedicated_queue() - Dispatch early port received skb.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ ** 0          - Packet consumed.
+ ** -ERROR     - Failed to process skb.
+ */
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb)
+{
+       struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+       struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+       const struct t7xx_port_conf *port_conf;
+       struct t7xx_port *port;
+       int ret;
+
+       port = &port_prox->ports[0];
+       if (WARN_ON_ONCE(port->port_conf->rxq_index != queue->index)) {
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+
+       port_conf = port->port_conf;
+       ret = port_conf->ops->recv_skb(port, skb);
+       if (ret < 0 && ret != -ENOBUFS) {
+               dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
+               dev_kfree_skb_any(skb);
+       }
+
+       return ret;
+}
+
 static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
                                                   struct cldma_queue *queue, u16 channel)
 {
@@ -359,7 +417,7 @@ static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev
  ** 0          - Packet consumed.
  ** -ERROR     - Failed to process skb.
  */
-static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
 {
        struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
        struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
@@ -444,33 +502,54 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
                spin_lock_init(&port->port_update_lock);
                port->chan_enable = false;
 
-               if (port_conf->ops->init)
+               if (port_conf->ops && port_conf->ops->init)
                        port_conf->ops->init(port);
        }
 
        t7xx_proxy_setup_ch_mapping(port_prox);
 }
 
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
+{
+       struct port_proxy *port_prox = md->port_prox;
+       const struct t7xx_port_conf *port_conf;
+       u32 port_count;
+       int i;
+
+       t7xx_port_proxy_uninit(port_prox);
+
+       if (cfg_id == PORT_CFG_ID_EARLY) {
+               port_conf = t7xx_early_port_conf;
+               port_count = ARRAY_SIZE(t7xx_early_port_conf);
+       } else {
+               port_conf = t7xx_port_conf;
+               port_count = ARRAY_SIZE(t7xx_port_conf);
+       }
+
+       for (i = 0; i < port_count; i++)
+               port_prox->ports[i].port_conf = &port_conf[i];
+
+       port_prox->cfg_id = cfg_id;
+       port_prox->port_count = port_count;
+
+       t7xx_proxy_init_all_ports(md);
+}
+
 static int t7xx_proxy_alloc(struct t7xx_modem *md)
 {
-       unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
        struct device *dev = &md->t7xx_dev->pdev->dev;
        struct port_proxy *port_prox;
-       int i;
 
-       port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+       port_prox = devm_kzalloc(dev, sizeof(*port_prox) +
+                                sizeof(struct t7xx_port) * T7XX_MAX_POSSIBLE_PORTS_NUM,
                                 GFP_KERNEL);
        if (!port_prox)
                return -ENOMEM;
 
        md->port_prox = port_prox;
        port_prox->dev = dev;
+       t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
 
-       for (i = 0; i < port_count; i++)
-               port_prox->ports[i].port_conf = &t7xx_port_conf[i];
-
-       port_prox->port_count = port_count;
-       t7xx_proxy_init_all_ports(md);
        return 0;
 }
 
@@ -492,8 +571,6 @@ int t7xx_port_proxy_init(struct t7xx_modem *md)
        if (ret)
                return ret;
 
-       t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
-       t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
        return 0;
 }
 
@@ -505,7 +582,7 @@ void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
        for_each_proxy_port(i, port, port_prox) {
                const struct t7xx_port_conf *port_conf = port->port_conf;
 
-               if (port_conf->ops->uninit)
+               if (port_conf->ops && port_conf->ops->uninit)
                        port_conf->ops->uninit(port);
        }
 }
drivers/net/wwan/t7xx/t7xx_port_proxy.h
index 81d059fbc0fb4667bf0a5ec1f5f807c8a76a3e2e..7f5706811445baaa7c0213c3a225c95342644233 100644
 #define RX_QUEUE_MAXLEN                32
 #define CTRL_QUEUE_MAXLEN      16
 
+enum port_cfg_id {
+       PORT_CFG_ID_INVALID,
+       PORT_CFG_ID_NORMAL,
+       PORT_CFG_ID_EARLY,
+};
+
 struct port_proxy {
        int                     port_count;
        struct list_head        rx_ch_ports[PORT_CH_ID_MASK + 1];
        struct list_head        queue_ports[CLDMA_NUM][MTK_QUEUES];
        struct device           *dev;
+       enum port_cfg_id        cfg_id;
        struct t7xx_port        ports[];
 };
 
@@ -98,5 +105,8 @@ void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int
 int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
 int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
                                       bool en_flag);
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id);
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb);
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb);
 
 #endif /* __T7XX_PORT_PROXY_H__ */
drivers/net/wwan/t7xx/t7xx_port_wwan.c
index 17389c8f6600a3f6e9d492098e7ea16967659206..ddc20ddfa7347ab8c27eec2191c3cbcf072b4eda 100644
@@ -152,14 +152,15 @@ static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
 static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
 {
        const struct t7xx_port_conf *port_conf = port->port_conf;
-       unsigned int header_len = sizeof(struct ccci_header);
+       unsigned int header_len = sizeof(struct ccci_header), mtu;
        struct wwan_port_caps caps;
 
        if (state != MD_STATE_READY)
                return;
 
        if (!port->wwan.wwan_port) {
-               caps.frag_len = CLDMA_MTU - header_len;
+               mtu = t7xx_get_port_mtu(port);
+               caps.frag_len = mtu - header_len;
                caps.headroom_len = header_len;
                port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
                                                        &wwan_ops, &caps, port);
drivers/net/wwan/t7xx/t7xx_reg.h
index c41d7d094c08539a5a6aacdd07a1cd1c94bd79bc..9c7dc72ac6f62593e763f85578633d35557cb171 100644
@@ -101,11 +101,32 @@ enum t7xx_pm_resume_state {
        PM_RESUME_REG_STATE_L2_EXP,
 };
 
+enum host_event_e {
+       HOST_EVENT_INIT = 0,
+       FASTBOOT_DL_NOTIFY = 0x3,
+};
+
 #define T7XX_PCIE_MISC_DEV_STATUS              0x0d1c
 #define MISC_STAGE_MASK                                GENMASK(2, 0)
 #define MISC_RESET_TYPE_PLDR                   BIT(26)
 #define MISC_RESET_TYPE_FLDR                   BIT(27)
-#define LINUX_STAGE                            4
+#define MISC_LK_EVENT_MASK                     GENMASK(11, 8)
+#define HOST_EVENT_MASK                                GENMASK(31, 28)
+
+enum lk_event_id {
+       LK_EVENT_NORMAL = 0,
+       LK_EVENT_CREATE_PD_PORT = 1,
+       LK_EVENT_CREATE_POST_DL_PORT = 2,
+       LK_EVENT_RESET = 7,
+};
+
+enum t7xx_device_stage {
+       T7XX_DEV_STAGE_INIT = 0,
+       T7XX_DEV_STAGE_BROM_PRE = 1,
+       T7XX_DEV_STAGE_BROM_POST = 2,
+       T7XX_DEV_STAGE_LK = 3,
+       T7XX_DEV_STAGE_LINUX = 4,
+};
 
 #define T7XX_PCIE_RESOURCE_STATUS              0x0d28
 #define T7XX_PCIE_RESOURCE_STS_MSK             GENMASK(4, 0)
drivers/net/wwan/t7xx/t7xx_state_monitor.c
index c5d46f45fa6237f3051e2c91816cf1d217c94dce..038377fed1028a7514a0d63d52be8d32b79e67ef 100644
 #define FSM_MD_EX_PASS_TIMEOUT_MS              45000
 #define FSM_CMD_TIMEOUT_MS                     2000
 
+#define wait_for_expected_dev_stage(status)    \
+       read_poll_timeout(ioread32, status,     \
+                         ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) ||       \
+                         ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000,    \
+                         20000000, false, IREG_BASE(md->t7xx_dev) +    \
+                         T7XX_PCIE_MISC_DEV_STATUS)
+
 void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
 {
        struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
@@ -206,6 +213,51 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
                fsm_finish_command(ctl, cmd, 0);
 }
 
+static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
+{
+       u32 value;
+
+       value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+       value &= ~HOST_EVENT_MASK;
+       value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+       iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+}
+
+static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
+{
+       struct t7xx_modem *md = ctl->md;
+       struct cldma_ctrl *md_ctrl;
+       enum lk_event_id lk_event;
+       struct device *dev;
+
+       dev = &md->t7xx_dev->pdev->dev;
+       lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
+       switch (lk_event) {
+       case LK_EVENT_NORMAL:
+       case LK_EVENT_RESET:
+               break;
+
+       case LK_EVENT_CREATE_PD_PORT:
+       case LK_EVENT_CREATE_POST_DL_PORT:
+               md_ctrl = md->md_ctrl[CLDMA_ID_AP];
+               t7xx_cldma_hif_hw_init(md_ctrl);
+               t7xx_cldma_stop(md_ctrl);
+               t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);
+
+               t7xx_cldma_start(md_ctrl);
+
+               if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
+                       t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
+               else
+                       t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
+               break;
+
+       default:
+               dev_err(dev, "Invalid LK event %d\n", lk_event);
+               break;
+       }
+}
+
 static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
 {
        ctl->curr_state = FSM_STATE_STOPPED;
@@ -226,8 +278,9 @@ static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comman
 
 static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
 {
-       struct t7xx_pci_dev *t7xx_dev;
-       struct cldma_ctrl *md_ctrl;
+       struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
+       struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
+       enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
        int err;
 
        if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
@@ -235,18 +288,20 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
                return;
        }
 
-       md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
-       t7xx_dev = ctl->md->t7xx_dev;
-
        ctl->curr_state = FSM_STATE_STOPPING;
        t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
        t7xx_cldma_stop(md_ctrl);
 
-       if (!ctl->md->rgu_irq_asserted) {
-               t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
-               /* Wait for the DRM disable to take effect */
-               msleep(FSM_DRM_DISABLE_DELAY_MS);
+       if (mode == T7XX_FASTBOOT_SWITCHING)
+               t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
+
+       t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
+       /* Wait for the DRM disable to take effect */
+       msleep(FSM_DRM_DISABLE_DELAY_MS);
 
+       if (mode == T7XX_FASTBOOT_SWITCHING) {
+               t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+       } else {
                err = t7xx_acpi_fldr_func(t7xx_dev);
                if (err)
                        t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
@@ -318,7 +373,8 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
 static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
 {
        struct t7xx_modem *md = ctl->md;
-       u32 dev_status;
+       struct device *dev;
+       u32 status;
        int ret;
 
        if (!md)
@@ -330,23 +386,53 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
                return;
        }
 
+       dev = &md->t7xx_dev->pdev->dev;
        ctl->curr_state = FSM_STATE_PRE_START;
        t7xx_md_event_notify(md, FSM_PRE_START);
 
-       ret = read_poll_timeout(ioread32, dev_status,
-                               (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
-                               false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+       ret = wait_for_expected_dev_stage(status);
+
        if (ret) {
-               struct device *dev = &md->t7xx_dev->pdev->dev;
+               dev_err(dev, "read poll timeout %d\n", ret);
+               goto finish_command;
+       }
 
-               fsm_finish_command(ctl, cmd, -ETIMEDOUT);
-               dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
-               return;
+       if (status != ctl->status || cmd->flag != 0) {
+               u32 stage = FIELD_GET(MISC_STAGE_MASK, status);
+
+               switch (stage) {
+               case T7XX_DEV_STAGE_INIT:
+               case T7XX_DEV_STAGE_BROM_PRE:
+               case T7XX_DEV_STAGE_BROM_POST:
+                       dev_dbg(dev, "BROM_STAGE Entered\n");
+                       ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
+                       break;
+
+               case T7XX_DEV_STAGE_LK:
+                       dev_dbg(dev, "LK_STAGE Entered\n");
+                       t7xx_lk_stage_event_handling(ctl, status);
+                       break;
+
+               case T7XX_DEV_STAGE_LINUX:
+                       dev_dbg(dev, "LINUX_STAGE Entered\n");
+                       t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
+                                            D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
+                       if (cmd->flag == 0)
+                               break;
+                       t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+                       t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+                       t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
+                       ret = fsm_routine_starting(ctl);
+                       break;
+
+               default:
+                       break;
+               }
+               ctl->status = status;
        }
 
-       t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
-       t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
-       fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+finish_command:
+       fsm_finish_command(ctl, cmd, ret);
 }
 
 static int fsm_main_thread(void *data)
@@ -518,6 +604,7 @@ void t7xx_fsm_reset(struct t7xx_modem *md)
        fsm_flush_event_cmd_qs(ctl);
        ctl->curr_state = FSM_STATE_STOPPED;
        ctl->exp_flg = false;
+       ctl->status = T7XX_DEV_STAGE_INIT;
 }
 
 int t7xx_fsm_init(struct t7xx_modem *md)
drivers/net/wwan/t7xx/t7xx_state_monitor.h
index b0b3662ae6d72e58a0e10e1bff14876de53780bc..7b0a9baf488c1818ab5fc19782f296d81137e5c5 100644
@@ -96,6 +96,7 @@ struct t7xx_fsm_ctl {
        bool                    exp_flg;
        spinlock_t              notifier_lock;          /* Protects notifier list */
        struct list_head        notifier_list;
+       u32                     status;                 /* Device boot stage */
 };
 
 struct t7xx_fsm_event {