rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
}
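+ /* bind each BB context to its PHY index once at init */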
+ rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0;
+ rtwdev->bbs[RTW89_PHY_1].phy_idx = RTW89_PHY_1;
+
INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
INIT_WORK(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
struct rtw89_phy_ul_tb_info ul_tb_info;
struct rtw89_antdiv_info antdiv;
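+ /* per-PHY baseband (BB) state; bbs[i] always serves PHY index i */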
+ struct rtw89_bb_ctx {
+ enum rtw89_phy_idx phy_idx;
+ } bbs[RTW89_PHY_NUM];
+
struct delayed_work track_work;
struct delayed_work chanctx_work;
struct delayed_work coex_act1_work;
}
}
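+/* Map the current MLO/DBCC operating mode to a bitmap of the PHYs
+ * carrying traffic; PHY 0 alone whenever DBCC is disabled.
+ */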
+static inline u8 rtw89_get_active_phy_bitmap(struct rtw89_dev *rtwdev)
+{
+ if (!rtwdev->dbcc_en)
+ return BIT(RTW89_PHY_0);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_0_PLUS_2_1RF:
+ case MLO_0_PLUS_2_2RF:
+ return BIT(RTW89_PHY_1);
+ case MLO_1_PLUS_1_1RF:
+ case MLO_1_PLUS_1_2RF:
+ case MLO_2_PLUS_2_2RF:
+ case DBCC_LEGACY:
+ return BIT(RTW89_PHY_0) | BIT(RTW89_PHY_1);
+ case MLO_2_PLUS_0_1RF:
+ case MLO_2_PLUS_0_2RF:
+ default:
+ return BIT(RTW89_PHY_0);
+ }
+}
+
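+/* Iterate over the BB contexts whose PHY is active in the current
+ * operating mode. @bb must be a pre-declared struct rtw89_bb_ctx
+ * pointer, e.g.
+ *
+ * struct rtw89_bb_ctx *bb;
+ *
+ * rtw89_for_each_active_bb(rtwdev, bb)
+ * rtw89_phy_do_something(rtwdev, bb->phy_idx);
+ *
+ * (rtw89_phy_do_something() is a hypothetical callee, shown for
+ * illustration only.)
+ */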
+#define rtw89_for_each_active_bb(rtwdev, bb) \
+ for (u8 __active_bb_bitmap = rtw89_get_active_phy_bitmap(rtwdev), \
+ __phy_idx = 0; __phy_idx < RTW89_PHY_NUM; __phy_idx++) \
+ if ((__active_bb_bitmap & BIT(__phy_idx)) && \
+ ((bb) = &(rtwdev)->bbs[__phy_idx]))
+
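+/* Iterate over every BB the hardware is capable of running: both PHYs
+ * when DBCC is enabled, otherwise PHY 0 only, independent of which
+ * PHYs the current mode actually activates.
+ */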
+#define rtw89_for_each_capab_bb(rtwdev, bb) \
+ for (u8 __phy_idx_max = (rtwdev)->dbcc_en ? RTW89_PHY_1 : RTW89_PHY_0, \
+ __phy_idx = 0; __phy_idx <= __phy_idx_max; __phy_idx++) \
+ if (((bb) = &(rtwdev)->bbs[__phy_idx]))
+
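+/* Look up the BB context serving @phy_idx; an out-of-range index falls
+ * back to PHY 0 rather than indexing past the array.
+ */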
+static inline
+struct rtw89_bb_ctx *rtw89_get_bb_ctx(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx >= RTW89_PHY_NUM)
+ return &rtwdev->bbs[RTW89_PHY_0];
+
+ return &rtwdev->bbs[phy_idx];
+}
+
static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;