return -ENOMEM;
cq->sq.desc_buf.size = size;
- cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
- sizeof(struct ice_sq_cd), GFP_KERNEL);
- if (!cq->sq.cmd_buf) {
- dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
- cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
- cq->sq.desc_buf.va = NULL;
- cq->sq.desc_buf.pa = 0;
- cq->sq.desc_buf.size = 0;
- return -ENOMEM;
- }
-
return 0;
}
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
} \
- /* free the buffer info list */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
/* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
- struct ice_sq_cd *details;
struct ice_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
memset(desc, 0, sizeof(*desc));
- memset(details, 0, sizeof(*details));
ntc++;
if (ntc == sq->count)
ntc = 0;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
}
sq->next_to_clean = ntc;
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- struct ice_sq_cd *details;
int status = 0;
u16 retval = 0;
u32 val = 0;
goto sq_send_command_error;
}
- details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
- if (cd)
- *details = *cd;
- else
- memset(details, 0, sizeof(*details));
-
/* Call clean and check queue available function to reclaim the
* descriptors that were processed by FW/MBX; the function returns the
* number of desc available. The clean function called here could be
ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
/* save writeback AQ if requested */
- if (details->wb_desc)
- memcpy(details->wb_desc, desc_on_ring,
- sizeof(*details->wb_desc));
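+	/* with the per-entry details ring removed, the writeback descriptor
+	 * is copied straight into the caller-provided cd
+	 */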
+ if (cd && cd->wb_desc)
+ memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc));
/* update the error if time out occurred */
if (!cmd_completed) {