// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
        DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)

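/* hclgevf_ring_space - number of free descriptors left in the ring; one
 * slot is always kept unused so a full ring can be distinguished from an
 * empty one.
 */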
static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
        int ntc = ring->next_to_clean;
        int ntu = ring->next_to_use;
        int used;

        used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

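/* A head value reported by hardware is only plausible if it lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */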
static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
                                           int head)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;

        if (ntu > ntc)
                return head >= ntc && head <= ntu;

        return head >= ntc || head <= ntu;
}

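/* hclgevf_cmd_csq_clean - reclaim descriptors already consumed by firmware:
 * read the hardware head pointer and advance next_to_clean up to it.
 * Returns the number of descriptors cleaned, or -EIO on an invalid head.
 */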
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
        struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
        struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
        int clean;
        u32 head;

        head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
        rmb(); /* Make sure head is ready before touching any data */

        if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
                dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
                         csq->next_to_use, csq->next_to_clean);
                dev_warn(&hdev->pdev->dev,
                         "Disabling any further commands to IMP firmware\n");
                set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
                return -EIO;
        }

        clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
        csq->next_to_clean = head;
        return clean;
}

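/* The CSQ is fully processed once the hardware head pointer has caught up
 * with the software tail (next_to_use).
 */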
static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
        u32 head;

        head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

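/* Special opcodes span multiple descriptors; their return code is valid
 * only in the first descriptor of the sequence.
 */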
static bool hclgevf_is_special_opcode(u16 opcode)
{
        static const u16 spec_opcode[] = {0x30, 0x31, 0x32};
        int i;

        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
                if (spec_opcode[i] == opcode)
                        return true;
        }

        return false;
}

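/* Program a ring's DMA base address, depth and head/tail pointers into
 * the matching CSQ or CRQ register block.
 */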
static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
{
        struct hclgevf_dev *hdev = ring->dev;
        struct hclgevf_hw *hw = &hdev->hw;
        u32 reg_val;

        if (ring->flag == HCLGEVF_TYPE_CSQ) {
                reg_val = lower_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
                reg_val = upper_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

                reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
                reg_val &= HCLGEVF_NIC_SW_RST_RDY;
                reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
        } else {
                reg_val = lower_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
                reg_val = upper_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

                reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
        }
}

static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
{
        hclgevf_cmd_config_regs(&hw->cmq.csq);
        hclgevf_cmd_config_regs(&hw->cmq.crq);
}

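/* Allocate the DMA-coherent descriptor array backing a command ring. */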
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclgevf_desc);

        ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
                                        &ring->desc_dma_addr, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        return 0;
}

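/* Free the descriptor array and clear the stale pointer. */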
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclgevf_desc);

        if (ring->desc) {
                dma_free_coherent(cmq_ring_to_dev(ring), size,
                                  ring->desc, ring->desc_dma_addr);
                ring->desc = NULL;
        }
}

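/* Bind the CSQ or CRQ ring to the device and allocate its descriptors. */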
static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
        struct hclgevf_hw *hw = &hdev->hw;
        struct hclgevf_cmq_ring *ring =
                (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
        int ret;

        ring->dev = hdev;
        ring->flag = ring_type;

        /* allocate CSQ/CRQ descriptor */
        ret = hclgevf_alloc_cmd_desc(ring);
        if (ret)
                dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
                        (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");

        return ret;
}

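/* hclgevf_cmd_setup_basic_desc - zero a descriptor and fill in its opcode
 * and direction flags before it is queued for sending.
 */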
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
                                  enum hclgevf_opcode_type opcode, bool is_read)
{
        memset(desc, 0, sizeof(struct hclgevf_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
                                 HCLGEVF_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}

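/* Map a firmware return code from the descriptor into a standard errno. */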
static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
        switch (desc_ret) {
        case HCLGEVF_CMD_EXEC_SUCCESS:
                return 0;
        case HCLGEVF_CMD_NO_AUTH:
                return -EPERM;
        case HCLGEVF_CMD_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case HCLGEVF_CMD_QUEUE_FULL:
                return -EXFULL;
        case HCLGEVF_CMD_NEXT_ERR:
                return -ENOSR;
        case HCLGEVF_CMD_UNEXE_ERR:
                return -ENOTBLK;
        case HCLGEVF_CMD_PARA_ERR:
                return -EINVAL;
        case HCLGEVF_CMD_RESULT_ERR:
                return -ERANGE;
        case HCLGEVF_CMD_TIMEOUT:
                return -ETIME;
        case HCLGEVF_CMD_HILINK_ERR:
                return -ENOLINK;
        case HCLGEVF_CMD_QUEUE_ILLEGAL:
                return -ENXIO;
        case HCLGEVF_CMD_INVALID:
                return -EBADR;
        default:
                return -EIO;
        }
}

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it posts the
 * descriptors to the CSQ, rings the doorbell, waits for completion of
 * synchronous commands and cleans the queue afterwards.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
        struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
        struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
        struct hclgevf_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        int status = 0;
        u16 retval;
        u16 opcode;
        int ntc;

        spin_lock_bh(&hw->cmq.csq.lock);

        if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }

        if (num > hclgevf_ring_space(&hw->cmq.csq)) {
                /* If the CMDQ ring is full, SW HEAD and HW HEAD may differ,
                 * so update the SW HEAD pointer csq->next_to_clean
                 */
                csq->next_to_clean = hclgevf_read_dev(hw,
                                                      HCLGEVF_NIC_CSQ_HEAD_REG);
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }

        /* Record the location of desc in the ring for this time
         * which will be used for the hardware write back
         */
        ntc = hw->cmq.csq.next_to_use;
        opcode = le16_to_cpu(desc[0].opcode);
        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
                          hw->cmq.csq.next_to_use);

        /* If the command is sync, wait for the firmware to write back;
         * if multiple descriptors are sent, use the first one to check
         */
        if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
                do {
                        if (hclgevf_cmd_csq_done(hw))
                                break;
                        udelay(1);
                        timeout++;
                } while (timeout < hw->cmq.tx_timeout);
        }

        if (hclgevf_cmd_csq_done(hw)) {
                complete = true;
                handle = 0;

                while (handle < num) {
                        /* Get the result of hardware write back */
                        desc_to_use = &hw->cmq.csq.desc[ntc];
                        desc[handle] = *desc_to_use;

                        if (likely(!hclgevf_is_special_opcode(opcode)))
                                retval = le16_to_cpu(desc[handle].retval);
                        else
                                retval = le16_to_cpu(desc[0].retval);

                        status = hclgevf_cmd_convert_err_code(retval);
                        hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
                        ntc++;
                        handle++;
                        if (ntc == hw->cmq.csq.desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                status = -EBADE;

        /* Clean the command send queue */
        handle = hclgevf_cmd_csq_clean(hw);
        if (handle != num)
                dev_warn(&hdev->pdev->dev,
                         "cleaned %d, need to clean %d\n", handle, num);

        spin_unlock_bh(&hw->cmq.csq.lock);

        return status;
}

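/* Query the running firmware version over the command queue. */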
static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
                                              u32 *version)
{
        struct hclgevf_query_version_cmd *resp;
        struct hclgevf_desc desc;
        int status;

        resp = (struct hclgevf_query_version_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
        status = hclgevf_cmd_send(hw, &desc, 1);
        if (!status)
                *version = le32_to_cpu(resp->firmware);

        return status;
}

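/* hclgevf_cmd_queue_init - one-time setup of the CSQ and CRQ rings:
 * initialize the locks, set ring depth and timeout, and allocate the
 * descriptor memory.
 */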
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
        int ret;

        /* Setup the lock for command queue */
        spin_lock_init(&hdev->hw.cmq.csq.lock);
        spin_lock_init(&hdev->hw.cmq.crq.lock);

        hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
        hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
        hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;

        ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "CSQ ring setup error %d\n", ret);
                return ret;
        }

        ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "CRQ ring setup error %d\n", ret);
                goto err_csq;
        }

        return 0;
err_csq:
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        return ret;
}

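/* hclgevf_cmd_init - reset the software ring state, program the command
 * queue registers and verify the channel by querying the firmware version;
 * bails out early if a new reset became pending in the meantime.
 */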
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
        u32 version;
        int ret;

        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);

        /* initialize the pointers of async rx queue of mailbox */
        hdev->arq.hdev = hdev;
        hdev->arq.head = 0;
        hdev->arq.tail = 0;
        atomic_set(&hdev->arq.count, 0);
        hdev->hw.cmq.csq.next_to_clean = 0;
        hdev->hw.cmq.csq.next_to_use = 0;
        hdev->hw.cmq.crq.next_to_clean = 0;
        hdev->hw.cmq.crq.next_to_use = 0;

        hclgevf_cmd_init_regs(&hdev->hw);

        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);

        clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

        /* Check if there is a new reset pending, because the higher level
         * reset may happen when the lower level reset is being processed.
         */
        if (hclgevf_is_reset_pending(hdev)) {
                ret = -EBUSY;
                goto err_cmd_init;
        }

        /* get firmware version */
        ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to query firmware version\n", ret);
                goto err_cmd_init;
        }
        hdev->fw_version = version;

        dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
                                 HNAE3_FW_VERSION_BYTE3_SHIFT),
                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
                                 HNAE3_FW_VERSION_BYTE2_SHIFT),
                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
                                 HNAE3_FW_VERSION_BYTE1_SHIFT),
                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
                                 HNAE3_FW_VERSION_BYTE0_SHIFT));

        return 0;

err_cmd_init:
        set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

        return ret;
}

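/* Clear the CSQ/CRQ registers so the hardware stops using the rings. */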
static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
{
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
}

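/* hclgevf_cmd_uninit - tear down the command queue: clear the registers
 * under both ring locks, then free the descriptor memory.
 */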
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);
        clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
        hclgevf_cmd_uninit_regs(&hdev->hw);
        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}