// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
 *
 * Authors:
 *	Asutosh Das <quic_asutoshd@quicinc.com>
 *	Can Guo <quic_cang@quicinc.com>
 */

#include <asm/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>

#define MAX_QUEUE_SUP			GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES		2
#define UFS_MCQ_MIN_READ_QUEUES		0
#define UFS_MCQ_MIN_POLL_QUEUES		0
#define QUEUE_EN_OFFSET			31
#define QUEUE_ID_OFFSET			16

#define MCQ_CFG_MAC_MASK		GENMASK(16, 8)
#define MCQ_QCFG_SIZE			0x40
#define MCQ_ENTRY_SIZE_IN_DWORD		8
#define CQE_UCD_BA			GENMASK_ULL(63, 7)

/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000

static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops rw_queue_count_ops = {
	.set = rw_queue_count_set,
	.get = param_get_uint,
};

static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
		 "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");

static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops read_queue_count_ops = {
	.set = read_queue_count_set,
	.get = param_get_uint,
};

static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
		 "Number of interrupt driven read queues used for read. Default value is 0");

static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops poll_queue_count_ops = {
	.set = poll_queue_count_set,
	.get = param_get_uint,
};

static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
		 "Number of poll queues used for r/w. Default value is 1");

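/*
 * Illustrative usage (hypothetical invocation; the exact module name
 * depends on how the UFS core is built into your kernel):
 *
 *   modprobe ufshcd-core rw_queues=4 read_queues=2 poll_queues=1
 *
 * Each setter above clamps its count to the range
 * [per-type minimum, num_possible_cpus()].
 */
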
/**
 * ufshcd_mcq_config_mac - Set the Maximum Active Commands (MAC).
 * @hba: per adapter instance
 * @max_active_cmds: maximum # of active commands to the device at any time.
 *
 * The controller won't have more than max_active_cmds commands outstanding
 * to the device at any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
	u32 val;

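	/*
	 * The MAC field is 0-based: programming N permits N + 1 outstanding
	 * commands, hence the "- 1" below.
	 */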
	val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
	val &= ~MCQ_CFG_MAC_MASK;
	val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
	ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);

/**
 * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
 * request would be issued.
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
 * Return: the hardware queue instance on which the request would
 * be queued.
 */
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
					   struct request *req)
{
	u32 utag = blk_mq_unique_tag(req);
	u32 hwq = blk_mq_unique_tag_to_hwq(utag);

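	/*
	 * blk_mq_unique_tag() packs the hardware queue index into the upper
	 * 16 bits of the tag; blk_mq_unique_tag_to_hwq() recovers that
	 * index, which maps 1:1 onto hba->uhq[].
	 */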
	return &hba->uhq[hwq];
}

/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue depth on success; a negative error code on failure.
 *
 * MAC - Max. Active Commands of the Host Controller (HC).
 * The HC won't send more than this many commands to the device.
 * It is mandatory to implement get_hba_mac() to enable MCQ mode.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and the UFS device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
	int mac;

	/* Mandatory to implement get_hba_mac() */
	mac = ufshcd_mcq_vops_get_hba_mac(hba);
	if (mac < 0) {
		dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
		return mac;
	}

	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
	/*
	 * max. value of bqueuedepth = 256, mac is host dependent.
	 * It is mandatory for a UFS device to define bQueueDepth if
	 * shared queuing architecture is enabled.
	 */
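	/* Illustrative: a host MAC of 64 and bQueueDepth = 32 give depth 32 */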
	return min_t(int, mac, hba->dev_info.bqueuedepth);
}

static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
	int i;
	u32 hba_maxq, rem, tot_queues;
	struct Scsi_Host *host = hba->host;

	/* maxq is a 0-based value */
	hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;

	tot_queues = read_queues + poll_queues + rw_queues;

	if (hba_maxq < tot_queues) {
		dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
			tot_queues, hba_maxq);
		return -EOPNOTSUPP;
	}

	rem = hba_maxq;

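	/*
	 * Hand out the controller's queues: counts pinned via module
	 * parameters are deducted from "rem" first; any remainder backs
	 * the default (rw) queues below.
	 */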
	if (rw_queues) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
		rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
	} else {
		rw_queues = num_possible_cpus();
	}

	if (poll_queues) {
		hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
		rem -= hba->nr_queues[HCTX_TYPE_POLL];
	}

	if (read_queues) {
		hba->nr_queues[HCTX_TYPE_READ] = read_queues;
		rem -= hba->nr_queues[HCTX_TYPE_READ];
	}

	if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
		hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
							 num_possible_cpus());

	for (i = 0; i < HCTX_MAX_TYPES; i++)
		host->nr_hw_queues += hba->nr_queues[i];

	hba->nr_hw_queues = host->nr_hw_queues;
	return 0;
}

int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	size_t utrdl_size, cqe_size;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];

		utrdl_size = sizeof(struct utp_transfer_req_desc) *
			     hwq->max_entries;
		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
							 &hwq->sqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->sqe_dma_addr) {
			dev_err(hba->dev, "SQE allocation failed\n");
			return -ENOMEM;
		}

		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
							 &hwq->cqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->cqe_dma_addr) {
			dev_err(hba->dev, "CQE allocation failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}

/* Operation and runtime registers configuration */
#define MCQ_CFG_n(r, i)	((r) + MCQ_QCFG_SIZE * (i))
#define MCQ_OPR_OFFSET_n(p, i) \
	(hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))

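/*
 * Each hardware queue owns a 0x40-byte slice of the config region, so
 * e.g. MCQ_CFG_n(REG_SQLBA, 2) yields the SQ lower base address
 * register of hardware queue 2.
 */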
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
				  enum ufshcd_mcq_opr n, int i)
{
	struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];

	return opr->base + opr->stride * i;
}

u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
	return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);

void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
	writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);

/*
 * Current MCQ specification doesn't provide a Task Tag or its equivalent in
 * the Completion Queue Entry. Find the Task Tag using an indirect method.
 */
static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{
	u64 addr;

	/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
	BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));

	/* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
	addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
		hba->ucdl_dma_addr;

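	/*
	 * The UCDs sit back to back in a single DMA region, so the byte
	 * offset of this UCD from the region start, divided by the UCD
	 * size, is the task tag that owns the completed command.
	 */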
	return div_u64(addr, ufshcd_get_ucd_size(hba));
}

static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{
	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
	int tag = ufshcd_mcq_get_tag(hba, cqe);

	if (cqe->command_desc_base_addr) {
		ufshcd_compl_one_cqe(hba, tag, cqe);
		/* After processing the CQE, mark it as an empty (invalid) entry */
		cqe->command_desc_base_addr = 0;
	}
}

void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq)
{
	unsigned long flags;
	u32 entries = hwq->max_entries;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	while (entries > 0) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		entries--;
	}

	ufshcd_mcq_update_cq_tail_slot(hwq);
	hwq->cq_head_slot = hwq->cq_tail_slot;
	spin_unlock_irqrestore(&hwq->cq_lock, flags);
}

unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{
	unsigned long completed_reqs = 0;
	unsigned long flags;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	ufshcd_mcq_update_cq_tail_slot(hwq);
	while (!ufshcd_mcq_is_cq_empty(hwq)) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		completed_reqs++;
	}

	if (completed_reqs)
		ufshcd_mcq_update_cq_head(hwq);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return completed_reqs;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);

void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	u16 qsize;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->id = i;
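		/*
		 * The size attribute is a 0-based count in dwords; each
		 * SQ/CQ entry occupies MCQ_ENTRY_SIZE_IN_DWORD (8) dwords.
		 */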
		qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

		/* Submission Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
			       MCQ_CFG_n(REG_SQLBA, i));
		/* Submission Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
			       MCQ_CFG_n(REG_SQUBA, i));
		/* Submission Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
			       MCQ_CFG_n(REG_SQDAO, i));
		/* Submission Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
			       MCQ_CFG_n(REG_SQISAO, i));

		/* Completion Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
			       MCQ_CFG_n(REG_CQLBA, i));
		/* Completion Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
			       MCQ_CFG_n(REG_CQUBA, i));
		/* Completion Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
			       MCQ_CFG_n(REG_CQDAO, i));
		/* Completion Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
			       MCQ_CFG_n(REG_CQISAO, i));

		/* Save the base addresses for quicker access */
		hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
		hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
		hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
		hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;

		/* Reinitializing is needed upon HC reset */
		hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;

		/* Enable Tail Entry Push Status interrupt only for non-poll queues */
		if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
			writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);

		/* Completion Queue Enable|Size to Completion Queue Attribute */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
			      MCQ_CFG_n(REG_CQATTR, i));

		/*
		 * Submission Queue Enable|Size|Completion Queue ID to
		 * Submission Queue Attribute
		 */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
			      (i << QUEUE_ID_OFFSET),
			      MCQ_CFG_n(REG_SQATTR, i));
	}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);

void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
		      REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);

void ufshcd_mcq_enable(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);

void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
	ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);

int ufshcd_mcq_init(struct ufs_hba *hba)
{
	struct Scsi_Host *host = hba->host;
	struct ufs_hw_queue *hwq;
	int ret, i;

	ret = ufshcd_mcq_config_nr_queues(hba);
	if (ret)
		return ret;

	ret = ufshcd_vops_mcq_config_resource(hba);
	if (ret)
		return ret;

	ret = ufshcd_mcq_vops_op_runtime_config(hba);
	if (ret) {
		dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
			ret);
		return ret;
	}
	hba->uhq = devm_kzalloc(hba->dev,
				hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
				GFP_KERNEL);
	if (!hba->uhq) {
		dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->max_entries = hba->nutrs + 1;
		spin_lock_init(&hwq->sq_lock);
		spin_lock_init(&hwq->cq_lock);
		mutex_init(&hwq->sq_mutex);
	}

	/* The very first HW queue serves device commands */
	hba->dev_cmd_queue = &hba->uhq[0];

	host->host_tagset = 1;
	return 0;
}

static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

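	/*
	 * Request the HC to stop fetching from this SQ, then poll the
	 * run-time status register until it reports the stopped state.
	 */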
	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

/**
 * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
 * associated with the pending command.
 * @hba: per adapter instance.
 * @task_tag: The command's task tag.
 *
 * Return: 0 for success; error code otherwise.
 */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct ufs_hw_queue *hwq;
	void __iomem *reg, *opr_sqd_base;
	u32 nexus, id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
		if (!cmd)
			return -EINVAL;
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
	} else {
		hwq = hba->dev_cmd_queue;
	}

	id = hwq->id;

	mutex_lock(&hwq->sq_mutex);

	/* Stop the SQ fetching before working on it */
	err = ufshcd_mcq_sq_stop(hba, hwq);
	if (err)
		goto unlock;

	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
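	/* e.g. LUN 0x2 with task tag 0x15 encodes as nexus 0x0215
	 * (task tag in bits 7:0, LUN in bits 15:8) */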
	nexus = lrbp->lun << 8 | task_tag;
	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
	writel(nexus, opr_sqd_base + REG_SQCTI);

	/* SQRTCy.ICU = 1 */
	writel(SQ_ICU, opr_sqd_base + REG_SQRTC);

	/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
	reg = opr_sqd_base + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
			__func__, id, task_tag,
			FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));

	if (ufshcd_mcq_sq_start(hba, hwq))
		err = -ETIMEDOUT;

unlock:
	mutex_unlock(&hwq->sq_mutex);
	return err;
}

/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
	utrd->header.command_type = 0xf;
}

/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue.
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command is present in the SQ
 * (not fetched by the controller); returns false if the SQE is not in the SQ.
 */
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
				  struct ufs_hw_queue *hwq, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct utp_transfer_req_desc *utrd;
	__le64 cmd_desc_base_addr;
	bool ret = false;
	u64 addr, match;
	u32 sq_head_slot;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return true;

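	/*
	 * Stop the SQ while scanning it so the controller cannot fetch
	 * (and race with) the entry we may be about to nullify.
	 */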
	mutex_lock(&hwq->sq_mutex);

	ufshcd_mcq_sq_stop(hba, hwq);
	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
	if (sq_head_slot == hwq->sq_tail_slot)
		goto out;

	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;

	while (sq_head_slot != hwq->sq_tail_slot) {
		utrd = hwq->sqe_base_addr +
			sq_head_slot * sizeof(struct utp_transfer_req_desc);
		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
		if (addr == match) {
			ufshcd_mcq_nullify_sqe(utrd);
			ret = true;
			goto out;
		}

		sq_head_slot++;
		if (sq_head_slot == hwq->max_entries)
			sq_head_slot = 0;
	}

out:
	ufshcd_mcq_sq_start(hba, hwq);
	mutex_unlock(&hwq->sq_mutex);
	return ret;
}

/**
 * ufshcd_mcq_abort - Abort the command in MCQ.
 * @cmd: The command to be aborted.
 *
 * Return: SUCCESS or FAILED error code.
 */
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct ufs_hw_queue *hwq;
	unsigned long flags;
	int err = FAILED;

	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
		dev_err(hba->dev,
			"%s: skip abort. cmd at tag %d already completed.\n",
			__func__, tag);
		goto out;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
			__func__, tag);
		goto out;
	}

	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
		/*
		 * Failure. The command should not have been "stuck" in the
		 * SQ for so long that it had to be aborted.
		 */
		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
			__func__, hwq->id, tag);
		goto out;
	}

	/*
	 * The command is not in the submission queue, and it is not
	 * in the completion queue either. Query the device to see if
	 * the command is being processed in the device.
	 */
	if (ufshcd_try_to_abort_task(hba, tag)) {
		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
		lrbp->req_abort_skip = true;
		goto out;
	}

	err = SUCCESS;
	spin_lock_irqsave(&hwq->cq_lock, flags);
	if (ufshcd_cmd_inflight(lrbp->cmd))
		ufshcd_release_scsi_cmd(hba, lrbp);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

out:
	return err;
}