scsi: hisi_sas: Init disks after controller reset
[linux-2.6-block.git] drivers/scsi/hisi_sas/hisi_sas_main.c
1/*
2 * Copyright (c) 2015 Linaro Ltd.
3 * Copyright (c) 2015 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 */
11
12#include "hisi_sas.h"
13#define DRV_NAME "hisi_sas"
14
42e7a693
JG
15#define DEV_IS_GONE(dev) \
16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17
cac9b2a2
JG
18static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19 u8 *lun, struct hisi_sas_tmf_task *tmf);
441c2740
JG
20static int
21hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 struct domain_device *device,
23 int abort_flag, int tag);
7c594f04 24static int hisi_sas_softreset_ata_disk(struct domain_device *device);
057c3d1f
XT
25static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26 void *funcdata);
d5a60dfd
XC
27static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
28 struct domain_device *device);
29static void hisi_sas_dev_gone(struct domain_device *device);
cac9b2a2 30
468f4b8d 31u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
6c7bb8a1 32{
468f4b8d 33 switch (fis->command) {
6c7bb8a1
XC
34 case ATA_CMD_FPDMA_WRITE:
35 case ATA_CMD_FPDMA_READ:
36 case ATA_CMD_FPDMA_RECV:
37 case ATA_CMD_FPDMA_SEND:
38 case ATA_CMD_NCQ_NON_DATA:
edafeef4 39 return HISI_SAS_SATA_PROTOCOL_FPDMA;
6c7bb8a1
XC
40
41 case ATA_CMD_DOWNLOAD_MICRO:
42 case ATA_CMD_ID_ATA:
43 case ATA_CMD_PMP_READ:
44 case ATA_CMD_READ_LOG_EXT:
45 case ATA_CMD_PIO_READ:
46 case ATA_CMD_PIO_READ_EXT:
47 case ATA_CMD_PMP_WRITE:
48 case ATA_CMD_WRITE_LOG_EXT:
49 case ATA_CMD_PIO_WRITE:
50 case ATA_CMD_PIO_WRITE_EXT:
edafeef4 51 return HISI_SAS_SATA_PROTOCOL_PIO;
6c7bb8a1
XC
52
53 case ATA_CMD_DSM:
54 case ATA_CMD_DOWNLOAD_MICRO_DMA:
55 case ATA_CMD_PMP_READ_DMA:
56 case ATA_CMD_PMP_WRITE_DMA:
57 case ATA_CMD_READ:
58 case ATA_CMD_READ_EXT:
59 case ATA_CMD_READ_LOG_DMA_EXT:
60 case ATA_CMD_READ_STREAM_DMA_EXT:
61 case ATA_CMD_TRUSTED_RCV_DMA:
62 case ATA_CMD_TRUSTED_SND_DMA:
63 case ATA_CMD_WRITE:
64 case ATA_CMD_WRITE_EXT:
65 case ATA_CMD_WRITE_FUA_EXT:
66 case ATA_CMD_WRITE_QUEUED:
67 case ATA_CMD_WRITE_LOG_DMA_EXT:
68 case ATA_CMD_WRITE_STREAM_DMA_EXT:
c3fe8a2b 69 case ATA_CMD_ZAC_MGMT_IN:
edafeef4 70 return HISI_SAS_SATA_PROTOCOL_DMA;
6c7bb8a1
XC
71
72 case ATA_CMD_CHK_POWER:
73 case ATA_CMD_DEV_RESET:
74 case ATA_CMD_EDD:
75 case ATA_CMD_FLUSH:
76 case ATA_CMD_FLUSH_EXT:
77 case ATA_CMD_VERIFY:
78 case ATA_CMD_VERIFY_EXT:
79 case ATA_CMD_SET_FEATURES:
80 case ATA_CMD_STANDBY:
81 case ATA_CMD_STANDBYNOW1:
c3fe8a2b 82 case ATA_CMD_ZAC_MGMT_OUT:
edafeef4 83 return HISI_SAS_SATA_PROTOCOL_NONDATA;
468f4b8d 84
3ff0f0b6
XT
85 case ATA_CMD_SET_MAX:
86 switch (fis->features) {
87 case ATA_SET_MAX_PASSWD:
88 case ATA_SET_MAX_LOCK:
89 return HISI_SAS_SATA_PROTOCOL_PIO;
468f4b8d 90
3ff0f0b6
XT
91 case ATA_SET_MAX_PASSWD_DMA:
92 case ATA_SET_MAX_UNLOCK_DMA:
93 return HISI_SAS_SATA_PROTOCOL_DMA;
94
95 default:
96 return HISI_SAS_SATA_PROTOCOL_NONDATA;
468f4b8d 97 }
3ff0f0b6
XT
98
99 default:
100 {
6c7bb8a1
XC
101 if (direction == DMA_NONE)
102 return HISI_SAS_SATA_PROTOCOL_NONDATA;
103 return HISI_SAS_SATA_PROTOCOL_PIO;
104 }
468f4b8d 105 }
6c7bb8a1
XC
106}
107EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
108
75904077
XC
109void hisi_sas_sata_done(struct sas_task *task,
110 struct hisi_sas_slot *slot)
111{
112 struct task_status_struct *ts = &task->task_status;
113 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
f557e32c
XT
114 struct hisi_sas_status_buffer *status_buf =
115 hisi_sas_status_buf_addr_mem(slot);
116 u8 *iu = &status_buf->iu[0];
117 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
75904077
XC
118
119 resp->frame_len = sizeof(struct dev_to_host_fis);
120 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
121
122 ts->buf_valid_size = sizeof(*resp);
123}
124EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
125
318913c6
XC
126int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
127{
128 struct ata_queued_cmd *qc = task->uldd_task;
129
130 if (qc) {
131 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
132 qc->tf.command == ATA_CMD_FPDMA_READ) {
133 *tag = qc->tag;
134 return 1;
135 }
136 }
137 return 0;
138}
139EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
140
c2c1d9de
XC
141/*
142 * This function assumes linkrate mask fits in 8 bits, which it
143 * does for all HW versions supported.
144 */
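/* For example, a max of SAS_LINK_RATE_6_0_GBPS gives a mask of 0x15: one bit,
 * two positions apart, for each of the 1.5/3.0/6.0 Gbit rates.
 */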
145u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
146{
147 u16 rate = 0;
148 int i;
149
150 max -= SAS_LINK_RATE_1_5_GBPS;
151 for (i = 0; i <= max; i++)
152 rate |= 1 << (i * 2);
153 return rate;
154}
155EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
156
42e7a693
JG
157static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
158{
159 return device->port->ha->lldd_ha;
160}
161
2e244f0f
JG
162struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
163{
164 return container_of(sas_port, struct hisi_sas_port, sas_port);
165}
166EXPORT_SYMBOL_GPL(to_hisi_sas_port);
167
a25d0d3d
XC
168void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
169{
170 int phy_no;
171
172 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
173 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
174}
175EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
176
257efd1f
JG
177static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
178{
179 void *bitmap = hisi_hba->slot_index_tags;
180
181 clear_bit(slot_idx, bitmap);
182}
183
42e7a693
JG
184static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
185{
186 hisi_sas_slot_index_clear(hisi_hba, slot_idx);
187}
188
189static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
190{
191 void *bitmap = hisi_hba->slot_index_tags;
192
193 set_bit(slot_idx, bitmap);
194}
195
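/* Allocate a command tag round-robin: search from just after the last
 * allocated index and wrap back to the start of the bitmap if needed.
 */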
196static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
197{
198 unsigned int index;
199 void *bitmap = hisi_hba->slot_index_tags;
200
fa3be0f2
XC
201 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
202 hisi_hba->last_slot_index + 1);
203 if (index >= hisi_hba->slot_index_count) {
204 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
205 0);
206 if (index >= hisi_hba->slot_index_count)
207 return -SAS_QUEUE_FULL;
208 }
42e7a693
JG
209 hisi_sas_slot_index_set(hisi_hba, index);
210 *slot_idx = index;
fa3be0f2
XC
211 hisi_hba->last_slot_index = index;
212
42e7a693
JG
213 return 0;
214}
215
257efd1f
JG
216static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
217{
218 int i;
219
220 for (i = 0; i < hisi_hba->slot_index_count; ++i)
221 hisi_sas_slot_index_clear(hisi_hba, i);
222}
27a3f229
JG
223
224void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
225 struct hisi_sas_slot *slot)
226{
e85d93b2
XC
227 struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
228 unsigned long flags;
27a3f229 229
d3c4dd4e 230 if (task) {
11b75249 231 struct device *dev = hisi_hba->dev;
27a3f229 232
6ba0fbc3
XT
233 if (!task->lldd_task)
234 return;
235
236 task->lldd_task = NULL;
237
d3c4dd4e
JG
238 if (!sas_protocol_ata(task->task_proto))
239 if (slot->n_elem)
dc1e4730
XC
240 dma_unmap_sg(dev, task->scatter,
241 task->num_scatter,
d3c4dd4e 242 task->data_dir);
d3c4dd4e 243 }
27a3f229 244
f557e32c
XT
245 if (slot->buf)
246 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
27a3f229 247
e85d93b2 248 spin_lock_irqsave(&dq->lock, flags);
27a3f229 249 list_del_init(&slot->entry);
e85d93b2 250 spin_unlock_irqrestore(&dq->lock, flags);
6ba0fbc3 251 slot->buf = NULL;
27a3f229
JG
252 slot->task = NULL;
253 slot->port = NULL;
e85d93b2 254 spin_lock_irqsave(&hisi_hba->lock, flags);
27a3f229 255 hisi_sas_slot_index_free(hisi_hba, slot->idx);
e85d93b2 256 spin_unlock_irqrestore(&hisi_hba->lock, flags);
d3c4dd4e 257
59ba49f9 258 /* slot memory is fully zeroed when it is reused */
27a3f229
JG
259}
260EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
261
a2b3820b 262static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
66ee999b
JG
263 struct hisi_sas_slot *slot)
264{
a2b3820b 265 hisi_hba->hw->prep_smp(hisi_hba, slot);
66ee999b
JG
266}
267
a2b3820b 268static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
42e7a693
JG
269 struct hisi_sas_slot *slot, int is_tmf,
270 struct hisi_sas_tmf_task *tmf)
271{
a2b3820b 272 hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
42e7a693
JG
273}
274
a2b3820b 275static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
6f2ff1a1
JG
276 struct hisi_sas_slot *slot)
277{
a2b3820b 278 hisi_hba->hw->prep_stp(hisi_hba, slot);
6f2ff1a1
JG
279}
280
a2b3820b 281static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
441c2740
JG
282 struct hisi_sas_slot *slot,
283 int device_id, int abort_flag, int tag_to_abort)
284{
a2b3820b 285 hisi_hba->hw->prep_abort(hisi_hba, slot,
441c2740
JG
286 device_id, abort_flag, tag_to_abort);
287}
288
cac9b2a2
JG
289/*
290 * This function will issue an abort TMF regardless of whether the
 291 * task is in the sdev or not. Then it will do the task completion
 292 * cleanup and callbacks.
293 */
294static void hisi_sas_slot_abort(struct work_struct *work)
295{
296 struct hisi_sas_slot *abort_slot =
297 container_of(work, struct hisi_sas_slot, abort_slot);
298 struct sas_task *task = abort_slot->task;
299 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
300 struct scsi_cmnd *cmnd = task->uldd_task;
301 struct hisi_sas_tmf_task tmf_task;
cac9b2a2 302 struct scsi_lun lun;
11b75249 303 struct device *dev = hisi_hba->dev;
cac9b2a2
JG
304 int tag = abort_slot->idx;
305
306 if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
307 dev_err(dev, "cannot abort slot for non-ssp task\n");
308 goto out;
309 }
310
311 int_to_scsilun(cmnd->device->lun, &lun);
312 tmf_task.tmf = TMF_ABORT_TASK;
313 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
314
315 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
316out:
317 /* Do cleanup for this task */
318 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
319 if (task->task_done)
320 task->task_done(task);
cac9b2a2
JG
321}
322
2f6bca20
XT
323static int hisi_sas_task_prep(struct sas_task *task,
324 struct hisi_sas_dq **dq_pointer,
fa222db0
XC
325 int is_tmf, struct hisi_sas_tmf_task *tmf,
326 int *pass)
42e7a693
JG
327{
328 struct domain_device *device = task->dev;
2f6bca20 329 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
42e7a693
JG
330 struct hisi_sas_device *sas_dev = device->lldd_dev;
331 struct hisi_sas_port *port;
332 struct hisi_sas_slot *slot;
333 struct hisi_sas_cmd_hdr *cmd_hdr_base;
2e244f0f 334 struct asd_sas_port *sas_port = device->port;
11b75249 335 struct device *dev = hisi_hba->dev;
7eee4b92 336 int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
2f6bca20 337 int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
fa222db0 338 unsigned long flags, flags_dq;
2f6bca20 339 struct hisi_sas_dq *dq;
fa222db0 340 int wr_q_index;
42e7a693 341
2e244f0f 342 if (!sas_port) {
42e7a693
JG
343 struct task_status_struct *ts = &task->task_status;
344
345 ts->resp = SAS_TASK_UNDELIVERED;
346 ts->stat = SAS_PHY_DOWN;
347 /*
 348 * libsas will use dev->port, so do not
 349 * call task_done for SATA devices
350 */
351 if (device->dev_type != SAS_SATA_DEV)
352 task->task_done(task);
6bf6db51 353 return -ECOMM;
42e7a693
JG
354 }
355
356 if (DEV_IS_GONE(sas_dev)) {
357 if (sas_dev)
ad604832 358 dev_info(dev, "task prep: device %d not ready\n",
42e7a693
JG
359 sas_dev->device_id);
360 else
361 dev_info(dev, "task prep: device %016llx not ready\n",
362 SAS_ADDR(device->sas_addr));
363
6bf6db51 364 return -ECOMM;
42e7a693 365 }
2e244f0f 366
2f6bca20
XT
367 *dq_pointer = dq = sas_dev->dq;
368
2e244f0f 369 port = to_hisi_sas_port(sas_port);
9859f24e 370 if (port && !port->port_attached) {
09fe9ecb 371 dev_info(dev, "task prep: %s port%d not attach device\n",
6073b771 372 (dev_is_sata(device)) ?
09fe9ecb
JG
373 "SATA/STP" : "SAS",
374 device->port->id);
375
6bf6db51 376 return -ECOMM;
42e7a693
JG
377 }
378
379 if (!sas_protocol_ata(task->task_proto)) {
7eee4b92
XC
380 unsigned int req_len, resp_len;
381
42e7a693
JG
382 if (task->num_scatter) {
383 n_elem = dma_map_sg(dev, task->scatter,
384 task->num_scatter, task->data_dir);
385 if (!n_elem) {
386 rc = -ENOMEM;
387 goto prep_out;
388 }
7eee4b92
XC
389 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
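			/* SMP request and response are each a single SG entry;
			 * their DMA lengths must be 4-byte aligned.
			 */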
390 n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
391 1, DMA_TO_DEVICE);
392 if (!n_elem_req) {
393 rc = -ENOMEM;
394 goto prep_out;
395 }
396 req_len = sg_dma_len(&task->smp_task.smp_req);
397 if (req_len & 0x3) {
398 rc = -EINVAL;
399 goto err_out_dma_unmap;
400 }
401 n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
402 1, DMA_FROM_DEVICE);
eb217359 403 if (!n_elem_resp) {
7eee4b92
XC
404 rc = -ENOMEM;
405 goto err_out_dma_unmap;
406 }
407 resp_len = sg_dma_len(&task->smp_task.smp_resp);
408 if (resp_len & 0x3) {
409 rc = -EINVAL;
410 goto err_out_dma_unmap;
411 }
42e7a693
JG
412 }
413 } else
414 n_elem = task->num_scatter;
415
a2b3820b
XC
416 if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
417 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
418 n_elem);
419 rc = -EINVAL;
420 goto err_out_dma_unmap;
421 }
422
b1a49412 423 spin_lock_irqsave(&hisi_hba->lock, flags);
685b6d6e
JG
424 if (hisi_hba->hw->slot_index_alloc)
425 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
426 device);
427 else
428 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
b1a49412 429 spin_unlock_irqrestore(&hisi_hba->lock, flags);
7eee4b92
XC
430 if (rc)
431 goto err_out_dma_unmap;
b1a49412 432
3de0026d
XC
433 slot = &hisi_hba->slot_info[slot_idx];
434 memset(slot, 0, sizeof(struct hisi_sas_slot));
435
436 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
437 GFP_ATOMIC, &slot->buf_dma);
438 if (!slot->buf) {
439 rc = -ENOMEM;
440 goto err_out_tag;
441 }
442
fa222db0
XC
443 spin_lock_irqsave(&dq->lock, flags_dq);
444 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
445 if (wr_q_index < 0) {
446 spin_unlock_irqrestore(&dq->lock, flags_dq);
3de0026d 447 goto err_out_buf;
fa222db0
XC
448 }
449
450 list_add_tail(&slot->delivery, &dq->list);
451 spin_unlock_irqrestore(&dq->lock, flags_dq);
42e7a693 452
b1a49412 453 dlvry_queue = dq->id;
fa222db0 454 dlvry_queue_slot = wr_q_index;
42e7a693
JG
455
456 slot->idx = slot_idx;
457 slot->n_elem = n_elem;
458 slot->dlvry_queue = dlvry_queue;
459 slot->dlvry_queue_slot = dlvry_queue_slot;
460 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
461 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
462 slot->task = task;
463 slot->port = port;
cd938e53
XC
464 if (is_tmf)
465 slot->is_internal = true;
42e7a693 466 task->lldd_task = slot;
cac9b2a2 467 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
42e7a693 468
42e7a693 469 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
f557e32c
XT
470 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
471 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
42e7a693
JG
472
473 switch (task->task_proto) {
66ee999b 474 case SAS_PROTOCOL_SMP:
a2b3820b 475 hisi_sas_task_prep_smp(hisi_hba, slot);
66ee999b 476 break;
42e7a693 477 case SAS_PROTOCOL_SSP:
a2b3820b 478 hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
42e7a693
JG
479 break;
480 case SAS_PROTOCOL_SATA:
481 case SAS_PROTOCOL_STP:
482 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
a2b3820b 483 hisi_sas_task_prep_ata(hisi_hba, slot);
6f2ff1a1 484 break;
42e7a693
JG
485 default:
486 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
487 task->task_proto);
42e7a693
JG
488 break;
489 }
490
e85d93b2 491 spin_lock_irqsave(&dq->lock, flags);
405314df 492 list_add_tail(&slot->entry, &sas_dev->list);
e85d93b2 493 spin_unlock_irqrestore(&dq->lock, flags);
54c9dd2d 494 spin_lock_irqsave(&task->task_state_lock, flags);
42e7a693 495 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
54c9dd2d 496 spin_unlock_irqrestore(&task->task_state_lock, flags);
42e7a693 497
42e7a693 498 ++(*pass);
fa222db0 499 slot->ready = 1;
42e7a693 500
9c9d18e7 501 return 0;
42e7a693 502
3de0026d
XC
503err_out_buf:
504 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
505 slot->buf_dma);
42e7a693 506err_out_tag:
b1a49412 507 spin_lock_irqsave(&hisi_hba->lock, flags);
42e7a693 508 hisi_sas_slot_index_free(hisi_hba, slot_idx);
b1a49412 509 spin_unlock_irqrestore(&hisi_hba->lock, flags);
7eee4b92
XC
510err_out_dma_unmap:
511 if (!sas_protocol_ata(task->task_proto)) {
512 if (task->num_scatter) {
513 dma_unmap_sg(dev, task->scatter, task->num_scatter,
514 task->data_dir);
515 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
516 if (n_elem_req)
517 dma_unmap_sg(dev, &task->smp_task.smp_req,
518 1, DMA_TO_DEVICE);
519 if (n_elem_resp)
520 dma_unmap_sg(dev, &task->smp_task.smp_resp,
521 1, DMA_FROM_DEVICE);
522 }
523 }
42e7a693 524prep_out:
7eee4b92 525 dev_err(dev, "task prep: failed[%d]!\n", rc);
42e7a693
JG
526 return rc;
527}
528
529static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
530 int is_tmf, struct hisi_sas_tmf_task *tmf)
531{
532 u32 rc;
533 u32 pass = 0;
534 unsigned long flags;
535 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
11b75249 536 struct device *dev = hisi_hba->dev;
2f6bca20 537 struct hisi_sas_dq *dq = NULL;
42e7a693 538
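	/* New commands are rejected while a controller soft reset is in
	 * progress (HISI_SAS_REJECT_CMD_BIT set).
	 */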
917d3bda 539 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
06ec0fb9
XC
540 return -EINVAL;
541
42e7a693 542 /* protect task_prep and start_delivery sequence */
2f6bca20 543 rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
42e7a693
JG
544 if (rc)
545 dev_err(dev, "task exec: failed[%d]!\n", rc);
546
2f6bca20
XT
547 if (likely(pass)) {
548 spin_lock_irqsave(&dq->lock, flags);
b1a49412 549 hisi_hba->hw->start_delivery(dq);
2f6bca20
XT
550 spin_unlock_irqrestore(&dq->lock, flags);
551 }
42e7a693
JG
552
553 return rc;
554}
257efd1f 555
66139921
JG
556static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
557{
558 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
559 struct asd_sas_phy *sas_phy = &phy->sas_phy;
560 struct sas_ha_struct *sas_ha;
561
562 if (!phy->phy_attached)
563 return;
564
565 sas_ha = &hisi_hba->sha;
566 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
567
568 if (sas_phy->phy) {
569 struct sas_phy *sphy = sas_phy->phy;
570
571 sphy->negotiated_linkrate = sas_phy->linkrate;
66139921 572 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
2ae75787
XC
573 sphy->maximum_linkrate_hw =
574 hisi_hba->hw->phy_get_max_linkrate();
575 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
576 sphy->minimum_linkrate = phy->minimum_linkrate;
577
578 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
579 sphy->maximum_linkrate = phy->maximum_linkrate;
66139921
JG
580 }
581
582 if (phy->phy_type & PORT_TYPE_SAS) {
583 struct sas_identify_frame *id;
584
585 id = (struct sas_identify_frame *)phy->frame_rcvd;
586 id->dev_type = phy->identify.device_type;
587 id->initiator_bits = SAS_PROTOCOL_ALL;
588 id->target_bits = phy->identify.target_port_protocols;
589 } else if (phy->phy_type & PORT_TYPE_SATA) {
 590 /* Nothing to do */
591 }
592
593 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
594 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
595}
596
abda97c2
JG
597static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
598{
599 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
600 struct hisi_sas_device *sas_dev = NULL;
302e0901 601 unsigned long flags;
1b865185
XC
602 int last = hisi_hba->last_dev_id;
603 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
abda97c2
JG
604 int i;
605
302e0901 606 spin_lock_irqsave(&hisi_hba->lock, flags);
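	/* Walk the device table once, starting just after the last allocated
	 * device ID and wrapping around, to find a free slot.
	 */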
1b865185 607 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
abda97c2 608 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
b1a49412
XC
609 int queue = i % hisi_hba->queue_count;
610 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
611
abda97c2
JG
612 hisi_hba->devices[i].device_id = i;
613 sas_dev = &hisi_hba->devices[i];
614 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
615 sas_dev->dev_type = device->dev_type;
616 sas_dev->hisi_hba = hisi_hba;
617 sas_dev->sas_device = device;
b1a49412 618 sas_dev->dq = dq;
405314df 619 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
abda97c2
JG
620 break;
621 }
1b865185 622 i++;
abda97c2 623 }
1b865185 624 hisi_hba->last_dev_id = i;
302e0901 625 spin_unlock_irqrestore(&hisi_hba->lock, flags);
abda97c2
JG
626
627 return sas_dev;
628}
629
d5a60dfd
XC
630#define HISI_SAS_SRST_ATA_DISK_CNT 3
631static int hisi_sas_init_device(struct domain_device *device)
632{
633 int rc = TMF_RESP_FUNC_COMPLETE;
634 struct scsi_lun lun;
635 struct hisi_sas_tmf_task tmf_task;
636 int retry = HISI_SAS_SRST_ATA_DISK_CNT;
637 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
638
639 switch (device->dev_type) {
640 case SAS_END_DEVICE:
641 int_to_scsilun(0, &lun);
642
643 tmf_task.tmf = TMF_CLEAR_TASK_SET;
644 rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
645 &tmf_task);
646 if (rc == TMF_RESP_FUNC_COMPLETE)
647 hisi_sas_release_task(hisi_hba, device);
648 break;
649 case SAS_SATA_DEV:
650 case SAS_SATA_PM:
651 case SAS_SATA_PM_PORT:
652 case SAS_SATA_PENDING:
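		/* A SATA soft reset may fail transiently, so retry up to
		 * HISI_SAS_SRST_ATA_DISK_CNT times.
		 */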
653 while (retry-- > 0) {
654 rc = hisi_sas_softreset_ata_disk(device);
655 if (!rc)
656 break;
657 }
658 break;
659 default:
660 break;
661 }
662
663 return rc;
664}
665
abda97c2
JG
666static int hisi_sas_dev_found(struct domain_device *device)
667{
668 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
669 struct domain_device *parent_dev = device->parent;
670 struct hisi_sas_device *sas_dev;
11b75249 671 struct device *dev = hisi_hba->dev;
d5a60dfd 672 int rc;
abda97c2 673
685b6d6e
JG
674 if (hisi_hba->hw->alloc_dev)
675 sas_dev = hisi_hba->hw->alloc_dev(device);
676 else
677 sas_dev = hisi_sas_alloc_dev(device);
abda97c2
JG
678 if (!sas_dev) {
679 dev_err(dev, "fail alloc dev: max support %d devices\n",
680 HISI_SAS_MAX_DEVICES);
681 return -EINVAL;
682 }
683
684 device->lldd_dev = sas_dev;
685 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
686
687 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
688 int phy_no;
689 u8 phy_num = parent_dev->ex_dev.num_phys;
690 struct ex_phy *phy;
691
692 for (phy_no = 0; phy_no < phy_num; phy_no++) {
693 phy = &parent_dev->ex_dev.ex_phy[phy_no];
694 if (SAS_ADDR(phy->attached_sas_addr) ==
c90a0bea 695 SAS_ADDR(device->sas_addr))
abda97c2 696 break;
abda97c2
JG
697 }
698
699 if (phy_no == phy_num) {
700 dev_info(dev, "dev found: no attached "
701 "dev:%016llx at ex:%016llx\n",
702 SAS_ADDR(device->sas_addr),
703 SAS_ADDR(parent_dev->sas_addr));
d5a60dfd
XC
704 rc = -EINVAL;
705 goto err_out;
abda97c2
JG
706 }
707 }
708
f1c88211
XC
709 dev_info(dev, "dev[%d:%x] found\n",
710 sas_dev->device_id, sas_dev->dev_type);
711
d5a60dfd
XC
712 rc = hisi_sas_init_device(device);
713 if (rc)
714 goto err_out;
abda97c2 715 return 0;
d5a60dfd
XC
716
717err_out:
718 hisi_sas_dev_gone(device);
719 return rc;
abda97c2
JG
720}
721
235bfc7f 722int hisi_sas_slave_configure(struct scsi_device *sdev)
31eec8a6
JG
723{
724 struct domain_device *dev = sdev_to_domain_dev(sdev);
725 int ret = sas_slave_configure(sdev);
726
727 if (ret)
728 return ret;
729 if (!dev_is_sata(dev))
730 sas_change_queue_depth(sdev, 64);
731
732 return 0;
733}
235bfc7f 734EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
31eec8a6 735
235bfc7f 736void hisi_sas_scan_start(struct Scsi_Host *shost)
701f75ec
JG
737{
738 struct hisi_hba *hisi_hba = shost_priv(shost);
701f75ec 739
396b8044 740 hisi_hba->hw->phys_init(hisi_hba);
701f75ec 741}
235bfc7f 742EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
701f75ec 743
235bfc7f 744int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
701f75ec
JG
745{
746 struct hisi_hba *hisi_hba = shost_priv(shost);
747 struct sas_ha_struct *sha = &hisi_hba->sha;
748
396b8044
JG
749 /* Wait for PHY up interrupt to occur */
750 if (time < HZ)
701f75ec
JG
751 return 0;
752
753 sas_drain_work(sha);
754 return 1;
755}
235bfc7f 756EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
701f75ec 757
66139921
JG
758static void hisi_sas_phyup_work(struct work_struct *work)
759{
760 struct hisi_sas_phy *phy =
e537b62b 761 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
66139921
JG
762 struct hisi_hba *hisi_hba = phy->hisi_hba;
763 struct asd_sas_phy *sas_phy = &phy->sas_phy;
764 int phy_no = sas_phy->id;
765
766 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
767 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
768}
976867e6 769
057c3d1f
XT
770static void hisi_sas_linkreset_work(struct work_struct *work)
771{
772 struct hisi_sas_phy *phy =
773 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
774 struct asd_sas_phy *sas_phy = &phy->sas_phy;
775
776 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
777}
778
e537b62b
XT
779static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
780 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
057c3d1f 781 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
e537b62b
XT
782};
783
784bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
785 enum hisi_sas_phy_event event)
786{
787 struct hisi_hba *hisi_hba = phy->hisi_hba;
788
789 if (WARN_ON(event >= HISI_PHYES_NUM))
790 return false;
791
792 return queue_work(hisi_hba->wq, &phy->works[event]);
793}
794EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
795
976867e6
JG
796static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
797{
798 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
799 struct asd_sas_phy *sas_phy = &phy->sas_phy;
e537b62b 800 int i;
976867e6
JG
801
802 phy->hisi_hba = hisi_hba;
803 phy->port = NULL;
eba8c20c
XT
804 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
805 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
976867e6
JG
806 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
807 sas_phy->class = SAS;
808 sas_phy->iproto = SAS_PROTOCOL_ALL;
809 sas_phy->tproto = 0;
810 sas_phy->type = PHY_TYPE_PHYSICAL;
811 sas_phy->role = PHY_ROLE_INITIATOR;
812 sas_phy->oob_mode = OOB_NOT_CONNECTED;
813 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
814 sas_phy->id = phy_no;
815 sas_phy->sas_addr = &hisi_hba->sas_addr[0];
816 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
817 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
818 sas_phy->lldd_phy = phy;
66139921 819
e537b62b
XT
820 for (i = 0; i < HISI_PHYES_NUM; i++)
821 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
976867e6
JG
822}
823
184a4635
JG
824static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
825{
826 struct sas_ha_struct *sas_ha = sas_phy->ha;
827 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
828 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
829 struct asd_sas_port *sas_port = sas_phy->port;
2e244f0f 830 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
184a4635
JG
831 unsigned long flags;
832
833 if (!sas_port)
834 return;
835
836 spin_lock_irqsave(&hisi_hba->lock, flags);
837 port->port_attached = 1;
838 port->id = phy->port_id;
839 phy->port = port;
840 sas_port->lldd_port = port;
841 spin_unlock_irqrestore(&hisi_hba->lock, flags);
842}
843
d3c4dd4e 844static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
405314df 845 struct hisi_sas_slot *slot)
184a4635 846{
d3c4dd4e
JG
847 if (task) {
848 unsigned long flags;
849 struct task_status_struct *ts;
184a4635 850
d3c4dd4e 851 ts = &task->task_status;
184a4635 852
d3c4dd4e
JG
853 ts->resp = SAS_TASK_COMPLETE;
854 ts->stat = SAS_ABORTED_TASK;
855 spin_lock_irqsave(&task->task_state_lock, flags);
856 task->task_state_flags &=
857 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
858 task->task_state_flags |= SAS_TASK_STATE_DONE;
859 spin_unlock_irqrestore(&task->task_state_lock, flags);
860 }
184a4635 861
405314df 862 hisi_sas_slot_task_free(hisi_hba, task, slot);
184a4635
JG
863}
864
405314df 865/* hisi_hba.lock should be locked */
184a4635
JG
866static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
867 struct domain_device *device)
868{
405314df
JG
869 struct hisi_sas_slot *slot, *slot2;
870 struct hisi_sas_device *sas_dev = device->lldd_dev;
184a4635 871
405314df
JG
872 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
873 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
184a4635
JG
874}
875
4d0951ee 876void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
06ec0fb9 877{
405314df
JG
878 struct hisi_sas_device *sas_dev;
879 struct domain_device *device;
06ec0fb9
XC
880 int i;
881
405314df
JG
882 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
883 sas_dev = &hisi_hba->devices[i];
884 device = sas_dev->sas_device;
06ec0fb9 885
405314df
JG
886 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
887 !device)
06ec0fb9 888 continue;
405314df
JG
889
890 hisi_sas_release_task(hisi_hba, device);
06ec0fb9
XC
891 }
892}
4d0951ee 893EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
06ec0fb9 894
d30ff263
XC
895static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
896 struct domain_device *device)
897{
898 if (hisi_hba->hw->dereg_device)
899 hisi_hba->hw->dereg_device(hisi_hba, device);
900}
901
abda97c2
JG
902static void hisi_sas_dev_gone(struct domain_device *device)
903{
904 struct hisi_sas_device *sas_dev = device->lldd_dev;
905 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
11b75249 906 struct device *dev = hisi_hba->dev;
abda97c2 907
f1c88211 908 dev_info(dev, "dev[%d:%x] is gone\n",
abda97c2
JG
909 sas_dev->device_id, sas_dev->dev_type);
910
f8e45ec2
XC
911 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
912 hisi_sas_internal_task_abort(hisi_hba, device,
40f2702b
JG
913 HISI_SAS_INT_ABT_DEV, 0);
914
f8e45ec2
XC
915 hisi_sas_dereg_device(hisi_hba, device);
916
917 hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
918 device->lldd_dev = NULL;
f8e45ec2 919 }
d30ff263 920
0258141a
XT
921 if (hisi_hba->hw->free_device)
922 hisi_hba->hw->free_device(sas_dev);
abda97c2 923 sas_dev->dev_type = SAS_PHY_UNUSED;
abda97c2 924}
42e7a693
JG
925
926static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
927{
928 return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
929}
930
757db2da
JG
931static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
932 struct sas_phy_linkrates *r)
933{
934 struct sas_phy_linkrates _r;
935
936 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
937 struct asd_sas_phy *sas_phy = &phy->sas_phy;
938 enum sas_linkrate min, max;
939
940 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
941 max = sas_phy->phy->maximum_linkrate;
942 min = r->minimum_linkrate;
943 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
944 max = r->maximum_linkrate;
945 min = sas_phy->phy->minimum_linkrate;
946 } else
947 return;
948
949 _r.maximum_linkrate = max;
950 _r.minimum_linkrate = min;
951
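	/* Disable the phy, give it time to settle, then program the new rate
	 * limits and bring the phy back up.
	 */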
952 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
953 msleep(100);
954 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
955 hisi_hba->hw->phy_start(hisi_hba, phy_no);
956}
957
e4189d53
JG
958static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
959 void *funcdata)
960{
961 struct sas_ha_struct *sas_ha = sas_phy->ha;
962 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
963 int phy_no = sas_phy->id;
964
965 switch (func) {
966 case PHY_FUNC_HARD_RESET:
967 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
968 break;
969
970 case PHY_FUNC_LINK_RESET:
b4c67a6c
JG
971 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
972 msleep(100);
1eb8eeac 973 hisi_hba->hw->phy_start(hisi_hba, phy_no);
e4189d53
JG
974 break;
975
976 case PHY_FUNC_DISABLE:
977 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
978 break;
979
980 case PHY_FUNC_SET_LINK_RATE:
757db2da 981 hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
2ae75787 982 break;
c52108c6
XT
983 case PHY_FUNC_GET_EVENTS:
984 if (hisi_hba->hw->get_events) {
985 hisi_hba->hw->get_events(hisi_hba, phy_no);
986 break;
987 }
988 /* fallthru */
e4189d53
JG
989 case PHY_FUNC_RELEASE_SPINUP_HOLD:
990 default:
991 return -EOPNOTSUPP;
992 }
993 return 0;
994}
184a4635 995
0efff300
JG
996static void hisi_sas_task_done(struct sas_task *task)
997{
998 if (!del_timer(&task->slow_task->timer))
999 return;
1000 complete(&task->slow_task->completion);
1001}
1002
77570eed 1003static void hisi_sas_tmf_timedout(struct timer_list *t)
0efff300 1004{
77570eed
KC
1005 struct sas_task_slow *slow = from_timer(slow, t, timer);
1006 struct sas_task *task = slow->task;
f64a6988
XC
1007 unsigned long flags;
1008
1009 spin_lock_irqsave(&task->task_state_lock, flags);
1010 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
1011 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1012 spin_unlock_irqrestore(&task->task_state_lock, flags);
0efff300 1013
0efff300
JG
1014 complete(&task->slow_task->completion);
1015}
1016
1017#define TASK_TIMEOUT 20
1018#define TASK_RETRY 3
bb9abc4a 1019#define INTERNAL_ABORT_TIMEOUT 6
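/* TASK_TIMEOUT and INTERNAL_ABORT_TIMEOUT are in seconds (multiplied by HZ
 * when arming the slow-task timer below); TASK_RETRY is an attempt count.
 */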
0efff300
JG
1020static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
1021 void *parameter, u32 para_len,
1022 struct hisi_sas_tmf_task *tmf)
1023{
1024 struct hisi_sas_device *sas_dev = device->lldd_dev;
1025 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
11b75249 1026 struct device *dev = hisi_hba->dev;
0efff300
JG
1027 struct sas_task *task;
1028 int res, retry;
1029
1030 for (retry = 0; retry < TASK_RETRY; retry++) {
1031 task = sas_alloc_slow_task(GFP_KERNEL);
1032 if (!task)
1033 return -ENOMEM;
1034
1035 task->dev = device;
1036 task->task_proto = device->tproto;
1037
7c594f04
XC
1038 if (dev_is_sata(device)) {
1039 task->ata_task.device_control_reg_update = 1;
1040 memcpy(&task->ata_task.fis, parameter, para_len);
1041 } else {
1042 memcpy(&task->ssp_task, parameter, para_len);
1043 }
0efff300
JG
1044 task->task_done = hisi_sas_task_done;
1045
841b86f3 1046 task->slow_task->timer.function = hisi_sas_tmf_timedout;
0efff300
JG
1047 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
1048 add_timer(&task->slow_task->timer);
1049
1050 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
1051
1052 if (res) {
1053 del_timer(&task->slow_task->timer);
1054 dev_err(dev, "abort tmf: executing internal task failed: %d\n",
1055 res);
1056 goto ex_err;
1057 }
1058
1059 wait_for_completion(&task->slow_task->completion);
1060 res = TMF_RESP_FUNC_FAILED;
1061 /* Even TMF timed out, return direct. */
1062 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1063 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
d3c4dd4e
JG
1064 struct hisi_sas_slot *slot = task->lldd_task;
1065
f1c88211 1066 dev_err(dev, "abort tmf: TMF task timeout and not done\n");
d3c4dd4e
JG
1067 if (slot)
1068 slot->task = NULL;
1069
0efff300 1070 goto ex_err;
f1c88211
XC
1071 } else
1072 dev_err(dev, "abort tmf: TMF task timeout\n");
0efff300
JG
1073 }
1074
1075 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1af1b808 1076 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
0efff300
JG
1077 res = TMF_RESP_FUNC_COMPLETE;
1078 break;
1079 }
1080
4ffde482
JG
1081 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1082 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1083 res = TMF_RESP_FUNC_SUCC;
1084 break;
1085 }
1086
0efff300
JG
1087 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1088 task->task_status.stat == SAS_DATA_UNDERRUN) {
1089 /* no error, but return the number of bytes of
1090 * underrun
1091 */
1092 dev_warn(dev, "abort tmf: task to dev %016llx "
1093 "resp: 0x%x sts 0x%x underrun\n",
1094 SAS_ADDR(device->sas_addr),
1095 task->task_status.resp,
1096 task->task_status.stat);
1097 res = task->task_status.residual;
1098 break;
1099 }
1100
1101 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1102 task->task_status.stat == SAS_DATA_OVERRUN) {
1103 dev_warn(dev, "abort tmf: blocked task error\n");
1104 res = -EMSGSIZE;
1105 break;
1106 }
1107
1108 dev_warn(dev, "abort tmf: task to dev "
1109 "%016llx resp: 0x%x status 0x%x\n",
1110 SAS_ADDR(device->sas_addr), task->task_status.resp,
1111 task->task_status.stat);
1112 sas_free_task(task);
1113 task = NULL;
1114 }
1115ex_err:
d2d7e7a0
XC
1116 if (retry == TASK_RETRY)
1117 dev_warn(dev, "abort tmf: executing internal task failed!\n");
0efff300
JG
1118 sas_free_task(task);
1119 return res;
1120}
1121
7c594f04
XC
1122static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1123 bool reset, int pmp, u8 *fis)
1124{
1125 struct ata_taskfile tf;
1126
1127 ata_tf_init(dev, &tf);
1128 if (reset)
1129 tf.ctl |= ATA_SRST;
1130 else
1131 tf.ctl &= ~ATA_SRST;
1132 tf.command = ATA_CMD_DEV_RESET;
1133 ata_tf_to_fis(&tf, pmp, 0, fis);
1134}
1135
1136static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1137{
1138 u8 fis[20] = {0};
1139 struct ata_port *ap = device->sata_dev.ap;
1140 struct ata_link *link;
1141 int rc = TMF_RESP_FUNC_FAILED;
1142 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
11b75249 1143 struct device *dev = hisi_hba->dev;
7c594f04 1144 int s = sizeof(struct host_to_dev_fis);
7c594f04
XC
1145
1146 ata_for_each_link(link, ap, EDGE) {
1147 int pmp = sata_srst_pmp(link);
1148
1149 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1150 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
1151 if (rc != TMF_RESP_FUNC_COMPLETE)
1152 break;
1153 }
1154
1155 if (rc == TMF_RESP_FUNC_COMPLETE) {
1156 ata_for_each_link(link, ap, EDGE) {
1157 int pmp = sata_srst_pmp(link);
1158
1159 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1160 rc = hisi_sas_exec_internal_tmf_task(device, fis,
1161 s, NULL);
1162 if (rc != TMF_RESP_FUNC_COMPLETE)
1163 dev_err(dev, "ata disk de-reset failed\n");
1164 }
1165 } else {
1166 dev_err(dev, "ata disk reset failed\n");
1167 }
1168
e85d93b2 1169 if (rc == TMF_RESP_FUNC_COMPLETE)
7c594f04 1170 hisi_sas_release_task(hisi_hba, device);
7c594f04
XC
1171
1172 return rc;
1173}
1174
0efff300
JG
1175static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1176 u8 *lun, struct hisi_sas_tmf_task *tmf)
1177{
1178 struct sas_ssp_task ssp_task;
1179
1180 if (!(device->tproto & SAS_PROTOCOL_SSP))
1181 return TMF_RESP_FUNC_ESUPP;
1182
1183 memcpy(ssp_task.LUN, lun, 8);
1184
1185 return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1186 sizeof(ssp_task), tmf);
1187}
1188
a669bdbf 1189static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
917d3bda 1190{
a669bdbf 1191 u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
917d3bda
XT
1192 int i;
1193
1194 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
a669bdbf
XT
1195 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1196 struct domain_device *device = sas_dev->sas_device;
1197 struct asd_sas_port *sas_port;
1198 struct hisi_sas_port *port;
1199 struct hisi_sas_phy *phy = NULL;
1200 struct asd_sas_phy *sas_phy;
1201
917d3bda 1202 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
a669bdbf 1203 || !device || !device->port)
917d3bda
XT
1204 continue;
1205
a669bdbf
XT
1206 sas_port = device->port;
1207 port = to_hisi_sas_port(sas_port);
1208
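		/* Find any phy in this port that is still up after the reset
		 * and reuse its port id; otherwise mark the port id invalid.
		 */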
1209 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1210 if (state & BIT(sas_phy->id)) {
1211 phy = sas_phy->lldd_phy;
1212 break;
1213 }
1214
1215 if (phy) {
1216 port->id = phy->port_id;
917d3bda 1217
a669bdbf
XT
1218 /* Update linkrate of directly attached device. */
1219 if (!device->parent)
1220 device->linkrate = phy->sas_phy.linkrate;
917d3bda 1221
a669bdbf
XT
1222 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1223 } else
1224 port->id = 0xff;
917d3bda
XT
1225 }
1226}
1227
1228static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1229 u32 state)
1230{
1231 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1232 struct asd_sas_port *_sas_port = NULL;
1233 int phy_no;
1234
1235 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1236 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1237 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1238 struct asd_sas_port *sas_port = sas_phy->port;
917d3bda
XT
1239 bool do_port_check = !!(_sas_port != sas_port);
1240
1241 if (!sas_phy->phy->enabled)
1242 continue;
1243
1244 /* Report PHY state change to libsas */
a669bdbf
XT
1245 if (state & BIT(phy_no)) {
1246 if (do_port_check && sas_port && sas_port->port_dev) {
917d3bda
XT
1247 struct domain_device *dev = sas_port->port_dev;
1248
1249 _sas_port = sas_port;
917d3bda
XT
1250
1251 if (DEV_IS_EXPANDER(dev->dev_type))
1252 sas_ha->notify_port_event(sas_phy,
1253 PORTE_BROADCAST_RCVD);
1254 }
1255 } else if (old_state & (1 << phy_no))
1256 /* PHY down but was up before */
1257 hisi_sas_phy_down(hisi_hba, phy_no, 0);
1258
1259 }
917d3bda
XT
1260}
1261
6175abde
XT
1262static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1263{
1264 struct hisi_sas_device *sas_dev;
1265 struct domain_device *device;
1266 int i;
1267
1268 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1269 sas_dev = &hisi_hba->devices[i];
1270 device = sas_dev->sas_device;
1271
1272 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1273 continue;
1274
1275 hisi_sas_init_device(device);
1276 }
1277}
1278
06ec0fb9
XC
1279static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1280{
917d3bda
XT
1281 struct device *dev = hisi_hba->dev;
1282 struct Scsi_Host *shost = hisi_hba->shost;
1283 u32 old_state, state;
06ec0fb9
XC
1284 int rc;
1285
1286 if (!hisi_hba->hw->soft_reset)
1287 return -1;
1288
917d3bda
XT
1289 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1290 return -1;
06ec0fb9 1291
fb51e7a8 1292 dev_info(dev, "controller resetting...\n");
917d3bda 1293 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
06ec0fb9 1294
917d3bda 1295 scsi_block_requests(shost);
6f7c32d6
JG
1296 if (timer_pending(&hisi_hba->timer))
1297 del_timer_sync(&hisi_hba->timer);
1298
917d3bda
XT
1299 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1300 rc = hisi_hba->hw->soft_reset(hisi_hba);
1301 if (rc) {
1302 dev_warn(dev, "controller reset failed (%d)\n", rc);
1303 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
fb51e7a8 1304 scsi_unblock_requests(shost);
917d3bda
XT
1305 goto out;
1306 }
917d3bda 1307
917d3bda
XT
1308 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1309
 1310 /* Init the PHYs and wait for them to come up and for all libsas events to finish. */
1311 hisi_hba->hw->phys_init(hisi_hba);
1312 msleep(1000);
a669bdbf 1313 hisi_sas_refresh_port_id(hisi_hba);
6175abde 1314 hisi_sas_reset_init_all_devices(hisi_hba);
fb51e7a8 1315 scsi_unblock_requests(shost);
917d3bda
XT
1316
1317 state = hisi_hba->hw->get_phys_state(hisi_hba);
1318 hisi_sas_rescan_topology(hisi_hba, old_state, state);
fb51e7a8 1319 dev_info(dev, "controller reset complete\n");
06ec0fb9
XC
1320
1321out:
06ec0fb9 1322 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
917d3bda 1323
06ec0fb9
XC
1324 return rc;
1325}
1326
0efff300
JG
1327static int hisi_sas_abort_task(struct sas_task *task)
1328{
1329 struct scsi_lun lun;
1330 struct hisi_sas_tmf_task tmf_task;
1331 struct domain_device *device = task->dev;
1332 struct hisi_sas_device *sas_dev = device->lldd_dev;
c6ef8954
XC
1333 struct hisi_hba *hisi_hba;
1334 struct device *dev;
0efff300
JG
1335 int rc = TMF_RESP_FUNC_FAILED;
1336 unsigned long flags;
1337
c6ef8954 1338 if (!sas_dev)
0efff300 1339 return TMF_RESP_FUNC_FAILED;
c6ef8954
XC
1340
1341 hisi_hba = dev_to_hisi_hba(task->dev);
1342 dev = hisi_hba->dev;
0efff300 1343
b81b6cce 1344 spin_lock_irqsave(&task->task_state_lock, flags);
0efff300 1345 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
b81b6cce 1346 spin_unlock_irqrestore(&task->task_state_lock, flags);
0efff300
JG
1347 rc = TMF_RESP_FUNC_COMPLETE;
1348 goto out;
1349 }
b81b6cce
XC
1350 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1351 spin_unlock_irqrestore(&task->task_state_lock, flags);
0efff300 1352
0efff300
JG
1353 sas_dev->dev_status = HISI_SAS_DEV_EH;
1354 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1355 struct scsi_cmnd *cmnd = task->uldd_task;
1356 struct hisi_sas_slot *slot = task->lldd_task;
1357 u32 tag = slot->idx;
c35279f2 1358 int rc2;
0efff300
JG
1359
1360 int_to_scsilun(cmnd->device->lun, &lun);
1361 tmf_task.tmf = TMF_ABORT_TASK;
1362 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1363
1364 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1365 &tmf_task);
1366
c35279f2
JG
1367 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1368 HISI_SAS_INT_ABT_CMD, tag);
813709f2
XT
1369 if (rc2 < 0) {
1370 dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1371 return TMF_RESP_FUNC_FAILED;
1372 }
1373
c35279f2
JG
1374 /*
1375 * If the TMF finds that the IO is not in the device and also
1376 * the internal abort does not succeed, then it is safe to
1377 * free the slot.
1378 * Note: if the internal abort succeeds then the slot
1379 * will have already been completed
1380 */
1381 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
e85d93b2 1382 if (task->lldd_task)
c35279f2 1383 hisi_sas_do_release_task(hisi_hba, task, slot);
0efff300 1384 }
0efff300
JG
1385 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1386 task->task_proto & SAS_PROTOCOL_STP) {
1387 if (task->dev->dev_type == SAS_SATA_DEV) {
813709f2
XT
1388 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1389 HISI_SAS_INT_ABT_DEV, 0);
1390 if (rc < 0) {
1391 dev_err(dev, "abort task: internal abort failed\n");
1392 goto out;
1393 }
d30ff263 1394 hisi_sas_dereg_device(hisi_hba, device);
7c594f04 1395 rc = hisi_sas_softreset_ata_disk(device);
0efff300 1396 }
eb045e04 1397 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
dc8a49ca
JG
1398 /* SMP */
1399 struct hisi_sas_slot *slot = task->lldd_task;
1400 u32 tag = slot->idx;
0efff300 1401
ccbfe5a0
XC
1402 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1403 HISI_SAS_INT_ABT_CMD, tag);
813709f2 1404 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
e85d93b2 1405 task->lldd_task)
ccbfe5a0 1406 hisi_sas_do_release_task(hisi_hba, task, slot);
0efff300
JG
1407 }
1408
1409out:
1410 if (rc != TMF_RESP_FUNC_COMPLETE)
1411 dev_notice(dev, "abort task: rc=%d\n", rc);
1412 return rc;
1413}
1414
1415static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1416{
2a038131
XT
1417 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1418 struct device *dev = hisi_hba->dev;
0efff300
JG
1419 struct hisi_sas_tmf_task tmf_task;
1420 int rc = TMF_RESP_FUNC_FAILED;
2a038131
XT
1421
1422 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1423 HISI_SAS_INT_ABT_DEV, 0);
1424 if (rc < 0) {
1425 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1426 return TMF_RESP_FUNC_FAILED;
1427 }
1428 hisi_sas_dereg_device(hisi_hba, device);
0efff300
JG
1429
1430 tmf_task.tmf = TMF_ABORT_TASK_SET;
1431 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1432
e85d93b2 1433 if (rc == TMF_RESP_FUNC_COMPLETE)
2a038131 1434 hisi_sas_release_task(hisi_hba, device);
2a038131 1435
0efff300
JG
1436 return rc;
1437}
1438
1439static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1440{
1441 int rc = TMF_RESP_FUNC_FAILED;
1442 struct hisi_sas_tmf_task tmf_task;
1443
1444 tmf_task.tmf = TMF_CLEAR_ACA;
1445 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1446
1447 return rc;
1448}
1449
1450static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1451{
1452 struct sas_phy *phy = sas_get_local_phy(device);
1453 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1454 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
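	/* Link reset (0) for SATA/STP devices, hard reset (1) otherwise. */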
1455 rc = sas_phy_reset(phy, reset_type);
1456 sas_put_local_phy(phy);
1457 msleep(2000);
1458 return rc;
1459}
1460
1461static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1462{
1463 struct hisi_sas_device *sas_dev = device->lldd_dev;
1464 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
813709f2 1465 struct device *dev = hisi_hba->dev;
0efff300
JG
1466 int rc = TMF_RESP_FUNC_FAILED;
1467
1468 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1469 return TMF_RESP_FUNC_FAILED;
1470 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1471
813709f2 1472 rc = hisi_sas_internal_task_abort(hisi_hba, device,
d30ff263 1473 HISI_SAS_INT_ABT_DEV, 0);
813709f2
XT
1474 if (rc < 0) {
1475 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1476 return TMF_RESP_FUNC_FAILED;
1477 }
d30ff263
XC
1478 hisi_sas_dereg_device(hisi_hba, device);
1479
0efff300
JG
1480 rc = hisi_sas_debug_I_T_nexus_reset(device);
1481
e85d93b2 1482 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
6131243a 1483 hisi_sas_release_task(hisi_hba, device);
e85d93b2 1484
6131243a 1485 return rc;
0efff300
JG
1486}
1487
1488static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1489{
0efff300
JG
1490 struct hisi_sas_device *sas_dev = device->lldd_dev;
1491 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
11b75249 1492 struct device *dev = hisi_hba->dev;
0efff300
JG
1493 int rc = TMF_RESP_FUNC_FAILED;
1494
0efff300 1495 sas_dev->dev_status = HISI_SAS_DEV_EH;
055945df
JG
1496 if (dev_is_sata(device)) {
1497 struct sas_phy *phy;
1498
1499 /* Clear internal IO and then hardreset */
1500 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1501 HISI_SAS_INT_ABT_DEV, 0);
813709f2
XT
1502 if (rc < 0) {
1503 dev_err(dev, "lu_reset: internal abort failed\n");
055945df 1504 goto out;
813709f2 1505 }
d30ff263 1506 hisi_sas_dereg_device(hisi_hba, device);
0efff300 1507
055945df
JG
1508 phy = sas_get_local_phy(device);
1509
1510 rc = sas_phy_reset(phy, 1);
1511
e85d93b2 1512 if (rc == 0)
055945df 1513 hisi_sas_release_task(hisi_hba, device);
055945df
JG
1514 sas_put_local_phy(phy);
1515 } else {
1516 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1517
2a038131
XT
1518 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1519 HISI_SAS_INT_ABT_DEV, 0);
1520 if (rc < 0) {
1521 dev_err(dev, "lu_reset: internal abort failed\n");
1522 goto out;
1523 }
1524 hisi_sas_dereg_device(hisi_hba, device);
1525
055945df 1526 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
e85d93b2 1527 if (rc == TMF_RESP_FUNC_COMPLETE)
055945df 1528 hisi_sas_release_task(hisi_hba, device);
055945df
JG
1529 }
1530out:
14d3f397 1531 if (rc != TMF_RESP_FUNC_COMPLETE)
ad604832 1532 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
14d3f397 1533 sas_dev->device_id, rc);
0efff300
JG
1534 return rc;
1535}
1536
8b05ad6a
JG
1537static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1538{
1539 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
e402acdb 1540 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
8b05ad6a 1541
e402acdb
XT
1542 queue_work(hisi_hba->wq, &r.work);
1543 wait_for_completion(r.completion);
1544 if (r.done)
1545 return TMF_RESP_FUNC_COMPLETE;
1546
1547 return TMF_RESP_FUNC_FAILED;
8b05ad6a
JG
1548}
1549
0efff300
JG
1550static int hisi_sas_query_task(struct sas_task *task)
1551{
1552 struct scsi_lun lun;
1553 struct hisi_sas_tmf_task tmf_task;
1554 int rc = TMF_RESP_FUNC_FAILED;
1555
1556 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1557 struct scsi_cmnd *cmnd = task->uldd_task;
1558 struct domain_device *device = task->dev;
1559 struct hisi_sas_slot *slot = task->lldd_task;
1560 u32 tag = slot->idx;
1561
1562 int_to_scsilun(cmnd->device->lun, &lun);
1563 tmf_task.tmf = TMF_QUERY_TASK;
1564 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1565
1566 rc = hisi_sas_debug_issue_ssp_tmf(device,
1567 lun.scsi_lun,
1568 &tmf_task);
1569 switch (rc) {
1570 /* The task is still in Lun, release it then */
1571 case TMF_RESP_FUNC_SUCC:
1572 /* The task is not in Lun or failed, reset the phy */
1573 case TMF_RESP_FUNC_FAILED:
1574 case TMF_RESP_FUNC_COMPLETE:
1575 break;
997ee43c
XC
1576 default:
1577 rc = TMF_RESP_FUNC_FAILED;
1578 break;
0efff300
JG
1579 }
1580 }
1581 return rc;
1582}
1583
441c2740 1584static int
ad604832 1585hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
441c2740
JG
1586 struct sas_task *task, int abort_flag,
1587 int task_tag)
1588{
1589 struct domain_device *device = task->dev;
1590 struct hisi_sas_device *sas_dev = device->lldd_dev;
11b75249 1591 struct device *dev = hisi_hba->dev;
441c2740
JG
1592 struct hisi_sas_port *port;
1593 struct hisi_sas_slot *slot;
2e244f0f 1594 struct asd_sas_port *sas_port = device->port;
441c2740 1595 struct hisi_sas_cmd_hdr *cmd_hdr_base;
b1a49412 1596 struct hisi_sas_dq *dq = sas_dev->dq;
441c2740 1597 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
fa222db0
XC
1598 unsigned long flags, flags_dq = 0;
1599 int wr_q_index;
441c2740 1600
917d3bda 1601 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
06ec0fb9
XC
1602 return -EINVAL;
1603
441c2740
JG
1604 if (!device->port)
1605 return -1;
1606
2e244f0f 1607 port = to_hisi_sas_port(sas_port);
441c2740
JG
1608
1609 /* simply get a slot and send abort command */
b1a49412 1610 spin_lock_irqsave(&hisi_hba->lock, flags);
441c2740 1611 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
b1a49412
XC
1612 if (rc) {
1613 spin_unlock_irqrestore(&hisi_hba->lock, flags);
441c2740 1614 goto err_out;
b1a49412
XC
1615 }
1616 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1617
3de0026d
XC
1618 slot = &hisi_hba->slot_info[slot_idx];
1619 memset(slot, 0, sizeof(struct hisi_sas_slot));
1620
1621 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
1622 GFP_ATOMIC, &slot->buf_dma);
1623 if (!slot->buf) {
1624 rc = -ENOMEM;
1625 goto err_out_tag;
1626 }
fa222db0 1627
b1a49412 1628 spin_lock_irqsave(&dq->lock, flags_dq);
fa222db0
XC
1629 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1630 if (wr_q_index < 0) {
3de0026d
XC
1631 spin_unlock_irqrestore(&dq->lock, flags_dq);
1632 goto err_out_buf;
1633 }
fa222db0
XC
1634 list_add_tail(&slot->delivery, &dq->list);
1635 spin_unlock_irqrestore(&dq->lock, flags_dq);
441c2740 1636
b1a49412 1637 dlvry_queue = dq->id;
fa222db0 1638 dlvry_queue_slot = wr_q_index;
b1a49412 1639
441c2740
JG
1640 slot->idx = slot_idx;
1641 slot->n_elem = n_elem;
1642 slot->dlvry_queue = dlvry_queue;
1643 slot->dlvry_queue_slot = dlvry_queue_slot;
1644 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1645 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1646 slot->task = task;
1647 slot->port = port;
cd938e53 1648 slot->is_internal = true;
441c2740
JG
1649 task->lldd_task = slot;
1650
1651 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
031da09c
XC
1652 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1653 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
441c2740 1654
a2b3820b 1655 hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
441c2740 1656 abort_flag, task_tag);
441c2740 1657
54c9dd2d 1658 spin_lock_irqsave(&task->task_state_lock, flags);
441c2740 1659 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
54c9dd2d 1660 spin_unlock_irqrestore(&task->task_state_lock, flags);
441c2740 1661
fa222db0 1662 slot->ready = 1;
b1a49412 1663 /* send abort command to the chip */
fa222db0
XC
1664 spin_lock_irqsave(&dq->lock, flags);
1665 list_add_tail(&slot->entry, &sas_dev->list);
b1a49412 1666 hisi_hba->hw->start_delivery(dq);
fa222db0 1667 spin_unlock_irqrestore(&dq->lock, flags);
441c2740
JG
1668
1669 return 0;
1670
3de0026d
XC
1671err_out_buf:
1672 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
1673 slot->buf_dma);
441c2740 1674err_out_tag:
b1a49412 1675 spin_lock_irqsave(&hisi_hba->lock, flags);
441c2740 1676 hisi_sas_slot_index_free(hisi_hba, slot_idx);
b1a49412 1677 spin_unlock_irqrestore(&hisi_hba->lock, flags);
441c2740
JG
1678err_out:
1679 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1680
1681 return rc;
1682}
1683
1684/**
1685 * hisi_sas_internal_task_abort -- execute an internal
 1686 * abort command for a single IO command or for a whole device
1687 * @hisi_hba: host controller struct
1688 * @device: domain device
1689 * @abort_flag: mode of operation, device or single IO
1690 * @tag: tag of IO to be aborted (only relevant to single
1691 * IO mode)
1692 */
1693static int
1694hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1695 struct domain_device *device,
1696 int abort_flag, int tag)
1697{
1698 struct sas_task *task;
1699 struct hisi_sas_device *sas_dev = device->lldd_dev;
11b75249 1700 struct device *dev = hisi_hba->dev;
441c2740 1701 int res;
441c2740 1702
813709f2
XT
1703 /*
 1704 * If this hw does not implement the prep_abort interface, it either does
 1705 * not support internal abort or does not need one. In that case return
 1706 * TMF_RESP_FUNC_FAILED and let the remaining steps proceed as if the
 1707 * internal abort had been executed and its CQ returned.
1708 */
441c2740 1709 if (!hisi_hba->hw->prep_abort)
813709f2 1710 return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
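
/*
 * A minimal usage sketch (illustrative assumption, not a call site copied
 * from this file): a caller holding the domain_device and the tag of the
 * command it wants cleared from the controller would do something like
 *
 *	rc = hisi_sas_internal_task_abort(hisi_hba, device,
 *					  HISI_SAS_INT_ABT_CMD, tag);
 *	if (rc != TMF_RESP_FUNC_COMPLETE)
 *		...escalate, e.g. to a nexus or controller reset...
 *
 * where the abort-flag constants (single command vs. whole device) are
 * those defined in hisi_sas.h.
 */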

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

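/**
 * hisi_sas_phy_down - handle a phy down event
 * @hisi_hba: host controller struct
 * @phy_no: index of the phy which went down
 * @rdy: whether the phy is still ready
 *
 * When @rdy is set the phy is down but ready, so its port is (re)formed.
 * Otherwise libsas is notified of the loss of signal, the port is marked
 * as detached once no other phy keeps it attached, and the phy's own
 * attachment state is cleared.
 */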
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
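/*
 * Ensure the per-queue completion tasklets have finished and cannot be
 * scheduled again, so that no completion processing races with the caller
 * (typically a controller reset or removal path).
 */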
void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};
EXPORT_SYMBOL_GPL(host_attrs);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found = hisi_sas_dev_found,
	.lldd_dev_gone = hisi_sas_dev_gone,
	.lldd_execute_task = hisi_sas_queue_command,
	.lldd_control_phy = hisi_sas_control_phy,
	.lldd_abort_task = hisi_sas_abort_task,
	.lldd_abort_task_set = hisi_sas_abort_task_set,
	.lldd_clear_aca = hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset = hisi_sas_lu_reset,
	.lldd_query_task = hisi_sas_query_task,
	.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
	.lldd_port_formed = hisi_sas_port_formed,
	.lldd_port_deformed = hisi_sas_port_deformed,
	.lldd_write_gpio = hisi_sas_write_gpio,
};

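/*
 * Zero the DMA memories which the controller reads or writes in place
 * (command/completion queues, initial FIS area, IOST, breakpoint and SATA
 * breakpoint tables) and rewind the queue read/write pointers.
 */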
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->cmd_hdr[i], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

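/*
 * Allocate and initialise the per-HBA resources: phy/port/device state,
 * delivery and completion queue memory, the slot buffer DMA pool, ITCT,
 * IOST, breakpoint and initial-FIS DMA areas, the slot index bitmap and a
 * single-threaded workqueue for deferred work. Returns 0 on success or
 * -ENOMEM on any failure.
 */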
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->complete_hdr_dma[i],
					GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = sizeof(struct hisi_sas_slot_buf_table);
	hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
	if (!hisi_hba->buffer_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

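/*
 * Undo hisi_sas_alloc(): free the coherent DMA areas and the slot buffer
 * pool, and destroy the workqueue. devm-managed allocations are left to
 * the driver core.
 */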
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->buffer_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

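/*
 * Work item wrapper around hisi_sas_controller_reset(), queued on the
 * HBA's rst_work (see hisi_sas_shost_alloc()).
 */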
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

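/*
 * Synchronous variant: also records whether the reset succeeded in
 * rst->done and signals rst->completion so that the submitter can wait
 * for the reset to finish.
 */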
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

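/*
 * Read the controller description from firmware (DT or ACPI): SAS address,
 * phy and queue counts, an optional reference clock and, for DT-based
 * platform devices, the syscon regmap and the reset/clock register offsets.
 * Returns 0 on success or -ENOENT if a required property is missing.
 */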
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controllers with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

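/*
 * Common probe path shared by the hardware-specific platform drivers:
 * allocate the Scsi_Host and HBA state, wire up the libsas host structure,
 * register with the SCSI midlayer and libsas, initialise the hardware and
 * finally scan the host.
 */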
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
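/*
 * Common removal path: cancel any pending HBA timer, unregister from
 * libsas and the SCSI midlayer, release the HBA resources and drop the
 * final host reference.
 */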
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);