1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (c) 2015 Linaro Ltd.
4 * Copyright (c) 2015 Hisilicon Limited.
8 #define DRV_NAME "hisi_sas"
10 #define DEV_IS_GONE(dev) \
11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
13 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
14 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
16 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
17 struct domain_device *device);
18 static void hisi_sas_dev_gone(struct domain_device *device);
20 struct hisi_sas_internal_abort_data {
21 bool rst_ha_timeout; /* reset the HA for timeout */
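/*
 * Map the command in a host-to-device FIS to the SATA protocol class
 * (FPDMA, PIO, DMA or non-data) that the command header must encode.
 * Commands not listed below fall back to a guess based on the data
 * direction.
 */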
24 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
26 switch (fis->command) {
27 case ATA_CMD_FPDMA_WRITE:
28 case ATA_CMD_FPDMA_READ:
29 case ATA_CMD_FPDMA_RECV:
30 case ATA_CMD_FPDMA_SEND:
31 case ATA_CMD_NCQ_NON_DATA:
32 return HISI_SAS_SATA_PROTOCOL_FPDMA;
34 case ATA_CMD_DOWNLOAD_MICRO:
36 case ATA_CMD_PMP_READ:
37 case ATA_CMD_READ_LOG_EXT:
38 case ATA_CMD_PIO_READ:
39 case ATA_CMD_PIO_READ_EXT:
40 case ATA_CMD_PMP_WRITE:
41 case ATA_CMD_WRITE_LOG_EXT:
42 case ATA_CMD_PIO_WRITE:
43 case ATA_CMD_PIO_WRITE_EXT:
44 return HISI_SAS_SATA_PROTOCOL_PIO;
47 case ATA_CMD_DOWNLOAD_MICRO_DMA:
48 case ATA_CMD_PMP_READ_DMA:
49 case ATA_CMD_PMP_WRITE_DMA:
51 case ATA_CMD_READ_EXT:
52 case ATA_CMD_READ_LOG_DMA_EXT:
53 case ATA_CMD_READ_STREAM_DMA_EXT:
54 case ATA_CMD_TRUSTED_RCV_DMA:
55 case ATA_CMD_TRUSTED_SND_DMA:
57 case ATA_CMD_WRITE_EXT:
58 case ATA_CMD_WRITE_FUA_EXT:
59 case ATA_CMD_WRITE_QUEUED:
60 case ATA_CMD_WRITE_LOG_DMA_EXT:
61 case ATA_CMD_WRITE_STREAM_DMA_EXT:
62 case ATA_CMD_ZAC_MGMT_IN:
63 return HISI_SAS_SATA_PROTOCOL_DMA;
65 case ATA_CMD_CHK_POWER:
66 case ATA_CMD_DEV_RESET:
69 case ATA_CMD_FLUSH_EXT:
71 case ATA_CMD_VERIFY_EXT:
72 case ATA_CMD_SET_FEATURES:
74 case ATA_CMD_STANDBYNOW1:
75 case ATA_CMD_ZAC_MGMT_OUT:
76 return HISI_SAS_SATA_PROTOCOL_NONDATA;
79 switch (fis->features) {
80 case ATA_SET_MAX_PASSWD:
81 case ATA_SET_MAX_LOCK:
82 return HISI_SAS_SATA_PROTOCOL_PIO;
84 case ATA_SET_MAX_PASSWD_DMA:
85 case ATA_SET_MAX_UNLOCK_DMA:
86 return HISI_SAS_SATA_PROTOCOL_DMA;
89 return HISI_SAS_SATA_PROTOCOL_NONDATA;
94 if (direction == DMA_NONE)
95 return HISI_SAS_SATA_PROTOCOL_NONDATA;
96 return HISI_SAS_SATA_PROTOCOL_PIO;
100 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
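/*
 * On completion of a SATA command, copy the D2H FIS from the slot's
 * status buffer into the libsas ata_task_resp so the ending taskfile
 * can be inspected.
 */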
102 void hisi_sas_sata_done(struct sas_task *task,
103 struct hisi_sas_slot *slot)
105 struct task_status_struct *ts = &task->task_status;
106 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
107 struct hisi_sas_status_buffer *status_buf =
108 hisi_sas_status_buf_addr_mem(slot);
109 u8 *iu = &status_buf->iu[0];
110 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
112 resp->frame_len = sizeof(struct dev_to_host_fis);
113 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
115 ts->buf_valid_size = sizeof(*resp);
117 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
120 * This function assumes linkrate mask fits in 8 bits, which it
121 * does for all HW versions supported.
123 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
128 max -= SAS_LINK_RATE_1_5_GBPS;
129 for (i = 0; i <= max; i++)
130 rate |= 1 << (i * 2);
133 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
135 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
137 return device->port->ha->lldd_ha;
140 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
142 return container_of(sas_port, struct hisi_sas_port, sas_port);
144 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
146 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
150 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
151 hisi_sas_phy_enable(hisi_hba, phy_no, 0);
153 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
155 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
157 void *bitmap = hisi_hba->slot_index_tags;
159 __clear_bit(slot_idx, bitmap);
162 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
164 if (hisi_hba->hw->slot_index_alloc ||
165 slot_idx < HISI_SAS_RESERVED_IPTT) {
166 spin_lock(&hisi_hba->lock);
167 hisi_sas_slot_index_clear(hisi_hba, slot_idx);
168 spin_unlock(&hisi_hba->lock);
172 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
174 void *bitmap = hisi_hba->slot_index_tags;
176 __set_bit(slot_idx, bitmap);
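/*
 * Allocate an IPTT for a command.  Commands backed by a block layer
 * request reuse the request tag (offset by HISI_SAS_RESERVED_IPTT);
 * everything else takes a bit from the reserved region of the tag
 * bitmap under hisi_hba->lock.
 */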
179 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
183 void *bitmap = hisi_hba->slot_index_tags;
186 return rq->tag + HISI_SAS_RESERVED_IPTT;
188 spin_lock(&hisi_hba->lock);
189 index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT,
190 hisi_hba->last_slot_index + 1);
191 if (index >= HISI_SAS_RESERVED_IPTT) {
192 index = find_next_zero_bit(bitmap,
193 HISI_SAS_RESERVED_IPTT,
195 if (index >= HISI_SAS_RESERVED_IPTT) {
196 spin_unlock(&hisi_hba->lock);
197 return -SAS_QUEUE_FULL;
200 hisi_sas_slot_index_set(hisi_hba, index);
201 hisi_hba->last_slot_index = index;
202 spin_unlock(&hisi_hba->lock);
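/*
 * Release a completed or aborted slot: unmap any DMA done for the task,
 * drop the slot from the owning device's list and return its IPTT to
 * the allocator.
 */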
207 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
208 struct hisi_sas_slot *slot, bool need_lock)
210 int device_id = slot->device_id;
211 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
214 struct device *dev = hisi_hba->dev;
216 if (!task->lldd_task)
219 task->lldd_task = NULL;
221 if (!sas_protocol_ata(task->task_proto)) {
223 if (task->task_proto & SAS_PROTOCOL_SSP)
224 dma_unmap_sg(dev, task->scatter,
228 dma_unmap_sg(dev, &task->smp_task.smp_req,
231 if (slot->n_elem_dif) {
232 struct sas_ssp_task *ssp_task = &task->ssp_task;
233 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
235 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
236 scsi_prot_sg_count(scsi_cmnd),
243 spin_lock(&sas_dev->lock);
244 list_del_init(&slot->entry);
245 spin_unlock(&sas_dev->lock);
247 list_del_init(&slot->entry);
250 memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
252 hisi_sas_slot_index_free(hisi_hba, slot->idx);
254 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
256 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
257 struct hisi_sas_slot *slot)
259 hisi_hba->hw->prep_smp(hisi_hba, slot);
262 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
263 struct hisi_sas_slot *slot)
265 hisi_hba->hw->prep_ssp(hisi_hba, slot);
268 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
269 struct hisi_sas_slot *slot)
271 hisi_hba->hw->prep_stp(hisi_hba, slot);
274 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
275 struct hisi_sas_slot *slot)
277 hisi_hba->hw->prep_abort(hisi_hba, slot);
280 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
281 struct sas_task *task, int n_elem)
283 struct device *dev = hisi_hba->dev;
285 if (!sas_protocol_ata(task->task_proto) && n_elem) {
286 if (task->num_scatter) {
287 dma_unmap_sg(dev, task->scatter, task->num_scatter,
289 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
290 dma_unmap_sg(dev, &task->smp_task.smp_req,
296 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
297 struct sas_task *task, int *n_elem)
299 struct device *dev = hisi_hba->dev;
302 if (sas_protocol_ata(task->task_proto)) {
303 *n_elem = task->num_scatter;
305 unsigned int req_len;
307 if (task->num_scatter) {
308 *n_elem = dma_map_sg(dev, task->scatter,
309 task->num_scatter, task->data_dir);
314 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
315 *n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
321 req_len = sg_dma_len(&task->smp_task.smp_req);
324 goto err_out_dma_unmap;
329 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
330 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
333 goto err_out_dma_unmap;
338 /* It would be better to call dma_unmap_sg() here, but it's messy */
339 hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
344 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
345 struct sas_task *task, int n_elem_dif)
347 struct device *dev = hisi_hba->dev;
350 struct sas_ssp_task *ssp_task = &task->ssp_task;
351 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
353 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
354 scsi_prot_sg_count(scsi_cmnd),
359 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
360 int *n_elem_dif, struct sas_task *task)
362 struct device *dev = hisi_hba->dev;
363 struct sas_ssp_task *ssp_task;
364 struct scsi_cmnd *scsi_cmnd;
367 if (task->num_scatter) {
368 ssp_task = &task->ssp_task;
369 scsi_cmnd = ssp_task->cmd;
371 if (scsi_prot_sg_count(scsi_cmnd)) {
372 *n_elem_dif = dma_map_sg(dev,
373 scsi_prot_sglist(scsi_cmnd),
374 scsi_prot_sg_count(scsi_cmnd),
380 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
381 dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
384 goto err_out_dif_dma_unmap;
391 err_out_dif_dma_unmap:
392 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
393 scsi_prot_sg_count(scsi_cmnd), task->data_dir);
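/*
 * Claim the next delivery queue entry under dq->lock, fill the command
 * header for the task's protocol, then mark the slot ready and tell the
 * hardware to start delivery.
 */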
398 void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
399 struct hisi_sas_slot *slot,
400 struct hisi_sas_dq *dq,
401 struct hisi_sas_device *sas_dev)
403 struct hisi_sas_cmd_hdr *cmd_hdr_base;
404 int dlvry_queue_slot, dlvry_queue;
405 struct sas_task *task = slot->task;
408 spin_lock(&dq->lock);
409 wr_q_index = dq->wr_point;
410 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
411 list_add_tail(&slot->delivery, &dq->list);
412 spin_unlock(&dq->lock);
413 spin_lock(&sas_dev->lock);
414 list_add_tail(&slot->entry, &sas_dev->list);
415 spin_unlock(&sas_dev->lock);
417 dlvry_queue = dq->id;
418 dlvry_queue_slot = wr_q_index;
420 slot->device_id = sas_dev->device_id;
421 slot->dlvry_queue = dlvry_queue;
422 slot->dlvry_queue_slot = dlvry_queue_slot;
423 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
424 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
426 task->lldd_task = slot;
428 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
429 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
430 memset(hisi_sas_status_buf_addr_mem(slot), 0,
431 sizeof(struct hisi_sas_err_record));
433 switch (task->task_proto) {
434 case SAS_PROTOCOL_SMP:
435 hisi_sas_task_prep_smp(hisi_hba, slot);
437 case SAS_PROTOCOL_SSP:
438 hisi_sas_task_prep_ssp(hisi_hba, slot);
440 case SAS_PROTOCOL_SATA:
441 case SAS_PROTOCOL_STP:
442 case SAS_PROTOCOL_STP_ALL:
443 hisi_sas_task_prep_ata(hisi_hba, slot);
445 case SAS_PROTOCOL_INTERNAL_ABORT:
446 hisi_sas_task_prep_abort(hisi_hba, slot);
452 /* Make slot memories observable before marking as ready */
454 WRITE_ONCE(slot->ready, 1);
456 spin_lock(&dq->lock);
457 hisi_hba->hw->start_delivery(dq);
458 spin_unlock(&dq->lock);
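/*
 * libsas .lldd_execute_task entry point: pick a delivery queue (from the
 * block layer hw queue for request-backed commands), map DMA, allocate
 * an IPTT and hand the slot to hisi_sas_task_deliver().
 */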
461 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
463 int n_elem = 0, n_elem_dif = 0;
464 struct domain_device *device = task->dev;
465 struct asd_sas_port *sas_port = device->port;
466 struct hisi_sas_device *sas_dev = device->lldd_dev;
467 bool internal_abort = sas_is_internal_abort(task);
468 struct hisi_sas_dq *dq = NULL;
469 struct hisi_sas_port *port;
470 struct hisi_hba *hisi_hba;
471 struct hisi_sas_slot *slot;
472 struct request *rq = NULL;
477 struct task_status_struct *ts = &task->task_status;
479 ts->resp = SAS_TASK_UNDELIVERED;
480 ts->stat = SAS_PHY_DOWN;
482 * libsas will use dev->port; we should not
483 * call task_done() for SATA devices
485 if (device->dev_type != SAS_SATA_DEV && !internal_abort)
486 task->task_done(task);
490 hisi_hba = dev_to_hisi_hba(device);
493 switch (task->task_proto) {
494 case SAS_PROTOCOL_SSP:
495 case SAS_PROTOCOL_SMP:
496 case SAS_PROTOCOL_SATA:
497 case SAS_PROTOCOL_STP:
498 case SAS_PROTOCOL_STP_ALL:
499 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
500 if (!gfpflags_allow_blocking(gfp_flags))
503 down(&hisi_hba->sem);
507 if (DEV_IS_GONE(sas_dev)) {
509 dev_info(dev, "task prep: device %d not ready\n",
512 dev_info(dev, "task prep: device %016llx not ready\n",
513 SAS_ADDR(device->sas_addr));
518 port = to_hisi_sas_port(sas_port);
519 if (!port->port_attached) {
520 dev_info(dev, "task prep: %s port%d has no attached device\n",
521 dev_is_sata(device) ? "SATA/STP" : "SAS",
527 rq = sas_task_find_rq(task);
529 unsigned int dq_index;
532 blk_tag = blk_mq_unique_tag(rq);
533 dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
534 dq = &hisi_hba->dq[dq_index];
538 if (hisi_hba->iopoll_q_cnt) {
540 * Use interrupt queue (queue 0) to deliver and complete
541 * internal IOs of libsas or libata when there is at least one iopoll queue
546 struct Scsi_Host *shost = hisi_hba->shost;
547 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
549 queue = qmap->mq_map[raw_smp_processor_id()];
551 dq = &hisi_hba->dq[queue];
554 case SAS_PROTOCOL_INTERNAL_ABORT:
555 if (!hisi_hba->hw->prep_abort)
556 return TMF_RESP_FUNC_FAILED;
558 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
561 hisi_hba = dev_to_hisi_hba(device);
563 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
566 port = to_hisi_sas_port(sas_port);
567 dq = &hisi_hba->dq[task->abort_task.qid];
570 dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
575 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
579 if (!sas_protocol_ata(task->task_proto)) {
580 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
582 goto err_out_dma_unmap;
585 if (!internal_abort && hisi_hba->hw->slot_index_alloc)
586 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
588 rc = hisi_sas_slot_index_alloc(hisi_hba, rq);
591 goto err_out_dif_dma_unmap;
593 slot = &hisi_hba->slot_info[rc];
594 slot->n_elem = n_elem;
595 slot->n_elem_dif = n_elem_dif;
599 slot->tmf = task->tmf;
600 slot->is_internal = !!task->tmf || internal_abort;
602 /* protect task_prep and start_delivery sequence */
603 hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);
607 err_out_dif_dma_unmap:
608 if (!sas_protocol_ata(task->task_proto))
609 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
611 hisi_sas_dma_unmap(hisi_hba, task, n_elem);
613 dev_err(dev, "task exec: failed[%d]!\n", rc);
617 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
620 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
621 struct asd_sas_phy *sas_phy = &phy->sas_phy;
623 if (!phy->phy_attached)
626 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
629 struct sas_phy *sphy = sas_phy->phy;
631 sphy->negotiated_linkrate = sas_phy->linkrate;
632 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
633 sphy->maximum_linkrate_hw =
634 hisi_hba->hw->phy_get_max_linkrate();
635 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
636 sphy->minimum_linkrate = phy->minimum_linkrate;
638 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
639 sphy->maximum_linkrate = phy->maximum_linkrate;
642 if (phy->phy_type & PORT_TYPE_SAS) {
643 struct sas_identify_frame *id;
645 id = (struct sas_identify_frame *)phy->frame_rcvd;
646 id->dev_type = phy->identify.device_type;
647 id->initiator_bits = SAS_PROTOCOL_ALL;
648 id->target_bits = phy->identify.target_port_protocols;
649 } else if (phy->phy_type & PORT_TYPE_SATA) {
653 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
654 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
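/*
 * Find a free device slot, scanning round-robin from the last allocated
 * id under hisi_hba->lock, and initialise it for the new domain device.
 */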
657 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
659 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
660 struct hisi_sas_device *sas_dev = NULL;
661 int last = hisi_hba->last_dev_id;
662 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
665 spin_lock(&hisi_hba->lock);
666 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
667 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
668 int queue = i % hisi_hba->queue_count;
669 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
671 hisi_hba->devices[i].device_id = i;
672 sas_dev = &hisi_hba->devices[i];
673 sas_dev->dev_status = HISI_SAS_DEV_INIT;
674 sas_dev->dev_type = device->dev_type;
675 sas_dev->hisi_hba = hisi_hba;
676 sas_dev->sas_device = device;
678 spin_lock_init(&sas_dev->lock);
679 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
684 hisi_hba->last_dev_id = i;
685 spin_unlock(&hisi_hba->lock);
690 static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq)
692 /* make sure CQ entries being processed are processed to completion */
693 spin_lock(&cq->poll_lock);
694 spin_unlock(&cq->poll_lock);
697 static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq)
699 struct hisi_hba *hisi_hba = cq->hisi_hba;
701 if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt)
706 static void hisi_sas_sync_cq(struct hisi_sas_cq *cq)
708 if (hisi_sas_queue_is_poll(cq))
709 hisi_sas_sync_poll_cq(cq);
711 synchronize_irq(cq->irq_no);
714 void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba)
718 for (i = 0; i < hisi_hba->queue_count; i++) {
719 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
721 if (hisi_sas_queue_is_poll(cq))
722 hisi_sas_sync_poll_cq(cq);
725 EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs);
727 void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba)
731 for (i = 0; i < hisi_hba->queue_count; i++) {
732 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
734 hisi_sas_sync_cq(cq);
737 EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs);
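/*
 * Called by libsas when a TMF task is aborted: sync the slot's
 * completion queue so the task cannot be freed while completion
 * processing may still reference it.
 */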
739 static void hisi_sas_tmf_aborted(struct sas_task *task)
741 struct hisi_sas_slot *slot = task->lldd_task;
742 struct domain_device *device = task->dev;
743 struct hisi_sas_device *sas_dev = device->lldd_dev;
744 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
747 struct hisi_sas_cq *cq =
748 &hisi_hba->cq[slot->dlvry_queue];
750 * sync irq or poll queue to avoid freeing task
751 * before using task in IO completion
753 hisi_sas_sync_cq(cq);
758 #define HISI_SAS_DISK_RECOVER_CNT 3
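/*
 * Bring a newly found device to a known state, retrying up to
 * HISI_SAS_DISK_RECOVER_CNT times: abort the task set for SAS end
 * devices, or issue an ATA soft reset for SATA devices.
 */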
759 static int hisi_sas_init_device(struct domain_device *device)
761 int rc = TMF_RESP_FUNC_COMPLETE;
763 int retry = HISI_SAS_DISK_RECOVER_CNT;
764 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
766 switch (device->dev_type) {
768 int_to_scsilun(0, &lun);
770 while (retry-- > 0) {
771 rc = sas_abort_task_set(device, lun.scsi_lun);
772 if (rc == TMF_RESP_FUNC_COMPLETE) {
773 hisi_sas_release_task(hisi_hba, device);
780 case SAS_SATA_PM_PORT:
781 case SAS_SATA_PENDING:
783 * If an expander is swapped when a SATA disk is attached then
784 * we should issue a hard reset to clear previous affiliation
785 * of STP target port, see SPL (chapter 6.19.4).
787 * However we don't need to issue a hard reset here for these reasons:
789 * a. When probing the device, libsas/libata already issues a
790 * hard reset in sas_probe_sata() -> ata_sas_async_probe().
791 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
792 * to issue a hard reset by checking the dev status (== INIT).
793 * b. When resetting the controller, this is simply unnecessary.
795 while (retry-- > 0) {
796 rc = hisi_sas_softreset_ata_disk(device);
808 int hisi_sas_slave_alloc(struct scsi_device *sdev)
810 struct domain_device *ddev = sdev_to_domain_dev(sdev);
811 struct hisi_sas_device *sas_dev = ddev->lldd_dev;
814 rc = sas_slave_alloc(sdev);
818 rc = hisi_sas_init_device(ddev);
821 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
824 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
826 static int hisi_sas_dev_found(struct domain_device *device)
828 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
829 struct domain_device *parent_dev = device->parent;
830 struct hisi_sas_device *sas_dev;
831 struct device *dev = hisi_hba->dev;
834 if (hisi_hba->hw->alloc_dev)
835 sas_dev = hisi_hba->hw->alloc_dev(device);
837 sas_dev = hisi_sas_alloc_dev(device);
839 dev_err(dev, "failed to alloc dev: only %d devices supported\n",
840 HISI_SAS_MAX_DEVICES);
844 device->lldd_dev = sas_dev;
845 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
847 if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
850 phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device);
852 dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
854 SAS_ADDR(device->sas_addr),
855 SAS_ADDR(parent_dev->sas_addr));
861 dev_info(dev, "dev[%d:%x] found\n",
862 sas_dev->device_id, sas_dev->dev_type);
867 hisi_sas_dev_gone(device);
871 int hisi_sas_slave_configure(struct scsi_device *sdev)
873 struct domain_device *dev = sdev_to_domain_dev(sdev);
874 int ret = sas_slave_configure(sdev);
878 if (!dev_is_sata(dev))
879 sas_change_queue_depth(sdev, 64);
883 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
885 void hisi_sas_scan_start(struct Scsi_Host *shost)
887 struct hisi_hba *hisi_hba = shost_priv(shost);
889 hisi_hba->hw->phys_init(hisi_hba);
891 EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
893 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
895 struct hisi_hba *hisi_hba = shost_priv(shost);
896 struct sas_ha_struct *sha = &hisi_hba->sha;
898 /* Wait for PHY up interrupt to occur */
905 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
907 static void hisi_sas_phyup_work_common(struct work_struct *work,
908 enum hisi_sas_phy_event event)
910 struct hisi_sas_phy *phy =
911 container_of(work, typeof(*phy), works[event]);
912 struct hisi_hba *hisi_hba = phy->hisi_hba;
913 struct asd_sas_phy *sas_phy = &phy->sas_phy;
914 int phy_no = sas_phy->id;
916 phy->wait_phyup_cnt = 0;
917 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
918 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
919 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
922 static void hisi_sas_phyup_work(struct work_struct *work)
924 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
927 static void hisi_sas_linkreset_work(struct work_struct *work)
929 struct hisi_sas_phy *phy =
930 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
931 struct asd_sas_phy *sas_phy = &phy->sas_phy;
933 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
936 static void hisi_sas_phyup_pm_work(struct work_struct *work)
938 struct hisi_sas_phy *phy =
939 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
940 struct hisi_hba *hisi_hba = phy->hisi_hba;
941 struct device *dev = hisi_hba->dev;
943 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
944 pm_runtime_put_sync(dev);
947 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
948 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
949 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
950 [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
953 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
954 enum hisi_sas_phy_event event)
956 struct hisi_hba *hisi_hba = phy->hisi_hba;
958 if (WARN_ON(event >= HISI_PHYES_NUM))
961 return queue_work(hisi_hba->wq, &phy->works[event]);
963 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
965 static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
967 struct hisi_sas_phy *phy = from_timer(phy, t, timer);
968 struct hisi_hba *hisi_hba = phy->hisi_hba;
969 struct device *dev = hisi_hba->dev;
970 int phy_no = phy->sas_phy.id;
972 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
973 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
976 #define HISI_SAS_WAIT_PHYUP_RETRIES 10
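/*
 * Called when OOB completes on a phy: arm a timer so that a link reset
 * is triggered if the phy does not come up within
 * HISI_SAS_WAIT_PHYUP_TIMEOUT, giving up after
 * HISI_SAS_WAIT_PHYUP_RETRIES attempts.
 */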
978 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
980 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
981 struct device *dev = hisi_hba->dev;
984 dev_dbg(dev, "phy%d OOB ready\n", phy_no);
985 spin_lock_irqsave(&phy->lock, flags);
986 if (phy->phy_attached) {
987 spin_unlock_irqrestore(&phy->lock, flags);
991 if (!timer_pending(&phy->timer)) {
992 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
993 phy->wait_phyup_cnt++;
994 phy->timer.expires = jiffies +
995 HISI_SAS_WAIT_PHYUP_TIMEOUT;
996 add_timer(&phy->timer);
997 spin_unlock_irqrestore(&phy->lock, flags);
1001 dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
1002 phy_no, phy->wait_phyup_cnt);
1003 phy->wait_phyup_cnt = 0;
1005 spin_unlock_irqrestore(&phy->lock, flags);
1008 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
1010 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
1012 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1013 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1016 phy->hisi_hba = hisi_hba;
1018 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
1019 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
1020 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
1021 sas_phy->iproto = SAS_PROTOCOL_ALL;
1022 sas_phy->tproto = 0;
1023 sas_phy->role = PHY_ROLE_INITIATOR;
1024 sas_phy->oob_mode = OOB_NOT_CONNECTED;
1025 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
1026 sas_phy->id = phy_no;
1027 sas_phy->sas_addr = &hisi_hba->sas_addr[0];
1028 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
1029 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
1030 sas_phy->lldd_phy = phy;
1032 for (i = 0; i < HISI_PHYES_NUM; i++)
1033 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
1035 spin_lock_init(&phy->lock);
1037 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
1040 /* Wrapper to ensure we track hisi_sas_phy.enable properly */
1041 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
1043 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1044 struct asd_sas_phy *aphy = &phy->sas_phy;
1045 struct sas_phy *sphy = aphy->phy;
1046 unsigned long flags;
1048 spin_lock_irqsave(&phy->lock, flags);
1051 /* We may have been enabled already; if so, don't touch */
1053 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
1054 hisi_hba->hw->phy_start(hisi_hba, phy_no);
1056 sphy->negotiated_linkrate = SAS_PHY_DISABLED;
1057 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
1059 phy->enable = enable;
1060 spin_unlock_irqrestore(&phy->lock, flags);
1062 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
1064 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
1066 struct sas_ha_struct *sas_ha = sas_phy->ha;
1067 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1068 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
1069 struct asd_sas_port *sas_port = sas_phy->port;
1070 struct hisi_sas_port *port;
1071 unsigned long flags;
1076 port = to_hisi_sas_port(sas_port);
1077 spin_lock_irqsave(&hisi_hba->lock, flags);
1078 port->port_attached = 1;
1079 port->id = phy->port_id;
1081 sas_port->lldd_port = port;
1082 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1085 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
1086 struct hisi_sas_slot *slot, bool need_lock)
1089 unsigned long flags;
1090 struct task_status_struct *ts;
1092 ts = &task->task_status;
1094 ts->resp = SAS_TASK_COMPLETE;
1095 ts->stat = SAS_ABORTED_TASK;
1096 spin_lock_irqsave(&task->task_state_lock, flags);
1097 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
1098 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
1099 task->task_state_flags |= SAS_TASK_STATE_DONE;
1100 spin_unlock_irqrestore(&task->task_state_lock, flags);
1103 hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock);
1106 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
1107 struct domain_device *device)
1109 struct hisi_sas_slot *slot, *slot2;
1110 struct hisi_sas_device *sas_dev = device->lldd_dev;
1112 spin_lock(&sas_dev->lock);
1113 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
1114 hisi_sas_do_release_task(hisi_hba, slot->task, slot, false);
1116 spin_unlock(&sas_dev->lock);
1119 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
1121 struct hisi_sas_device *sas_dev;
1122 struct domain_device *device;
1125 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1126 sas_dev = &hisi_hba->devices[i];
1127 device = sas_dev->sas_device;
1129 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
1133 hisi_sas_release_task(hisi_hba, device);
1136 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
1138 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
1139 struct domain_device *device)
1141 if (hisi_hba->hw->dereg_device)
1142 hisi_hba->hw->dereg_device(hisi_hba, device);
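/*
 * Issue a device-scoped internal abort on every interrupt-driven
 * completion queue whose irq can be serviced by an online CPU, so that
 * all in-flight IO for the device is flushed from the controller.
 */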
1146 hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
1147 bool rst_ha_timeout)
1149 struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
1150 struct domain_device *device = sas_dev->sas_device;
1151 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
1154 for (i = 0; i < hisi_hba->cq_nvecs; i++) {
1155 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1156 const struct cpumask *mask = cq->irq_mask;
1158 if (mask && !cpumask_intersects(cpu_online_mask, mask))
1160 rc = sas_execute_internal_abort_dev(device, i, &data);
1168 static void hisi_sas_dev_gone(struct domain_device *device)
1170 struct hisi_sas_device *sas_dev = device->lldd_dev;
1171 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1172 struct device *dev = hisi_hba->dev;
1175 dev_info(dev, "dev[%d:%x] is gone\n",
1176 sas_dev->device_id, sas_dev->dev_type);
1178 down(&hisi_hba->sem);
1179 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
1180 hisi_sas_internal_task_abort_dev(sas_dev, true);
1182 hisi_sas_dereg_device(hisi_hba, device);
1184 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
1185 device->lldd_dev = NULL;
1188 if (hisi_hba->hw->free_device)
1189 hisi_hba->hw->free_device(sas_dev);
1191 /* Don't mark it as SAS_PHY_UNUSED if clearing the ITCT failed */
1193 sas_dev->dev_type = SAS_PHY_UNUSED;
1194 sas_dev->sas_device = NULL;
1198 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
1199 struct sas_phy_linkrates *r)
1201 struct sas_phy_linkrates _r;
1203 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1204 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1205 enum sas_linkrate min, max;
1207 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
1210 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1211 max = sas_phy->phy->maximum_linkrate;
1212 min = r->minimum_linkrate;
1213 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1214 max = r->maximum_linkrate;
1215 min = sas_phy->phy->minimum_linkrate;
1219 _r.maximum_linkrate = max;
1220 _r.minimum_linkrate = min;
1222 sas_phy->phy->maximum_linkrate = max;
1223 sas_phy->phy->minimum_linkrate = min;
1225 hisi_sas_phy_enable(hisi_hba, phy_no, 0);
1227 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
1228 hisi_sas_phy_enable(hisi_hba, phy_no, 1);
1233 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
1236 struct hisi_sas_phy *phy = container_of(sas_phy,
1237 struct hisi_sas_phy, sas_phy);
1238 struct sas_ha_struct *sas_ha = sas_phy->ha;
1239 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1240 struct device *dev = hisi_hba->dev;
1241 DECLARE_COMPLETION_ONSTACK(completion);
1242 int phy_no = sas_phy->id;
1243 u8 sts = phy->phy_attached;
1246 down(&hisi_hba->sem);
1247 phy->reset_completion = &completion;
1250 case PHY_FUNC_HARD_RESET:
1251 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
1254 case PHY_FUNC_LINK_RESET:
1255 hisi_sas_phy_enable(hisi_hba, phy_no, 0);
1257 hisi_sas_phy_enable(hisi_hba, phy_no, 1);
1260 case PHY_FUNC_DISABLE:
1261 hisi_sas_phy_enable(hisi_hba, phy_no, 0);
1264 case PHY_FUNC_SET_LINK_RATE:
1265 ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
1268 case PHY_FUNC_GET_EVENTS:
1269 if (hisi_hba->hw->get_events) {
1270 hisi_hba->hw->get_events(hisi_hba, phy_no);
1274 case PHY_FUNC_RELEASE_SPINUP_HOLD:
1280 if (sts && !wait_for_completion_timeout(&completion,
1281 HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
1282 dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
1289 phy->reset_completion = NULL;
1295 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1296 bool reset, int pmp, u8 *fis)
1298 struct ata_taskfile tf;
1300 ata_tf_init(dev, &tf);
1304 tf.ctl &= ~ATA_SRST;
1305 tf.command = ATA_CMD_DEV_RESET;
1306 ata_tf_to_fis(&tf, pmp, 0, fis);
1309 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1312 struct ata_port *ap = device->sata_dev.ap;
1313 struct ata_link *link;
1314 int rc = TMF_RESP_FUNC_FAILED;
1315 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1316 struct device *dev = hisi_hba->dev;
1318 ata_for_each_link(link, ap, EDGE) {
1319 int pmp = sata_srst_pmp(link);
1321 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1322 rc = sas_execute_ata_cmd(device, fis, -1);
1323 if (rc != TMF_RESP_FUNC_COMPLETE)
1327 if (rc == TMF_RESP_FUNC_COMPLETE) {
1328 ata_for_each_link(link, ap, EDGE) {
1329 int pmp = sata_srst_pmp(link);
1331 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1332 rc = sas_execute_ata_cmd(device, fis, -1);
1333 if (rc != TMF_RESP_FUNC_COMPLETE)
1334 dev_err(dev, "ata disk %016llx de-reset failed\n",
1335 SAS_ADDR(device->sas_addr));
1338 dev_err(dev, "ata disk %016llx reset failed\n",
1339 SAS_ADDR(device->sas_addr));
1342 if (rc == TMF_RESP_FUNC_COMPLETE)
1343 hisi_sas_release_task(hisi_hba, device);
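/*
 * After a controller reset, re-read the phy state and refresh each
 * registered device's port id and ITCT entry so they match the port the
 * device came back on.
 */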
1348 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
1350 u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
1353 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1354 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1355 struct domain_device *device = sas_dev->sas_device;
1356 struct asd_sas_port *sas_port;
1357 struct hisi_sas_port *port;
1358 struct hisi_sas_phy *phy = NULL;
1359 struct asd_sas_phy *sas_phy;
1361 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
1362 || !device || !device->port)
1365 sas_port = device->port;
1366 port = to_hisi_sas_port(sas_port);
1368 spin_lock(&sas_port->phy_list_lock);
1369 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1370 if (state & BIT(sas_phy->id)) {
1371 phy = sas_phy->lldd_phy;
1374 spin_unlock(&sas_port->phy_list_lock);
1377 port->id = phy->port_id;
1379 /* Update linkrate of directly attached device. */
1380 if (!device->parent)
1381 device->linkrate = phy->sas_phy.linkrate;
1383 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1384 } else if (!port->port_attached)
1389 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
1391 struct asd_sas_port *_sas_port = NULL;
1394 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1395 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1396 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1397 struct asd_sas_port *sas_port = sas_phy->port;
1398 bool do_port_check = _sas_port != sas_port;
1400 if (!sas_phy->phy->enabled)
1403 /* Report PHY state change to libsas */
1404 if (state & BIT(phy_no)) {
1405 if (do_port_check && sas_port && sas_port->port_dev) {
1406 struct domain_device *dev = sas_port->port_dev;
1408 _sas_port = sas_port;
1410 if (dev_is_expander(dev->dev_type))
1411 sas_notify_port_event(sas_phy,
1412 PORTE_BROADCAST_RCVD,
1416 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
1421 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1423 struct hisi_sas_device *sas_dev;
1424 struct domain_device *device;
1427 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1428 sas_dev = &hisi_hba->devices[i];
1429 device = sas_dev->sas_device;
1431 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1434 hisi_sas_init_device(device);
1438 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
1439 struct asd_sas_port *sas_port,
1440 struct domain_device *device)
1442 struct ata_port *ap = device->sata_dev.ap;
1443 struct device *dev = hisi_hba->dev;
1444 int rc = TMF_RESP_FUNC_FAILED;
1445 struct ata_link *link;
1449 for (i = 0; i < hisi_hba->n_phy; i++) {
1450 if (!(sas_port->phy_mask & BIT(i)))
1453 ata_for_each_link(link, ap, EDGE) {
1454 int pmp = sata_srst_pmp(link);
1456 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1457 rc = sas_execute_ata_cmd(device, fis, i);
1458 if (rc != TMF_RESP_FUNC_COMPLETE) {
1459 dev_err(dev, "phy%d ata reset failed rc=%d\n",
1467 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
1469 struct device *dev = hisi_hba->dev;
1472 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1473 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1474 struct domain_device *device = sas_dev->sas_device;
1476 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1479 rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1481 dev_err(dev, "STP reject: abort dev failed %d\n", rc);
1484 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
1485 struct hisi_sas_port *port = &hisi_hba->port[port_no];
1486 struct asd_sas_port *sas_port = &port->sas_port;
1487 struct domain_device *port_dev = sas_port->port_dev;
1488 struct domain_device *device;
1490 if (!port_dev || !dev_is_expander(port_dev->dev_type))
1493 /* Try to find a SATA device */
1494 list_for_each_entry(device, &sas_port->dev_list,
1496 if (dev_is_sata(device)) {
1497 hisi_sas_send_ata_reset_each_phy(hisi_hba,
1506 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
1508 struct Scsi_Host *shost = hisi_hba->shost;
1510 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
1512 scsi_block_requests(shost);
1513 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
1515 del_timer_sync(&hisi_hba->timer);
1517 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1519 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
1521 static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie)
1523 struct hisi_sas_phy *phy = data;
1524 struct hisi_hba *hisi_hba = phy->hisi_hba;
1525 struct device *dev = hisi_hba->dev;
1526 DECLARE_COMPLETION_ONSTACK(completion);
1527 int phy_no = phy->sas_phy.id;
1529 phy->reset_completion = &completion;
1530 hisi_sas_phy_enable(hisi_hba, phy_no, 1);
1531 if (!wait_for_completion_timeout(&completion,
1532 HISI_SAS_WAIT_PHYUP_TIMEOUT))
1533 dev_warn(dev, "phy%d wait phyup timed out\n", phy_no);
1535 phy->reset_completion = NULL;
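/*
 * Second half of a controller reset: re-enable the phys that were up
 * before the reset and wait for them, refresh port ids, allow new
 * commands again, re-init devices and rescan the topology.
 */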
1538 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
1540 struct Scsi_Host *shost = hisi_hba->shost;
1541 ASYNC_DOMAIN_EXCLUSIVE(async);
1544 /* Init and wait for PHYs to come up and for all libsas events to finish. */
1545 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1546 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1548 if (!(hisi_hba->phy_state & BIT(phy_no)))
1551 async_schedule_domain(hisi_sas_async_init_wait_phyup,
1555 async_synchronize_full_domain(&async);
1556 hisi_sas_refresh_port_id(hisi_hba);
1557 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1559 if (hisi_hba->reject_stp_links_msk)
1560 hisi_sas_terminate_stp_reject(hisi_hba);
1561 hisi_sas_reset_init_all_devices(hisi_hba);
1562 scsi_unblock_requests(shost);
1563 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
1566 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
1568 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
1570 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
1572 if (!hisi_hba->hw->soft_reset)
1575 down(&hisi_hba->sem);
1576 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
1581 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
1582 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
1587 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1589 struct device *dev = hisi_hba->dev;
1590 struct Scsi_Host *shost = hisi_hba->shost;
1593 dev_info(dev, "controller resetting...\n");
1594 hisi_sas_controller_reset_prepare(hisi_hba);
1596 rc = hisi_hba->hw->soft_reset(hisi_hba);
1598 dev_warn(dev, "controller reset failed (%d)\n", rc);
1599 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1601 scsi_unblock_requests(shost);
1602 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
1605 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
1607 hisi_sas_controller_reset_done(hisi_hba);
1608 dev_info(dev, "controller reset complete\n");
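/*
 * libsas .lldd_abort_task: for SSP, send an ABORT TASK TMF plus an
 * internal abort for the slot's tag; for SATA/STP, abort all IO for the
 * device and soft-reset the disk; for SMP, internally abort the single
 * slot.
 */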
1613 static int hisi_sas_abort_task(struct sas_task *task)
1615 struct hisi_sas_internal_abort_data internal_abort_data = { false };
1616 struct domain_device *device = task->dev;
1617 struct hisi_sas_device *sas_dev = device->lldd_dev;
1618 struct hisi_sas_slot *slot = task->lldd_task;
1619 struct hisi_hba *hisi_hba;
1621 int rc = TMF_RESP_FUNC_FAILED;
1622 unsigned long flags;
1625 return TMF_RESP_FUNC_FAILED;
1627 hisi_hba = dev_to_hisi_hba(task->dev);
1628 dev = hisi_hba->dev;
1630 spin_lock_irqsave(&task->task_state_lock, flags);
1631 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1632 struct hisi_sas_cq *cq;
1636 * sync irq or poll queue to avoid freeing task
1637 * before using task in IO completion
1639 cq = &hisi_hba->cq[slot->dlvry_queue];
1640 hisi_sas_sync_cq(cq);
1642 spin_unlock_irqrestore(&task->task_state_lock, flags);
1643 rc = TMF_RESP_FUNC_COMPLETE;
1646 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1647 spin_unlock_irqrestore(&task->task_state_lock, flags);
1649 if (slot && task->task_proto & SAS_PROTOCOL_SSP) {
1650 u16 tag = slot->idx;
1653 rc = sas_abort_task(task, tag);
1654 rc2 = sas_execute_internal_abort_single(device, tag,
1655 slot->dlvry_queue, &internal_abort_data);
1657 dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1658 return TMF_RESP_FUNC_FAILED;
1662 * If the TMF finds that the IO is not in the device and also
1663 * the internal abort does not succeed, then it is safe to free the slot.
1665 * Note: if the internal abort succeeds then the slot
1666 * will have already been completed
1668 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1669 if (task->lldd_task)
1670 hisi_sas_do_release_task(hisi_hba, task, slot, true);
1672 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1673 task->task_proto & SAS_PROTOCOL_STP) {
1674 if (task->dev->dev_type == SAS_SATA_DEV) {
1675 struct ata_queued_cmd *qc = task->uldd_task;
1677 rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1679 dev_err(dev, "abort task: internal abort failed\n");
1682 hisi_sas_dereg_device(hisi_hba, device);
1685 * If an ATA internal command times out in ATA EH, it
1686 * needs to execute a soft reset, so check the scsicmd
1688 if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) &&
1689 qc && qc->scsicmd) {
1690 hisi_sas_do_release_task(hisi_hba, task, slot, true);
1691 rc = TMF_RESP_FUNC_COMPLETE;
1693 rc = hisi_sas_softreset_ata_disk(device);
1696 } else if (slot && task->task_proto & SAS_PROTOCOL_SMP) {
1698 u32 tag = slot->idx;
1699 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
1701 rc = sas_execute_internal_abort_single(device,
1702 tag, slot->dlvry_queue,
1703 &internal_abort_data);
1704 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1707 * sync irq or poll queue to avoid freeing task
1708 * before using task in IO completion
1710 hisi_sas_sync_cq(cq);
1716 if (rc != TMF_RESP_FUNC_COMPLETE)
1717 dev_notice(dev, "abort task: rc=%d\n", rc);
1721 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1723 struct hisi_sas_device *sas_dev = device->lldd_dev;
1724 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1725 struct device *dev = hisi_hba->dev;
1728 rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1730 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1731 return TMF_RESP_FUNC_FAILED;
1733 hisi_sas_dereg_device(hisi_hba, device);
1735 rc = sas_abort_task_set(device, lun);
1736 if (rc == TMF_RESP_FUNC_COMPLETE)
1737 hisi_sas_release_task(hisi_hba, device);
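/*
 * Core of the I_T nexus reset: reset the phy the device is reached
 * through (hard reset only for SAS devices or SATA devices still in the
 * INIT state), report the phy down if the reset times out, and for SATA
 * wait for the link to become ready again.
 */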
1742 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1744 struct sas_phy *local_phy = sas_get_local_phy(device);
1745 struct hisi_sas_device *sas_dev = device->lldd_dev;
1746 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1747 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1750 if (!local_phy->enabled) {
1751 sas_put_local_phy(local_phy);
1755 if (scsi_is_sas_phy_local(local_phy)) {
1756 struct asd_sas_phy *sas_phy =
1757 sas_ha->sas_phy[local_phy->number];
1758 struct hisi_sas_phy *phy =
1759 container_of(sas_phy, struct hisi_sas_phy, sas_phy);
1760 unsigned long flags;
1762 spin_lock_irqsave(&phy->lock, flags);
1764 spin_unlock_irqrestore(&phy->lock, flags);
1767 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
1768 !dev_is_sata(device));
1770 rc = sas_phy_reset(local_phy, reset_type);
1771 sas_put_local_phy(local_phy);
1773 if (scsi_is_sas_phy_local(local_phy)) {
1774 struct asd_sas_phy *sas_phy =
1775 sas_ha->sas_phy[local_phy->number];
1776 struct hisi_sas_phy *phy =
1777 container_of(sas_phy, struct hisi_sas_phy, sas_phy);
1778 unsigned long flags;
1780 spin_lock_irqsave(&phy->lock, flags);
1782 spin_unlock_irqrestore(&phy->lock, flags);
1784 /* report PHY down if timed out */
1785 if (rc == -ETIMEDOUT)
1786 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
1794 if (dev_is_sata(device)) {
1795 struct ata_link *link = &device->sata_dev.ap->link;
1797 rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
1798 smp_ata_check_ready_type);
1806 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1808 struct hisi_sas_device *sas_dev = device->lldd_dev;
1809 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1810 struct device *dev = hisi_hba->dev;
1813 if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR)
1814 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1816 rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1818 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1819 return TMF_RESP_FUNC_FAILED;
1821 hisi_sas_dereg_device(hisi_hba, device);
1823 rc = hisi_sas_debug_I_T_nexus_reset(device);
1824 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
1825 struct sas_phy *local_phy;
1827 rc = hisi_sas_softreset_ata_disk(device);
1832 case TMF_RESP_FUNC_FAILED:
1835 local_phy = sas_get_local_phy(device);
1836 rc = sas_phy_enable(local_phy, 0);
1838 local_phy->enabled = 0;
1839 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
1840 SAS_ADDR(device->sas_addr), rc);
1843 sas_put_local_phy(local_phy);
1850 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
1851 hisi_sas_release_task(hisi_hba, device);
1856 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1858 struct hisi_sas_device *sas_dev = device->lldd_dev;
1859 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1860 struct device *dev = hisi_hba->dev;
1861 int rc = TMF_RESP_FUNC_FAILED;
1863 /* Clear internal IOs and then issue the LU reset */
1864 rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1866 dev_err(dev, "lu_reset: internal abort failed\n");
1869 hisi_sas_dereg_device(hisi_hba, device);
1871 if (dev_is_sata(device)) {
1872 struct sas_phy *phy;
1874 phy = sas_get_local_phy(device);
1876 rc = sas_phy_reset(phy, true);
1879 hisi_sas_release_task(hisi_hba, device);
1880 sas_put_local_phy(phy);
1882 rc = sas_lu_reset(device, lun);
1883 if (rc == TMF_RESP_FUNC_COMPLETE)
1884 hisi_sas_release_task(hisi_hba, device);
1887 if (rc != TMF_RESP_FUNC_COMPLETE)
1888 dev_err(dev, "lu_reset: for device[%d] rc=%d\n",
1889 sas_dev->device_id, rc);
1893 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
1895 struct domain_device *device = data;
1896 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1899 rc = hisi_sas_debug_I_T_nexus_reset(device);
1900 if (rc != TMF_RESP_FUNC_COMPLETE)
1901 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
1902 SAS_ADDR(device->sas_addr), rc);
1905 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1907 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1908 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1909 ASYNC_DOMAIN_EXCLUSIVE(async);
1912 queue_work(hisi_hba->wq, &r.work);
1913 wait_for_completion(r.completion);
1915 return TMF_RESP_FUNC_FAILED;
1917 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1918 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1919 struct domain_device *device = sas_dev->sas_device;
1921 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
1922 dev_is_expander(device->dev_type))
1925 async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
1929 async_synchronize_full_domain(&async);
1930 hisi_sas_release_tasks(hisi_hba);
1932 return TMF_RESP_FUNC_COMPLETE;
1935 static int hisi_sas_query_task(struct sas_task *task)
1937 int rc = TMF_RESP_FUNC_FAILED;
1939 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1940 struct hisi_sas_slot *slot = task->lldd_task;
1941 u32 tag = slot->idx;
1943 rc = sas_query_task(task, tag);
1945 /* The task is still in the LUN; release it */
1946 case TMF_RESP_FUNC_SUCC:
1947 /* The task is not in the LUN or the query failed; reset the phy */
1948 case TMF_RESP_FUNC_FAILED:
1949 case TMF_RESP_FUNC_COMPLETE:
1952 rc = TMF_RESP_FUNC_FAILED;
1959 static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
1962 struct domain_device *device = task->dev;
1963 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1964 struct hisi_sas_internal_abort_data *timeout = data;
1966 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
1967 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
1969 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1970 pr_err("Internal abort: timeout %016llx\n",
1971 SAS_ADDR(device->sas_addr));
1973 struct hisi_sas_slot *slot = task->lldd_task;
1975 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
1978 struct hisi_sas_cq *cq =
1979 &hisi_hba->cq[slot->dlvry_queue];
1981 * sync irq or poll queue to avoid freeing task
1982 * before using task in IO completion
1984 hisi_sas_sync_cq(cq);
1988 if (timeout->rst_ha_timeout) {
1989 pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
1990 SAS_ADDR(device->sas_addr));
1991 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1993 pr_err("Internal abort: timeout and not done %016llx.\n",
1994 SAS_ADDR(device->sas_addr));
2003 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
2005 hisi_sas_port_notify_formed(sas_phy);
2008 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
2009 u8 reg_index, u8 reg_count, u8 *write_data)
2011 struct hisi_hba *hisi_hba = sha->lldd_ha;
2013 if (!hisi_hba->hw->write_gpio)
2016 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
2017 reg_index, reg_count, write_data);
2020 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
2022 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2023 struct sas_phy *sphy = sas_phy->phy;
2024 unsigned long flags;
2026 phy->phy_attached = 0;
2030 spin_lock_irqsave(&phy->lock, flags);
2032 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
2034 sphy->negotiated_linkrate = SAS_PHY_DISABLED;
2035 spin_unlock_irqrestore(&phy->lock, flags);
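/*
 * Handle a phy down event.  If @rdy, the phy is down but ready:
 * re-report the received frame and port formation.  Otherwise notify
 * libsas of loss of signal and tear down the port attachment, unless a
 * controller reset is in progress, in which case the flutter is ignored.
 */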
2038 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
2041 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
2042 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2043 struct device *dev = hisi_hba->dev;
2046 /* Phy down but ready */
2047 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
2048 hisi_sas_port_notify_formed(sas_phy);
2050 struct hisi_sas_port *port = phy->port;
2052 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
2054 dev_info(dev, "ignore flutter phy%d down\n", phy_no);
2057 /* Phy down and not ready */
2058 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
2059 sas_phy_disconnected(sas_phy);
2062 if (phy->phy_type & PORT_TYPE_SAS) {
2063 int port_id = port->id;
2065 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
2067 port->port_attached = 0;
2068 } else if (phy->phy_type & PORT_TYPE_SATA)
2069 port->port_attached = 0;
2071 hisi_sas_phy_disconnected(phy);
2074 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
2076 void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
2078 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2079 struct hisi_hba *hisi_hba = phy->hisi_hba;
2081 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
2084 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
2086 EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);
2088 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
2090 struct hisi_hba *hisi_hba = shost_priv(shost);
2092 if (reset_type != SCSI_ADAPTER_RESET)
2095 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2099 EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
2101 struct scsi_transport_template *hisi_sas_stt;
2102 EXPORT_SYMBOL_GPL(hisi_sas_stt);
2104 static struct sas_domain_function_template hisi_sas_transport_ops = {
2105 .lldd_dev_found = hisi_sas_dev_found,
2106 .lldd_dev_gone = hisi_sas_dev_gone,
2107 .lldd_execute_task = hisi_sas_queue_command,
2108 .lldd_control_phy = hisi_sas_control_phy,
2109 .lldd_abort_task = hisi_sas_abort_task,
2110 .lldd_abort_task_set = hisi_sas_abort_task_set,
2111 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
2112 .lldd_lu_reset = hisi_sas_lu_reset,
2113 .lldd_query_task = hisi_sas_query_task,
2114 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
2115 .lldd_port_formed = hisi_sas_port_formed,
2116 .lldd_write_gpio = hisi_sas_write_gpio,
2117 .lldd_tmf_aborted = hisi_sas_tmf_aborted,
2118 .lldd_abort_timeout = hisi_sas_internal_abort_timeout,
2121 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
2123 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
2124 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
2126 for (i = 0; i < hisi_hba->queue_count; i++) {
2127 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2128 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2129 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];
2131 s = sizeof(struct hisi_sas_cmd_hdr);
2132 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
2133 memset(&cmd_hdr[j], 0, s);
2137 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2138 memset(hisi_hba->complete_hdr[i], 0, s);
2142 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
2143 memset(hisi_hba->initial_fis, 0, s);
2145 s = max_command_entries * sizeof(struct hisi_sas_iost);
2146 memset(hisi_hba->iost, 0, s);
2148 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2149 memset(hisi_hba->breakpoint, 0, s);
2151 s = sizeof(struct hisi_sas_sata_breakpoint);
2152 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
2153 memset(&sata_breakpoint[j], 0, s);
2155 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
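/*
 * Allocate all per-HBA memories: delivery and completion queues, ITCT,
 * IOST and breakpoint tables, slot info and per-slot DMA buffers, the
 * tag bitmap and the driver workqueue.
 */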
2157 int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2159 struct device *dev = hisi_hba->dev;
2160 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
2161 int max_command_entries_ru, sz_slot_buf_ru;
2162 int blk_cnt, slots_per_blk;
2164 sema_init(&hisi_hba->sem, 1);
2165 spin_lock_init(&hisi_hba->lock);
2166 for (i = 0; i < hisi_hba->n_phy; i++) {
2167 hisi_sas_phy_init(hisi_hba, i);
2168 hisi_hba->port[i].port_attached = 0;
2169 hisi_hba->port[i].id = -1;
2172 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
2173 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
2174 hisi_hba->devices[i].device_id = i;
2175 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
2178 for (i = 0; i < hisi_hba->queue_count; i++) {
2179 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2180 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2182 /* Completion queue structure */
2184 cq->hisi_hba = hisi_hba;
2185 spin_lock_init(&cq->poll_lock);
2187 /* Delivery queue structure */
2188 spin_lock_init(&dq->lock);
2189 INIT_LIST_HEAD(&dq->list);
2191 dq->hisi_hba = hisi_hba;
2193 /* Delivery queue */
2194 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2195 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
2196 &hisi_hba->cmd_hdr_dma[i],
2198 if (!hisi_hba->cmd_hdr[i])
2201 /* Completion queue */
2202 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2203 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
2204 &hisi_hba->complete_hdr_dma[i],
2206 if (!hisi_hba->complete_hdr[i])
2210 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
2211 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
2213 if (!hisi_hba->itct)
2216 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
2217 sizeof(struct hisi_sas_slot),
2219 if (!hisi_hba->slot_info)
2222 /* roundup to avoid overly large block size */
2223 max_command_entries_ru = roundup(max_command_entries, 64);
2224 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
2225 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
2227 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
2228 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
2229 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
2230 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
2231 slots_per_blk = s / sz_slot_buf_ru;
2233 for (i = 0; i < blk_cnt; i++) {
2234 int slot_index = i * slots_per_blk;
2238 buf = dmam_alloc_coherent(dev, s, &buf_dma,
2243 for (j = 0; j < slots_per_blk; j++, slot_index++) {
2244 struct hisi_sas_slot *slot;
2246 slot = &hisi_hba->slot_info[slot_index];
2248 slot->buf_dma = buf_dma;
2249 slot->idx = slot_index;
2251 buf += sz_slot_buf_ru;
2252 buf_dma += sz_slot_buf_ru;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	s = hisi_hba->slot_index_count = max_command_entries;
	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_hba->last_slot_index = 0;
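
	/*
	 * Reset and recovery work items are queued on this single-threaded
	 * workqueue, so they execute one at a time.
	 */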
	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		del_timer_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	if (hisi_sas_controller_prereset(hisi_hba))
		return;

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (hisi_sas_controller_prereset(rst->hisi_hba))
		goto rst_complete;

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
rst_complete:
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
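
/*
 * Read controller configuration (SAS address, phy and queue counts, and the
 * optional syscon/reset/clock properties) from DT or ACPI firmware.
 */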
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->hw->interrupt_preinit)
		return hisi_hba->hw->interrupt_preinit(hisi_hba);
	return 0;
}
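
/*
 * hisi_sas_probe()/hisi_sas_remove() are the common probe/remove entry points
 * used by the platform-device based hw layers. A minimal sketch of how a hw
 * module typically wires them up (names below are illustrative, not taken
 * from an actual hw layer):
 *
 *	static const struct hisi_sas_hw example_hw_ops = {
 *		.sht	 = &example_sht,
 *		.hw_init = example_hw_init,
 *		... remaining mandatory callbacks ...
 *	};
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		return hisi_sas_probe(pdev, &example_hw_ops);
 *	}
 *
 *	static void example_remove(struct platform_device *pdev)
 *	{
 *		hisi_sas_remove(pdev);
 *	}
 */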
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
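	/*
	 * hw layers that provide their own slot_index_alloc() manage the whole
	 * IPTT space themselves; otherwise part of it is reserved for internal
	 * abort/TMF commands, so fewer tags are exposed to the SCSI midlayer.
	 */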
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = hisi_sas_interrupt_preinit(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_hw_init;

	scsi_scan_host(shost);

	return 0;

err_out_hw_init:
	sas_unregister_ha(sha);
err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

void hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	del_timer_sync(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
#define DEBUGFS_ENABLE_DEFAULT "enabled"
bool hisi_sas_debugfs_enable = true;
u32 hisi_sas_debugfs_dump_count = 50;
#else
#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;
u32 hisi_sas_debugfs_dump_count = 1;
#endif

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable,
		 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
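
/*
 * Example usage (assuming this file is built as the hisi_sas_main module):
 * boot with
 *	hisi_sas_main.debugfs_enable=1 hisi_sas_main.debugfs_dump_count=8
 * to enable the debugfs interface and allow up to eight dump snapshots. The
 * 0444 permissions make both parameters read-only after load.
 */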

struct dentry *hisi_sas_debugfs_dir;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable) {
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
			pr_info("hisi_sas: Limiting debugfs dump count\n");
			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
		}
	}

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);