// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"
#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	unsigned long flags;

	if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
	    hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}
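
/*
 * Tag (IPTT) allocation is split in two: commands with a scsi_cmnd
 * reuse the block layer request tag directly, while internal commands
 * (TMFs, internal aborts) take a tag from the reserved region at the
 * top of the bitmap under hisi_hba->lock.
 */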
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;
	unsigned long flags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				hisi_hba->hw->max_command_entries -
				HISI_SAS_RESERVED_IPTT_CNT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return index;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	unsigned long flags;
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			struct sas_ssp_task *ssp_task = &task->ssp_task;
			struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif)
				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
		}
	}

	spin_lock_irqsave(&sas_dev->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req, int n_elem_resp)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
}
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req, int *n_elem_resp)
{
	struct device *dev = hisi_hba->dev;
	int rc = -ENOMEM;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			*n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						  1, DMA_FROM_DEVICE);
			if (!*n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req, *n_elem_resp);
prep_out:
	return rc;
}
static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);
			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}
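
/*
 * Stage a single command: map DMA (and DIF) buffers, allocate an IPTT
 * and a delivery queue slot, fill the command header, then mark the
 * slot ready. The doorbell is only rung later by the caller.
 */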
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	if (hisi_hba->reply_map) {
		int cpu = raw_smp_processor_id();
		unsigned int dq_index = hisi_hba->reply_map[cpu];

		*dq_pointer = dq = &hisi_hba->dq[dq_index];
	} else {
		*dq_pointer = dq = sas_dev->dq;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req, &n_elem_resp);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else {
		struct scsi_cmnd *scsi_cmnd = NULL;

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scsi_cmnd = qc->scsicmd;
			} else {
				scsi_cmnd = task->uldd_task;
			}
		}
		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
	}
	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags);
		rc = -EAGAIN;
		goto err_out_tag;
	}

	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags);
	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req, n_elem_resp);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}
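
/*
 * Common submission path for ordinary and TMF tasks. Prep and
 * start_delivery() form a two-phase sequence: dq->lock keeps delivery
 * ordered with respect to other submitters on the same queue.
 */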
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		if (in_softirq())
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing to do */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
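
/*
 * ITCT (device) slots are handed out round-robin starting just after
 * the last allocated index, so recently freed entries are not reused
 * immediately; each device is also bound to a delivery queue here.
 */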
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}
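
/*
 * Clear any stale state a (re)discovered device may hold: a CLEAR TASK
 * SET for SSP end devices, and for directly attached SATA disks a
 * libata hard reset (to clear previous STP affiliation) followed by
 * soft reset retries.
 */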
#define HISI_SAS_SRST_ATA_DISK_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
						  &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy) &&
		    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);

	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;

	if (!timer_pending(&phy->timer)) {
		dev_dbg(dev, "phy%d OOB ready\n", phy_no);
		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
		add_timer(&phy->timer);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}
/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
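
/*
 * Issue a TMF (SSP) or reset FIS (SATA) as an internal slow task and
 * wait for its completion, retrying up to TASK_RETRY times. On timeout
 * the completion tasklet must be flushed before the task may be freed.
 */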
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * flush tasklet to avoid free'ing task
					 * before using task in IO completion
					 */
					tasklet_kill(&cq->tasklet);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
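
/*
 * SATA soft reset is issued as a pair of internal "device reset" FISes
 * per link: one with SRST set, then one clearing it. Outstanding slots
 * are only released if the whole sequence completes.
 */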
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
		u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		}
	}
}
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}
	}
}
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}
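
/*
 * Controller reset is split into prepare/done halves (exported so the
 * hw-specific modules can reuse them): prepare quiesces the host and
 * sets HISI_SAS_REJECT_CMD_BIT; done re-inits the PHYs, refreshes port
 * IDs and rescans the topology against the saved phy state.
 */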
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}
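
/*
 * lldd_abort_task: for SSP, a TMF ABORT TASK is paired with an internal
 * abort of the IPTT; for SATA/STP the whole device is aborted and the
 * disk soft reset; for SMP only the internal abort is attempted.
 */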
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			tasklet_kill(&cq->tasklet);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			tasklet_kill(&cq->tasklet);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}
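
/*
 * For a direct-attached device the reset completion is signalled via
 * phy->reset_completion from the phy-up interrupt path; on timeout the
 * phy is reported down. Expander-attached resets go through libsas.
 */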
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	DECLARE_COMPLETION_ONSTACK(phyreset);
	int rc, reset_type;

	if (scsi_is_sas_phy_local(local_phy)) {
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? 1 : 0;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If in init state, we rely on caller to wait for link to be
		 * ready; otherwise, delay.
		 */
		msleep(2000);
	}

	return rc;
}
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		rc = hisi_sas_softreset_ata_disk(device);
		if (rc == TMF_RESP_FUNC_FAILED)
			return TMF_RESP_FUNC_FAILED;
	}

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
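
/*
 * Internal aborts bypass the normal task path: they take a reserved
 * IPTT (no scsi_cmnd) and are queued on the caller-specified delivery
 * queue, so they can be fanned out per-CQ for device-scope aborts.
 */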
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
/**
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag, dq);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * flush tasklet to avoid free'ing task
				 * before using task in IO completion
				 */
				tasklet_kill(&cq->tasklet);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
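
/*
 * Dispatcher: a single-command abort is issued on the delivery queue
 * the command was sent on; a device-scope abort is replicated to one
 * dq per online CQ so every completion path sees it.
 */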
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->pci_irq_mask;

			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
				u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}
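
/*
 * @rdy distinguishes a phy that dropped but is ready again (treated as
 * bytes-dmaed/port-formed) from a real loss of signal, which tears the
 * port down unless a controller reset or phy reset is in flight.
 */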
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
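/*
 * hisi_sas_alloc() - allocate all per-host DMA and driver memory.
 * All buffers are dmam-/devm-managed, so the error path only reports
 * -ENOMEM and cleanup happens automatically when the device goes away.
 * The workqueue is the one exception: it is destroyed in hisi_sas_free().
 */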
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL | __GFP_ZERO);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;
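	/*
	 * Worked example (illustrative sizes only): with
	 * max_command_entries_ru = 4096 and sz_slot_buf_ru = 576,
	 * s = lcm(4096, 576) = 36864, so each 36864-byte block holds
	 * slots_per_blk = 36864 / 576 = 64 slot buffers, and
	 * blk_cnt = (4096 * 576) / 36864 = 64 blocks cover all slots.
	 * Using the lcm keeps every block the same size while dividing
	 * evenly into whole, 64-byte-aligned slot buffers.
	 */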
	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL | __GFP_ZERO);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}
	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);
	hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
		HISI_SAS_RESERVED_IPTT_CNT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		del_timer_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
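/*
 * hisi_sas_get_fw_info() - read controller properties from firmware.
 * device_property_*() works for both DT and ACPI firmware; the syscon
 * and ctrl-*-reg lookups are only performed when an OF node is present,
 * since they are specific to the platform-device (DT) probe path.
 */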
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = hisi_hba->hw->max_command_entries;
		shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
	} else {
		shost->can_queue = hisi_hba->hw->max_command_entries -
			HISI_SAS_RESERVED_IPTT_CNT;
		shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
			HISI_SAS_RESERVED_IPTT_CNT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
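/*
 * A minimal sketch of how a HW-version driver is expected to wrap
 * hisi_sas_probe()/hisi_sas_remove() from its platform driver (the names
 * here are illustrative; see the v1/v2 HW drivers for the real tables):
 *
 *	static int my_v_hw_probe(struct platform_device *pdev)
 *	{
 *		return hisi_sas_probe(pdev, &my_v_hw_ops);
 *	}
 *
 *	static int my_v_hw_remove(struct platform_device *pdev)
 *	{
 *		return hisi_sas_remove(pdev);
 *	}
 */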
struct dentry *hisi_sas_debugfs_dir;

static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = hisi_hba->hw->complete_hdr_size;
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++)
		memcpy(hisi_hba->debugfs_complete_hdr[i],
		       hisi_hba->complete_hdr[i],
		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}
static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
	int i, j;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;

		debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
		cmd_hdr = hisi_hba->cmd_hdr[i];

		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
			       queue_entry_size);
	}
}
static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
	const struct hisi_sas_debugfs_reg *port =
		hisi_hba->hw->debugfs_reg_port;
	int i, phy_cnt;
	u32 offset;
	u32 *databuf;

	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
		databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
		for (i = 0; i < port->count; i++, databuf++) {
			offset = port->base_off + 4 * i;
			*databuf = port->read_port_reg(hisi_hba, phy_cnt,
						       offset);
		}
	}
}
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
	u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg;
	const struct hisi_sas_debugfs_reg *global =
		hisi_hba->hw->debugfs_reg_global;
	int i;

	for (i = 0; i < global->count; i++, databuf++)
		*databuf = global->read_global_reg(hisi_hba, 4 * i);
}
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
	void *databuf = hisi_hba->debugfs_itct;
	struct hisi_sas_itct *itct;
	int i;

	itct = hisi_hba->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
		databuf += sizeof(struct hisi_sas_itct);
	}
}
static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
	int max_command_entries = hisi_hba->hw->max_command_entries;
	void *databuf = hisi_hba->debugfs_iost;
	struct hisi_sas_iost *iost;
	int i;

	iost = hisi_hba->iost;

	for (i = 0; i < max_command_entries; i++, iost++) {
		memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
		databuf += sizeof(struct hisi_sas_iost);
	}
}
static const char *
hisi_sas_debugfs_to_reg_name(int off, int base_off,
			     const struct hisi_sas_debugfs_reg_lu *lu)
{
	for (; lu->name; lu++) {
		if (off == lu->off - base_off)
			return lu->name;
	}

	return NULL;
}
static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
				       struct seq_file *s)
{
	const struct hisi_sas_debugfs_reg *reg = ptr;
	int i;

	for (i = 0; i < reg->count; i++) {
		int off = i * 4;
		const char *name;

		name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
						    reg->lu);

		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[i], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[i]);
	}
}
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global;

	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg,
				   reg_global, s);

	return 0;
}
static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_global_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_global_fops = {
	.open = hisi_sas_debugfs_global_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
	struct hisi_sas_phy *phy = s->private;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
	u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];

	hisi_sas_debugfs_print_reg(databuf, reg_port, s);

	return 0;
}

static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_port_fops = {
	.open = hisi_sas_debugfs_port_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_show_row_64(struct seq_file *s, int index,
				int sz, __le64 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 8; i++, ptr++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
		if (!(i % 2))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");

	return 0;
}

static int hisi_sas_show_row_32(struct seq_file *s, int index,
				int sz, __le32 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 4; i++, ptr++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
		if (!(i % 4))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");

	return 0;
}
static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
{
	struct hisi_sas_cq *cq = cq_ptr;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
	__le32 *complete_hdr = complete_queue +
			(hisi_hba->hw->complete_hdr_size * slot);

	return hisi_sas_show_row_32(s, slot,
				    hisi_hba->hw->complete_hdr_size,
				    complete_hdr);
}
static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
	struct hisi_sas_cq *cq = s->private;
	int slot, ret;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		ret = hisi_sas_cq_show_slot(s, slot, cq);
		if (ret)
			return ret;
	}

	return 0;
}

static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_cq_fops = {
	.open = hisi_sas_debugfs_cq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
	struct hisi_sas_dq *dq = dq_ptr;
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
	__le32 *cmd_hdr = cmd_queue +
		sizeof(struct hisi_sas_cmd_hdr) * slot;

	return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr),
				    cmd_hdr);
}

static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
{
	int slot, ret;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		ret = hisi_sas_dq_show_slot(s, slot, s->private);
		if (ret)
			return ret;
	}

	return 0;
}

static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_dq_fops = {
	.open = hisi_sas_debugfs_dq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
	int i, ret, max_command_entries = hisi_hba->hw->max_command_entries;
	__le64 *iost = &debugfs_iost->qw0;

	for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
		ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost),
					   iost);
		if (ret)
			return ret;

		iost = &debugfs_iost->qw0;
	}

	return 0;
}

static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_iost_fops = {
	.open = hisi_sas_debugfs_iost_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
	int i, ret;
	struct hisi_hba *hisi_hba = s->private;
	struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
	__le64 *itct = &debugfs_itct->qw0;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
		ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct),
					   itct);
		if (ret)
			return ret;

		itct = &debugfs_itct->qw0;
	}

	return 0;
}

static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_itct_fops = {
	.open = hisi_sas_debugfs_itct_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
	struct dentry *dump_dentry;
	struct dentry *dentry;
	char name[256];
	int p;
	int c;
	int d;

	/* Create dump dir inside device dir */
	dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
	hisi_hba->debugfs_dump_dentry = dump_dentry;

	debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_global_fops);

	/* Create port dir and files */
	dentry = debugfs_create_dir("port", dump_dentry);
	for (p = 0; p < hisi_hba->n_phy; p++) {
		snprintf(name, 256, "%d", p);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
				    &hisi_sas_debugfs_port_fops);
	}

	/* Create CQ dir and files */
	dentry = debugfs_create_dir("cq", dump_dentry);
	for (c = 0; c < hisi_hba->queue_count; c++) {
		snprintf(name, 256, "%d", c);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
				    &hisi_sas_debugfs_cq_fops);
	}

	/* Create DQ dir and files */
	dentry = debugfs_create_dir("dq", dump_dentry);
	for (d = 0; d < hisi_hba->queue_count; d++) {
		snprintf(name, 256, "%d", d);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
				    &hisi_sas_debugfs_dq_fops);
	}

	debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_iost_fops);

	debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_itct_fops);
}
static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
{
	hisi_hba->hw->snapshot_prepare(hisi_hba);

	hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);

	hisi_sas_debugfs_create_files(hisi_hba);

	hisi_hba->hw->snapshot_restore(hisi_hba);
}
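/*
 * Writing '1' to the trigger_dump file queues the snapshot work below;
 * assuming debugfs is mounted in the usual place, something like
 *
 *	echo 1 > /sys/kernel/debug/hisi_sas/<dev>/trigger_dump
 *
 * captures one dump. Further triggers are rejected once debugfs_snapshot
 * is set, since it is never cleared in this file.
 */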
static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
						   const char __user *user_buf,
						   size_t count, loff_t *ppos)
{
	struct hisi_hba *hisi_hba = file->f_inode->i_private;
	char buf[8];

	/* A bit racy, but don't care too much since it's only debugfs */
	if (hisi_hba->debugfs_snapshot)
		return -EFAULT;

	if (count > 8)
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	if (buf[0] != '1')
		return -EFAULT;

	queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	return count;
}

static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
	.write = &hisi_sas_debugfs_trigger_dump_write,
	.owner = THIS_MODULE,
};
void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, debugfs_work);

	if (hisi_hba->debugfs_snapshot)
		return;
	hisi_hba->debugfs_snapshot = true;

	hisi_sas_debugfs_snapshot_regs(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
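/*
 * hisi_sas_debugfs_init() - create the per-host debugfs directory and
 * pre-allocate every dump buffer up front, so that taking a snapshot
 * later never has to allocate memory. On any allocation failure all
 * previously allocated buffers are freed and the directory is removed;
 * the driver then simply runs without debugfs dumps.
 */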
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
	int max_command_entries = hisi_hba->hw->max_command_entries;
	struct device *dev = hisi_hba->dev;
	int p, i, c, d;
	size_t sz;

	hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
						   hisi_sas_debugfs_dir);
	debugfs_create_file("trigger_dump", 0600,
			    hisi_hba->debugfs_dir,
			    hisi_hba,
			    &hisi_sas_debugfs_trigger_dump_fops);

	/* Alloc buffer for global */
	sz = hisi_hba->hw->debugfs_reg_global->count * 4;
	hisi_hba->debugfs_global_reg =
		devm_kmalloc(dev, sz, GFP_KERNEL);

	if (!hisi_hba->debugfs_global_reg)
		goto fail_global;

	/* Alloc buffer for port */
	sz = hisi_hba->hw->debugfs_reg_port->count * 4;
	for (p = 0; p < hisi_hba->n_phy; p++) {
		hisi_hba->debugfs_port_reg[p] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_port_reg[p])
			goto fail_port;
	}

	/* Alloc buffer for cq */
	sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
	for (c = 0; c < hisi_hba->queue_count; c++) {
		hisi_hba->debugfs_complete_hdr[c] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_complete_hdr[c])
			goto fail_cq;
	}

	/* Alloc buffer for dq */
	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
	for (d = 0; d < hisi_hba->queue_count; d++) {
		hisi_hba->debugfs_cmd_hdr[d] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_cmd_hdr[d])
			goto fail_iost_dq;
	}

	/* Alloc buffer for iost */
	sz = max_command_entries * sizeof(struct hisi_sas_iost);

	hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost)
		goto fail_iost_dq;

	/* Alloc buffer for itct */
	/* New memory allocations must be placed before the itct one */
	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);

	hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct)
		goto fail_itct;

	return;
fail_itct:
	devm_kfree(dev, hisi_hba->debugfs_iost);
fail_iost_dq:
	for (i = 0; i < d; i++)
		devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
fail_cq:
	for (i = 0; i < c; i++)
		devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
fail_port:
	for (i = 0; i < p; i++)
		devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
	devm_kfree(dev, hisi_hba->debugfs_global_reg);
fail_global:
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
	dev_dbg(dev, "failed to init debugfs!\n");
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
{
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
bool hisi_sas_debugfs_enable;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable)
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}
3313 module_exit(hisi_sas_exit);
3315 MODULE_LICENSE("GPL");
3316 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
3317 MODULE_DESCRIPTION("HISILICON SAS controller driver");
3318 MODULE_ALIAS("platform:" DRV_NAME);