GNU Linux-libre 4.14.266-gnu1
[releases.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11
12 #include "hisi_sas.h"
13 #include "../libsas/sas_internal.h"
14 #define DRV_NAME "hisi_sas"
15
/*
 * A device is "gone" when it was never allocated or its dev_type has
 * been reset to SAS_PHY_UNUSED (see hisi_sas_dev_gone()).
 * Fully parenthesize the macro argument so DEV_IS_GONE() stays correct
 * for any expression passed as @dev.
 */
#define DEV_IS_GONE(dev) \
	((!(dev)) || ((dev)->dev_type == SAS_PHY_UNUSED))
18
19 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
20                                 u8 *lun, struct hisi_sas_tmf_task *tmf);
21 static int
22 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
23                              struct domain_device *device,
24                              int abort_flag, int tag);
25 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
26
/*
 * hisi_sas_get_ata_protocol - map an ATA command opcode to the SATA
 * frame protocol the hardware command header must carry.
 * @cmd: ATA taskfile command byte
 * @direction: DMA data direction, consulted only for unknown opcodes
 *
 * Returns one of HISI_SAS_SATA_PROTOCOL_{FPDMA,PIO,DMA,NONDATA}.
 * Opcodes not listed below fall back to NONDATA when no data phase is
 * expected, otherwise PIO.
 */
u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
{
	switch (cmd) {
	/* NCQ (queued) commands use the FPDMA protocol */
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
	return HISI_SAS_SATA_PROTOCOL_FPDMA;

	/* PIO data-in/data-out commands */
	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
	return HISI_SAS_SATA_PROTOCOL_PIO;

	/* Non-queued DMA commands */
	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
	return HISI_SAS_SATA_PROTOCOL_DMA;

	/* Commands with no data phase */
	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
	return HISI_SAS_SATA_PROTOCOL_NONDATA;
	default:
		/* Unknown opcode: guess from the requested data direction. */
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
87
88 void hisi_sas_sata_done(struct sas_task *task,
89                             struct hisi_sas_slot *slot)
90 {
91         struct task_status_struct *ts = &task->task_status;
92         struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
93         struct hisi_sas_status_buffer *status_buf =
94                         hisi_sas_status_buf_addr_mem(slot);
95         u8 *iu = &status_buf->iu[0];
96         struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
97
98         resp->frame_len = sizeof(struct dev_to_host_fis);
99         memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
100
101         ts->buf_valid_size = sizeof(*resp);
102 }
103 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
104
105 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
106 {
107         struct ata_queued_cmd *qc = task->uldd_task;
108
109         if (qc) {
110                 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
111                         qc->tf.command == ATA_CMD_FPDMA_READ) {
112                         *tag = qc->tag;
113                         return 1;
114                 }
115         }
116         return 0;
117 }
118 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
119
120 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
121 {
122         return device->port->ha->lldd_ha;
123 }
124
/* Convert a libsas asd_sas_port to the hisi_sas_port embedding it. */
struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
130
131 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
132 {
133         int phy_no;
134
135         for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
136                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
137 }
138 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
139
140 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
141 {
142         void *bitmap = hisi_hba->slot_index_tags;
143
144         clear_bit(slot_idx, bitmap);
145 }
146
/* Release an IPTT; thin wrapper kept for symmetry with _alloc(). */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}
151
152 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
153 {
154         void *bitmap = hisi_hba->slot_index_tags;
155
156         set_bit(slot_idx, bitmap);
157 }
158
159 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
160 {
161         unsigned int index;
162         void *bitmap = hisi_hba->slot_index_tags;
163
164         index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
165         if (index >= hisi_hba->slot_index_count)
166                 return -SAS_QUEUE_FULL;
167         hisi_sas_slot_index_set(hisi_hba, index);
168         *slot_idx = index;
169         return 0;
170 }
171
172 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
173 {
174         int i;
175
176         for (i = 0; i < hisi_hba->slot_index_count; ++i)
177                 hisi_sas_slot_index_clear(hisi_hba, i);
178 }
179
/*
 * hisi_sas_slot_task_free - tear down a completed/aborted slot.
 * @hisi_hba: owning controller
 * @task: sas_task bound to the slot, may be NULL
 * @slot: slot to release
 *
 * Detaches the task from the slot, unmaps its scatterlist (non-ATA
 * only), drops the per-device running-request count, frees the slot's
 * DMA buffer and returns its IPTT to the tag bitmap.  Callers hold
 * hisi_hba->lock (see hisi_sas_slot_abort() / hisi_sas_task_prep()).
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{

	if (task) {
		struct device *dev = hisi_hba->dev;
		struct domain_device *device = task->dev;
		struct hisi_sas_device *sas_dev = device->lldd_dev;

		/* Already freed (or never bound): nothing to do. */
		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter, slot->n_elem,
					     task->data_dir);

		if (sas_dev)
			atomic64_dec(&sas_dev->running_req);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	/* Drop the slot from the device's list and recycle the IPTT. */
	list_del_init(&slot->entry);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
215
/* Thin dispatch to the hw-specific SMP command-header builder. */
static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}
221
/* Thin dispatch to the hw-specific SSP builder; @tmf used when @is_tmf. */
static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}
228
/* Thin dispatch to the hw-specific SATA/STP command builder. */
static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}
234
/* Thin dispatch to the hw-specific internal-abort command builder. */
static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
242
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.  Runs from the slot's abort_slot work item
 * (see INIT_WORK in hisi_sas_task_prep()).
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	/* Only SSP tasks can be aborted with an ABORT TASK TMF. */
	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	/* The slot's IPTT identifies the task to be aborted. */
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task regardless of TMF outcome */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}
279
/*
 * hisi_sas_task_prep - allocate and fill a delivery-queue slot for @task.
 * @task: libsas task to deliver
 * @dq: delivery queue chosen for the task's device
 * @is_tmf: non-zero when the task carries a task management function
 * @tmf: TMF descriptor, used only when @is_tmf is set
 * @pass: incremented on success so the caller knows to start delivery
 *
 * Caller holds dq->lock (see hisi_sas_task_exec()).  On success the
 * prepared slot is stored in dq->slot_prep.  Returns 0 on success,
 * SAS_PHY_DOWN when the device/port cannot accept commands, or a
 * negative errno on resource/prep failure.
 */
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
		*dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
		int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return SAS_PHY_DOWN;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return SAS_PHY_DOWN;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return SAS_PHY_DOWN;
	}

	/* Map the scatterlist for non-ATA protocols only. */
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	/* Grab an IPTT; hisi_hba->lock protects the tag bitmap. */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	/* Start from clean header, command table and status buffer. */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	/* Fill the protocol-specific command header/table. */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		goto err_out_buf;
	}

	list_add_tail(&slot->entry, &sas_dev->list);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);
	++(*pass);

	return 0;

	/* Error unwind, in reverse order of acquisition. */
err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		slot->buf_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}
438
439 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
440                               int is_tmf, struct hisi_sas_tmf_task *tmf)
441 {
442         u32 rc;
443         u32 pass = 0;
444         unsigned long flags;
445         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
446         struct device *dev = hisi_hba->dev;
447         struct domain_device *device = task->dev;
448         struct hisi_sas_device *sas_dev = device->lldd_dev;
449         struct hisi_sas_dq *dq = sas_dev->dq;
450
451         if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
452                 return -EINVAL;
453
454         /* protect task_prep and start_delivery sequence */
455         spin_lock_irqsave(&dq->lock, flags);
456         rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
457         if (rc)
458                 dev_err(dev, "task exec: failed[%d]!\n", rc);
459
460         if (likely(pass))
461                 hisi_hba->hw->start_delivery(dq);
462         spin_unlock_irqrestore(&dq->lock, flags);
463
464         return rc;
465 }
466
/*
 * hisi_sas_bytes_dmaed - push a PHY's received frame up to libsas.
 * @hisi_hba: owning controller
 * @phy_no: PHY index
 *
 * Refreshes the sas_phy link-rate attributes, fills in the identify
 * frame for SAS-attached PHYs, then notifies libsas that OOB is done
 * and the identify/FIS bytes have been DMA'd so port formation and
 * discovery can proceed.  No-op when the PHY has nothing attached.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		/* Keep any user-configured min/max if already set. */
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing: frame_rcvd already holds the initial D2H FIS. */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
507
508 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
509 {
510         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
511         struct hisi_sas_device *sas_dev = NULL;
512         int i;
513
514         spin_lock(&hisi_hba->lock);
515         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
516                 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
517                         int queue = i % hisi_hba->queue_count;
518                         struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
519
520                         hisi_hba->devices[i].device_id = i;
521                         sas_dev = &hisi_hba->devices[i];
522                         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
523                         sas_dev->dev_type = device->dev_type;
524                         sas_dev->hisi_hba = hisi_hba;
525                         sas_dev->sas_device = device;
526                         sas_dev->dq = dq;
527                         INIT_LIST_HEAD(&hisi_hba->devices[i].list);
528                         break;
529                 }
530         }
531         spin_unlock(&hisi_hba->lock);
532
533         return sas_dev;
534 }
535
/*
 * libsas dev_found callback: allocate a hisi_sas_device for @device,
 * program the ITCT entry and, for expander-attached devices, record
 * which expander phy leads to the device.  Returns 0 on success or
 * -EINVAL on failure.
 */
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;

	/* Some HW versions provide their own device allocator. */
	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		/* Find the expander phy whose attached address matches. */
		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			/*
			 * NOTE(review): sas_dev and its ITCT entry are not
			 * released on this error path - confirm whether a
			 * cleanup is needed here.
			 */
			return -EINVAL;
		}
	}

	return 0;
}
581
/*
 * scsi_host_template slave_configure: run the libsas default setup,
 * then cap non-SATA devices at a queue depth of 64.
 */
static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	int rc;

	rc = sas_slave_configure(sdev);
	if (rc)
		return rc;

	if (!dev_is_sata(ddev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
594
/*
 * scsi_host_template scan_start: bring up the PHYs; discovery then
 * proceeds from the resulting PHY-up events (see scan_finished).
 */
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
601
602 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
603 {
604         struct hisi_hba *hisi_hba = shost_priv(shost);
605         struct sas_ha_struct *sha = &hisi_hba->sha;
606
607         /* Wait for PHY up interrupt to occur */
608         if (time < HZ)
609                 return 0;
610
611         sas_drain_work(sha);
612         return 1;
613 }
614
/*
 * Deferred PHY-up handling.  Runs from the phyup_ws work item because
 * sl_notify() requires a sleepable context; afterwards the received
 * identify/FIS bytes are pushed up to libsas.
 */
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, struct hisi_sas_phy, phyup_ws);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
626
/*
 * hisi_sas_phy_init - one-time init of a hisi_sas_phy and its libsas
 * asd_sas_phy for PHY @phy_no.  Called at probe time, before the PHYs
 * are started.
 */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;	/* bound later in port_notify_formed */
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	/* PHY-up events are completed in process context. */
	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}
651
/*
 * Called when libsas forms a port: bind the hisi_sas_port to the PHY's
 * hardware port id and hook it onto the asd_sas_port.  Nothing to do
 * when the PHY has no port yet.
 */
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	/* Update port/phy linkage atomically w.r.t. the I/O path. */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
672
/*
 * Complete @task as aborted (SAS_TASK_COMPLETE / SAS_ABORTED_TASK) and
 * free its slot.  @task may be NULL, in which case only the slot is
 * released.  Callers hold hisi_hba->lock (see hisi_sas_release_task()).
 */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
693
/*
 * Abort-complete every slot still queued on @device.
 * hisi_hba.lock should be locked by the caller.
 */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	/* _safe: hisi_sas_do_release_task() unlinks each slot. */
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
704
705 static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
706 {
707         struct hisi_sas_device *sas_dev;
708         struct domain_device *device;
709         int i;
710
711         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
712                 sas_dev = &hisi_hba->devices[i];
713                 device = sas_dev->sas_device;
714
715                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
716                     !device)
717                         continue;
718
719                 hisi_sas_release_task(hisi_hba, device);
720         }
721 }
722
/* Deregister @device from the HW, when this HW version supports it. */
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
729
/*
 * libsas dev_gone callback: abort everything the controller still
 * holds for @device, deregister it from the hardware, free its ITCT
 * and return the device-table entry to SAS_PHY_UNUSED.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "found dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	/* Flush all in-flight commands for this device first. */
	hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

	hisi_sas_dereg_device(hisi_hba, device);

	hisi_hba->hw->free_device(hisi_hba, sas_dev);
	device->lldd_dev = NULL;
	memset(sas_dev, 0, sizeof(*sas_dev));
	/* Mark the table slot reusable by hisi_sas_alloc_dev(). */
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
749
/* sas_domain_function_template entry: deliver a normal (non-TMF) task. */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
754
/*
 * libsas phy-control callback: dispatch @func to the hw-specific PHY
 * hooks.  @funcdata carries the linkrates for PHY_FUNC_SET_LINK_RATE.
 * Returns 0 on success or -EOPNOTSUPP for unsupported functions.
 */
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		/* Bounce the PHY: disable, settle, re-enable. */
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_enable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru: no get_events hook means unsupported */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
792
/*
 * Completion callback for internal slow tasks.  If del_timer() wins
 * the race we own the completion; otherwise the timeout handler has
 * already fired (or is running) and completes it instead.
 */
static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}
799
/*
 * Timer handler for internal TMF tasks: mark the task aborted unless
 * it already completed, then wake the waiter blocked in
 * hisi_sas_exec_internal_tmf_task().
 */
static void hisi_sas_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}
812
/* Internal TMF tasks: 20 s timeout per attempt, up to 3 attempts. */
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
815 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
816                                            void *parameter, u32 para_len,
817                                            struct hisi_sas_tmf_task *tmf)
818 {
819         struct hisi_sas_device *sas_dev = device->lldd_dev;
820         struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
821         struct device *dev = hisi_hba->dev;
822         struct sas_task *task;
823         int res, retry;
824
825         for (retry = 0; retry < TASK_RETRY; retry++) {
826                 task = sas_alloc_slow_task(GFP_KERNEL);
827                 if (!task)
828                         return -ENOMEM;
829
830                 task->dev = device;
831                 task->task_proto = device->tproto;
832
833                 if (dev_is_sata(device)) {
834                         task->ata_task.device_control_reg_update = 1;
835                         memcpy(&task->ata_task.fis, parameter, para_len);
836                 } else {
837                         memcpy(&task->ssp_task, parameter, para_len);
838                 }
839                 task->task_done = hisi_sas_task_done;
840
841                 task->slow_task->timer.data = (unsigned long) task;
842                 task->slow_task->timer.function = hisi_sas_tmf_timedout;
843                 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
844                 add_timer(&task->slow_task->timer);
845
846                 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
847
848                 if (res) {
849                         del_timer(&task->slow_task->timer);
850                         dev_err(dev, "abort tmf: executing internal task failed: %d\n",
851                                 res);
852                         goto ex_err;
853                 }
854
855                 wait_for_completion(&task->slow_task->completion);
856                 res = TMF_RESP_FUNC_FAILED;
857                 /* Even TMF timed out, return direct. */
858                 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
859                         if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
860                                 struct hisi_sas_slot *slot = task->lldd_task;
861
862                                 dev_err(dev, "abort tmf: TMF task timeout\n");
863                                 if (slot)
864                                         slot->task = NULL;
865
866                                 goto ex_err;
867                         }
868                 }
869
870                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
871                      task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
872                         res = TMF_RESP_FUNC_COMPLETE;
873                         break;
874                 }
875
876                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
877                         task->task_status.stat == TMF_RESP_FUNC_SUCC) {
878                         res = TMF_RESP_FUNC_SUCC;
879                         break;
880                 }
881
882                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
883                       task->task_status.stat == SAS_DATA_UNDERRUN) {
884                         /* no error, but return the number of bytes of
885                          * underrun
886                          */
887                         dev_warn(dev, "abort tmf: task to dev %016llx "
888                                  "resp: 0x%x sts 0x%x underrun\n",
889                                  SAS_ADDR(device->sas_addr),
890                                  task->task_status.resp,
891                                  task->task_status.stat);
892                         res = task->task_status.residual;
893                         break;
894                 }
895
896                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
897                         task->task_status.stat == SAS_DATA_OVERRUN) {
898                         dev_warn(dev, "abort tmf: blocked task error\n");
899                         res = -EMSGSIZE;
900                         break;
901                 }
902
903                 dev_warn(dev, "abort tmf: task to dev "
904                          "%016llx resp: 0x%x status 0x%x\n",
905                          SAS_ADDR(device->sas_addr), task->task_status.resp,
906                          task->task_status.stat);
907                 sas_free_task(task);
908                 task = NULL;
909         }
910 ex_err:
911         if (retry == TASK_RETRY)
912                 dev_warn(dev, "abort tmf: executing internal task failed!\n");
913         sas_free_task(task);
914         return res;
915 }
916
917 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
918                 bool reset, int pmp, u8 *fis)
919 {
920         struct ata_taskfile tf;
921
922         ata_tf_init(dev, &tf);
923         if (reset)
924                 tf.ctl |= ATA_SRST;
925         else
926                 tf.ctl &= ~ATA_SRST;
927         tf.command = ATA_CMD_DEV_RESET;
928         ata_tf_to_fis(&tf, pmp, 0, fis);
929 }
930
/*
 * hisi_sas_softreset_ata_disk - perform an ATA software reset of @device.
 *
 * Two-phase sequence over every edge link of the ATA port: first send a
 * DEV RESET FIS with SRST asserted on each link; only if all succeed,
 * send the SRST-deasserted FIS to complete the reset.  On full success
 * all outstanding tasks for the device are released under the HBA lock.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success, otherwise the failing
 * result from hisi_sas_exec_internal_tmf_task().
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	/* Phase 1: assert SRST on every edge link */
	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	/* Phase 2: de-assert SRST, but only if phase 1 fully succeeded */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	/* Disk was reset: complete all of its in-flight tasks */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}
973
974 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
975                                 u8 *lun, struct hisi_sas_tmf_task *tmf)
976 {
977         struct sas_ssp_task ssp_task;
978
979         if (!(device->tproto & SAS_PROTOCOL_SSP))
980                 return TMF_RESP_FUNC_ESUPP;
981
982         memcpy(ssp_task.LUN, lun, 8);
983
984         return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
985                                 sizeof(ssp_task), tmf);
986 }
987
988 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
989                 struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
990 {
991         struct hisi_sas_device  *sas_dev;
992         struct domain_device *device;
993         int i;
994
995         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
996                 sas_dev = &hisi_hba->devices[i];
997                 device = sas_dev->sas_device;
998                 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
999                                 || !device || (device->port != sas_port))
1000                         continue;
1001
1002                 hisi_hba->hw->free_device(hisi_hba, sas_dev);
1003
1004                 /* Update linkrate of directly attached device. */
1005                 if (!device->parent)
1006                         device->linkrate = linkrate;
1007
1008                 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1009         }
1010 }
1011
/*
 * hisi_sas_rescan_topology - reconcile libsas state with phy state after
 * a controller reset.
 * @old_state: per-phy up/down bitmap sampled before the reset
 * @state: per-phy up/down bitmap sampled after the reset
 *
 * For phys that came back up, refresh the port id and device ITCTs once
 * per port and, for expander-attached ports, raise a broadcast event so
 * libsas revalidates the domain.  Phys that were up but are now down are
 * handed to hisi_sas_phy_down().  Finally drain the host workqueue so
 * all queued libsas events finish before returning.
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
		/* process each port only once (wide ports span phys) */
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & (1 << phy_no)) {
			if (do_port_check && sas_port) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;
				port->id = phy->port_id;
				hisi_sas_refresh_port_id(hisi_hba,
						sas_port, sas_phy->linkrate);

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}

	drain_workqueue(hisi_hba->shost->work_q);
}
1051
/*
 * hisi_sas_controller_reset - soft-reset the HBA and rebuild topology.
 *
 * Serialised by HISI_SAS_RESET_BIT; incoming commands are rejected while
 * HISI_SAS_REJECT_CMD_BIT is set.  Returns 0 on success, -1 when reset
 * is unsupported or already in progress, or the hw soft_reset() error.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	/* Only one controller reset at a time */
	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_dbg(dev, "controller resetting...\n");
	/* Snapshot phy up/down state to diff against after the reset */
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		goto out;
	}
	/* All in-flight tasks died with the reset: complete them */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	sas_ha->notify_ha_event(sas_ha, HAE_RESET);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	drain_workqueue(hisi_hba->wq);
	drain_workqueue(shost->work_q);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_dbg(dev, "controller reset complete\n");

out:
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}
1101
/*
 * hisi_sas_abort_task - libsas task abort handler.
 *
 * SSP: issue a TMF ABORT TASK, then an internal abort of the command in
 * the controller; the slot is released here only if the TMF reports the
 * IO is not in the device and the internal abort did not already
 * complete it.  SATA/STP: abort all device IO and soft-reset the disk.
 * SMP: internally abort the single command by tag.
 */
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	/* Task already completed: nothing to abort */
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task) {
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_sas_do_release_task(hisi_hba, task, slot);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			hisi_sas_internal_task_abort(hisi_hba, device,
						     HISI_SAS_INT_ABT_DEV, 0);
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
			     HISI_SAS_INT_ABT_CMD, tag);
		/* Internal abort failed: reclaim the slot ourselves */
		if (rc == TMF_RESP_FUNC_FAILED) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_do_release_task(hisi_hba, task, slot);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
1180
1181 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1182 {
1183         struct hisi_sas_tmf_task tmf_task;
1184         int rc = TMF_RESP_FUNC_FAILED;
1185
1186         tmf_task.tmf = TMF_ABORT_TASK_SET;
1187         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1188
1189         return rc;
1190 }
1191
1192 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1193 {
1194         int rc = TMF_RESP_FUNC_FAILED;
1195         struct hisi_sas_tmf_task tmf_task;
1196
1197         tmf_task.tmf = TMF_CLEAR_ACA;
1198         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1199
1200         return rc;
1201 }
1202
1203 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1204 {
1205         struct sas_phy *phy = sas_get_local_phy(device);
1206         int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1207                         (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1208         rc = sas_phy_reset(phy, reset_type);
1209         sas_put_local_phy(phy);
1210         msleep(2000);
1211         return rc;
1212 }
1213
/*
 * hisi_sas_I_T_nexus_reset - libsas I_T nexus reset handler.
 *
 * Only proceeds when error handling has already marked the device
 * (HISI_SAS_DEV_EH).  Internally aborts all device IO, deregisters the
 * device, then resets the local phy; on success any remaining tasks for
 * the device are released under the HBA lock.
 */
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	/* Kill outstanding device IO in the controller before resetting */
	hisi_sas_internal_task_abort(hisi_hba, device,
					HISI_SAS_INT_ABT_DEV, 0);
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}
1238
/*
 * hisi_sas_lu_reset - libsas logical unit reset handler.
 *
 * SATA: internally abort device IO, deregister the device, then
 * hard-reset the local phy.  SSP: issue a TMF LU RESET to @lun.  In
 * either case, on success all outstanding tasks for the device are
 * released under the HBA lock.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc == TMF_RESP_FUNC_FAILED)
			goto out;
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		/* sas_phy_reset() returns 0, not TMF_RESP_*, on success */
		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			     sas_dev->device_id, rc);
	return rc;
}
1284
1285 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1286 {
1287         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1288
1289         return hisi_sas_controller_reset(hisi_hba);
1290 }
1291
1292 static int hisi_sas_query_task(struct sas_task *task)
1293 {
1294         struct scsi_lun lun;
1295         struct hisi_sas_tmf_task tmf_task;
1296         int rc = TMF_RESP_FUNC_FAILED;
1297
1298         if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1299                 struct scsi_cmnd *cmnd = task->uldd_task;
1300                 struct domain_device *device = task->dev;
1301                 struct hisi_sas_slot *slot = task->lldd_task;
1302                 u32 tag = slot->idx;
1303
1304                 int_to_scsilun(cmnd->device->lun, &lun);
1305                 tmf_task.tmf = TMF_QUERY_TASK;
1306                 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1307
1308                 rc = hisi_sas_debug_issue_ssp_tmf(device,
1309                                                   lun.scsi_lun,
1310                                                   &tmf_task);
1311                 switch (rc) {
1312                 /* The task is still in Lun, release it then */
1313                 case TMF_RESP_FUNC_SUCC:
1314                 /* The task is not in Lun or failed, reset the phy */
1315                 case TMF_RESP_FUNC_FAILED:
1316                 case TMF_RESP_FUNC_COMPLETE:
1317                         break;
1318                 default:
1319                         rc = TMF_RESP_FUNC_FAILED;
1320                         break;
1321                 }
1322         }
1323         return rc;
1324 }
1325
/*
 * hisi_sas_internal_abort_task_exec - build and deliver an internal
 * abort command to the controller.
 * @device_id: ITCT device id of the target
 * @task: slow task carrying completion state for the abort
 * @abort_flag: HISI_SAS_INT_ABT_CMD (single IO) or HISI_SAS_INT_ABT_DEV
 * @task_tag: tag of the IO to abort (single-IO mode only)
 *
 * Allocates a slot index and a delivery-queue entry, preps the abort
 * command via the hw layer and starts delivery.  Returns 0 on success
 * or a negative value; on failure all acquired resources are unwound.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq;

	/* Controller is resetting: refuse new commands */
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	/* dq->lock stays held until delivery has been started */
	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
			GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	/* Clear command header, command table and status buffer */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_buf;


	list_add_tail(&slot->entry, &sas_dev->list);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);

	/* send abort command to the chip */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		slot->buf_dma);
err_out_tag:
	/* dq->lock is still held on both paths that reach this label */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
1425
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 *
 * Allocates a slow task with a 110ms timeout and hands it to
 * hisi_sas_internal_abort_task_exec().  Returns TMF_RESP_FUNC_COMPLETE
 * or TMF_RESP_FUNC_SUCC on success, TMF_RESP_FUNC_FAILED on timeout or
 * other failure, or a negative errno when the abort could not be queued.
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	if (!hisi_hba->hw->prep_abort)
		return -EOPNOTSUPP;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.data = (unsigned long)task;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	/* 110 ms for the internal abort to complete */
	task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			/* Orphan the slot so a late completion is harmless */
			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout.\n");
		}
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
1505
/* libsas port-formed hook: register the new port with this LLDD */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}
1510
1511 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1512 {
1513         struct asd_sas_phy *sas_phy = &phy->sas_phy;
1514         struct sas_phy *sphy = sas_phy->phy;
1515         struct sas_phy_data *d = sphy->hostdata;
1516
1517         phy->phy_attached = 0;
1518         phy->phy_type = 0;
1519         phy->port = NULL;
1520
1521         if (d->enable)
1522                 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
1523         else
1524                 sphy->negotiated_linkrate = SAS_PHY_DISABLED;
1525 }
1526
/*
 * hisi_sas_phy_down - handle a phy-down event from the hw layer.
 * @rdy: non-zero if the phy is down but ready again (renegotiated)
 *
 * When ready, re-report the attached device and port formation to
 * libsas; otherwise raise loss-of-signal and tear down the phy/port
 * association, clearing port_attached when no phy of the port remains.
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port  = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				/* Last phy of a wide port gone: detach port */
				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1558
1559
/* Shared SAS transport template for all hisi_sas hosts.
 * NOTE(review): presumably assigned during module init via libsas
 * transport attach — not visible in this chunk; confirm at module_init.
 */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

/* SCSI midlayer host template shared by the hw-variant drivers and
 * exported through the hisi_sas_sht pointer below.  can_queue is 1
 * here; presumably adjusted at probe time — not visible in this chunk.
 */
static struct scsi_host_template _hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
1585
/* Dispatch table through which libsas calls into this LLDD */
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
};
1600
1601 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1602 {
1603         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1604
1605         for (i = 0; i < hisi_hba->queue_count; i++) {
1606                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1607                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1608
1609                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1610                 memset(hisi_hba->cmd_hdr[i], 0, s);
1611                 dq->wr_point = 0;
1612
1613                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1614                 memset(hisi_hba->complete_hdr[i], 0, s);
1615                 cq->rd_point = 0;
1616         }
1617
1618         s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1619         memset(hisi_hba->initial_fis, 0, s);
1620
1621         s = max_command_entries * sizeof(struct hisi_sas_iost);
1622         memset(hisi_hba->iost, 0, s);
1623
1624         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1625         memset(hisi_hba->breakpoint, 0, s);
1626
1627         s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1628         memset(hisi_hba->sata_breakpoint, 0, s);
1629 }
1630 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1631
1632 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1633 {
1634         struct device *dev = hisi_hba->dev;
1635         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1636
1637         spin_lock_init(&hisi_hba->lock);
1638         for (i = 0; i < hisi_hba->n_phy; i++) {
1639                 hisi_sas_phy_init(hisi_hba, i);
1640                 hisi_hba->port[i].port_attached = 0;
1641                 hisi_hba->port[i].id = -1;
1642         }
1643
1644         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1645                 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1646                 hisi_hba->devices[i].device_id = i;
1647                 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1648         }
1649
1650         for (i = 0; i < hisi_hba->queue_count; i++) {
1651                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1652                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1653
1654                 /* Completion queue structure */
1655                 cq->id = i;
1656                 cq->hisi_hba = hisi_hba;
1657
1658                 /* Delivery queue structure */
1659                 dq->id = i;
1660                 dq->hisi_hba = hisi_hba;
1661
1662                 /* Delivery queue */
1663                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1664                 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1665                                         &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1666                 if (!hisi_hba->cmd_hdr[i])
1667                         goto err_out;
1668
1669                 /* Completion queue */
1670                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1671                 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1672                                 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1673                 if (!hisi_hba->complete_hdr[i])
1674                         goto err_out;
1675         }
1676
1677         s = sizeof(struct hisi_sas_slot_buf_table);
1678         hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1679         if (!hisi_hba->buffer_pool)
1680                 goto err_out;
1681
1682         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1683         hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1684                                             GFP_KERNEL);
1685         if (!hisi_hba->itct)
1686                 goto err_out;
1687
1688         memset(hisi_hba->itct, 0, s);
1689
1690         hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1691                                            sizeof(struct hisi_sas_slot),
1692                                            GFP_KERNEL);
1693         if (!hisi_hba->slot_info)
1694                 goto err_out;
1695
1696         s = max_command_entries * sizeof(struct hisi_sas_iost);
1697         hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1698                                             GFP_KERNEL);
1699         if (!hisi_hba->iost)
1700                 goto err_out;
1701
1702         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1703         hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1704                                 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1705         if (!hisi_hba->breakpoint)
1706                 goto err_out;
1707
1708         hisi_hba->slot_index_count = max_command_entries;
1709         s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1710         hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1711         if (!hisi_hba->slot_index_tags)
1712                 goto err_out;
1713
1714         s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1715         hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1716                                 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1717         if (!hisi_hba->initial_fis)
1718                 goto err_out;
1719
1720         s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1721         hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1722                                 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1723         if (!hisi_hba->sata_breakpoint)
1724                 goto err_out;
1725         hisi_sas_init_mem(hisi_hba);
1726
1727         hisi_sas_slot_index_init(hisi_hba);
1728
1729         hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1730         if (!hisi_hba->wq) {
1731                 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1732                 goto err_out;
1733         }
1734
1735         return 0;
1736 err_out:
1737         return -ENOMEM;
1738 }
1739 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1740
1741 void hisi_sas_free(struct hisi_hba *hisi_hba)
1742 {
1743         struct device *dev = hisi_hba->dev;
1744         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1745
1746         for (i = 0; i < hisi_hba->queue_count; i++) {
1747                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1748                 if (hisi_hba->cmd_hdr[i])
1749                         dma_free_coherent(dev, s,
1750                                           hisi_hba->cmd_hdr[i],
1751                                           hisi_hba->cmd_hdr_dma[i]);
1752
1753                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1754                 if (hisi_hba->complete_hdr[i])
1755                         dma_free_coherent(dev, s,
1756                                           hisi_hba->complete_hdr[i],
1757                                           hisi_hba->complete_hdr_dma[i]);
1758         }
1759
1760         dma_pool_destroy(hisi_hba->buffer_pool);
1761
1762         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1763         if (hisi_hba->itct)
1764                 dma_free_coherent(dev, s,
1765                                   hisi_hba->itct, hisi_hba->itct_dma);
1766
1767         s = max_command_entries * sizeof(struct hisi_sas_iost);
1768         if (hisi_hba->iost)
1769                 dma_free_coherent(dev, s,
1770                                   hisi_hba->iost, hisi_hba->iost_dma);
1771
1772         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1773         if (hisi_hba->breakpoint)
1774                 dma_free_coherent(dev, s,
1775                                   hisi_hba->breakpoint,
1776                                   hisi_hba->breakpoint_dma);
1777
1778
1779         s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1780         if (hisi_hba->initial_fis)
1781                 dma_free_coherent(dev, s,
1782                                   hisi_hba->initial_fis,
1783                                   hisi_hba->initial_fis_dma);
1784
1785         s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1786         if (hisi_hba->sata_breakpoint)
1787                 dma_free_coherent(dev, s,
1788                                   hisi_hba->sata_breakpoint,
1789                                   hisi_hba->sata_breakpoint_dma);
1790
1791         if (hisi_hba->wq)
1792                 destroy_workqueue(hisi_hba->wq);
1793 }
1794 EXPORT_SYMBOL_GPL(hisi_sas_free);
1795
1796 static void hisi_sas_rst_work_handler(struct work_struct *work)
1797 {
1798         struct hisi_hba *hisi_hba =
1799                 container_of(work, struct hisi_hba, rst_work);
1800
1801         hisi_sas_controller_reset(hisi_hba);
1802 }
1803
/**
 * hisi_sas_get_fw_info - read controller configuration from firmware
 * @hisi_hba: controller to populate
 *
 * Reads the SAS address, phy count and queue count via the generic
 * device-property API (works for both DT and ACPI firmware).  The
 * syscon/reset-register properties are read only for platform devices
 * with a DT node; the reference clock is optional.
 *
 * Return: 0 on success, -ENOENT if a required property is missing.
 */
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	/* platform_dev is NULL for non-platform (e.g. PCI-based) controllers */
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	/* The reference clock is optional; absence is only a debug event */
	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
1871
/*
 * Allocate and initialize the Scsi_Host (and embedded hisi_hba) for a
 * platform-device controller: read firmware properties, set the DMA
 * mask, map registers and allocate driver memory.
 *
 * Returns the shost on success, NULL on failure (the shost reference is
 * dropped and any partial hisi_sas_alloc() state is freed).
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	/* hisi_hba lives in the shost private data */
	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	init_timer(&hisi_hba->timer);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	/* Prefer 64-bit DMA, fall back to 32-bit before giving up */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		/* hisi_sas_free() handles the partially-allocated state */
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
1921
1922 void hisi_sas_init_add(struct hisi_hba *hisi_hba)
1923 {
1924         int i;
1925
1926         for (i = 0; i < hisi_hba->n_phy; i++)
1927                 memcpy(&hisi_hba->phy[i].dev_sas_addr,
1928                        hisi_hba->sas_addr,
1929                        SAS_ADDR_SIZE);
1930 }
1931 EXPORT_SYMBOL_GPL(hisi_sas_init_add);
1932
1933 int hisi_sas_probe(struct platform_device *pdev,
1934                          const struct hisi_sas_hw *hw)
1935 {
1936         struct Scsi_Host *shost;
1937         struct hisi_hba *hisi_hba;
1938         struct device *dev = &pdev->dev;
1939         struct asd_sas_phy **arr_phy;
1940         struct asd_sas_port **arr_port;
1941         struct sas_ha_struct *sha;
1942         int rc, phy_nr, port_nr, i;
1943
1944         shost = hisi_sas_shost_alloc(pdev, hw);
1945         if (!shost)
1946                 return -ENOMEM;
1947
1948         sha = SHOST_TO_SAS_HA(shost);
1949         hisi_hba = shost_priv(shost);
1950         platform_set_drvdata(pdev, sha);
1951
1952         phy_nr = port_nr = hisi_hba->n_phy;
1953
1954         arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1955         arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
1956         if (!arr_phy || !arr_port) {
1957                 rc = -ENOMEM;
1958                 goto err_out_ha;
1959         }
1960
1961         sha->sas_phy = arr_phy;
1962         sha->sas_port = arr_port;
1963         sha->lldd_ha = hisi_hba;
1964
1965         shost->transportt = hisi_sas_stt;
1966         shost->max_id = HISI_SAS_MAX_DEVICES;
1967         shost->max_lun = ~0;
1968         shost->max_channel = 1;
1969         shost->max_cmd_len = 16;
1970         shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
1971         shost->can_queue = hisi_hba->hw->max_command_entries;
1972         shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
1973
1974         sha->sas_ha_name = DRV_NAME;
1975         sha->dev = hisi_hba->dev;
1976         sha->lldd_module = THIS_MODULE;
1977         sha->sas_addr = &hisi_hba->sas_addr[0];
1978         sha->num_phys = hisi_hba->n_phy;
1979         sha->core.shost = hisi_hba->shost;
1980
1981         for (i = 0; i < hisi_hba->n_phy; i++) {
1982                 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
1983                 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
1984         }
1985
1986         hisi_sas_init_add(hisi_hba);
1987
1988         rc = scsi_add_host(shost, &pdev->dev);
1989         if (rc)
1990                 goto err_out_ha;
1991
1992         rc = sas_register_ha(sha);
1993         if (rc)
1994                 goto err_out_register_ha;
1995
1996         rc = hisi_hba->hw->hw_init(hisi_hba);
1997         if (rc)
1998                 goto err_out_register_ha;
1999
2000         scsi_scan_host(shost);
2001
2002         return 0;
2003
2004 err_out_register_ha:
2005         scsi_remove_host(shost);
2006 err_out_ha:
2007         hisi_sas_free(hisi_hba);
2008         scsi_host_put(shost);
2009         return rc;
2010 }
2011 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2012
2013 int hisi_sas_remove(struct platform_device *pdev)
2014 {
2015         struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2016         struct hisi_hba *hisi_hba = sha->lldd_ha;
2017         struct Scsi_Host *shost = sha->core.shost;
2018
2019         sas_unregister_ha(sha);
2020         sas_remove_host(sha->core.shost);
2021
2022         hisi_sas_free(hisi_hba);
2023         scsi_host_put(shost);
2024         return 0;
2025 }
2026 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2027
2028 static __init int hisi_sas_init(void)
2029 {
2030         hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2031         if (!hisi_sas_stt)
2032                 return -ENOMEM;
2033
2034         return 0;
2035 }
2036
static __exit void hisi_sas_exit(void)
{
	/* Release the transport template attached in hisi_sas_init() */
	sas_release_transport(hisi_sas_stt);
}
2041
/* Module entry points and metadata */
module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);