 * Aic94xx SAS/SATA Tasks
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 #include <linux/spinlock.h>
29 #include "aic94xx_sas.h"
30 #include "aic94xx_hwi.h"
32 static void asd_unbuild_ata_ascb(struct asd_ascb *a);
33 static void asd_unbuild_smp_ascb(struct asd_ascb *a);
34 static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
36 static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
40 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
41 asd_ha->seq.can_queue += num;
42 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
45 /* PCI_DMA_... to our direction translation.
47 static const u8 data_dir_flags[] = {
48 [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
49 [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */
50 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */
51 [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
54 static int asd_map_scatterlist(struct sas_task *task,
58 struct asd_ascb *ascb = task->lldd_task;
59 struct asd_ha_struct *asd_ha = ascb->ha;
60 struct scatterlist *sc;
63 if (task->data_dir == PCI_DMA_NONE)
66 if (task->num_scatter == 0) {
67 void *p = task->scatter;
68 dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
71 if (dma_mapping_error(&asd_ha->pcidev->dev, dma))
74 sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
75 sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
76 sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
80 /* STP tasks come from libata which has already mapped
82 if (sas_protocol_ata(task->task_proto))
83 num_sg = task->num_scatter;
85 num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
86 task->num_scatter, task->data_dir);
93 ascb->sg_arr = asd_alloc_coherent(asd_ha,
94 num_sg*sizeof(struct sg_el),
100 for_each_sg(task->scatter, sc, num_sg, i) {
102 &((struct sg_el *)ascb->sg_arr->vaddr)[i];
103 sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
104 sg->size = cpu_to_le32((u32)sg_dma_len(sc));
106 sg->flags |= ASD_SG_EL_LIST_EOL;
109 for_each_sg(task->scatter, sc, 2, i) {
111 cpu_to_le64((u64)sg_dma_address(sc));
112 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
114 sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
115 sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;
117 memset(&sg_arr[2], 0, sizeof(*sg_arr));
118 sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
121 for_each_sg(task->scatter, sc, num_sg, i) {
123 cpu_to_le64((u64)sg_dma_address(sc));
124 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
126 sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
131 if (sas_protocol_ata(task->task_proto))
132 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
137 static void asd_unmap_scatterlist(struct asd_ascb *ascb)
139 struct asd_ha_struct *asd_ha = ascb->ha;
140 struct sas_task *task = ascb->uldd_task;
142 if (task->data_dir == PCI_DMA_NONE)
145 if (task->num_scatter == 0) {
146 dma_addr_t dma = (dma_addr_t)
147 le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
148 pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
153 asd_free_coherent(asd_ha, ascb->sg_arr);
154 if (task->task_proto != SAS_PROTOCOL_STP)
155 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
/* ---------- Task complete tasklet ---------- */
161 static void asd_get_response_tasklet(struct asd_ascb *ascb,
162 struct done_list_struct *dl)
164 struct asd_ha_struct *asd_ha = ascb->ha;
165 struct sas_task *task = ascb->uldd_task;
166 struct task_status_struct *ts = &task->task_status;
168 struct tc_resp_sb_struct {
172 } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
174 /* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
175 int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
176 struct asd_ascb *escb;
177 struct asd_dma_tok *edb;
180 spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
181 escb = asd_tc_index_find(&asd_ha->seq,
182 (int)le16_to_cpu(resp_sb->index_escb));
183 spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
186 ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
190 ts->buf_valid_size = 0;
191 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
193 if (task->task_proto == SAS_PROTOCOL_SSP) {
194 struct ssp_response_iu *iu =
195 r + 16 + sizeof(struct ssp_frame_hdr);
197 ts->residual = le32_to_cpu(*(__le32 *)r);
199 sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
201 struct ata_task_resp *resp = (void *) &ts->buf[0];
203 ts->residual = le32_to_cpu(*(__le32 *)r);
205 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
206 resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
207 memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
208 ts->buf_valid_size = sizeof(*resp);
212 asd_invalidate_edb(escb, edb_id);
215 static void asd_task_tasklet_complete(struct asd_ascb *ascb,
216 struct done_list_struct *dl)
218 struct sas_task *task = ascb->uldd_task;
219 struct task_status_struct *ts = &task->task_status;
221 u8 opcode = dl->opcode;
223 asd_can_dequeue(ascb->ha, 1);
228 ts->resp = SAS_TASK_COMPLETE;
229 ts->stat = SAM_STAT_GOOD;
232 ts->resp = SAS_TASK_COMPLETE;
233 ts->stat = SAS_DATA_UNDERRUN;
234 ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
237 ts->resp = SAS_TASK_COMPLETE;
238 ts->stat = SAS_DATA_OVERRUN;
243 ts->resp = SAS_TASK_COMPLETE;
244 ts->stat = SAS_PROTO_RESPONSE;
245 asd_get_response_tasklet(ascb, dl);
248 ts->resp = SAS_TASK_UNDELIVERED;
249 ts->stat = SAS_OPEN_REJECT;
250 if (dl->status_block[1] & 2)
251 ts->open_rej_reason = 1 + dl->status_block[2];
252 else if (dl->status_block[1] & 1)
253 ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
255 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
258 ts->resp = SAS_TASK_UNDELIVERED;
259 ts->stat = SAS_OPEN_TO;
263 ts->resp = SAS_TASK_UNDELIVERED;
264 ts->stat = SAS_PHY_DOWN;
267 ts->resp = SAS_TASK_COMPLETE;
268 ts->stat = SAS_PHY_DOWN;
274 case TF_SMP_XMIT_RCV_ERR:
275 case TC_ATA_R_ERR_RECV:
276 ts->resp = SAS_TASK_COMPLETE;
277 ts->stat = SAS_INTERRUPTED;
283 ts->resp = SAS_TASK_UNDELIVERED;
284 ts->stat = SAS_DEV_NO_RESPONSE;
287 ts->resp = SAS_TASK_COMPLETE;
288 ts->stat = SAS_NAK_R_ERR;
290 case TA_I_T_NEXUS_LOSS:
291 opcode = dl->status_block[0];
294 case TF_INV_CONN_HANDLE:
295 ts->resp = SAS_TASK_UNDELIVERED;
296 ts->stat = SAS_DEVICE_UNKNOWN;
298 case TF_REQUESTED_N_PENDING:
299 ts->resp = SAS_TASK_UNDELIVERED;
300 ts->stat = SAS_PENDING;
302 case TC_TASK_CLEARED:
304 ts->resp = SAS_TASK_COMPLETE;
305 ts->stat = SAS_ABORTED_TASK;
311 case TF_TMF_TAG_FREE:
312 case TF_TMF_TASK_DONE:
313 case TF_TMF_NO_CONN_HANDLE:
316 case TF_DATA_OFFS_ERR:
317 ts->resp = SAS_TASK_UNDELIVERED;
318 ts->stat = SAS_DEV_NO_RESPONSE;
321 case TC_LINK_ADM_RESP:
324 case TC_PARTIAL_SG_LIST:
326 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
330 switch (task->task_proto) {
331 case SAS_PROTOCOL_SATA:
332 case SAS_PROTOCOL_STP:
333 asd_unbuild_ata_ascb(ascb);
335 case SAS_PROTOCOL_SMP:
336 asd_unbuild_smp_ascb(ascb);
338 case SAS_PROTOCOL_SSP:
339 asd_unbuild_ssp_ascb(ascb);
344 spin_lock_irqsave(&task->task_state_lock, flags);
345 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
346 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
347 task->task_state_flags |= SAS_TASK_STATE_DONE;
348 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
349 struct completion *completion = ascb->completion;
350 spin_unlock_irqrestore(&task->task_state_lock, flags);
351 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
352 "stat 0x%x but aborted by upper layer!\n",
353 task, opcode, ts->resp, ts->stat);
355 complete(completion);
357 spin_unlock_irqrestore(&task->task_state_lock, flags);
358 task->lldd_task = NULL;
361 task->task_done(task);
/* ---------- ATA ---------- */
367 static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
370 struct domain_device *dev = task->dev;
377 if (unlikely(task->ata_task.device_control_reg_update))
378 scb->header.opcode = CONTROL_ATA_DEV;
379 else if (dev->sata_dev.class == ATA_DEV_ATAPI)
380 scb->header.opcode = INITIATE_ATAPI_TASK;
382 scb->header.opcode = INITIATE_ATA_TASK;
384 scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
385 if (dev->port->oob_mode == SAS_OOB_MODE)
386 scb->ata_task.proto_conn_rate |= dev->linkrate;
388 scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
389 scb->ata_task.fis = task->ata_task.fis;
390 if (likely(!task->ata_task.device_control_reg_update))
391 scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
392 scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
393 if (dev->sata_dev.class == ATA_DEV_ATAPI)
394 memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
396 scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
397 scb->ata_task.conn_handle = cpu_to_le16(
398 (u16)(unsigned long)dev->lldd_dev);
400 if (likely(!task->ata_task.device_control_reg_update)) {
402 if (task->ata_task.dma_xfer)
403 flags |= DATA_XFER_MODE_DMA;
404 if (task->ata_task.use_ncq &&
405 dev->sata_dev.class != ATA_DEV_ATAPI)
406 flags |= ATA_Q_TYPE_NCQ;
407 flags |= data_dir_flags[task->data_dir];
408 scb->ata_task.ata_flags = flags;
410 scb->ata_task.retry_count = task->ata_task.retry_count;
413 if (task->ata_task.set_affil_pol)
414 flags |= SET_AFFIL_POLICY;
415 if (task->ata_task.stp_affil_pol)
416 flags |= STP_AFFIL_POLICY;
417 scb->ata_task.flags = flags;
419 ascb->tasklet_complete = asd_task_tasklet_complete;
421 if (likely(!task->ata_task.device_control_reg_update))
422 res = asd_map_scatterlist(task, scb->ata_task.sg_element,
/* Tear down an ATA aSCB: just release the DMA mappings. */
static void asd_unbuild_ata_ascb(struct asd_ascb *a)
{
	asd_unmap_scatterlist(a);
}
/* ---------- SMP ---------- */
435 static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
438 struct asd_ha_struct *asd_ha = ascb->ha;
439 struct domain_device *dev = task->dev;
442 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
444 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
449 scb->header.opcode = INITIATE_SMP_TASK;
451 scb->smp_task.proto_conn_rate = dev->linkrate;
453 scb->smp_task.smp_req.bus_addr =
454 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
455 scb->smp_task.smp_req.size =
456 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
458 scb->smp_task.smp_resp.bus_addr =
459 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
460 scb->smp_task.smp_resp.size =
461 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
463 scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
464 scb->smp_task.conn_handle = cpu_to_le16((u16)
465 (unsigned long)dev->lldd_dev);
467 ascb->tasklet_complete = asd_task_tasklet_complete;
472 static void asd_unbuild_smp_ascb(struct asd_ascb *a)
474 struct sas_task *task = a->uldd_task;
477 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
479 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
/* ---------- SSP ---------- */
485 static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
488 struct domain_device *dev = task->dev;
494 scb->header.opcode = INITIATE_SSP_TASK;
496 scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */
497 scb->ssp_task.proto_conn_rate |= dev->linkrate;
498 scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
499 scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
500 memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
501 HASHED_SAS_ADDR_SIZE);
502 memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
503 dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
504 scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
506 memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
507 if (task->ssp_task.enable_first_burst)
508 scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
509 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
510 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
511 memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
512 task->ssp_task.cmd->cmd_len);
514 scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
515 scb->ssp_task.conn_handle = cpu_to_le16(
516 (u16)(unsigned long)dev->lldd_dev);
517 scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
518 scb->ssp_task.retry_count = scb->ssp_task.retry_count;
520 ascb->tasklet_complete = asd_task_tasklet_complete;
522 res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);
/* Tear down an SSP aSCB: just release the DMA mappings. */
static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
{
	asd_unmap_scatterlist(a);
}
/* ---------- Execute Task ---------- */
534 static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
539 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
540 if ((asd_ha->seq.can_queue - num) < 0)
541 res = -SAS_QUEUE_FULL;
543 asd_ha->seq.can_queue -= num;
544 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
549 int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
553 struct sas_task *t = task;
554 struct asd_ascb *ascb = NULL, *a;
555 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
558 res = asd_can_queue(asd_ha, 1);
563 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
569 __list_add(&alist, ascb->list.prev, &ascb->list);
570 list_for_each_entry(a, &alist, list) {
575 list_for_each_entry(a, &alist, list) {
578 if (t->task_proto & SAS_PROTOCOL_STP)
579 t->task_proto = SAS_PROTOCOL_STP;
580 switch (t->task_proto) {
581 case SAS_PROTOCOL_SATA:
582 case SAS_PROTOCOL_STP:
583 res = asd_build_ata_ascb(a, t, gfp_flags);
585 case SAS_PROTOCOL_SMP:
586 res = asd_build_smp_ascb(a, t, gfp_flags);
588 case SAS_PROTOCOL_SSP:
589 res = asd_build_ssp_ascb(a, t, gfp_flags);
592 asd_printk("unknown sas_task proto: 0x%x\n",
600 spin_lock_irqsave(&t->task_state_lock, flags);
601 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
602 spin_unlock_irqrestore(&t->task_state_lock, flags);
604 list_del_init(&alist);
606 res = asd_post_ascb_list(asd_ha, ascb, 1);
609 __list_add(&alist, ascb->list.prev, &ascb->list);
616 struct asd_ascb *b = a;
617 list_for_each_entry(a, &alist, list) {
621 spin_lock_irqsave(&t->task_state_lock, flags);
622 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
623 spin_unlock_irqrestore(&t->task_state_lock, flags);
624 switch (t->task_proto) {
625 case SAS_PROTOCOL_SATA:
626 case SAS_PROTOCOL_STP:
627 asd_unbuild_ata_ascb(a);
629 case SAS_PROTOCOL_SMP:
630 asd_unbuild_smp_ascb(a);
632 case SAS_PROTOCOL_SSP:
633 asd_unbuild_ssp_ascb(a);
640 list_del_init(&alist);
643 asd_ascb_free_list(ascb);
644 asd_can_dequeue(asd_ha, 1);