1 // SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2012 - 2015 UNISYS CORPORATION. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/visorbus.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include "iochannel.h"
/* The Send and Receive Buffers of the IO Queue may both be full */

/* IO error count at which a disk's I/O threshold countdown is armed */
#define IOS_ERROR_THRESHOLD	1000
/* Size of the pending-request ring: twice the minimum signal count */
#define MAX_PENDING_REQUESTS	(MIN_NUMSIGNALS * 2)
/* Error count ceiling per visordisk before throttling kicks in */
#define VISORHBA_ERROR_COUNT	30
26 static struct dentry *visorhba_debugfs_dir;
28 /* GUIDS for HBA channel type supported by this driver */
29 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
30 /* Note that the only channel type we expect to be reported by the
31 * bus driver is the VISOR_VHBA channel.
33 { VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
34 VISOR_VHBA_CHANNEL_VERSIONID },
38 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
39 MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
41 struct visordisk_info {
42 struct scsi_device *sdev;
44 atomic_t ios_threshold;
46 struct visordisk_info *next;
50 struct uiscmdrsp cmdrsp;
51 /* The Data being tracked */
53 /* Type of pointer that is being stored */
57 /* Each scsi_host has a host_data area that contains this struct. */
58 struct visorhba_devdata {
59 struct Scsi_Host *scsihost;
60 struct visor_device *dev;
61 struct list_head dev_info_list;
62 /* Tracks the requests that have been forwarded to
63 * the IOVM and haven't returned yet
65 struct scsipending pending[MAX_PENDING_REQUESTS];
66 /* Start search for next pending free slot here */
67 unsigned int nextinsert;
68 /* lock to protect data in devdata */
71 bool serverchangingstate;
72 unsigned long long acquire_failed_cnt;
73 unsigned long long interrupts_rcvd;
74 unsigned long long interrupts_notme;
75 unsigned long long interrupts_disabled;
76 u64 __iomem *flags_addr;
77 atomic_t interrupt_rcvd;
78 wait_queue_head_t rsp_queue;
79 struct visordisk_info head;
80 unsigned int max_buff_len;
82 struct task_struct *thread;
86 * allows us to pass int handles back-and-forth between us and
87 * iovm, instead of raw pointers
91 struct dentry *debugfs_dir;
92 struct dentry *debugfs_info;
/* Wrapper tying an open device back to its devdata. */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
100 * visor_thread_start - Starts a thread for the device
101 * @threadfn: Function the thread starts
102 * @thrcontext: Context to pass to the thread, i.e. devdata
103 * @name: String describing name of thread
105 * Starts a thread for the device.
107 * Return: The task_struct * denoting the thread on success,
110 static struct task_struct *visor_thread_start(int (*threadfn)(void *),
111 void *thrcontext, char *name)
113 struct task_struct *task;
115 task = kthread_run(threadfn, thrcontext, "%s", name);
117 pr_err("visorbus failed to start thread\n");
/*
 * visor_thread_stop - Stops the thread if it is running
 * @task: Description of process to stop
 */
static void visor_thread_stop(struct task_struct *task)
{
	kthread_stop(task);
}
133 * add_scsipending_entry - Save off io command that is pending in
135 * @devdata: Pointer to devdata
136 * @cmdtype: Specifies the type of command pending
137 * @new: The command to be saved
139 * Saves off the io command that is being handled by the Service
140 * Partition so that it can be handled when it completes. If new is
141 * NULL it is assumed the entry refers only to the cmdrsp.
143 * Return: Insert_location where entry was added on success,
146 static int add_scsipending_entry(struct visorhba_devdata *devdata,
147 char cmdtype, void *new)
150 struct scsipending *entry;
153 spin_lock_irqsave(&devdata->privlock, flags);
154 insert_location = devdata->nextinsert;
155 while (devdata->pending[insert_location].sent) {
156 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
157 if (insert_location == (int)devdata->nextinsert) {
158 spin_unlock_irqrestore(&devdata->privlock, flags);
163 entry = &devdata->pending[insert_location];
164 memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
165 entry->cmdtype = cmdtype;
168 /* wants to send cmdrsp */
170 entry->sent = &entry->cmdrsp;
171 devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
172 spin_unlock_irqrestore(&devdata->privlock, flags);
174 return insert_location;
178 * del_scsipending_ent - Removes an entry from the pending array
179 * @devdata: Device holding the pending array
180 * @del: Entry to remove
182 * Removes the entry pointed at by del and returns it.
184 * Return: The scsipending entry pointed to on success, NULL on failure
186 static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
191 if (del >= MAX_PENDING_REQUESTS)
194 spin_lock_irqsave(&devdata->privlock, flags);
195 sent = devdata->pending[del].sent;
196 devdata->pending[del].cmdtype = 0;
197 devdata->pending[del].sent = NULL;
198 spin_unlock_irqrestore(&devdata->privlock, flags);
204 * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
205 * @ddata: Device holding the pending array
206 * @ent: Entry that stores the cmdrsp
208 * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
209 * if the "sent" field is not NULL.
211 * Return: A pointer to the cmdrsp, NULL on failure
213 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
216 if (ddata->pending[ent].sent)
217 return &ddata->pending[ent].cmdrsp;
223 * simple_idr_get - Associate a provided pointer with an int value
224 * 1 <= value <= INT_MAX, and return this int value;
225 * the pointer value can be obtained later by passing
226 * this int value to idr_find()
227 * @idrtable: The data object maintaining the pointer<-->int mappings
228 * @p: The pointer value to be remembered
229 * @lock: A spinlock used when exclusive access to idrtable is needed
231 * Return: The id number mapped to pointer 'p', 0 on failure
233 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
239 idr_preload(GFP_KERNEL);
240 spin_lock_irqsave(lock, flags);
241 id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
242 spin_unlock_irqrestore(lock, flags);
247 /* idr_alloc() guarantees > 0 */
248 return (unsigned int)(id);
252 * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
253 * completion processing logic for a taskmgmt
254 * cmd will be able to find who to wake up
255 * and where to stash the result
256 * @idrtable: The data object maintaining the pointer<-->int mappings
257 * @lock: A spinlock used when exclusive access to idrtable is needed
258 * @cmdrsp: Response from the IOVM
259 * @event: The event handle to associate with an id
260 * @result: The location to place the result of the event handle into
262 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
263 struct uiscmdrsp *cmdrsp,
264 wait_queue_head_t *event, int *result)
266 /* specify the event that has to be triggered when this */
267 /* cmd is complete */
268 cmdrsp->scsitaskmgmt.notify_handle =
269 simple_idr_get(idrtable, event, lock);
270 cmdrsp->scsitaskmgmt.notifyresult_handle =
271 simple_idr_get(idrtable, result, lock);
275 * cleanup_scsitaskmgmt_handles - Forget handles created by
276 * setup_scsitaskmgmt_handles()
277 * @idrtable: The data object maintaining the pointer<-->int mappings
278 * @cmdrsp: Response from the IOVM
280 static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
281 struct uiscmdrsp *cmdrsp)
283 if (cmdrsp->scsitaskmgmt.notify_handle)
284 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
285 if (cmdrsp->scsitaskmgmt.notifyresult_handle)
286 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
290 * forward_taskmgmt_command - Send taskmegmt command to the Service
292 * @tasktype: Type of taskmgmt command
293 * @scsidev: Scsidev that issued command
295 * Create a cmdrsp packet and send it to the Serivce Partition
296 * that will service this request.
298 * Return: Int representing whether command was queued successfully or not
300 static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
301 struct scsi_device *scsidev)
303 struct uiscmdrsp *cmdrsp;
304 struct visorhba_devdata *devdata =
305 (struct visorhba_devdata *)scsidev->host->hostdata;
306 int notifyresult = 0xffff;
307 wait_queue_head_t notifyevent;
310 if (devdata->serverdown || devdata->serverchangingstate)
313 scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
318 cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
320 init_waitqueue_head(¬ifyevent);
322 /* issue TASK_MGMT_ABORT_TASK */
323 cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
324 setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
325 ¬ifyevent, ¬ifyresult);
327 /* save destination */
328 cmdrsp->scsitaskmgmt.tasktype = tasktype;
329 cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
330 cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
331 cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
332 cmdrsp->scsitaskmgmt.handle = scsicmd_id;
334 dev_dbg(&scsidev->sdev_gendev,
335 "visorhba: initiating type=%d taskmgmt command\n", tasktype);
336 if (visorchannel_signalinsert(devdata->dev->visorchannel,
339 goto err_del_scsipending_ent;
341 /* It can take the Service Partition up to 35 seconds to complete
342 * an IO in some cases, so wait 45 seconds and error out
344 if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
345 msecs_to_jiffies(45000)))
346 goto err_del_scsipending_ent;
348 dev_dbg(&scsidev->sdev_gendev,
349 "visorhba: taskmgmt type=%d success; result=0x%x\n",
350 tasktype, notifyresult);
351 cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
354 err_del_scsipending_ent:
355 dev_dbg(&scsidev->sdev_gendev,
356 "visorhba: taskmgmt type=%d not executed\n", tasktype);
357 del_scsipending_ent(devdata, scsicmd_id);
358 cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
363 * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
364 * @scsicmd: The scsicmd that needs aborted
366 * Return: SUCCESS if inserted, FAILED otherwise
368 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
370 /* issue TASK_MGMT_ABORT_TASK */
371 struct scsi_device *scsidev;
372 struct visordisk_info *vdisk;
375 scsidev = scsicmd->device;
376 vdisk = scsidev->hostdata;
377 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
378 atomic_inc(&vdisk->error_count);
380 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
381 rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
382 if (rtn == SUCCESS) {
383 scsicmd->result = DID_ABORT << 16;
384 scsicmd->scsi_done(scsicmd);
390 * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
391 * @scsicmd: The scsicmd that needs aborted
393 * Return: SUCCESS if inserted, FAILED otherwise
395 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
397 /* issue TASK_MGMT_LUN_RESET */
398 struct scsi_device *scsidev;
399 struct visordisk_info *vdisk;
402 scsidev = scsicmd->device;
403 vdisk = scsidev->hostdata;
404 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
405 atomic_inc(&vdisk->error_count);
407 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
408 rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
409 if (rtn == SUCCESS) {
410 scsicmd->result = DID_RESET << 16;
411 scsicmd->scsi_done(scsicmd);
417 * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
419 * @scsicmd: The scsicmd that needs aborted
421 * Return: SUCCESS if inserted, FAILED otherwise
423 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
425 struct scsi_device *scsidev;
426 struct visordisk_info *vdisk;
429 scsidev = scsicmd->device;
430 shost_for_each_device(scsidev, scsidev->host) {
431 vdisk = scsidev->hostdata;
432 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
433 atomic_inc(&vdisk->error_count);
435 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
437 rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
438 if (rtn == SUCCESS) {
439 scsicmd->result = DID_RESET << 16;
440 scsicmd->scsi_done(scsicmd);
446 * visorhba_host_reset_handler - Not supported
447 * @scsicmd: The scsicmd that needs to be aborted
449 * Return: Not supported, return SUCCESS
451 static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
453 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
/*
 * visorhba_get_info - Get information about SCSI device
 * @shp: Scsi host that is requesting information
 *
 * Return: String with visorhba information
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
470 * dma_data_dir_linux_to_spar - convert dma_data_direction value to
471 * Unisys-specific equivalent
472 * @d: dma direction value to convert
474 * Returns the Unisys-specific dma direction value corresponding to @d
476 static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
479 case DMA_BIDIRECTIONAL:
480 return UIS_DMA_BIDIRECTIONAL;
482 return UIS_DMA_TO_DEVICE;
483 case DMA_FROM_DEVICE:
484 return UIS_DMA_FROM_DEVICE;
493 * visorhba_queue_command_lck - Queues command to the Service Partition
494 * @scsicmd: Command to be queued
495 * @vsiorhba_cmnd_done: Done command to call when scsicmd is returned
497 * Queues to scsicmd to the ServicePartition after converting it to a
498 * uiscmdrsp structure.
500 * Return: 0 if successfully queued to the Service Partition, otherwise
503 static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
504 void (*visorhba_cmnd_done)
505 (struct scsi_cmnd *))
507 struct uiscmdrsp *cmdrsp;
508 struct scsi_device *scsidev = scsicmd->device;
510 unsigned char *cdb = scsicmd->cmnd;
511 struct Scsi_Host *scsihost = scsidev->host;
513 struct visorhba_devdata *devdata =
514 (struct visorhba_devdata *)scsihost->hostdata;
515 struct scatterlist *sg = NULL;
516 struct scatterlist *sglist = NULL;
518 if (devdata->serverdown || devdata->serverchangingstate)
519 return SCSI_MLQUEUE_DEVICE_BUSY;
521 insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
523 if (insert_location < 0)
524 return SCSI_MLQUEUE_DEVICE_BUSY;
526 cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
527 cmdrsp->cmdtype = CMD_SCSI_TYPE;
528 /* save the pending insertion location. Deletion from pending
529 * will return the scsicmd pointer for completion
531 cmdrsp->scsi.handle = insert_location;
533 /* save done function that we have call when cmd is complete */
534 scsicmd->scsi_done = visorhba_cmnd_done;
535 /* save destination */
536 cmdrsp->scsi.vdest.channel = scsidev->channel;
537 cmdrsp->scsi.vdest.id = scsidev->id;
538 cmdrsp->scsi.vdest.lun = scsidev->lun;
540 cmdrsp->scsi.data_dir =
541 dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
542 memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
543 cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
545 /* keep track of the max buffer length so far. */
546 if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
547 devdata->max_buff_len = cmdrsp->scsi.bufflen;
549 if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
550 goto err_del_scsipending_ent;
552 /* convert buffer to phys information */
553 /* buffer is scatterlist - copy it out */
554 sglist = scsi_sglist(scsicmd);
556 for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
557 cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
558 cmdrsp->scsi.gpi_list[i].length = sg->length;
560 cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
562 if (visorchannel_signalinsert(devdata->dev->visorchannel,
565 /* queue must be full and we aren't going to wait */
566 goto err_del_scsipending_ent;
570 err_del_scsipending_ent:
571 del_scsipending_ent(devdata, insert_location);
572 return SCSI_MLQUEUE_DEVICE_BUSY;
/* Wrap the lck variant with the host-lock helper when available;
 * otherwise the unlocked function is used directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
582 * visorhba_slave_alloc - Called when new disk is discovered
585 * Create a new visordisk_info structure and add it to our
588 * Return: 0 on success, -ENOMEM on failure.
590 static int visorhba_slave_alloc(struct scsi_device *scsidev)
592 /* this is called by the midlayer before scan for new devices --
593 * LLD can alloc any struct & do init if needed.
595 struct visordisk_info *vdisk;
596 struct visorhba_devdata *devdata;
597 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
599 /* already allocated return success */
600 if (scsidev->hostdata)
603 /* even though we errored, treat as success */
604 devdata = (struct visorhba_devdata *)scsihost->hostdata;
608 vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
612 vdisk->sdev = scsidev;
613 scsidev->hostdata = vdisk;
618 * visorhba_slave_destroy - Disk is going away, clean up resources.
619 * @scsidev: Scsi device to destroy
621 static void visorhba_slave_destroy(struct scsi_device *scsidev)
623 /* midlevel calls this after device has been quiesced and
624 * before it is to be deleted.
626 struct visordisk_info *vdisk;
628 vdisk = scsidev->hostdata;
629 scsidev->hostdata = NULL;
633 static struct scsi_host_template visorhba_driver_template = {
634 .name = "Unisys Visor HBA",
635 .info = visorhba_get_info,
636 .queuecommand = visorhba_queue_command,
637 .eh_abort_handler = visorhba_abort_handler,
638 .eh_device_reset_handler = visorhba_device_reset_handler,
639 .eh_bus_reset_handler = visorhba_bus_reset_handler,
640 .eh_host_reset_handler = visorhba_host_reset_handler,
642 #define visorhba_MAX_CMNDS 128
643 .can_queue = visorhba_MAX_CMNDS,
646 .slave_alloc = visorhba_slave_alloc,
647 .slave_destroy = visorhba_slave_destroy,
648 .use_clustering = ENABLE_CLUSTERING,
652 * info_debugfs_show - Debugfs interface to dump visorhba states
653 * @seq: The sequence file to write information to
654 * @v: Unused, but needed for use with seq file single_open invocation
656 * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
660 static int info_debugfs_show(struct seq_file *seq, void *v)
662 struct visorhba_devdata *devdata = seq->private;
664 seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
665 seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
666 seq_printf(seq, "interrupts_disabled = %llu\n",
667 devdata->interrupts_disabled);
668 seq_printf(seq, "interrupts_notme = %llu\n",
669 devdata->interrupts_notme);
670 seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
671 if (devdata->flags_addr) {
672 u64 phys_flags_addr =
673 virt_to_phys((__force void *)devdata->flags_addr);
674 seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
676 seq_printf(seq, "FeatureFlags = %llu\n",
677 (u64)readq(devdata->flags_addr));
679 seq_printf(seq, "acquire_failed_cnt = %llu\n",
680 devdata->acquire_failed_cnt);
685 static int info_debugfs_open(struct inode *inode, struct file *file)
687 return single_open(file, info_debugfs_show, inode->i_private);
690 static const struct file_operations info_debugfs_fops = {
691 .owner = THIS_MODULE,
692 .open = info_debugfs_open,
695 .release = single_release,
699 * complete_taskmgmt_command - Complete task management
700 * @idrtable: The data object maintaining the pointer<-->int mappings
701 * @cmdrsp: Response from the IOVM
702 * @result: The result of the task management command
704 * Service Partition returned the result of the task management
705 * command. Wake up anyone waiting for it.
707 static void complete_taskmgmt_command(struct idr *idrtable,
708 struct uiscmdrsp *cmdrsp, int result)
710 wait_queue_head_t *wq =
711 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
712 int *scsi_result_ptr =
713 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
714 if (unlikely(!(wq && scsi_result_ptr))) {
715 pr_err("visorhba: no completion context; cmd will time out\n");
719 /* copy the result of the taskmgmt and
720 * wake up the error handler that is waiting for this
722 pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
723 *scsi_result_ptr = result;
728 * visorhba_serverdown_complete - Called when we are done cleaning up
730 * @devdata: Visorhba instance on which to complete serverdown
732 * Called when we are done cleanning up from serverdown, stop processing
733 * queue, fail pending IOs.
735 static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
738 struct scsipending *pendingdel = NULL;
739 struct scsi_cmnd *scsicmd = NULL;
740 struct uiscmdrsp *cmdrsp;
743 /* Stop using the IOVM response queue (queue should be drained
746 visor_thread_stop(devdata->thread);
748 /* Fail commands that weren't completed */
749 spin_lock_irqsave(&devdata->privlock, flags);
750 for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
751 pendingdel = &devdata->pending[i];
752 switch (pendingdel->cmdtype) {
754 scsicmd = pendingdel->sent;
755 scsicmd->result = DID_RESET << 16;
756 if (scsicmd->scsi_done)
757 scsicmd->scsi_done(scsicmd);
759 case CMD_SCSITASKMGMT_TYPE:
760 cmdrsp = pendingdel->sent;
761 complete_taskmgmt_command(&devdata->idr, cmdrsp,
767 pendingdel->cmdtype = 0;
768 pendingdel->sent = NULL;
770 spin_unlock_irqrestore(&devdata->privlock, flags);
772 devdata->serverdown = true;
773 devdata->serverchangingstate = false;
777 * visorhba_serverdown - Got notified that the IOVM is down
778 * @devdata: Visorhba that is being serviced by downed IOVM
780 * Something happened to the IOVM, return immediately and
781 * schedule cleanup work.
783 * Return: 0 on success, -EINVAL on failure
785 static int visorhba_serverdown(struct visorhba_devdata *devdata)
787 if (!devdata->serverdown && !devdata->serverchangingstate) {
788 devdata->serverchangingstate = true;
789 visorhba_serverdown_complete(devdata);
790 } else if (devdata->serverchangingstate) {
797 * do_scsi_linuxstat - Scsi command returned linuxstat
798 * @cmdrsp: Response from IOVM
799 * @scsicmd: Command issued
801 * Don't log errors for disk-not-present inquiries.
803 static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
804 struct scsi_cmnd *scsicmd)
806 struct visordisk_info *vdisk;
807 struct scsi_device *scsidev;
809 scsidev = scsicmd->device;
810 memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
812 /* Do not log errors for disk-not-present inquiries */
813 if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
814 (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
815 cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
817 /* Okay see what our error_count is here.... */
818 vdisk = scsidev->hostdata;
819 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
820 atomic_inc(&vdisk->error_count);
821 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
825 static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
828 if (len < NO_DISK_INQUIRY_RESULT_LEN)
830 memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
831 buf[2] = SCSI_SPC2_VER;
833 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
834 buf[3] = DEV_HISUPPORT;
836 buf[0] = DEV_NOT_CAPABLE;
838 buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
839 strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
844 * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
845 * @cmdrsp: Response from IOVM
846 * @scsicmd: Command issued
848 * Handle response when no linuxstat was returned.
850 static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
851 struct scsi_cmnd *scsicmd)
853 struct scsi_device *scsidev;
855 struct scatterlist *sg;
858 char *this_page_orig;
860 struct visordisk_info *vdisk;
862 scsidev = scsicmd->device;
863 if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
864 cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
865 if (cmdrsp->scsi.no_disk_result == 0)
868 buf = kzalloc(36, GFP_KERNEL);
872 /* Linux scsi code wants a device at Lun 0
873 * to issue report luns, but we don't want
874 * a disk there so we'll present a processor
877 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
880 if (scsi_sg_count(scsicmd) == 0) {
881 memcpy(scsi_sglist(scsicmd), buf,
882 cmdrsp->scsi.bufflen);
887 sg = scsi_sglist(scsicmd);
888 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
889 this_page_orig = kmap_atomic(sg_page(sg + i));
890 this_page = (void *)((unsigned long)this_page_orig |
892 memcpy(this_page, buf + bufind, sg[i].length);
893 kunmap_atomic(this_page_orig);
897 vdisk = scsidev->hostdata;
898 if (atomic_read(&vdisk->ios_threshold) > 0) {
899 atomic_dec(&vdisk->ios_threshold);
900 if (atomic_read(&vdisk->ios_threshold) == 0)
901 atomic_set(&vdisk->error_count, 0);
907 * complete_scsi_command - Complete a scsi command
908 * @uiscmdrsp: Response from Service Partition
909 * @scsicmd: The scsi command
911 * Response was returned by the Service Partition. Finish it and send
912 * completion to the scsi midlayer.
914 static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
915 struct scsi_cmnd *scsicmd)
917 /* take what we need out of cmdrsp and complete the scsicmd */
918 scsicmd->result = cmdrsp->scsi.linuxstat;
919 if (cmdrsp->scsi.linuxstat)
920 do_scsi_linuxstat(cmdrsp, scsicmd);
922 do_scsi_nolinuxstat(cmdrsp, scsicmd);
924 scsicmd->scsi_done(scsicmd);
928 * drain_queue - Pull responses out of iochannel
929 * @cmdrsp: Response from the IOSP
930 * @devdata: Device that owns this iochannel
932 * Pulls responses out of the iochannel and process the responses.
934 static void drain_queue(struct uiscmdrsp *cmdrsp,
935 struct visorhba_devdata *devdata)
937 struct scsi_cmnd *scsicmd;
941 if (visorchannel_signalremove(devdata->dev->visorchannel,
945 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
946 /* scsicmd location is returned by the
949 scsicmd = del_scsipending_ent(devdata,
950 cmdrsp->scsi.handle);
953 /* complete the orig cmd */
954 complete_scsi_command(cmdrsp, scsicmd);
955 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
956 if (!del_scsipending_ent(devdata,
957 cmdrsp->scsitaskmgmt.handle))
959 complete_taskmgmt_command(&devdata->idr, cmdrsp,
960 cmdrsp->scsitaskmgmt.result);
961 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
962 dev_err_once(&devdata->dev->device,
963 "ignoring unsupported NOTIFYGUEST\n");
964 /* cmdrsp is now available for re-use */
969 * process_incoming_rsps - Process responses from IOSP
970 * @v: Void pointer to visorhba_devdata
972 * Main function for the thread that processes the responses
973 * from the IO Service Partition. When the queue is empty, wait
974 * to check to see if it is full again.
976 * Return: 0 on success, -ENOMEM on failure
978 static int process_incoming_rsps(void *v)
980 struct visorhba_devdata *devdata = v;
981 struct uiscmdrsp *cmdrsp = NULL;
982 const int size = sizeof(*cmdrsp);
984 cmdrsp = kmalloc(size, GFP_ATOMIC);
989 if (kthread_should_stop())
991 wait_event_interruptible_timeout(
992 devdata->rsp_queue, (atomic_read(
993 &devdata->interrupt_rcvd) == 1),
994 msecs_to_jiffies(devdata->thread_wait_ms));
996 drain_queue(cmdrsp, devdata);
1003 * visorhba_pause - Function to handle visorbus pause messages
1004 * @dev: Device that is pausing
1005 * @complete_func: Function to call when finished
1007 * Something has happened to the IO Service Partition that is
1008 * handling this device. Quiet this device and reset commands
1009 * so that the Service Partition can be corrected.
1013 static int visorhba_pause(struct visor_device *dev,
1014 visorbus_state_complete_func complete_func)
1016 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1018 visorhba_serverdown(devdata);
1019 complete_func(dev, 0);
1024 * visorhba_resume - Function called when the IO Service Partition is back
1025 * @dev: Device that is pausing
1026 * @complete_func: Function to call when finished
1028 * Yay! The IO Service Partition is back, the channel has been wiped
1029 * so lets re-establish connection and start processing responses.
1031 * Return: 0 on success, -EINVAL on failure
1033 static int visorhba_resume(struct visor_device *dev,
1034 visorbus_state_complete_func complete_func)
1036 struct visorhba_devdata *devdata;
1038 devdata = dev_get_drvdata(&dev->device);
1042 if (devdata->serverdown && !devdata->serverchangingstate)
1043 devdata->serverchangingstate = true;
1045 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1047 devdata->serverdown = false;
1048 devdata->serverchangingstate = false;
1054 * visorhba_probe - Device has been discovered; do acquire
1055 * @dev: visor_device that was discovered
1057 * A new HBA was discovered; do the initial connections of it.
1059 * Return: 0 on success, otherwise error code
1061 static int visorhba_probe(struct visor_device *dev)
1063 struct Scsi_Host *scsihost;
1064 struct vhba_config_max max;
1065 struct visorhba_devdata *devdata = NULL;
1066 int err, channel_offset;
1069 scsihost = scsi_host_alloc(&visorhba_driver_template,
1074 channel_offset = offsetof(struct visor_io_channel, vhba.max);
1075 err = visorbus_read_channel(dev, channel_offset, &max,
1076 sizeof(struct vhba_config_max));
1078 goto err_scsi_host_put;
1080 scsihost->max_id = (unsigned int)max.max_id;
1081 scsihost->max_lun = (unsigned int)max.max_lun;
1082 scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
1083 scsihost->max_sectors =
1084 (unsigned short)(max.max_io_size >> 9);
1085 scsihost->sg_tablesize =
1086 (unsigned short)(max.max_io_size / PAGE_SIZE);
1087 if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1088 scsihost->sg_tablesize = MAX_PHYS_INFO;
1089 err = scsi_add_host(scsihost, &dev->device);
1091 goto err_scsi_host_put;
1093 devdata = (struct visorhba_devdata *)scsihost->hostdata;
1095 dev_set_drvdata(&dev->device, devdata);
1097 devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1098 visorhba_debugfs_dir);
1099 if (!devdata->debugfs_dir) {
1101 goto err_scsi_remove_host;
1103 devdata->debugfs_info =
1104 debugfs_create_file("info", 0440,
1105 devdata->debugfs_dir, devdata,
1106 &info_debugfs_fops);
1107 if (!devdata->debugfs_info) {
1109 goto err_debugfs_dir;
1112 init_waitqueue_head(&devdata->rsp_queue);
1113 spin_lock_init(&devdata->privlock);
1114 devdata->serverdown = false;
1115 devdata->serverchangingstate = false;
1116 devdata->scsihost = scsihost;
1118 channel_offset = offsetof(struct visor_io_channel,
1119 channel_header.features);
1120 err = visorbus_read_channel(dev, channel_offset, &features, 8);
1122 goto err_debugfs_info;
1123 features |= VISOR_CHANNEL_IS_POLLING;
1124 err = visorbus_write_channel(dev, channel_offset, &features, 8);
1126 goto err_debugfs_info;
1128 idr_init(&devdata->idr);
1130 devdata->thread_wait_ms = 2;
1131 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1134 scsi_scan_host(scsihost);
1139 debugfs_remove(devdata->debugfs_info);
1142 debugfs_remove_recursive(devdata->debugfs_dir);
1144 err_scsi_remove_host:
1145 scsi_remove_host(scsihost);
1148 scsi_host_put(scsihost);
1153 * visorhba_remove - Remove a visorhba device
1154 * @dev: Device to remove
1156 * Removes the visorhba device.
1158 static void visorhba_remove(struct visor_device *dev)
1160 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1161 struct Scsi_Host *scsihost = NULL;
1166 scsihost = devdata->scsihost;
1167 visor_thread_stop(devdata->thread);
1168 scsi_remove_host(scsihost);
1169 scsi_host_put(scsihost);
1171 idr_destroy(&devdata->idr);
1173 dev_set_drvdata(&dev->device, NULL);
1174 debugfs_remove(devdata->debugfs_info);
1175 debugfs_remove_recursive(devdata->debugfs_dir);
1178 /* This is used to tell the visorbus driver which types of visor devices
1179 * we support, and what functions to call when a visor device that we support
1180 * is attached or removed.
1182 static struct visor_driver visorhba_driver = {
1184 .owner = THIS_MODULE,
1185 .channel_types = visorhba_channel_types,
1186 .probe = visorhba_probe,
1187 .remove = visorhba_remove,
1188 .pause = visorhba_pause,
1189 .resume = visorhba_resume,
1190 .channel_interrupt = NULL,
1194 * visorhba_init - Driver init routine
1196 * Initialize the visorhba driver and register it with visorbus
1197 * to handle s-Par virtual host bus adapter.
1199 * Return: 0 on success, error code otherwise
1201 static int visorhba_init(void)
1205 visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1206 if (!visorhba_debugfs_dir)
1209 rc = visorbus_register_visor_driver(&visorhba_driver);
1211 goto cleanup_debugfs;
1216 debugfs_remove_recursive(visorhba_debugfs_dir);
1222 * visorhba_exit - Driver exit routine
1224 * Unregister driver from the bus and free up memory.
1226 static void visorhba_exit(void)
1228 visorbus_unregister_visor_driver(&visorhba_driver);
1229 debugfs_remove_recursive(visorhba_debugfs_dir);
1232 module_init(visorhba_init);
1233 module_exit(visorhba_exit);
1235 MODULE_AUTHOR("Unisys");
1236 MODULE_LICENSE("GPL");
1237 MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");