GNU Linux-libre 4.19.286-gnu1
[releases.git] / drivers / staging / unisys / visorhba / visorhba_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (c) 2012 - 2015 UNISYS CORPORATION
4  * All rights reserved.
5  */
6
7 #include <linux/debugfs.h>
8 #include <linux/kthread.h>
9 #include <linux/idr.h>
10 #include <linux/module.h>
11 #include <linux/seq_file.h>
12 #include <linux/visorbus.h>
13 #include <scsi/scsi.h>
14 #include <scsi/scsi_host.h>
15 #include <scsi/scsi_cmnd.h>
16 #include <scsi/scsi_device.h>
17
18 #include "iochannel.h"
19
/* The Send and Receive Buffers of the IO Queue may both be full */

#define IOS_ERROR_THRESHOLD  1000
/* Pending ring must be able to hold a full queue in each direction. */
#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
#define VISORHBA_ERROR_COUNT 30

/* Parent directory for this driver's debugfs entries. */
static struct dentry *visorhba_debugfs_dir;
27
/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
        /* Note that the only channel type we expect to be reported by the
         * bus driver is the VISOR_VHBA channel.
         */
        { VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
          VISOR_VHBA_CHANNEL_VERSIONID },
        /* zeroed sentinel entry terminates the table */
        {}
};

/* Export the channel table so udev/modprobe can autoload this driver. */
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
40
/* Per-disk bookkeeping, stored in scsi_device->hostdata by slave_alloc. */
struct visordisk_info {
        struct scsi_device *sdev;
        u32 valid;
        /* set to IOS_ERROR_THRESHOLD once error_count saturates */
        atomic_t ios_threshold;
        /* error tally; incremented until it reaches VISORHBA_ERROR_COUNT */
        atomic_t error_count;
        /* NOTE(review): list linkage; not referenced in this chunk */
        struct visordisk_info *next;
};
48
/* One slot in the ring of requests outstanding at the Service Partition. */
struct scsipending {
        /* request/response packet exchanged with the IOVM */
        struct uiscmdrsp cmdrsp;
        /* The Data being tracked; NULL means the slot is free */
        void *sent;
        /* Type of pointer that is being stored (CMD_*_TYPE) */
        char cmdtype;
};
56
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
        struct Scsi_Host *scsihost;
        struct visor_device *dev;
        struct list_head dev_info_list;
        /* Tracks the requests that have been forwarded to
         * the IOVM and haven't returned yet
         */
        struct scsipending pending[MAX_PENDING_REQUESTS];
        /* Start search for next pending free slot here */
        unsigned int nextinsert;
        /* lock to protect data in devdata */
        spinlock_t privlock;
        /* true once the IOVM went down and pending IO has been failed */
        bool serverdown;
        /* true while a serverdown transition is in progress */
        bool serverchangingstate;
        /* statistics exposed through the debugfs "info" file */
        unsigned long long acquire_failed_cnt;
        unsigned long long interrupts_rcvd;
        unsigned long long interrupts_notme;
        unsigned long long interrupts_disabled;
        u64 __iomem *flags_addr;
        atomic_t interrupt_rcvd;
        wait_queue_head_t rsp_queue;
        struct visordisk_info head;
        /* largest scsi buffer length seen so far (debugfs statistic) */
        unsigned int max_buff_len;
        int devnum;
        /* worker stopped by visorhba_serverdown_complete() */
        struct task_struct *thread;
        int thread_wait_ms;

        /*
         * allows us to pass int handles back-and-forth between us and
         * iovm, instead of raw pointers
         */
        struct idr idr;

        struct dentry *debugfs_dir;
        struct dentry *debugfs_info;
};
94
/* NOTE(review): not referenced anywhere in this chunk -- confirm it is
 * still needed before relying on it.
 */
struct visorhba_devices_open {
        struct visorhba_devdata *devdata;
};
98
99 /*
100  * visor_thread_start - Starts a thread for the device
101  * @threadfn:   Function the thread starts
102  * @thrcontext: Context to pass to the thread, i.e. devdata
103  * @name:       String describing name of thread
104  *
105  * Starts a thread for the device.
106  *
107  * Return: The task_struct * denoting the thread on success,
108  *         or NULL on failure
109  */
110 static struct task_struct *visor_thread_start(int (*threadfn)(void *),
111                                               void *thrcontext, char *name)
112 {
113         struct task_struct *task;
114
115         task = kthread_run(threadfn, thrcontext, "%s", name);
116         if (IS_ERR(task)) {
117                 pr_err("visorbus failed to start thread\n");
118                 return NULL;
119         }
120         return task;
121 }
122
123 /*
124  * visor_thread_stop - Stops the thread if it is running
125  * @task: Description of process to stop
126  */
static void visor_thread_stop(struct task_struct *task)
{
	/* blocks until the thread function has returned */
	kthread_stop(task);
}
131
132 /*
133  * add_scsipending_entry - Save off io command that is pending in
134  *                         Service Partition
135  * @devdata: Pointer to devdata
136  * @cmdtype: Specifies the type of command pending
137  * @new:     The command to be saved
138  *
139  * Saves off the io command that is being handled by the Service
140  * Partition so that it can be handled when it completes. If new is
141  * NULL it is assumed the entry refers only to the cmdrsp.
142  *
143  * Return: Insert_location where entry was added on success,
144  *         -EBUSY if it can't
145  */
146 static int add_scsipending_entry(struct visorhba_devdata *devdata,
147                                  char cmdtype, void *new)
148 {
149         unsigned long flags;
150         struct scsipending *entry;
151         int insert_location;
152
153         spin_lock_irqsave(&devdata->privlock, flags);
154         insert_location = devdata->nextinsert;
155         while (devdata->pending[insert_location].sent) {
156                 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
157                 if (insert_location == (int)devdata->nextinsert) {
158                         spin_unlock_irqrestore(&devdata->privlock, flags);
159                         return -EBUSY;
160                 }
161         }
162
163         entry = &devdata->pending[insert_location];
164         memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
165         entry->cmdtype = cmdtype;
166         if (new)
167                 entry->sent = new;
168         /* wants to send cmdrsp */
169         else
170                 entry->sent = &entry->cmdrsp;
171         devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
172         spin_unlock_irqrestore(&devdata->privlock, flags);
173
174         return insert_location;
175 }
176
177 /*
178  * del_scsipending_ent - Removes an entry from the pending array
179  * @devdata: Device holding the pending array
180  * @del:     Entry to remove
181  *
182  * Removes the entry pointed at by del and returns it.
183  *
184  * Return: The scsipending entry pointed to on success, NULL on failure
185  */
186 static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
187 {
188         unsigned long flags;
189         void *sent;
190
191         if (del >= MAX_PENDING_REQUESTS)
192                 return NULL;
193
194         spin_lock_irqsave(&devdata->privlock, flags);
195         sent = devdata->pending[del].sent;
196         devdata->pending[del].cmdtype = 0;
197         devdata->pending[del].sent = NULL;
198         spin_unlock_irqrestore(&devdata->privlock, flags);
199
200         return sent;
201 }
202
203 /*
204  * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
205  * @ddata: Device holding the pending array
206  * @ent:   Entry that stores the cmdrsp
207  *
208  * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
209  * if the "sent" field is not NULL.
210  *
211  * Return: A pointer to the cmdrsp, NULL on failure
212  */
213 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
214                                                 int ent)
215 {
216         if (ddata->pending[ent].sent)
217                 return &ddata->pending[ent].cmdrsp;
218
219         return NULL;
220 }
221
222 /*
223  * simple_idr_get - Associate a provided pointer with an int value
224  *                  1 <= value <= INT_MAX, and return this int value;
225  *                  the pointer value can be obtained later by passing
226  *                  this int value to idr_find()
227  * @idrtable: The data object maintaining the pointer<-->int mappings
228  * @p:        The pointer value to be remembered
229  * @lock:     A spinlock used when exclusive access to idrtable is needed
230  *
231  * Return: The id number mapped to pointer 'p', 0 on failure
232  */
233 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
234                                    spinlock_t *lock)
235 {
236         int id;
237         unsigned long flags;
238
239         idr_preload(GFP_KERNEL);
240         spin_lock_irqsave(lock, flags);
241         id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
242         spin_unlock_irqrestore(lock, flags);
243         idr_preload_end();
244         /* failure */
245         if (id < 0)
246                 return 0;
247         /* idr_alloc() guarantees > 0 */
248         return (unsigned int)(id);
249 }
250
251 /*
252  * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
253  *                              completion processing logic for a taskmgmt
254  *                              cmd will be able to find who to wake up
255  *                              and where to stash the result
256  * @idrtable: The data object maintaining the pointer<-->int mappings
257  * @lock:     A spinlock used when exclusive access to idrtable is needed
258  * @cmdrsp:   Response from the IOVM
259  * @event:    The event handle to associate with an id
260  * @result:   The location to place the result of the event handle into
261  */
262 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
263                                        struct uiscmdrsp *cmdrsp,
264                                        wait_queue_head_t *event, int *result)
265 {
266         /* specify the event that has to be triggered when this */
267         /* cmd is complete */
268         cmdrsp->scsitaskmgmt.notify_handle =
269                 simple_idr_get(idrtable, event, lock);
270         cmdrsp->scsitaskmgmt.notifyresult_handle =
271                 simple_idr_get(idrtable, result, lock);
272 }
273
274 /*
275  * cleanup_scsitaskmgmt_handles - Forget handles created by
276  *                                setup_scsitaskmgmt_handles()
277  * @idrtable: The data object maintaining the pointer<-->int mappings
278  * @cmdrsp:   Response from the IOVM
279  */
280 static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
281                                          struct uiscmdrsp *cmdrsp)
282 {
283         if (cmdrsp->scsitaskmgmt.notify_handle)
284                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
285         if (cmdrsp->scsitaskmgmt.notifyresult_handle)
286                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
287 }
288
289 /*
290  * forward_taskmgmt_command - Send taskmegmt command to the Service
291  *                            Partition
292  * @tasktype: Type of taskmgmt command
293  * @scsidev:  Scsidev that issued command
294  *
295  * Create a cmdrsp packet and send it to the Serivce Partition
296  * that will service this request.
297  *
298  * Return: Int representing whether command was queued successfully or not
299  */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_device *scsidev)
{
	struct uiscmdrsp *cmdrsp;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	int notifyresult = 0xffff;
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	/* refuse while the Service Partition is down or transitioning */
	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* reserve a pending slot; its embedded cmdrsp carries the request */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* register int handles for the on-stack wait queue and result so the
	 * completion path can find them without raw pointers crossing to the
	 * IOVM
	 */
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		 tasktype, notifyresult);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	/* drop the pending slot and idr handles on insert failure/timeout */
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
361
362 /*
363  * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
364  * @scsicmd: The scsicmd that needs aborted
365  *
366  * Return: SUCCESS if inserted, FAILED otherwise
367  */
368 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
369 {
370         /* issue TASK_MGMT_ABORT_TASK */
371         struct scsi_device *scsidev;
372         struct visordisk_info *vdisk;
373         int rtn;
374
375         scsidev = scsicmd->device;
376         vdisk = scsidev->hostdata;
377         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
378                 atomic_inc(&vdisk->error_count);
379         else
380                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
381         rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
382         if (rtn == SUCCESS) {
383                 scsicmd->result = DID_ABORT << 16;
384                 scsicmd->scsi_done(scsicmd);
385         }
386         return rtn;
387 }
388
389 /*
390  * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
391  * @scsicmd: The scsicmd that needs aborted
392  *
393  * Return: SUCCESS if inserted, FAILED otherwise
394  */
395 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
396 {
397         /* issue TASK_MGMT_LUN_RESET */
398         struct scsi_device *scsidev;
399         struct visordisk_info *vdisk;
400         int rtn;
401
402         scsidev = scsicmd->device;
403         vdisk = scsidev->hostdata;
404         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
405                 atomic_inc(&vdisk->error_count);
406         else
407                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
408         rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
409         if (rtn == SUCCESS) {
410                 scsicmd->result = DID_RESET << 16;
411                 scsicmd->scsi_done(scsicmd);
412         }
413         return rtn;
414 }
415
416 /*
417  * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
418  *                              target on the bus
419  * @scsicmd: The scsicmd that needs aborted
420  *
421  * Return: SUCCESS if inserted, FAILED otherwise
422  */
423 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
424 {
425         struct scsi_device *scsidev;
426         struct visordisk_info *vdisk;
427         int rtn;
428
429         scsidev = scsicmd->device;
430         shost_for_each_device(scsidev, scsidev->host) {
431                 vdisk = scsidev->hostdata;
432                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
433                         atomic_inc(&vdisk->error_count);
434                 else
435                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
436         }
437         rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
438         if (rtn == SUCCESS) {
439                 scsicmd->result = DID_RESET << 16;
440                 scsicmd->scsi_done(scsicmd);
441         }
442         return rtn;
443 }
444
445 /*
446  * visorhba_host_reset_handler - Not supported
447  * @scsicmd: The scsicmd that needs to be aborted
448  *
449  * Return: Not supported, return SUCCESS
450  */
static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	/* not implemented; report success so the midlayer proceeds */
	return SUCCESS;
}
456
457 /*
458  * visorhba_get_info - Get information about SCSI device
459  * @shp: Scsi host that is requesting information
460  *
461  * Return: String with visorhba information
462  */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
468
469 /*
470  * dma_data_dir_linux_to_spar - convert dma_data_direction value to
471  *                              Unisys-specific equivalent
472  * @d: dma direction value to convert
473  *
474  * Returns the Unisys-specific dma direction value corresponding to @d
475  */
476 static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
477 {
478         switch (d) {
479         case DMA_BIDIRECTIONAL:
480                 return UIS_DMA_BIDIRECTIONAL;
481         case DMA_TO_DEVICE:
482                 return UIS_DMA_TO_DEVICE;
483         case DMA_FROM_DEVICE:
484                 return UIS_DMA_FROM_DEVICE;
485         case DMA_NONE:
486                 return UIS_DMA_NONE;
487         default:
488                 return UIS_DMA_NONE;
489         }
490 }
491
492 /*
493  * visorhba_queue_command_lck - Queues command to the Service Partition
494  * @scsicmd:            Command to be queued
495  * @vsiorhba_cmnd_done: Done command to call when scsicmd is returned
496  *
497  * Queues to scsicmd to the ServicePartition after converting it to a
498  * uiscmdrsp structure.
499  *
500  * Return: 0 if successfully queued to the Service Partition, otherwise
501  *         error code
502  */
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
					   (struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* ask the midlayer to retry later while the IOVM is unavailable */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	/* reserve a pending slot tracking this scsicmd until completion */
	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);
	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir =
		dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* more scatter-gather entries than the channel packet can carry */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	/* release the pending slot; the midlayer will retry the command */
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
574
/* Wrap the _lck variant with the midlayer's host-lock helper when
 * DEF_SCSI_QCMD is available; otherwise use the _lck function directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
580
581 /*
582  * visorhba_slave_alloc - Called when new disk is discovered
583  * @scsidev: New disk
584  *
585  * Create a new visordisk_info structure and add it to our
586  * list of vdisks.
587  *
588  * Return: 0 on success, -ENOMEM on failure.
589  */
590 static int visorhba_slave_alloc(struct scsi_device *scsidev)
591 {
592         /* this is called by the midlayer before scan for new devices --
593          * LLD can alloc any struct & do init if needed.
594          */
595         struct visordisk_info *vdisk;
596         struct visorhba_devdata *devdata;
597         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
598
599         /* already allocated return success */
600         if (scsidev->hostdata)
601                 return 0;
602
603         /* even though we errored, treat as success */
604         devdata = (struct visorhba_devdata *)scsihost->hostdata;
605         if (!devdata)
606                 return 0;
607
608         vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
609         if (!vdisk)
610                 return -ENOMEM;
611
612         vdisk->sdev = scsidev;
613         scsidev->hostdata = vdisk;
614         return 0;
615 }
616
617 /*
618  * visorhba_slave_destroy - Disk is going away, clean up resources.
619  * @scsidev: Scsi device to destroy
620  */
621 static void visorhba_slave_destroy(struct scsi_device *scsidev)
622 {
623         /* midlevel calls this after device has been quiesced and
624          * before it is to be deleted.
625          */
626         struct visordisk_info *vdisk;
627
628         vdisk = scsidev->hostdata;
629         scsidev->hostdata = NULL;
630         kfree(vdisk);
631 }
632
/* SCSI midlayer host template describing this driver's entry points. */
static struct scsi_host_template visorhba_driver_template = {
        .name = "Unisys Visor HBA",
        .info = visorhba_get_info,
        .queuecommand = visorhba_queue_command,
        .eh_abort_handler = visorhba_abort_handler,
        .eh_device_reset_handler = visorhba_device_reset_handler,
        .eh_bus_reset_handler = visorhba_bus_reset_handler,
        .eh_host_reset_handler = visorhba_host_reset_handler,
        .shost_attrs = NULL,
/* maximum number of commands the midlayer may have outstanding at once */
#define visorhba_MAX_CMNDS 128
        .can_queue = visorhba_MAX_CMNDS,
        .sg_tablesize = 64,
        .this_id = -1,
        .slave_alloc = visorhba_slave_alloc,
        .slave_destroy = visorhba_slave_destroy,
        .use_clustering = ENABLE_CLUSTERING,
};
650
651 /*
652  * info_debugfs_show - Debugfs interface to dump visorhba states
653  * @seq: The sequence file to write information to
654  * @v:   Unused, but needed for use with seq file single_open invocation
655  *
656  * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
657  *
658  * Return: SUCCESS
659  */
660 static int info_debugfs_show(struct seq_file *seq, void *v)
661 {
662         struct visorhba_devdata *devdata = seq->private;
663
664         seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
665         seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
666         seq_printf(seq, "interrupts_disabled = %llu\n",
667                    devdata->interrupts_disabled);
668         seq_printf(seq, "interrupts_notme = %llu\n",
669                    devdata->interrupts_notme);
670         seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
671         if (devdata->flags_addr) {
672                 u64 phys_flags_addr =
673                         virt_to_phys((__force  void *)devdata->flags_addr);
674                 seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
675                            phys_flags_addr);
676                 seq_printf(seq, "FeatureFlags = %llu\n",
677                            (u64)readq(devdata->flags_addr));
678         }
679         seq_printf(seq, "acquire_failed_cnt = %llu\n",
680                    devdata->acquire_failed_cnt);
681
682         return 0;
683 }
684
static int info_debugfs_open(struct inode *inode, struct file *file)
{
	/* i_private carries the visorhba_devdata set at file creation */
	return single_open(file, info_debugfs_show, inode->i_private);
}
689
/* File operations for the read-only debugfs "info" file. */
static const struct file_operations info_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = info_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
697
698 /*
699  * complete_taskmgmt_command - Complete task management
700  * @idrtable: The data object maintaining the pointer<-->int mappings
701  * @cmdrsp:   Response from the IOVM
702  * @result:   The result of the task management command
703  *
704  * Service Partition returned the result of the task management
705  * command. Wake up anyone waiting for it.
706  */
707 static void complete_taskmgmt_command(struct idr *idrtable,
708                                       struct uiscmdrsp *cmdrsp, int result)
709 {
710         wait_queue_head_t *wq =
711                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
712         int *scsi_result_ptr =
713                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
714         if (unlikely(!(wq && scsi_result_ptr))) {
715                 pr_err("visorhba: no completion context; cmd will time out\n");
716                 return;
717         }
718
719         /* copy the result of the taskmgmt and
720          * wake up the error handler that is waiting for this
721          */
722         pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
723         *scsi_result_ptr = result;
724         wake_up_all(wq);
725 }
726
727 /*
728  * visorhba_serverdown_complete - Called when we are done cleaning up
729  *                                from serverdown
730  * @devdata: Visorhba instance on which to complete serverdown
731  *
732  * Called when we are done cleanning up from serverdown, stop processing
733  * queue, fail pending IOs.
734  */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			/* complete the command back to the midlayer as if
			 * a reset had occurred
			 */
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			/* wake any error-handler thread still waiting */
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			break;
		}
		/* mark the slot free */
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	/* record the final state: fully down, transition finished */
	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
775
776 /*
777  * visorhba_serverdown - Got notified that the IOVM is down
778  * @devdata: Visorhba that is being serviced by downed IOVM
779  *
780  * Something happened to the IOVM, return immediately and
781  * schedule cleanup work.
782  *
783  * Return: 0 on success, -EINVAL on failure
784  */
785 static int visorhba_serverdown(struct visorhba_devdata *devdata)
786 {
787         if (!devdata->serverdown && !devdata->serverchangingstate) {
788                 devdata->serverchangingstate = true;
789                 visorhba_serverdown_complete(devdata);
790         } else if (devdata->serverchangingstate) {
791                 return -EINVAL;
792         }
793         return 0;
794 }
795
796 /*
797  * do_scsi_linuxstat - Scsi command returned linuxstat
798  * @cmdrsp:  Response from IOVM
799  * @scsicmd: Command issued
800  *
801  * Don't log errors for disk-not-present inquiries.
802  */
803 static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
804                               struct scsi_cmnd *scsicmd)
805 {
806         struct visordisk_info *vdisk;
807         struct scsi_device *scsidev;
808
809         scsidev = scsicmd->device;
810         memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
811
812         /* Do not log errors for disk-not-present inquiries */
813         if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
814             (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
815             cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
816                 return;
817         /* Okay see what our error_count is here.... */
818         vdisk = scsidev->hostdata;
819         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
820                 atomic_inc(&vdisk->error_count);
821                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
822         }
823 }
824
825 static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
826                                       bool is_lun0)
827 {
828         if (len < NO_DISK_INQUIRY_RESULT_LEN)
829                 return -EINVAL;
830         memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
831         buf[2] = SCSI_SPC2_VER;
832         if (is_lun0) {
833                 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
834                 buf[3] = DEV_HISUPPORT;
835         } else {
836                 buf[0] = DEV_NOT_CAPABLE;
837         }
838         buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
839         strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
840         return 0;
841 }
842
/*
 * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
 * @cmdrsp:  Response from IOVM
 * @scsicmd: Command issued
 *
 * Handle response when no linuxstat was returned. For an INQUIRY that
 * reported no disk, fabricate inquiry data presenting a non-disk device;
 * otherwise count the clean completion toward clearing the disk's
 * error state.
 */
static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
                                struct scsi_cmnd *scsicmd)
{
        struct scsi_device *scsidev;
        unsigned char *buf;
        struct scatterlist *sg;
        unsigned int i;
        char *this_page;
        char *this_page_orig;
        int bufind = 0;
        struct visordisk_info *vdisk;

        scsidev = scsicmd->device;
        if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
            cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
                /* A real device answered the inquiry; nothing to fake. */
                if (cmdrsp->scsi.no_disk_result == 0)
                        return;

                buf = kzalloc(36, GFP_KERNEL);
                if (!buf)
                        return;

                /* Linux scsi code wants a device at Lun 0
                 * to issue report luns, but we don't want
                 * a disk there so we'll present a processor
                 * there.
                 */
                set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
                                           scsidev->lun == 0);

                /* NOTE(review): buf holds only 36 bytes but the copies
                 * below use cmdrsp->scsi.bufflen as the length — confirm
                 * bufflen cannot exceed 36 here, otherwise buf is
                 * over-read.
                 */
                if (scsi_sg_count(scsicmd) == 0) {
                        memcpy(scsi_sglist(scsicmd), buf,
                               cmdrsp->scsi.bufflen);
                        kfree(buf);
                        return;
                }

                /* Copy the fabricated result into each scatterlist page,
                 * mapping each page only for the duration of its memcpy.
                 * NOTE(review): bufind is never advanced, so every entry
                 * copies from the start of buf — verify this is intended
                 * for multi-entry scatterlists.
                 */
                sg = scsi_sglist(scsicmd);
                for (i = 0; i < scsi_sg_count(scsicmd); i++) {
                        this_page_orig = kmap_atomic(sg_page(sg + i));
                        this_page = (void *)((unsigned long)this_page_orig |
                                             sg[i].offset);
                        memcpy(this_page, buf + bufind, sg[i].length);
                        kunmap_atomic(this_page_orig);
                }
                kfree(buf);
        } else {
                /* Successful IO: step the good-IO threshold down and clear
                 * the error count once enough clean completions accumulate.
                 */
                vdisk = scsidev->hostdata;
                if (atomic_read(&vdisk->ios_threshold) > 0) {
                        atomic_dec(&vdisk->ios_threshold);
                        if (atomic_read(&vdisk->ios_threshold) == 0)
                                atomic_set(&vdisk->error_count, 0);
                }
        }
}
905
906 /*
907  * complete_scsi_command - Complete a scsi command
908  * @uiscmdrsp: Response from Service Partition
909  * @scsicmd:   The scsi command
910  *
911  * Response was returned by the Service Partition. Finish it and send
912  * completion to the scsi midlayer.
913  */
914 static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
915                                   struct scsi_cmnd *scsicmd)
916 {
917         /* take what we need out of cmdrsp and complete the scsicmd */
918         scsicmd->result = cmdrsp->scsi.linuxstat;
919         if (cmdrsp->scsi.linuxstat)
920                 do_scsi_linuxstat(cmdrsp, scsicmd);
921         else
922                 do_scsi_nolinuxstat(cmdrsp, scsicmd);
923
924         scsicmd->scsi_done(scsicmd);
925 }
926
927 /*
928  * drain_queue - Pull responses out of iochannel
929  * @cmdrsp:  Response from the IOSP
930  * @devdata: Device that owns this iochannel
931  *
932  * Pulls responses out of the iochannel and process the responses.
933  */
934 static void drain_queue(struct uiscmdrsp *cmdrsp,
935                         struct visorhba_devdata *devdata)
936 {
937         struct scsi_cmnd *scsicmd;
938
939         while (1) {
940                 /* queue empty */
941                 if (visorchannel_signalremove(devdata->dev->visorchannel,
942                                               IOCHAN_FROM_IOPART,
943                                               cmdrsp))
944                         break;
945                 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
946                         /* scsicmd location is returned by the
947                          * deletion
948                          */
949                         scsicmd = del_scsipending_ent(devdata,
950                                                       cmdrsp->scsi.handle);
951                         if (!scsicmd)
952                                 break;
953                         /* complete the orig cmd */
954                         complete_scsi_command(cmdrsp, scsicmd);
955                 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
956                         if (!del_scsipending_ent(devdata,
957                                                  cmdrsp->scsitaskmgmt.handle))
958                                 break;
959                         complete_taskmgmt_command(&devdata->idr, cmdrsp,
960                                                   cmdrsp->scsitaskmgmt.result);
961                 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
962                         dev_err_once(&devdata->dev->device,
963                                      "ignoring unsupported NOTIFYGUEST\n");
964                 /* cmdrsp is now available for re-use */
965         }
966 }
967
968 /*
969  * process_incoming_rsps - Process responses from IOSP
970  * @v:  Void pointer to visorhba_devdata
971  *
972  * Main function for the thread that processes the responses
973  * from the IO Service Partition. When the queue is empty, wait
974  * to check to see if it is full again.
975  *
976  * Return: 0 on success, -ENOMEM on failure
977  */
978 static int process_incoming_rsps(void *v)
979 {
980         struct visorhba_devdata *devdata = v;
981         struct uiscmdrsp *cmdrsp = NULL;
982         const int size = sizeof(*cmdrsp);
983
984         cmdrsp = kmalloc(size, GFP_ATOMIC);
985         if (!cmdrsp)
986                 return -ENOMEM;
987
988         while (1) {
989                 if (kthread_should_stop())
990                         break;
991                 wait_event_interruptible_timeout(
992                         devdata->rsp_queue, (atomic_read(
993                                              &devdata->interrupt_rcvd) == 1),
994                                 msecs_to_jiffies(devdata->thread_wait_ms));
995                 /* drain queue */
996                 drain_queue(cmdrsp, devdata);
997         }
998         kfree(cmdrsp);
999         return 0;
1000 }
1001
1002 /*
1003  * visorhba_pause - Function to handle visorbus pause messages
1004  * @dev:           Device that is pausing
1005  * @complete_func: Function to call when finished
1006  *
1007  * Something has happened to the IO Service Partition that is
1008  * handling this device. Quiet this device and reset commands
1009  * so that the Service Partition can be corrected.
1010  *
1011  * Return: SUCCESS
1012  */
1013 static int visorhba_pause(struct visor_device *dev,
1014                           visorbus_state_complete_func complete_func)
1015 {
1016         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1017
1018         visorhba_serverdown(devdata);
1019         complete_func(dev, 0);
1020         return 0;
1021 }
1022
/*
 * visorhba_resume - Function called when the IO Service Partition is back
 * @dev:           Device that is resuming
 * @complete_func: Function to call when finished
 *
 * Yay! The IO Service Partition is back, the channel has been wiped
 * so lets re-establish connection and start processing responses.
 *
 * Return: 0 on success, -EINVAL on failure
 */
static int visorhba_resume(struct visor_device *dev,
                           visorbus_state_complete_func complete_func)
{
        struct visorhba_devdata *devdata;

        devdata = dev_get_drvdata(&dev->device);
        if (!devdata)
                return -EINVAL;

        /* Mark the transition in progress while the device comes back. */
        if (devdata->serverdown && !devdata->serverchangingstate)
                devdata->serverchangingstate = true;

        /* Restart the response-processing thread.
         * NOTE(review): the return value is not checked — confirm
         * visor_thread_start cannot fail, or handle a failed start
         * before clearing the state flags below.
         */
        devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
                                             "vhba_incming");
        devdata->serverdown = false;
        devdata->serverchangingstate = false;

        return 0;
}
1052
/*
 * visorhba_probe - Device has been discovered; do acquire
 * @dev: visor_device that was discovered
 *
 * A new HBA was discovered; do the initial connections of it:
 * allocate and register the scsi host, create debugfs entries,
 * switch the channel to polling mode, and start the response thread.
 *
 * Return: 0 on success, otherwise error code
 */
static int visorhba_probe(struct visor_device *dev)
{
        struct Scsi_Host *scsihost;
        struct vhba_config_max max;
        struct visorhba_devdata *devdata = NULL;
        int err, channel_offset;
        u64 features;

        /* Our devdata lives in the host's hostdata area. */
        scsihost = scsi_host_alloc(&visorhba_driver_template,
                                   sizeof(*devdata));
        if (!scsihost)
                return -ENODEV;

        /* Read the IOVM-provided limits out of the channel. */
        channel_offset = offsetof(struct visor_io_channel, vhba.max);
        err = visorbus_read_channel(dev, channel_offset, &max,
                                    sizeof(struct vhba_config_max));
        if (err < 0)
                goto err_scsi_host_put;

        scsihost->max_id = (unsigned int)max.max_id;
        scsihost->max_lun = (unsigned int)max.max_lun;
        scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
        /* max_io_size is in bytes; >> 9 converts to 512-byte sectors. */
        scsihost->max_sectors =
            (unsigned short)(max.max_io_size >> 9);
        scsihost->sg_tablesize =
            (unsigned short)(max.max_io_size / PAGE_SIZE);
        if (scsihost->sg_tablesize > MAX_PHYS_INFO)
                scsihost->sg_tablesize = MAX_PHYS_INFO;
        err = scsi_add_host(scsihost, &dev->device);
        if (err < 0)
                goto err_scsi_host_put;

        devdata = (struct visorhba_devdata *)scsihost->hostdata;
        devdata->dev = dev;
        dev_set_drvdata(&dev->device, devdata);

        devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
                                                  visorhba_debugfs_dir);
        if (!devdata->debugfs_dir) {
                err = -ENOMEM;
                goto err_scsi_remove_host;
        }
        devdata->debugfs_info =
                debugfs_create_file("info", 0440,
                                    devdata->debugfs_dir, devdata,
                                    &info_debugfs_fops);
        if (!devdata->debugfs_info) {
                err = -ENOMEM;
                goto err_debugfs_dir;
        }

        init_waitqueue_head(&devdata->rsp_queue);
        spin_lock_init(&devdata->privlock);
        devdata->serverdown = false;
        devdata->serverchangingstate = false;
        devdata->scsihost = scsihost;

        /* Advertise that this guest polls the channel rather than
         * taking interrupts.
         */
        channel_offset = offsetof(struct visor_io_channel,
                                  channel_header.features);
        err = visorbus_read_channel(dev, channel_offset, &features, 8);
        if (err)
                goto err_debugfs_info;
        features |= VISOR_CHANNEL_IS_POLLING;
        err = visorbus_write_channel(dev, channel_offset, &features, 8);
        if (err)
                goto err_debugfs_info;

        idr_init(&devdata->idr);

        /* Poll for responses every 2 ms.
         * NOTE(review): visor_thread_start's return value is not
         * checked — confirm it cannot fail, or add error handling.
         */
        devdata->thread_wait_ms = 2;
        devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
                                             "vhba_incoming");

        scsi_scan_host(scsihost);

        return 0;

        /* Unwind in reverse order of acquisition. */
err_debugfs_info:
        debugfs_remove(devdata->debugfs_info);

err_debugfs_dir:
        debugfs_remove_recursive(devdata->debugfs_dir);

err_scsi_remove_host:
        scsi_remove_host(scsihost);

err_scsi_host_put:
        scsi_host_put(scsihost);
        return err;
}
1151
/*
 * visorhba_remove - Remove a visorhba device
 * @dev: Device to remove
 *
 * Removes the visorhba device. Teardown order matters: the response
 * thread is stopped before the scsi host is removed, then the idr and
 * debugfs entries are torn down.
 */
static void visorhba_remove(struct visor_device *dev)
{
        struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
        struct Scsi_Host *scsihost = NULL;

        /* Nothing to do if probe never attached drvdata. */
        if (!devdata)
                return;

        scsihost = devdata->scsihost;
        /* Stop response processing before the host goes away. */
        visor_thread_stop(devdata->thread);
        scsi_remove_host(scsihost);
        /* NOTE(review): devdata lives in scsihost->hostdata, yet it is
         * still dereferenced below after this put drops our reference —
         * confirm the host memory remains valid at that point.
         */
        scsi_host_put(scsihost);

        idr_destroy(&devdata->idr);

        dev_set_drvdata(&dev->device, NULL);
        debugfs_remove(devdata->debugfs_info);
        debugfs_remove_recursive(devdata->debugfs_dir);
}
1177
/* This is used to tell the visorbus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
        .name = "visorhba",
        .owner = THIS_MODULE,
        .channel_types = visorhba_channel_types,
        .probe = visorhba_probe,
        .remove = visorhba_remove,
        .pause = visorhba_pause,
        .resume = visorhba_resume,
        /* No interrupt handler: probe sets VISOR_CHANNEL_IS_POLLING and a
         * kthread polls the channel instead.
         */
        .channel_interrupt = NULL,
};
1192
1193 /*
1194  * visorhba_init - Driver init routine
1195  *
1196  * Initialize the visorhba driver and register it with visorbus
1197  * to handle s-Par virtual host bus adapter.
1198  *
1199  * Return: 0 on success, error code otherwise
1200  */
1201 static int visorhba_init(void)
1202 {
1203         int rc = -ENOMEM;
1204
1205         visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1206         if (!visorhba_debugfs_dir)
1207                 return -ENOMEM;
1208
1209         rc = visorbus_register_visor_driver(&visorhba_driver);
1210         if (rc)
1211                 goto cleanup_debugfs;
1212
1213         return 0;
1214
1215 cleanup_debugfs:
1216         debugfs_remove_recursive(visorhba_debugfs_dir);
1217
1218         return rc;
1219 }
1220
/*
 * visorhba_exit - Driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visorhba_exit(void)
{
        /* Unregister first so no new devices can attach, then remove
         * the driver-level debugfs directory.
         */
        visorbus_unregister_visor_driver(&visorhba_driver);
        debugfs_remove_recursive(visorhba_debugfs_dir);
}
1231
1232 module_init(visorhba_init);
1233 module_exit(visorhba_exit);
1234
1235 MODULE_AUTHOR("Unisys");
1236 MODULE_LICENSE("GPL");
1237 MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");