GNU Linux-libre 4.14.266-gnu1
drivers/scsi/aacraid/dpcsup.c
/*
 *      Adaptec AAC series RAID controller driver
 *      (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 *
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/semaphore.h>

#include "aacraid.h"

/**
 *      aac_response_normal     -       Handle command replies
 *      @q: Queue to read from
 *
 *      This DPC routine will be run when the adapter interrupts us to let us
 *      know there is a response on our normal priority queue. We will pull off
 *      all the QEs there are and wake up all the waiters before exiting. We
 *      will take the queue's spinlock before operating on it.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
        struct aac_dev * dev = q->dev;
        struct aac_entry *entry;
        struct hw_fib * hwfib;
        struct fib * fib;
        int consumed = 0;
        unsigned long flags, mflags;

        spin_lock_irqsave(q->lock, flags);
        /*
         *      Keep pulling response QEs off the response queue and waking
         *      up the waiters until there are no more QEs. We then return
         *      to the system. If no response was requested we just
         *      deallocate the Fib here and continue.
         */
        while(aac_consumer_get(dev, q, &entry))
        {
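                /*
                 * Bit 0 of entry->addr flags a fast-path completion; the
                 * remaining bits, shifted down by two, index the preallocated
                 * dev->fibs array (the adapter appears to echo back the
                 * SenderFibAddress the host placed in the FIB header).
                 */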
                int fast;
                u32 index = le32_to_cpu(entry->addr);
                fast = index & 0x01;
                fib = &dev->fibs[index >> 2];
                hwfib = fib->hw_fib_va;

                aac_consumer_free(dev, q, HostNormRespQueue);
                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
                 *
                 *      If the fib has been timed out already, then just
                 *      continue. The caller has already been notified that
                 *      the fib timed out.
                 */
                atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        spin_unlock_irqrestore(q->lock, flags);
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        spin_lock_irqsave(q->lock, flags);
                        continue;
                }
                spin_unlock_irqrestore(q->lock, flags);

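                /*
                 * A fast response means the adapter completed the command
                 * without posting status back into the FIB (success implied),
                 * so synthesize an ST_OK status and mark the FIB as
                 * processed by the adapter.
                 */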
                if (fast) {
                        /*
                         *      Doctor the fib
                         */
                        *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                        hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
                        fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);

                if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
                {
                        __le32 *pstatus = (__le32 *)hwfib->data;
                        if (*pstatus & cpu_to_le32(0xffff0000))
                                *pstatus = cpu_to_le32(ST_OK);
                }
                if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
                {
                        if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
                                FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
                        else
                                FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
                        /*
                         *      NOTE:  we cannot touch the fib after this
                         *          call, because it may have been deallocated.
                         */
                        fib->callback(fib->callback_data, fib);
                } else {
                        unsigned long flagv;
                        spin_lock_irqsave(&fib->event_lock, flagv);
                        if (!fib->done) {
                                fib->done = 1;
                                up(&fib->event_wait);
                        }
                        spin_unlock_irqrestore(&fib->event_lock, flagv);

                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);

                        FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
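                        /*
                         * done == 2 means the waiter has already aborted
                         * (see the interrupted-wait path in aac_fib_send),
                         * so nobody will reap this fib; clean it up here.
                         */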
                        if (fib->done == 2) {
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                fib->done = 0;
                                spin_unlock_irqrestore(&fib->event_lock, flagv);
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }
                }
                consumed++;
                spin_lock_irqsave(q->lock, flags);
        }

        if (consumed > aac_config.peak_fibs)
                aac_config.peak_fibs = consumed;
        if (consumed == 0)
                aac_config.zero_fibs++;

        spin_unlock_irqrestore(q->lock, flags);
        return 0;
}


/**
 *      aac_command_normal      -       handle commands
 *      @q: queue to process
 *
 *      This DPC routine will be queued when the adapter interrupts us to
 *      let us know there is a command on our normal priority queue. We will
 *      pull off all the QEs there are and wake up all the waiters before
 *      exiting. We will take the queue's spinlock before operating on it.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
        struct aac_dev * dev = q->dev;
        struct aac_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(q->lock, flags);

        /*
         *      Keep pulling command QEs off the command queue and waking
         *      up the waiters until there are no more QEs. We then return
         *      to the system.
         */
        while(aac_consumer_get(dev, q, &entry))
        {
                struct fib fibctx;
                struct hw_fib * hw_fib;
                u32 index;
                struct fib *fib = &fibctx;

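                /*
                 * For the host command queue, entry->addr is a byte offset
                 * into the AIF FIB area, so dividing by sizeof(struct hw_fib)
                 * yields the index into aif_base_va.
                 */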
                index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
                hw_fib = &dev->aif_base_va[index];

                /*
                 *      Allocate a FIB at all costs. For non queued stuff
                 *      we can just use the stack so we are happy. We need
                 *      a fib object in order to manage the linked lists
                 */
                if (dev->aif_thread)
                        if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
                                fib = &fibctx;

                memset(fib, 0, sizeof(struct fib));
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

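                /*
                 * If the AIF thread is running and a real fib was allocated
                 * above, queue the command for it and let it reply later;
                 * otherwise (stack fib) acknowledge the FIB inline with an
                 * ST_OK status.
                 */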
                if (dev->aif_thread && fib != &fibctx) {
                        list_add_tail(&fib->fiblink, &q->cmdq);
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        wake_up_interruptible(&q->cmdready);
                } else {
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        spin_unlock_irqrestore(q->lock, flags);
                        /*
                         *      Set the status of this FIB
                         */
                        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
                        aac_fib_adapter_complete(fib, sizeof(u32));
                        spin_lock_irqsave(q->lock, flags);
                }
        }
        spin_unlock_irqrestore(q->lock, flags);
        return 0;
}

/*
 *
 * aac_aif_callback
 * @context: the context set in the fib - here it is the AIF request fib itself
 * @fibptr: pointer to the fib
 *
 * Handles the AIFs - new method (SRC)
 *
 */

static void aac_aif_callback(void *context, struct fib * fibptr)
{
        struct fib *fibctx;
        struct aac_dev *dev;
        struct aac_aifcmd *cmd;
        int status;

        fibctx = (struct fib *)context;
        BUG_ON(fibptr == NULL);
        dev = fibptr->dev;

        if ((fibptr->hw_fib_va->header.XferState &
            cpu_to_le32(NoMoreAifDataAvailable)) ||
                dev->sa_firmware) {
                aac_fib_complete(fibptr);
                aac_fib_free(fibptr);
                return;
        }

        aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

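        /*
         * Deliver the event through the common AIF path above, then re-arm
         * by reusing fibctx for another AifRequest so the adapter always has
         * an outstanding FIB to post the next event into.
         */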
        aac_fib_init(fibctx);
        cmd = (struct aac_aifcmd *) fib_data(fibctx);
        cmd->command = cpu_to_le32(AifReqEvent);

        status = aac_fib_send(AifRequest,
                fibctx,
                sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
                FsaNormal,
                0, 1,
                (fib_callback)aac_aif_callback, fibctx);
}


/**
 *      aac_intr_normal -       Handle command replies and AIFs
 *      @dev: Device
 *      @index: completion reference (fib index, AIF offset or event type)
 *      @isAif: 1 for a queued AIF, 2 for a new-style (SRC) AIF, 0 otherwise
 *      @isFastResponse: non-zero for a fast-path completion with no status
 *      @aif_fib: AIF data already copied by the caller, or NULL
 *
 *      This DPC routine will be run when the adapter interrupts us to let us
 *      know there is a response on our normal priority queue. Depending on
 *      @isAif it either handles the AIF or completes the referenced fib and
 *      wakes up its waiter before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
        int isFastResponse, struct hw_fib *aif_fib)
{
        unsigned long mflags;
        dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
        if (isAif == 1) {       /* AIF - common */
                struct hw_fib * hw_fib;
                struct fib * fib;
                struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
                unsigned long flags;

                /*
                 *      Allocate a FIB. For non queued stuff we can just use
                 * the stack so we are happy. We need a fib object in order to
                 * manage the linked lists.
                 */
                if ((!dev->aif_thread)
                 || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
                        return 1;
                if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
                        kfree (fib);
                        return 1;
                }
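                /*
                 * The AIF payload can come from three places: with
                 * sa_firmware only the event type (passed in 'index') is
                 * recorded, otherwise it is either the hw_fib the caller
                 * already copied (aif_fib) or it is read out of the mapped
                 * adapter memory at byte offset 'index'.
                 */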
                if (dev->sa_firmware) {
                        fib->hbacmd_size = index;       /* store event type */
                } else if (aif_fib != NULL) {
                        memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
                } else {
                        memcpy(hw_fib, (struct hw_fib *)
                                (((uintptr_t)(dev->regs.sa)) + index),
                                sizeof(struct hw_fib));
                }
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

                spin_lock_irqsave(q->lock, flags);
                list_add_tail(&fib->fiblink, &q->cmdq);
                wake_up_interruptible(&q->cmdready);
                spin_unlock_irqrestore(q->lock, flags);
                return 1;
        } else if (isAif == 2) {        /* AIF - new (SRC) */
                struct fib *fibctx;
                struct aac_aifcmd *cmd;

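                /*
                 * New-style (SRC) AIFs are not copied here; instead an
                 * AifRequest fib is sent and its completion handler,
                 * aac_aif_callback, delivers the event and re-arms itself.
                 */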
                fibctx = aac_fib_alloc(dev);
                if (!fibctx)
                        return 1;
                aac_fib_init(fibctx);

                cmd = (struct aac_aifcmd *) fib_data(fibctx);
                cmd->command = cpu_to_le32(AifReqEvent);

                return aac_fib_send(AifRequest,
                        fibctx,
                        sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
                        FsaNormal,
                        0, 1,
                        (fib_callback)aac_aif_callback, fibctx);
        } else {
                struct fib *fib = &dev->fibs[index];
                int start_callback = 0;

                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
                 *
                 *      If the fib has been timed out already, then just
                 *      continue. The caller has already been notified that
                 *      the fib timed out.
                 */
                atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        return 0;
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);

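                /*
                 * Native HBA commands have no hw_fib to fix up; legacy
                 * FIB-based commands get the same status doctoring as in
                 * aac_response_normal before their waiters are woken or
                 * their callbacks run.
                 */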
                if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {

                        if (isFastResponse)
                                fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

                        if (fib->callback) {
                                start_callback = 1;
                        } else {
                                unsigned long flagv;
                                int complete = 0;

                                dprintk((KERN_INFO "event_wait up\n"));
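                                /*
                                 * done == 2: the waiter already aborted, so
                                 * nobody is left to wake; mark the fib done
                                 * and complete it below.
                                 */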
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                if (fib->done == 2) {
                                        fib->done = 1;
                                        complete = 1;
                                } else {
                                        fib->done = 1;
                                        up(&fib->event_wait);
                                }
                                spin_unlock_irqrestore(&fib->event_lock, flagv);

                                spin_lock_irqsave(&dev->manage_lock, mflags);
                                dev->management_fib_count--;
                                spin_unlock_irqrestore(&dev->manage_lock,
                                        mflags);

                                FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
                                if (complete)
                                        aac_fib_complete(fib);
                        }
                } else {
                        struct hw_fib *hwfib = fib->hw_fib_va;

                        if (isFastResponse) {
                                /* Doctor the fib */
                                *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                                hwfib->header.XferState |=
                                        cpu_to_le32(AdapterProcessed);
                                fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
                        }

                        if (hwfib->header.Command ==
                                cpu_to_le16(NuFileSystem)) {
                                __le32 *pstatus = (__le32 *)hwfib->data;

                                if (*pstatus & cpu_to_le32(0xffff0000))
                                        *pstatus = cpu_to_le32(ST_OK);
                        }
                        if (hwfib->header.XferState &
                                cpu_to_le32(NoResponseExpected | Async)) {
                                if (hwfib->header.XferState & cpu_to_le32(
                                        NoResponseExpected))
                                        FIB_COUNTER_INCREMENT(
                                                aac_config.NoResponseRecved);
                                else
                                        FIB_COUNTER_INCREMENT(
                                                aac_config.AsyncRecved);
                                start_callback = 1;
                        } else {
                                unsigned long flagv;
                                int complete = 0;

                                dprintk((KERN_INFO "event_wait up\n"));
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                if (fib->done == 2) {
                                        fib->done = 1;
                                        complete = 1;
                                } else {
                                        fib->done = 1;
                                        up(&fib->event_wait);
                                }
                                spin_unlock_irqrestore(&fib->event_lock, flagv);

                                spin_lock_irqsave(&dev->manage_lock, mflags);
                                dev->management_fib_count--;
                                spin_unlock_irqrestore(&dev->manage_lock,
                                        mflags);

                                FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                                if (complete)
                                        aac_fib_complete(fib);
                        }
                }

                if (start_callback) {
                        /*
                         * NOTE:  we cannot touch the fib after this
                         *  call, because it may have been deallocated.
                         */
                        if (likely(fib->callback && fib->callback_data)) {
                                fib->callback(fib->callback_data, fib);
                        } else {
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }

                }
                return 0;
        }
}