/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#if !defined(_SMARTPQI_H)
#define _SMARTPQI_H

#pragma pack(1)

#define PQI_DEVICE_SIGNATURE    "PQI DREG"

/* This structure is defined by the PQI specification. */
struct pqi_device_registers {
        __le64  signature;
        u8      function_and_status_code;
        u8      reserved[7];
        u8      max_admin_iq_elements;
        u8      max_admin_oq_elements;
        u8      admin_iq_element_length;        /* in 16-byte units */
        u8      admin_oq_element_length;        /* in 16-byte units */
        __le16  max_reset_timeout;              /* in 100-millisecond units */
        u8      reserved1[2];
        __le32  legacy_intx_status;
        __le32  legacy_intx_mask_set;
        __le32  legacy_intx_mask_clear;
        u8      reserved2[28];
        __le32  device_status;
        u8      reserved3[4];
        __le64  admin_iq_pi_offset;
        __le64  admin_oq_ci_offset;
        __le64  admin_iq_element_array_addr;
        __le64  admin_oq_element_array_addr;
        __le64  admin_iq_ci_addr;
        __le64  admin_oq_pi_addr;
        u8      admin_iq_num_elements;
        u8      admin_oq_num_elements;
        __le16  admin_queue_int_msg_num;
        u8      reserved4[4];
        __le32  device_error;
        u8      reserved5[4];
        __le64  error_details;
        __le32  device_reset;
        __le32  power_action;
        u8      reserved6[104];
};

/*
 * controller registers
 *
 * These are defined by the PMC implementation.
 *
 * Some registers (those named sis_*) are only used when in
 * legacy SIS mode before we transition the controller into
 * PQI mode.  There are a number of other SIS mode registers,
 * but we don't use them, so only the SIS registers that we
 * care about are defined here.  The offsets mentioned in the
 * comments are the offsets from the PCIe BAR 0.
 */
struct pqi_ctrl_registers {
        u8      reserved[0x20];
        __le32  sis_host_to_ctrl_doorbell;              /* 20h */
        u8      reserved1[0x34 - (0x20 + sizeof(__le32))];
        __le32  sis_interrupt_mask;                     /* 34h */
        u8      reserved2[0x9c - (0x34 + sizeof(__le32))];
        __le32  sis_ctrl_to_host_doorbell;              /* 9Ch */
        u8      reserved3[0xa0 - (0x9c + sizeof(__le32))];
        __le32  sis_ctrl_to_host_doorbell_clear;        /* A0h */
        u8      reserved4[0xb0 - (0xa0 + sizeof(__le32))];
        __le32  sis_driver_scratch;                     /* B0h */
        u8      reserved5[0xbc - (0xb0 + sizeof(__le32))];
        __le32  sis_firmware_status;                    /* BCh */
        u8      reserved6[0x1000 - (0xbc + sizeof(__le32))];
        __le32  sis_mailbox[8];                         /* 1000h */
        u8      reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
        /*
         * The PQI spec states that the PQI registers should be at
         * offset 0 from the PCIe BAR 0.  However, we can't map
         * them at offset 0 because that would break compatibility
         * with the SIS registers.  So we map them at offset 4000h.
         */
        struct pqi_device_registers pqi_registers;      /* 4000h */
};

#define PQI_DEVICE_REGISTERS_OFFSET     0x4000

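/*
 * Illustrative sketch (not part of the driver): with #pragma pack(1) and the
 * explicit reserved padding above, struct pqi_device_registers begins exactly
 * PQI_DEVICE_REGISTERS_OFFSET bytes into BAR 0, so a mapped BAR can be
 * converted to a register pointer by simple offset arithmetic.  The helper
 * name is hypothetical.
 */
static inline struct pqi_device_registers __iomem *
pqi_example_device_registers(void __iomem *bar0)
{
        return bar0 + PQI_DEVICE_REGISTERS_OFFSET;
}
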
enum pqi_io_path {
        RAID_PATH = 0,
        AIO_PATH = 1
};

struct pqi_sg_descriptor {
        __le64  address;
        __le32  length;
        __le32  flags;
};

/* manifest constants for the flags field of pqi_sg_descriptor */
#define CISS_SG_LAST    0x40000000
#define CISS_SG_CHAIN   0x80000000

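/*
 * Illustrative sketch (hypothetical helper, not used by the driver): an
 * embedded scatter/gather descriptor carries a little-endian DMA address and
 * length; the final descriptor of a request is flagged with CISS_SG_LAST,
 * while a descriptor that points at a chain buffer uses CISS_SG_CHAIN.
 */
static inline void pqi_example_fill_sg_descriptor(
        struct pqi_sg_descriptor *sg, dma_addr_t address, u32 length,
        bool last)
{
        sg->address = cpu_to_le64(address);
        sg->length = cpu_to_le32(length);
        sg->flags = last ? cpu_to_le32(CISS_SG_LAST) : 0;
}
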
struct pqi_iu_header {
        u8      iu_type;
        u8      reserved;
        __le16  iu_length;      /* in bytes - does not include the length */
                                /* of this header */
        __le16  response_queue_id;      /* specifies the OQ where the */
                                        /*   response IU is to be delivered */
        u8      work_area[2];   /* reserved for driver use */
};

/*
 * According to the PQI spec, the IU header is only the first 4 bytes of our
 * pqi_iu_header structure.
 */
#define PQI_REQUEST_HEADER_LENGTH       4

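/*
 * Illustrative sketch: because iu_length excludes the 4 header bytes defined
 * by the spec, a request's length field is normally derived from the total
 * IU size minus PQI_REQUEST_HEADER_LENGTH.  Hypothetical helper, shown only
 * to make the convention concrete.
 */
static inline void pqi_example_set_iu_length(struct pqi_iu_header *header,
        size_t total_iu_size)
{
        header->iu_length = cpu_to_le16(total_iu_size -
                PQI_REQUEST_HEADER_LENGTH);
}
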
struct pqi_general_admin_request {
        struct pqi_iu_header header;
        __le16  request_id;
        u8      function_code;
        union {
                struct {
                        u8      reserved[33];
                        __le32  buffer_length;
                        struct pqi_sg_descriptor sg_descriptor;
                } report_device_capability;

                struct {
                        u8      reserved;
                        __le16  queue_id;
                        u8      reserved1[2];
                        __le64  element_array_addr;
                        __le64  ci_addr;
                        __le16  num_elements;
                        __le16  element_length;
                        u8      queue_protocol;
                        u8      reserved2[23];
                        __le32  vendor_specific;
                } create_operational_iq;

                struct {
                        u8      reserved;
                        __le16  queue_id;
                        u8      reserved1[2];
                        __le64  element_array_addr;
                        __le64  pi_addr;
                        __le16  num_elements;
                        __le16  element_length;
                        u8      queue_protocol;
                        u8      reserved2[3];
                        __le16  int_msg_num;
                        __le16  coalescing_count;
                        __le32  min_coalescing_time;
                        __le32  max_coalescing_time;
                        u8      reserved3[8];
                        __le32  vendor_specific;
                } create_operational_oq;

                struct {
                        u8      reserved;
                        __le16  queue_id;
                        u8      reserved1[50];
                } delete_operational_queue;

                struct {
                        u8      reserved;
                        __le16  queue_id;
                        u8      reserved1[46];
                        __le32  vendor_specific;
                } change_operational_iq_properties;

        } data;
};

struct pqi_general_admin_response {
        struct pqi_iu_header header;
        __le16  request_id;
        u8      function_code;
        u8      status;
        union {
                struct {
                        u8      status_descriptor[4];
                        __le64  iq_pi_offset;
                        u8      reserved[40];
                } create_operational_iq;

                struct {
                        u8      status_descriptor[4];
                        __le64  oq_ci_offset;
                        u8      reserved[40];
                } create_operational_oq;
        } data;
};

struct pqi_iu_layer_descriptor {
        u8      inbound_spanning_supported : 1;
        u8      reserved : 7;
        u8      reserved1[5];
        __le16  max_inbound_iu_length;
        u8      outbound_spanning_supported : 1;
        u8      reserved2 : 7;
        u8      reserved3[5];
        __le16  max_outbound_iu_length;
};

struct pqi_device_capability {
        __le16  data_length;
        u8      reserved[6];
        u8      iq_arbitration_priority_support_bitmask;
        u8      maximum_aw_a;
        u8      maximum_aw_b;
        u8      maximum_aw_c;
        u8      max_arbitration_burst : 3;
        u8      reserved1 : 4;
        u8      iqa : 1;
        u8      reserved2[2];
        u8      iq_freeze : 1;
        u8      reserved3 : 7;
        __le16  max_inbound_queues;
        __le16  max_elements_per_iq;
        u8      reserved4[4];
        __le16  max_iq_element_length;
        __le16  min_iq_element_length;
        u8      reserved5[2];
        __le16  max_outbound_queues;
        __le16  max_elements_per_oq;
        __le16  intr_coalescing_time_granularity;
        __le16  max_oq_element_length;
        __le16  min_oq_element_length;
        u8      reserved6[24];
        struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
};

#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS         4

struct pqi_raid_path_request {
        struct pqi_iu_header header;
        __le16  request_id;
        __le16  nexus_id;
        __le32  buffer_length;
        u8      lun_number[8];
        __le16  protocol_specific;
        u8      data_direction : 2;
        u8      partial : 1;
        u8      reserved1 : 4;
        u8      fence : 1;
        __le16  error_index;
        u8      reserved2;
        u8      task_attribute : 3;
        u8      command_priority : 4;
        u8      reserved3 : 1;
        u8      reserved4 : 2;
        u8      additional_cdb_bytes_usage : 3;
        u8      reserved5 : 3;
        u8      cdb[32];
        struct pqi_sg_descriptor
                sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};

struct pqi_aio_path_request {
        struct pqi_iu_header header;
        __le16  request_id;
        u8      reserved1[2];
        __le32  nexus_id;
        __le32  buffer_length;
        u8      data_direction : 2;
        u8      partial : 1;
        u8      memory_type : 1;
        u8      fence : 1;
        u8      encryption_enable : 1;
        u8      reserved2 : 2;
        u8      task_attribute : 3;
        u8      command_priority : 4;
        u8      reserved3 : 1;
        __le16  data_encryption_key_index;
        __le32  encrypt_tweak_lower;
        __le32  encrypt_tweak_upper;
        u8      cdb[16];
        __le16  error_index;
        u8      num_sg_descriptors;
        u8      cdb_length;
        u8      lun_number[8];
        u8      reserved4[4];
        struct pqi_sg_descriptor
                sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};

struct pqi_io_response {
        struct pqi_iu_header header;
        __le16  request_id;
        __le16  error_index;
        u8      reserved2[4];
};

struct pqi_general_management_request {
        struct pqi_iu_header header;
        __le16  request_id;
        union {
                struct {
                        u8      reserved[2];
                        __le32  buffer_length;
                        struct pqi_sg_descriptor sg_descriptors[3];
                } report_event_configuration;

                struct {
                        __le16  global_event_oq_id;
                        __le32  buffer_length;
                        struct pqi_sg_descriptor sg_descriptors[3];
                } set_event_configuration;
        } data;
};

struct pqi_event_descriptor {
        u8      event_type;
        u8      reserved;
        __le16  oq_id;
};

struct pqi_event_config {
        u8      reserved[2];
        u8      num_event_descriptors;
        u8      reserved1;
        struct pqi_event_descriptor descriptors[1];
};

#define PQI_MAX_EVENT_DESCRIPTORS       255

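/*
 * Illustrative sketch: descriptors[] is declared with a single element but is
 * really a variable-length tail, so the size of an event-configuration buffer
 * for n descriptors follows the usual "header plus (n - 1) extra entries"
 * pattern.  Hypothetical helper.
 */
static inline size_t pqi_example_event_config_size(unsigned int num_descriptors)
{
        return sizeof(struct pqi_event_config) +
                (num_descriptors - 1) * sizeof(struct pqi_event_descriptor);
}
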
struct pqi_event_response {
        struct pqi_iu_header header;
        u8      event_type;
        u8      reserved2 : 7;
        u8      request_acknowlege : 1;
        __le16  event_id;
        __le32  additional_event_id;
        u8      data[16];
};

struct pqi_event_acknowledge_request {
        struct pqi_iu_header header;
        u8      event_type;
        u8      reserved2;
        __le16  event_id;
        __le32  additional_event_id;
};

struct pqi_task_management_request {
        struct pqi_iu_header header;
        __le16  request_id;
        __le16  nexus_id;
        u8      reserved[4];
        u8      lun_number[8];
        __le16  protocol_specific;
        __le16  outbound_queue_id_to_manage;
        __le16  request_id_to_manage;
        u8      task_management_function;
        u8      reserved2 : 7;
        u8      fence : 1;
};

#define SOP_TASK_MANAGEMENT_LUN_RESET   0x8

struct pqi_task_management_response {
        struct pqi_iu_header header;
        __le16  request_id;
        __le16  nexus_id;
        u8      additional_response_info[3];
        u8      response_code;
};

struct pqi_aio_error_info {
        u8      status;
        u8      service_response;
        u8      data_present;
        u8      reserved;
        __le32  residual_count;
        __le16  data_length;
        __le16  reserved1;
        u8      data[256];
};

struct pqi_raid_error_info {
        u8      data_in_result;
        u8      data_out_result;
        u8      reserved[3];
        u8      status;
        __le16  status_qualifier;
        __le16  sense_data_length;
        __le16  response_data_length;
        __le32  data_in_transferred;
        __le32  data_out_transferred;
        u8      data[256];
};

#define PQI_REQUEST_IU_TASK_MANAGEMENT                  0x13
#define PQI_REQUEST_IU_RAID_PATH_IO                     0x14
#define PQI_REQUEST_IU_AIO_PATH_IO                      0x15
#define PQI_REQUEST_IU_GENERAL_ADMIN                    0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG       0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG          0x73
#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT         0xf6

#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT              0x81
#define PQI_RESPONSE_IU_TASK_MANAGEMENT                 0x93
#define PQI_RESPONSE_IU_GENERAL_ADMIN                   0xe0
#define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS            0xf0
#define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS             0xf1
#define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR              0xf2
#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR               0xf3
#define PQI_RESPONSE_IU_AIO_PATH_DISABLED               0xf4
#define PQI_RESPONSE_IU_VENDOR_EVENT                    0xf5

#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY     0x0
#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ                    0x10
#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ                    0x11
#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ                    0x12
#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ                    0x13
#define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY           0x14

#define PQI_GENERAL_ADMIN_STATUS_SUCCESS        0x0

#define PQI_IQ_PROPERTY_IS_AIO_QUEUE    0x1

#define PQI_GENERAL_ADMIN_IU_LENGTH             0x3c
#define PQI_PROTOCOL_SOP                        0x0

#define PQI_DATA_IN_OUT_GOOD                                    0x0
#define PQI_DATA_IN_OUT_UNDERFLOW                               0x1
#define PQI_DATA_IN_OUT_BUFFER_ERROR                            0x40
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW                         0x41
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA         0x42
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE                  0x43
#define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR                       0x60
#define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT                 0x61
#define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED           0x62
#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED       0x63
#define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED                  0x64
#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST                0x65
#define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION                      0x66
#define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED                 0x67
#define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ               0x6F
#define PQI_DATA_IN_OUT_ERROR                                   0xf0
#define PQI_DATA_IN_OUT_PROTOCOL_ERROR                          0xf1
#define PQI_DATA_IN_OUT_HARDWARE_ERROR                          0xf2
#define PQI_DATA_IN_OUT_UNSOLICITED_ABORT                       0xf3
#define PQI_DATA_IN_OUT_ABORTED                                 0xf4
#define PQI_DATA_IN_OUT_TIMEOUT                                 0xf5

#define CISS_CMD_STATUS_SUCCESS                 0x0
#define CISS_CMD_STATUS_TARGET_STATUS           0x1
#define CISS_CMD_STATUS_DATA_UNDERRUN           0x2
#define CISS_CMD_STATUS_DATA_OVERRUN            0x3
#define CISS_CMD_STATUS_INVALID                 0x4
#define CISS_CMD_STATUS_PROTOCOL_ERROR          0x5
#define CISS_CMD_STATUS_HARDWARE_ERROR          0x6
#define CISS_CMD_STATUS_CONNECTION_LOST         0x7
#define CISS_CMD_STATUS_ABORTED                 0x8
#define CISS_CMD_STATUS_ABORT_FAILED            0x9
#define CISS_CMD_STATUS_UNSOLICITED_ABORT       0xa
#define CISS_CMD_STATUS_TIMEOUT                 0xb
#define CISS_CMD_STATUS_UNABORTABLE             0xc
#define CISS_CMD_STATUS_TMF                     0xd
#define CISS_CMD_STATUS_AIO_DISABLED            0xe

#define PQI_NUM_EVENT_QUEUE_ELEMENTS    32
#define PQI_EVENT_OQ_ELEMENT_LENGTH     sizeof(struct pqi_event_response)

#define PQI_EVENT_TYPE_HOTPLUG                  0x1
#define PQI_EVENT_TYPE_HARDWARE                 0x2
#define PQI_EVENT_TYPE_PHYSICAL_DEVICE          0x4
#define PQI_EVENT_TYPE_LOGICAL_DEVICE           0x5
#define PQI_EVENT_TYPE_AIO_STATE_CHANGE         0xfd
#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE        0xfe
#define PQI_EVENT_TYPE_HEARTBEAT                0xff

#pragma pack()

#define PQI_ERROR_BUFFER_ELEMENT_LENGTH         \
        sizeof(struct pqi_raid_error_info)

/* these values are based on our implementation */
#define PQI_ADMIN_IQ_NUM_ELEMENTS               8
#define PQI_ADMIN_OQ_NUM_ELEMENTS               20
#define PQI_ADMIN_IQ_ELEMENT_LENGTH             64
#define PQI_ADMIN_OQ_ELEMENT_LENGTH             64

#define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH       128
#define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH       16

#define PQI_MIN_MSIX_VECTORS            1
#define PQI_MAX_MSIX_VECTORS            64

/* these values are defined by the PQI spec */
#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE        255
#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE  65535
#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT       64
#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT      16
#define PQI_ADMIN_INDEX_ALIGNMENT               64
#define PQI_OPERATIONAL_INDEX_ALIGNMENT         4

#define PQI_MIN_OPERATIONAL_QUEUE_ID            1
#define PQI_MAX_OPERATIONAL_QUEUE_ID            65535

#define PQI_AIO_SERV_RESPONSE_COMPLETE          0
#define PQI_AIO_SERV_RESPONSE_FAILURE           1
#define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE      2
#define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED     3
#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED      4
#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5

#define PQI_AIO_STATUS_IO_ERROR                 0x1
#define PQI_AIO_STATUS_IO_ABORTED               0x2
#define PQI_AIO_STATUS_NO_PATH_TO_DEVICE        0x3
#define PQI_AIO_STATUS_INVALID_DEVICE           0x4
#define PQI_AIO_STATUS_AIO_PATH_DISABLED        0xe
#define PQI_AIO_STATUS_UNDERRUN                 0x51
#define PQI_AIO_STATUS_OVERRUN                  0x75

typedef u32 pqi_index_t;

/* SOP data direction flags */
#define SOP_NO_DIRECTION_FLAG   0
#define SOP_WRITE_FLAG          1       /* host writes data to Data-Out */
                                        /* buffer */
#define SOP_READ_FLAG           2       /* host receives data from Data-In */
                                        /* buffer */
#define SOP_BIDIRECTIONAL       3       /* data is transferred from the */
                                        /* Data-Out buffer and data is */
                                        /* transferred to the Data-In buffer */

#define SOP_TASK_ATTRIBUTE_SIMPLE               0
#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE        1
#define SOP_TASK_ATTRIBUTE_ORDERED              2
#define SOP_TASK_ATTRIBUTE_ACA                  4

#define SOP_TMF_COMPLETE                0x0
#define SOP_TMF_FUNCTION_SUCCEEDED      0x8

/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0      0       /* 16-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_4      1       /* 20-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_8      2       /* 24-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_12     3       /* 28-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_16     4       /* 32-byte CDB */

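/*
 * Illustrative sketch: the additional_cdb_bytes_usage field encodes, in
 * 4-byte units, how far a CDB extends beyond the base 16-byte area, so a
 * 6- to 16-byte CDB maps to SOP_ADDITIONAL_CDB_BYTES_0 and a 32-byte CDB to
 * SOP_ADDITIONAL_CDB_BYTES_16.  Hypothetical helper.
 */
static inline u8 pqi_example_additional_cdb_bytes(unsigned int cdb_length)
{
        if (cdb_length <= 16)
                return SOP_ADDITIONAL_CDB_BYTES_0;
        return DIV_ROUND_UP(cdb_length - 16, 4);
}
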
/*
 * The purpose of this structure is to obtain proper alignment of objects in
 * an admin queue pair.
 */
struct pqi_admin_queues_aligned {
        __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
                u8      iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
                                        [PQI_ADMIN_IQ_NUM_ELEMENTS];
        __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
                u8      oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
                                        [PQI_ADMIN_OQ_NUM_ELEMENTS];
        __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
        __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
};

struct pqi_admin_queues {
        void            *iq_element_array;
        void            *oq_element_array;
        volatile pqi_index_t *iq_ci;
        volatile pqi_index_t *oq_pi;
        dma_addr_t      iq_element_array_bus_addr;
        dma_addr_t      oq_element_array_bus_addr;
        dma_addr_t      iq_ci_bus_addr;
        dma_addr_t      oq_pi_bus_addr;
        __le32 __iomem  *iq_pi;
        pqi_index_t     iq_pi_copy;
        __le32 __iomem  *oq_ci;
        pqi_index_t     oq_ci_copy;
        struct task_struct *task;
        u16             int_msg_num;
};

struct pqi_queue_group {
        struct pqi_ctrl_info *ctrl_info;        /* backpointer */
        u16             iq_id[2];
        u16             oq_id;
        u16             int_msg_num;
        void            *iq_element_array[2];
        void            *oq_element_array;
        dma_addr_t      iq_element_array_bus_addr[2];
        dma_addr_t      oq_element_array_bus_addr;
        __le32 __iomem  *iq_pi[2];
        pqi_index_t     iq_pi_copy[2];
        volatile pqi_index_t *iq_ci[2];
        volatile pqi_index_t *oq_pi;
        dma_addr_t      iq_ci_bus_addr[2];
        dma_addr_t      oq_pi_bus_addr;
        __le32 __iomem  *oq_ci;
        pqi_index_t     oq_ci_copy;
        spinlock_t      submit_lock[2]; /* protect submission queue */
        struct list_head request_list[2];
};

struct pqi_event_queue {
        u16             oq_id;
        u16             int_msg_num;
        void            *oq_element_array;
        volatile pqi_index_t *oq_pi;
        dma_addr_t      oq_element_array_bus_addr;
        dma_addr_t      oq_pi_bus_addr;
        __le32 __iomem  *oq_ci;
        pqi_index_t     oq_ci_copy;
};

#define PQI_DEFAULT_QUEUE_GROUP         0
#define PQI_MAX_QUEUE_GROUPS            PQI_MAX_MSIX_VECTORS

struct pqi_encryption_info {
        u16     data_encryption_key_index;
        u32     encrypt_tweak_lower;
        u32     encrypt_tweak_upper;
};

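/*
 * Illustrative sketch: the two tweak words typically carry the low and high
 * halves of a 64-bit starting block number for the request being encrypted.
 * Hypothetical helper, assuming lower_32_bits()/upper_32_bits() from
 * <linux/kernel.h>.
 */
static inline void pqi_example_set_encrypt_tweak(
        struct pqi_encryption_info *encryption_info, u64 first_block)
{
        encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
        encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
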
#define PQI_MAX_OUTSTANDING_REQUESTS    ((u32)~0)
#define PQI_MAX_TRANSFER_SIZE           (4 * 1024U * 1024U)

#define RAID_MAP_MAX_ENTRIES            1024

#define PQI_PHYSICAL_DEVICE_BUS         0
#define PQI_RAID_VOLUME_BUS             1
#define PQI_HBA_BUS                     2
#define PQI_MAX_BUS                     PQI_HBA_BUS

#pragma pack(1)

struct report_lun_header {
        __be32  list_length;
        u8      extended_response;
        u8      reserved[3];
};

struct report_log_lun_extended_entry {
        u8      lunid[8];
        u8      volume_id[16];
};

struct report_log_lun_extended {
        struct report_lun_header header;
        struct report_log_lun_extended_entry lun_entries[1];
};

struct report_phys_lun_extended_entry {
        u8      lunid[8];
        __be64  wwid;
        u8      device_type;
        u8      device_flags;
        u8      lun_count;      /* number of LUNs in a multi-LUN device */
        u8      redundant_paths;
        u32     aio_handle;
};

/* for device_flags field of struct report_phys_lun_extended_entry */
#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK       0x1
#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED    0x8

struct report_phys_lun_extended {
        struct report_lun_header header;
        struct report_phys_lun_extended_entry lun_entries[1];
};

struct raid_map_disk_data {
        u32     aio_handle;
        u8      xor_mult[2];
        u8      reserved[2];
};

/* constants for flags field of RAID map */
#define RAID_MAP_ENCRYPTION_ENABLED     0x1

struct raid_map {
        __le32  structure_size;         /* size of entire structure in bytes */
        __le32  volume_blk_size;        /* bytes / block in the volume */
        __le64  volume_blk_cnt;         /* logical blocks on the volume */
        u8      phys_blk_shift;         /* shift factor to convert between */
                                        /* units of logical blocks and */
                                        /* physical disk blocks */
        u8      parity_rotation_shift;  /* shift factor to convert between */
                                        /* units of logical stripes and */
                                        /* physical stripes */
        __le16  strip_size;             /* blocks used on each disk / stripe */
        __le64  disk_starting_blk;      /* first disk block used in volume */
        __le64  disk_blk_cnt;           /* disk blocks used by volume / disk */
        __le16  data_disks_per_row;     /* data disk entries / row in the map */
        __le16  metadata_disks_per_row; /* mirror/parity disk entries / row */
                                        /* in the map */
        __le16  row_cnt;                /* rows in each layout map */
        __le16  layout_map_count;       /* layout maps (1 map per */
                                        /* mirror parity group) */
        __le16  flags;
        __le16  data_encryption_key_index;
        u8      reserved[16];
        struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
};

#pragma pack()

#define RAID_CTLR_LUNID         "\0\0\0\0\0\0\0\0"

struct pqi_scsi_dev {
        int     devtype;                /* as reported by INQUIRY command */
        u8      device_type;            /* as reported by */
                                        /* BMIC_IDENTIFY_PHYSICAL_DEVICE */
                                        /* only valid for devtype = TYPE_DISK */
        int     bus;
        int     target;
        int     lun;
        u8      scsi3addr[8];
        __be64  wwid;
        u8      volume_id[16];
        u8      is_physical_device : 1;
        u8      target_lun_valid : 1;
        u8      expose_device : 1;
        u8      no_uld_attach : 1;
        u8      aio_enabled : 1;        /* only valid for physical disks */
        u8      device_gone : 1;
        u8      new_device : 1;
        u8      keep_device : 1;
        u8      volume_offline : 1;
        u8      vendor[8];              /* bytes 8-15 of inquiry data */
        u8      model[16];              /* bytes 16-31 of inquiry data */
        u64     sas_address;
        u8      raid_level;
        u16     queue_depth;            /* max. queue_depth for this device */
        u16     advertised_queue_depth;
        u32     aio_handle;
        u8      volume_status;
        u8      active_path_index;
        u8      path_map;
        u8      bay;
        u8      box[8];
        u16     phys_connector[8];
        int     offload_configured;     /* I/O accel RAID offload configured */
        int     offload_enabled;        /* I/O accel RAID offload enabled */
        int     offload_enabled_pending;
        int     offload_to_mirror;      /* Send next I/O accelerator RAID */
                                        /* offload request to mirror drive. */
        struct raid_map *raid_map;      /* I/O accelerator RAID map */

        struct pqi_sas_port *sas_port;
        struct scsi_device *sdev;

        struct list_head scsi_device_list_entry;
        struct list_head new_device_list_entry;
        struct list_head add_list_entry;
        struct list_head delete_list_entry;
};

/* VPD inquiry pages */
#define SCSI_VPD_SUPPORTED_PAGES        0x0     /* standard page */
#define SCSI_VPD_DEVICE_ID              0x83    /* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY     0xc1    /* vendor-specific page */
#define CISS_VPD_LV_OFFLOAD_STATUS      0xc2    /* vendor-specific page */
#define CISS_VPD_LV_STATUS              0xc3    /* vendor-specific page */

#define VPD_PAGE        (1 << 8)

#pragma pack(1)

/* structure for CISS_VPD_LV_STATUS */
struct ciss_vpd_logical_volume_status {
        u8      peripheral_info;
        u8      page_code;
        u8      reserved;
        u8      page_length;
        u8      volume_status;
        u8      reserved2[3];
        __be32  flags;
};

#pragma pack()

/* constants for volume_status field of ciss_vpd_logical_volume_status */
#define CISS_LV_OK                                      0
#define CISS_LV_FAILED                                  1
#define CISS_LV_NOT_CONFIGURED                          2
#define CISS_LV_DEGRADED                                3
#define CISS_LV_READY_FOR_RECOVERY                      4
#define CISS_LV_UNDERGOING_RECOVERY                     5
#define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED           6
#define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM       7
#define CISS_LV_HARDWARE_OVERHEATING                    8
#define CISS_LV_HARDWARE_HAS_OVERHEATED                 9
#define CISS_LV_UNDERGOING_EXPANSION                    10
#define CISS_LV_NOT_AVAILABLE                           11
#define CISS_LV_QUEUED_FOR_EXPANSION                    12
#define CISS_LV_DISABLED_SCSI_ID_CONFLICT               13
#define CISS_LV_EJECTED                                 14
#define CISS_LV_UNDERGOING_ERASE                        15
/* state 16 not used */
#define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD      17
#define CISS_LV_UNDERGOING_RPI                          18
#define CISS_LV_PENDING_RPI                             19
#define CISS_LV_ENCRYPTED_NO_KEY                        20
/* state 21 not used */
#define CISS_LV_UNDERGOING_ENCRYPTION                   22
#define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING          23
#define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER   24
#define CISS_LV_PENDING_ENCRYPTION                      25
#define CISS_LV_PENDING_ENCRYPTION_REKEYING             26
#define CISS_LV_NOT_SUPPORTED                           27
#define CISS_LV_STATUS_UNAVAILABLE                      255

/* constants for flags field of ciss_vpd_logical_volume_status */
#define CISS_LV_FLAGS_NO_HOST_IO        0x1     /* volume not available for */
                                                /* host I/O */

/* for SAS hosts and SAS expanders */
struct pqi_sas_node {
        struct device *parent_dev;
        struct list_head port_list_head;
};

struct pqi_sas_port {
        struct list_head port_list_entry;
        u64     sas_address;
        struct sas_port *port;
        int     next_phy_index;
        struct list_head phy_list_head;
        struct pqi_sas_node *parent_node;
        struct sas_rphy *rphy;
};

struct pqi_sas_phy {
        struct list_head phy_list_entry;
        struct sas_phy *phy;
        struct pqi_sas_port *parent_port;
        bool    added_to_port;
};

struct pqi_io_request {
        atomic_t        refcount;
        u16             index;
        void (*io_complete_callback)(struct pqi_io_request *io_request,
                void *context);
        void            *context;
        int             status;
        struct scsi_cmnd *scmd;
        void            *error_info;
        struct pqi_sg_descriptor *sg_chain_buffer;
        dma_addr_t      sg_chain_buffer_dma_handle;
        void            *iu;
        struct list_head request_list_entry;
};

/* for indexing into the pending_events[] field of struct pqi_ctrl_info */
#define PQI_EVENT_HEARTBEAT             0
#define PQI_EVENT_HOTPLUG               1
#define PQI_EVENT_HARDWARE              2
#define PQI_EVENT_PHYSICAL_DEVICE       3
#define PQI_EVENT_LOGICAL_DEVICE        4
#define PQI_EVENT_AIO_STATE_CHANGE      5
#define PQI_EVENT_AIO_CONFIG_CHANGE     6
#define PQI_NUM_SUPPORTED_EVENTS        7

struct pqi_event {
        bool    pending;
        u8      event_type;
        __le16  event_id;
        __le32  additional_event_id;
};

#define PQI_RESERVED_IO_SLOTS_LUN_RESET                 1
#define PQI_RESERVED_IO_SLOTS_EVENT_ACK                 PQI_NUM_SUPPORTED_EVENTS
#define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS      3
#define PQI_RESERVED_IO_SLOTS                           \
        (PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
        PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)

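/*
 * Illustrative sketch: the slots reserved above are carved out of the overall
 * I/O request pool, so the queue depth advertised to the SCSI midlayer would
 * typically be the pool size minus PQI_RESERVED_IO_SLOTS.  Hypothetical
 * helper.
 */
static inline u32 pqi_example_scsi_ml_can_queue(u32 max_io_slots)
{
        return max_io_slots - PQI_RESERVED_IO_SLOTS;
}
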
struct pqi_ctrl_info {
        unsigned int    ctrl_id;
        struct pci_dev  *pci_dev;
        char            firmware_version[11];
        void __iomem    *iomem_base;
        struct pqi_ctrl_registers __iomem *registers;
        struct pqi_device_registers __iomem *pqi_registers;
        u32             max_sg_entries;
        u32             config_table_offset;
        u32             config_table_length;
        u16             max_inbound_queues;
        u16             max_elements_per_iq;
        u16             max_iq_element_length;
        u16             max_outbound_queues;
        u16             max_elements_per_oq;
        u16             max_oq_element_length;
        u32             max_transfer_size;
        u32             max_outstanding_requests;
        u32             max_io_slots;
        unsigned int    scsi_ml_can_queue;
        unsigned short  sg_tablesize;
        unsigned int    max_sectors;
        u32             error_buffer_length;
        void            *error_buffer;
        dma_addr_t      error_buffer_dma_handle;
        size_t          sg_chain_buffer_length;
        unsigned int    num_queue_groups;
        unsigned int    num_active_queue_groups;
        u16             num_elements_per_iq;
        u16             num_elements_per_oq;
        u16             max_inbound_iu_length_per_firmware;
        u16             max_inbound_iu_length;
        unsigned int    max_sg_per_iu;
        void            *admin_queue_memory_base;
        u32             admin_queue_memory_length;
        dma_addr_t      admin_queue_memory_base_dma_handle;
        void            *queue_memory_base;
        u32             queue_memory_length;
        dma_addr_t      queue_memory_base_dma_handle;
        struct pqi_admin_queues admin_queues;
        struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
        struct pqi_event_queue event_queue;
        int             max_msix_vectors;
        int             num_msix_vectors_enabled;
        int             num_msix_vectors_initialized;
        u32             msix_vectors[PQI_MAX_MSIX_VECTORS];
        void            *intr_data[PQI_MAX_MSIX_VECTORS];
        int             event_irq;
        struct Scsi_Host *scsi_host;

        struct mutex    scan_mutex;
        u8              inbound_spanning_supported : 1;
        u8              outbound_spanning_supported : 1;
        u8              pqi_mode_enabled : 1;
        u8              controller_online : 1;
        u8              heartbeat_timer_started : 1;

        struct list_head scsi_device_list;
        spinlock_t      scsi_device_list_lock;

        struct delayed_work rescan_work;
        struct delayed_work update_time_work;

        struct pqi_sas_node *sas_host;
        u64             sas_address;

        struct pqi_io_request *io_request_pool;
        u16             next_io_request_slot;

        struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
        struct work_struct event_work;

        atomic_t        num_interrupts;
        int             previous_num_interrupts;
        unsigned int    num_heartbeats_requested;
        struct timer_list heartbeat_timer;

        struct semaphore sync_request_sem;
        struct semaphore lun_reset_sem;
};

enum pqi_ctrl_mode {
        UNKNOWN,
        PQI_MODE
};

/*
 * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
 */
#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH       27

/* 0 = no limit */
#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH       0

/* CISS commands */
#define CISS_READ               0xc0
#define CISS_REPORT_LOG         0xc2    /* Report Logical LUNs */
#define CISS_REPORT_PHYS        0xc3    /* Report Physical LUNs */
#define CISS_GET_RAID_MAP       0xc8

/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
#define CISS_REPORT_LOG_EXTENDED                0x1
#define CISS_REPORT_PHYS_EXTENDED               0x2

/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER                0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE           0x15
#define BMIC_READ                               0x26
#define BMIC_WRITE                              0x27
#define BMIC_SENSE_CONTROLLER_PARAMETERS        0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION        0x66
#define BMIC_WRITE_HOST_WELLNESS                0xa5
#define BMIC_CACHE_FLUSH                        0xc2

#define SA_CACHE_FLUSH                          0x01

#define MASKED_DEVICE(lunid)                    ((lunid)[3] & 0xc0)
#define CISS_GET_BUS(lunid)                     ((lunid)[7] & 0x3f)
#define CISS_GET_LEVEL_2_TARGET(lunid)          ((lunid)[6])
#define CISS_GET_DRIVE_NUMBER(lunid)            \
        (((CISS_GET_BUS((lunid)) - 1) << 8) +   \
        CISS_GET_LEVEL_2_TARGET((lunid)))

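/*
 * Illustrative sketch: for a physical-device LUN ID reporting bus 2 and
 * level-2 target 5, the macros above yield drive number
 * (2 - 1) * 256 + 5 = 261.  Hypothetical helper wrapping the same macros.
 */
static inline unsigned int pqi_example_drive_number(const u8 *lunid)
{
        return CISS_GET_DRIVE_NUMBER(lunid);
}
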
#define NO_TIMEOUT              ((unsigned long) -1)

#pragma pack(1)

struct bmic_identify_controller {
        u8      configured_logical_drive_count;
        __le32  configuration_signature;
        u8      firmware_version[4];
        u8      reserved[145];
        __le16  extended_logical_unit_count;
        u8      reserved1[34];
        __le16  firmware_build_number;
        u8      reserved2[100];
        u8      controller_mode;
        u8      reserved3[32];
};

struct bmic_identify_physical_device {
        u8      scsi_bus;               /* SCSI Bus number on controller */
        u8      scsi_id;                /* SCSI ID on this bus */
        __le16  block_size;             /* sector size in bytes */
        __le32  total_blocks;           /* number of sectors on drive */
        __le32  reserved_blocks;        /* controller reserved (RIS) */
        u8      model[40];              /* Physical Drive Model */
        u8      serial_number[40];      /* Drive Serial Number */
        u8      firmware_revision[8];   /* drive firmware revision */
        u8      scsi_inquiry_bits;      /* inquiry byte 7 bits */
        u8      compaq_drive_stamp;     /* 0 means drive not stamped */
        u8      last_failure_reason;
        u8      flags;
        u8      more_flags;
        u8      scsi_lun;               /* SCSI LUN for phys drive */
        u8      yet_more_flags;
        u8      even_more_flags;
        __le32  spi_speed_rules;
        u8      phys_connector[2];      /* connector number on controller */
        u8      phys_box_on_bus;        /* phys enclosure this drive resides in */
        u8      phys_bay_in_box;        /* phys drive bay this drive resides in */
        __le32  rpm;                    /* drive rotational speed in RPM */
        u8      device_type;            /* type of drive */
        u8      sata_version;           /* only valid when device_type = */
                                        /* BMIC_DEVICE_TYPE_SATA */
        __le64  big_total_block_count;
        __le64  ris_starting_lba;
        __le32  ris_size;
        u8      wwid[20];
        u8      controller_phy_map[32];
        __le16  phy_count;
        u8      phy_connected_dev_type[256];
        u8      phy_to_drive_bay_num[256];
        __le16  phy_to_attached_dev_index[256];
        u8      box_index;
        u8      reserved;
        __le16  extra_physical_drive_flags;
        u8      negotiated_link_rate[256];
        u8      phy_to_phy_map[256];
        u8      redundant_path_present_map;
        u8      redundant_path_failure_map;
        u8      active_path_number;
        __le16  alternate_paths_phys_connector[8];
        u8      alternate_paths_phys_box_on_port[8];
        u8      multi_lun_device_lun_count;
        u8      minimum_good_fw_revision[8];
        u8      unique_inquiry_bytes[20];
        u8      current_temperature_degreesC;
        u8      temperature_threshold_degreesC;
        u8      max_temperature_degreesC;
        u8      logical_blocks_per_phys_block_exp;
        __le16  current_queue_depth_limit;
        u8      switch_name[10];
        __le16  switch_port;
        u8      alternate_paths_switch_name[40];
        u8      alternate_paths_switch_port[8];
        __le16  power_on_hours;
        __le16  percent_endurance_used;
        u8      drive_authentication;
        u8      smart_carrier_authentication;
        u8      smart_carrier_app_fw_version;
        u8      smart_carrier_bootloader_fw_version;
        u8      encryption_key_name[64];
        __le32  misc_drive_flags;
        __le16  dek_index;
        u8      padding[112];
};

#pragma pack()

int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
        struct pqi_scsi_dev *device);
void pqi_remove_sas_device(struct pqi_scsi_dev *device);
struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
        struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);

extern struct sas_function_template pqi_sas_transport_functions;

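/*
 * Fallback 64-bit MMIO accessors for configurations that do not provide
 * native readq()/writeq().  Note that each fallback issues two 32-bit
 * accesses (low word first), so these are not atomic with respect to the
 * device.
 */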
#if !defined(readq)
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
        u32 lower32;
        u32 upper32;

        lower32 = readl(addr);
        upper32 = readl(addr + 4);

        return ((u64)upper32 << 32) | lower32;
}
#endif

#if !defined(writeq)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
        u32 lower32;
        u32 upper32;

        lower32 = lower_32_bits(value);
        upper32 = upper_32_bits(value);

        writel(lower32, addr);
        writel(upper32, addr + 4);
}
#endif

#endif /* _SMARTPQI_H */