hptiop.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#ifndef _HPTIOP_H_
#define _HPTIOP_H_

/* message unit registers for Intel IOP based controllers (INTEL_BASED_IOP) */
struct hpt_iopmu_itl {
	__le32 reserved0[4];
	__le32 inbound_msgaddr0;
	__le32 inbound_msgaddr1;
	__le32 outbound_msgaddr0;
	__le32 outbound_msgaddr1;
	__le32 inbound_doorbell;
	__le32 inbound_intstatus;
	__le32 inbound_intmask;
	__le32 outbound_doorbell;
	__le32 outbound_intstatus;
	__le32 outbound_intmask;
	__le32 reserved1[2];
	__le32 inbound_queue;
	__le32 outbound_queue;
};

#define IOPMU_QUEUE_EMPTY              0xffffffff
#define IOPMU_QUEUE_MASK_HOST_BITS     0xf0000000
#define IOPMU_QUEUE_ADDR_HOST_BIT      0x80000000
#define IOPMU_QUEUE_REQUEST_SIZE_BIT   0x40000000
#define IOPMU_QUEUE_REQUEST_RESULT_BIT 0x40000000
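
/*
 * Illustrative sketch, not part of the original header: request buffers are
 * assumed to be 32-byte aligned, so the host keeps bus_addr >> 5 (see
 * struct hptiop_request::req_shifted_phy below) and ORs the queue bits
 * above into that value before writing it to inbound_queue.  How the
 * size/result bits are applied depends on the firmware interface version.
 */
static inline u32 hptiop_example_itl_inbound_entry(u32 req_shifted_phy)
{
	/* hypothetical helper, for illustration only */
	return req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT;
}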

#define IOPMU_OUTBOUND_INT_MSG0      1
#define IOPMU_OUTBOUND_INT_MSG1      2
#define IOPMU_OUTBOUND_INT_DOORBELL  4
#define IOPMU_OUTBOUND_INT_POSTQUEUE 8
#define IOPMU_OUTBOUND_INT_PCI       0x10

#define IOPMU_INBOUND_INT_MSG0       1
#define IOPMU_INBOUND_INT_MSG1       2
#define IOPMU_INBOUND_INT_DOORBELL   4
#define IOPMU_INBOUND_INT_ERROR      8
#define IOPMU_INBOUND_INT_POSTQUEUE  0x10

#define MVIOP_QUEUE_LEN 512

/* message unit for Marvell IOP based controllers (MV_BASED_IOP) */
struct hpt_iopmu_mv {
	__le32 inbound_head;
	__le32 inbound_tail;
	__le32 outbound_head;
	__le32 outbound_tail;
	__le32 inbound_msg;
	__le32 outbound_msg;
	__le32 reserve[10];

	__le64 inbound_q[MVIOP_QUEUE_LEN];
	__le64 outbound_q[MVIOP_QUEUE_LEN];
};

struct hpt_iopmv_regs {
	__le32 reserved[0x20400 / 4];
	__le32 inbound_doorbell;
	__le32 inbound_intmask;
	__le32 outbound_doorbell;
	__le32 outbound_intmask;
};

#pragma pack(1)
/* message unit for Marvell Frey based controllers (MVFREY_BASED_IOP) */
struct hpt_iopmu_mvfrey {
	__le32 reserved0[(0x4000 - 0) / 4];
	__le32 inbound_base;
	__le32 inbound_base_high;
	__le32 reserved1[(0x4018 - 0x4008) / 4];
	__le32 inbound_write_ptr;
	__le32 reserved2[(0x402c - 0x401c) / 4];
	__le32 inbound_conf_ctl;
	__le32 reserved3[(0x4050 - 0x4030) / 4];
	__le32 outbound_base;
	__le32 outbound_base_high;
	__le32 outbound_shadow_base;
	__le32 outbound_shadow_base_high;
	__le32 reserved4[(0x4088 - 0x4060) / 4];
	__le32 isr_cause;
	__le32 isr_enable;
	__le32 reserved5[(0x1020c - 0x4090) / 4];
	__le32 pcie_f0_int_enable;
	__le32 reserved6[(0x10400 - 0x10210) / 4];
	__le32 f0_to_cpu_msg_a;
	__le32 reserved7[(0x10420 - 0x10404) / 4];
	__le32 cpu_to_f0_msg_a;
	__le32 reserved8[(0x10480 - 0x10424) / 4];
	__le32 f0_doorbell;
	__le32 f0_doorbell_enable;
};

struct mvfrey_inlist_entry {
	dma_addr_t addr;
	__le32 intrfc_len;
	__le32 reserved;
};

struct mvfrey_outlist_entry {
	__le32 val;
};
#pragma pack()

#define MVIOP_MU_QUEUE_ADDR_HOST_MASK         (~(0x1full))
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT          4

#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32        0xffffffff
#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT     1
#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2

#define MVIOP_MU_INBOUND_INT_MSG        1
#define MVIOP_MU_INBOUND_INT_POSTQUEUE  2
#define MVIOP_MU_OUTBOUND_INT_MSG       1
#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2

#define CL_POINTER_TOGGLE       0x00004000
#define CPU_TO_F0_DRBL_MSG_BIT  0x02000000

enum hpt_iopmu_message {
	/* host-to-iop messages */
	IOPMU_INBOUND_MSG0_NOP = 0,
	IOPMU_INBOUND_MSG0_RESET,
	IOPMU_INBOUND_MSG0_FLUSH,
	IOPMU_INBOUND_MSG0_SHUTDOWN,
	IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
	IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
	IOPMU_INBOUND_MSG0_RESET_COMM,
	IOPMU_INBOUND_MSG0_MAX = 0xff,
	/* iop-to-host messages */
	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_MAX = 0x1ff,
	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_0 = 0x200,
	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_MAX = 0x2ff,
	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_0 = 0x300,
	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
};

struct hpt_iop_request_header {
	__le32 size;
	__le32 type;
	__le32 flags;
	__le32 result;
	__le32 context; /* host context */
	__le32 context_hi32;
};

#define IOP_REQUEST_FLAG_SYNC_REQUEST   1
#define IOP_REQUEST_FLAG_BIST_REQUEST   2
#define IOP_REQUEST_FLAG_REMAPPED       4
#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
#define IOP_REQUEST_FLAG_ADDR_BITS   0x40 /* flags[31:16] is phy_addr[47:32] */
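
/*
 * Illustrative sketch, not part of the original header: per the comment on
 * IOP_REQUEST_FLAG_ADDR_BITS, when that flag is set, bits 47:32 of the
 * request's bus address are carried in the upper half of the header's
 * flags field.
 */
static inline u32 hptiop_example_addr_flags(u64 bus_addr)
{
	/* hypothetical helper, for illustration only */
	return IOP_REQUEST_FLAG_ADDR_BITS |
		(u32)(((bus_addr >> 32) & 0xffff) << 16);
}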

enum hpt_iop_request_type {
	IOP_REQUEST_TYPE_GET_CONFIG = 0,
	IOP_REQUEST_TYPE_SET_CONFIG,
	IOP_REQUEST_TYPE_BLOCK_COMMAND,
	IOP_REQUEST_TYPE_SCSI_COMMAND,
	IOP_REQUEST_TYPE_IOCTL_COMMAND,
	IOP_REQUEST_TYPE_MAX
};

enum hpt_iop_result_type {
	IOP_RESULT_PENDING = 0,
	IOP_RESULT_SUCCESS,
	IOP_RESULT_FAIL,
	IOP_RESULT_BUSY,
	IOP_RESULT_RESET,
	IOP_RESULT_INVALID_REQUEST,
	IOP_RESULT_BAD_TARGET,
	IOP_RESULT_CHECK_CONDITION,
};

struct hpt_iop_request_get_config {
	struct hpt_iop_request_header header;
	__le32 interface_version;
	__le32 firmware_version;
	__le32 max_requests;
	__le32 request_size;
	__le32 max_sg_count;
	__le32 data_transfer_length;
	__le32 alignment_mask;
	__le32 max_devices;
	__le32 sdram_size;
};

struct hpt_iop_request_set_config {
	struct hpt_iop_request_header header;
	__le32 iop_id;
	__le16 vbus_id;
	__le16 max_host_request_size;
	__le32 reserve[6];
};

struct hpt_iopsg {
	__le32 size;
	__le32 eot; /* non-zero: end of table */
	__le64 pci_address;
};
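
/*
 * Illustrative sketch, not part of the original header: each SG element
 * carries a byte count and a bus address, and the last element in a table
 * marks the end by setting eot to a non-zero value.
 */
static inline void hptiop_example_fill_sg(struct hpt_iopsg *sg,
					  u64 bus_addr, u32 len, int last)
{
	/* hypothetical helper, for illustration only */
	sg->size = cpu_to_le32(len);
	sg->eot = cpu_to_le32(last ? 1 : 0);
	sg->pci_address = cpu_to_le64(bus_addr);
}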

struct hpt_iop_request_block_command {
	struct hpt_iop_request_header header;
	u8     channel;
	u8     target;
	u8     lun;
	u8     pad1;
	__le16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */
	__le16 sectors;
	__le64 lba;
	struct hpt_iopsg sg_list[1]; /* variable-length SG table follows */
};

#define IOP_BLOCK_COMMAND_READ     1
#define IOP_BLOCK_COMMAND_WRITE    2
#define IOP_BLOCK_COMMAND_VERIFY   3
#define IOP_BLOCK_COMMAND_FLUSH    4
#define IOP_BLOCK_COMMAND_SHUTDOWN 5

struct hpt_iop_request_scsi_command {
	struct hpt_iop_request_header header;
	u8     channel;
	u8     target;
	u8     lun;
	u8     pad1;
	u8     cdb[16];
	__le32 dataxfer_length;
	struct hpt_iopsg sg_list[1]; /* variable-length SG table follows */
};

struct hpt_iop_request_ioctl_command {
	struct hpt_iop_request_header header;
	__le32 ioctl_code;
	__le32 inbuf_size;
	__le32 outbuf_size;
	__le32 bytes_returned;
	u8     buf[1];
	/* out data should be put at buf[(inbuf_size+3)&~3] */
};
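
/*
 * Illustrative sketch, not part of the original header: per the comment
 * above, output data follows the input data in buf[], rounded up to the
 * next 4-byte boundary.
 */
static inline u32 hptiop_example_ioctl_outbuf_offset(u32 inbuf_size)
{
	/* hypothetical helper, for illustration only */
	return (inbuf_size + 3) & ~3;
}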

#define HPTIOP_MAX_REQUESTS  256u

struct hptiop_request {
	struct hptiop_request *next;
	void                  *req_virt;
	u32                   req_shifted_phy;
	struct scsi_cmnd      *scp;
	int                   index;
};

struct hpt_scsi_pointer {
	int mapped;
	int sgcnt;
	dma_addr_t dma_handle;
};

#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)

enum hptiop_family {
	UNKNOWN_BASED_IOP,
	INTEL_BASED_IOP,
	MV_BASED_IOP,
	MVFREY_BASED_IOP
};

struct hptiop_hba {
	struct hptiop_adapter_ops *ops;
	union {
		struct {
			struct hpt_iopmu_itl __iomem *iop;
			void __iomem *plx;
		} itl;
		struct {
			struct hpt_iopmv_regs *regs;
			struct hpt_iopmu_mv __iomem *mu;
			void *internal_req;
			dma_addr_t internal_req_phy;
		} mv;
		struct {
			struct hpt_iop_request_get_config __iomem *config;
			struct hpt_iopmu_mvfrey __iomem *mu;

			int internal_mem_size;
			struct hptiop_request internal_req;
			int list_count;
			struct mvfrey_inlist_entry *inlist;
			dma_addr_t inlist_phy;
			__le32 inlist_wptr;
			struct mvfrey_outlist_entry *outlist;
			dma_addr_t outlist_phy;
			__le32 *outlist_cptr; /* copy pointer shadow */
			dma_addr_t outlist_cptr_phy;
			__le32 outlist_rptr;
		} mvfrey;
	} u;

	struct Scsi_Host *host;
	struct pci_dev *pcidev;

	/* IOP config info */
	u32 interface_version;
	u32 firmware_version;
	u32 sdram_size;
	u32 max_devices;
	u32 max_requests;
	u32 max_request_size;
	u32 max_sg_descriptors;

	u32 req_size; /* host-allocated request buffer size */

	u32 iopintf_v2:1;
	u32 initialized:1;
	u32 msg_done:1;

	struct hptiop_request *req_list;
	struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];

	/* used to free allocated dma area */
	void       *dma_coherent[HPTIOP_MAX_REQUESTS];
	dma_addr_t dma_coherent_handle[HPTIOP_MAX_REQUESTS];

	atomic_t reset_count;
	atomic_t resetting;

	wait_queue_head_t reset_wq;
	wait_queue_head_t ioctl_wq;
};

struct hpt_ioctl_k {
	struct hptiop_hba *hba;
	u32   ioctl_code;
	u32   inbuf_size;
	u32   outbuf_size;
	void *inbuf;
	void *outbuf;
	u32  *bytes_returned;
	void (*done)(struct hpt_ioctl_k *);
	int   result; /* HPT_IOCTL_RESULT_ */
};

struct hptiop_adapter_ops {
	enum hptiop_family family;
	int  (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
	int  (*internal_memalloc)(struct hptiop_hba *hba);
	int  (*internal_memfree)(struct hptiop_hba *hba);
	int  (*map_pci_bar)(struct hptiop_hba *hba);
	void (*unmap_pci_bar)(struct hptiop_hba *hba);
	void (*enable_intr)(struct hptiop_hba *hba);
	void (*disable_intr)(struct hptiop_hba *hba);
	int  (*get_config)(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config);
	int  (*set_config)(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config);
	int  (*iop_intr)(struct hptiop_hba *hba);
	void (*post_msg)(struct hptiop_hba *hba, u32 msg);
	void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
	int  hw_dma_bit_mask;
	int  (*reset_comm)(struct hptiop_hba *hba);
	__le64 host_phy_flag;
};

#define HPT_IOCTL_RESULT_OK      0
#define HPT_IOCTL_RESULT_FAILED  (-1)

#if 0
#define dprintk(fmt, args...) do { printk(fmt, ##args); } while(0)
#else
#define dprintk(fmt, args...)
#endif

#endif /* _HPTIOP_H_ */