#ifndef __CARD_BASE_H__
#define __CARD_BASE_H__

/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Interfaces within the GenWQE module. Defines genwqe_dev and
 * ddcb_queue as well as ddcb_requ.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/stringify.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <linux/genwqe/genwqe_card.h>
#include "genwqe_driver.h"

#define GENWQE_MSI_IRQS		4  /* Just one supported, no MSIx */
#define GENWQE_FLAG_MSI_ENABLED	(1 << 0)

#define GENWQE_MAX_VFS		15 /* maximum 15 VFs are possible */
#define GENWQE_MAX_FUNCS	16 /* 1 PF and 15 VFs */
#define GENWQE_CARD_NO_MAX	(16 * GENWQE_MAX_FUNCS)

/* Compile parameters, some of them appear in debugfs for later adjustment */
#define genwqe_ddcb_max			32 /* DDCBs on the work-queue */
#define genwqe_polling_enabled		0  /* in case of irqs not working */
#define genwqe_ddcb_software_timeout	10 /* timeout per DDCB in seconds */
#define genwqe_kill_timeout		8  /* time until process gets killed */
#define genwqe_vf_jobtimeout_msec	250  /* 250 msec */
#define genwqe_pf_jobtimeout_msec	8000 /* 8 sec should be ok */
#define genwqe_health_check_interval	4 /* <= 0: disabled */

/* Sysfs attribute groups used when we create the genwqe device */
extern const struct attribute_group *genwqe_attribute_groups[];

/*
 * Config space for Genwqe5 A7:
 * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
 * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
 * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
 * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
 */
#define PCI_DEVICE_GENWQE		0x044b /* Genwqe DeviceID */

#define PCI_SUBSYSTEM_ID_GENWQE5	0x035f /* Genwqe A5 Subsystem-ID */
#define PCI_SUBSYSTEM_ID_GENWQE5_NEW	0x044b /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5		0x1200 /* UNKNOWN */

#define PCI_SUBVENDOR_ID_IBM_SRIOV	0x0000
#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV	0x0000 /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5_SRIOV	0x1200 /* UNKNOWN */

#define GENWQE_SLU_ARCH_REQ		2 /* Required SLU architecture level */

/**
 * struct genwqe_reg - Genwqe data dump functionality
 * @addr: register address
 * @idx:  register index
 * @val:  register value
 */
struct genwqe_reg {
	u32 addr;
	u32 idx;
	u64 val;
};

/*
 * enum genwqe_dbg_type - Specify chip unit to dump/debug
 */
enum genwqe_dbg_type {
	GENWQE_DBG_UNIT0 = 0,  /* captured before prev errs cleared */
	GENWQE_DBG_UNIT1 = 1,
	GENWQE_DBG_UNIT2 = 2,
	GENWQE_DBG_UNIT3 = 3,
	GENWQE_DBG_UNIT4 = 4,
	GENWQE_DBG_UNIT5 = 5,
	GENWQE_DBG_UNIT6 = 6,
	GENWQE_DBG_UNIT7 = 7,
	GENWQE_DBG_REGS = 8,
	GENWQE_DBG_DMA = 9,
	GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
};

/* Software error injection to simulate card failures */
#define GENWQE_INJECT_HARDWARE_FAILURE	0x00000001 /* injects -1 reg reads */
#define GENWQE_INJECT_BUS_RESET_FAILURE	0x00000002 /* pci_bus_reset fail */
#define GENWQE_INJECT_GFIR_FATAL	0x00000004 /* GFIR = 0x0000ffff */
#define GENWQE_INJECT_GFIR_INFO		0x00000008 /* GFIR = 0xffff0000 */

/*
 * Genwqe card description and management data.
 *
 * Error-handling in case of card malfunction
 * ------------------------------------------
 *
 * If the card is detected to be defective, the outside environment
 * will cause the PCI layer to call deinit (the cleanup function for
 * probe). This has the same effect as an unbind/bind operation on
 * the card.
 *
 * The genwqe card driver implements a health checking thread which
 * verifies the card function. If it detects a problem, the card's
 * device is shut down and restarted, along with a reset of the card
 * and queue.
 *
 * All functions accessing the card device return either -EIO or
 * -ENODEV to indicate the malfunction to the user. The user has to
 * close the file descriptor and open a new one once the card becomes
 * available again.
 *
 * If the open file descriptor is set up to receive SIGIO, the signal
 * is generated for the application, which has to provide a handler
 * to react to it. If the application does not close the open file
 * descriptor, a SIGKILL is sent to enforce freeing the card's
 * resources.
 *
 * I did not find a different way to prevent kernel problems due to
 * reference counters for the card's character devices getting out of
 * sync. The character device deallocation does not block, even if
 * there is still an open file descriptor pending. If this pending
 * descriptor is closed, the data structures used by the character
 * device are reinstantiated, which will lead to the reference counter
 * dropping below the allowed values.
 *
 * Card recovery
 * -------------
 *
 * To test the internal driver recovery the following command can be used:
 *   sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
 */


/**
 * enum dma_mapping_type - Mapping type definition
 *
 * To avoid memcpying data around, we use user memory directly. To do
 * this we need to pin/swap-in the memory and request a DMA address
 * for it.
 */
enum dma_mapping_type {
	GENWQE_MAPPING_RAW = 0,		/* contiguous memory buffer */
	GENWQE_MAPPING_SGL_TEMP,	/* sglist dynamically used */
	GENWQE_MAPPING_SGL_PINNED,	/* sglist used with pinning */
};

/**
 * struct dma_mapping - Information about memory mappings done by the driver
 */
struct dma_mapping {
	enum dma_mapping_type type;

	void *u_vaddr;			/* user-space vaddr/non-aligned */
	void *k_vaddr;			/* kernel-space vaddr/non-aligned */
	dma_addr_t dma_addr;		/* physical DMA address */

	struct page **page_list;	/* list of pages used by user buff */
	dma_addr_t *dma_list;		/* list of dma addresses per page */
	unsigned int nr_pages;		/* number of pages */
	unsigned int size;		/* size in bytes */

	struct list_head card_list;	/* list of usr_maps for card */
	struct list_head pin_list;	/* list of pinned memory for dev */
};

static inline void genwqe_mapping_init(struct dma_mapping *m,
				       enum dma_mapping_type type)
{
	memset(m, 0, sizeof(*m));
	m->type = type;
}
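
/*
 * A minimal usage sketch (illustrative only, not the authoritative
 * call sequence; 'cd', 'uaddr', 'size' and 'req' are assumed to be
 * provided by the caller and error handling is trimmed):
 *
 *   struct dma_mapping m;
 *   int rc;
 *
 *   genwqe_mapping_init(&m, GENWQE_MAPPING_RAW);
 *   rc = genwqe_user_vmap(cd, &m, uaddr, size, req);
 *   if (rc == 0) {
 *           ... m.dma_list now holds one DMA address per pinned page ...
 *           genwqe_user_vunmap(cd, &m, req);
 *   }
 */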

/**
 * struct ddcb_queue - DDCB queue data
 * @ddcb_max:            Number of DDCBs on the queue
 * @ddcb_next:           Next free DDCB
 * @ddcb_act:            Next DDCB supposed to finish
 * @ddcb_seq:            Sequence number of last DDCB
 * @ddcbs_in_flight:     Currently enqueued DDCBs
 * @ddcbs_completed:     Number of already completed DDCBs
 * @ddcbs_max_in_flight: Maximum number of DDCBs in flight so far
 * @return_on_busy:      Number of -EBUSY returns on full queue
 * @wait_on_busy:        Number of waits on full queue
 * @ddcb_daddr:          DMA address of first DDCB in the queue
 * @ddcb_vaddr:          Kernel virtual address of first DDCB in the queue
 * @ddcb_req:            Associated requests (one per DDCB)
 * @ddcb_waitqs:         Associated wait queues (one per DDCB)
 * @ddcb_lock:           Lock to protect queuing operations
 * @busy_waitq:          Wait on next DDCB finishing when queue is full
 */
struct ddcb_queue {
	int ddcb_max;			/* amount of DDCBs */
	int ddcb_next;			/* next available DDCB num */
	int ddcb_act;			/* DDCB to be processed */
	u16 ddcb_seq;			/* slc seq num */
	unsigned int ddcbs_in_flight;	/* number of ddcbs in processing */
	unsigned int ddcbs_completed;
	unsigned int ddcbs_max_in_flight;
	unsigned int return_on_busy;	/* how many times -EBUSY? */
	unsigned int wait_on_busy;

	dma_addr_t ddcb_daddr;		/* DMA address */
	struct ddcb *ddcb_vaddr;	/* kernel virtual addr for DDCBs */
	struct ddcb_requ **ddcb_req;	/* ddcb processing parameter */
	wait_queue_head_t *ddcb_waitqs;	/* waitqueue per ddcb */

	spinlock_t ddcb_lock;		/* exclusive access to queue */
	wait_queue_head_t busy_waitq;	/* wait for ddcb processing */

	/* registers of the respective queue to be used */
	u32 IO_QUEUE_CONFIG;
	u32 IO_QUEUE_STATUS;
	u32 IO_QUEUE_SEGMENT;
	u32 IO_QUEUE_INITSQN;
	u32 IO_QUEUE_WRAP;
	u32 IO_QUEUE_OFFSET;
	u32 IO_QUEUE_WTIME;
	u32 IO_QUEUE_ERRCNTS;
	u32 IO_QUEUE_LRW;
};
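
/*
 * The counters above are only stable while ddcb_lock is held; a small
 * sketch of taking a consistent fill-level snapshot (assumes a valid
 * 'queue' pointer):
 *
 *   unsigned long flags;
 *   int free_slots;
 *
 *   spin_lock_irqsave(&queue->ddcb_lock, flags);
 *   free_slots = queue->ddcb_max - queue->ddcbs_in_flight;
 *   spin_unlock_irqrestore(&queue->ddcb_lock, flags);
 */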

/*
 * GFIR, SLU_UNITCFG and APP_UNITCFG, plus 8 units with FIR/FEC
 * registers and 64 secondary FIR/FEC pairs each.
 */
#define GENWQE_FFDC_REGS	(3 + (8 * (2 + 2 * 64)))

struct genwqe_ffdc {
	unsigned int entries;
	struct genwqe_reg *regs;
};

/**
 * struct genwqe_dev - GenWQE device information
 * @card_state:    Card operation state, see above
 * @ffdc:          First Failure Data Capture buffers for each unit
 * @card_thread:   Working thread to operate the DDCB queue
 * @queue_waitq:   Wait queue used in card_thread
 * @queue:         DDCB queue
 * @health_thread: Card monitoring thread (only for PFs)
 * @health_waitq:  Wait queue used in health_thread
 * @pci_dev:       Associated PCI device (function)
 * @mmio:          Base address of 64-bit register space
 * @mmio_len:      Length of register area
 * @file_lock:     Lock to protect access to file_list
 * @file_list:     List of all processes with open GenWQE file descriptors
 *
 * This struct contains all information needed to communicate with a
 * GenWQE card. It is initialized when a GenWQE device is found and
 * destroyed when it goes away. It holds data to maintain the queue as
 * well as data needed to feed the user interfaces.
 */
struct genwqe_dev {
	enum genwqe_card_state card_state;
	spinlock_t print_lock;

	int card_idx;			/* card index 0..CARD_NO_MAX-1 */
	u64 flags;			/* general flags */

	/* FFDC data gathering */
	struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

	/* DDCB workqueue */
	struct task_struct *card_thread;
	wait_queue_head_t queue_waitq;
	struct ddcb_queue queue;	/* genwqe DDCB queue */
	unsigned int irqs_processed;

	/* Card health checking thread */
	struct task_struct *health_thread;
	wait_queue_head_t health_waitq;

	int use_platform_recovery;	/* use platform recovery mechanisms */

	/* char device */
	dev_t devnum_genwqe;		/* major/minor num card */
	struct class *class_genwqe;	/* reference to class object */
	struct device *dev;		/* for device creation */
	struct cdev cdev_genwqe;	/* char device for card */

	struct dentry *debugfs_root;	/* debugfs card root directory */
	struct dentry *debugfs_genwqe;	/* debugfs driver root directory */

	/* pci resources */
	struct pci_dev *pci_dev;	/* PCI device */
	void __iomem *mmio;		/* BAR-0 MMIO start */
	unsigned long mmio_len;
	int num_vfs;
	u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
	int is_privileged;		/* access to all regs possible */

	/* config regs which we need often */
	u64 slu_unitcfg;
	u64 app_unitcfg;
	u64 softreset;
	u64 err_inject;
	u64 last_gfir;
	char app_name[5];

	spinlock_t file_lock;		/* lock for open files */
	struct list_head file_list;	/* list of open files */

	/* debugfs parameters */
	int ddcb_software_timeout;	/* wait until DDCB times out */
	int skip_recovery;		/* circumvention if recovery fails */
	int kill_timeout;		/* wait after sending SIGKILL */
};

/**
 * enum genwqe_requ_state - State of a DDCB execution request
 */
enum genwqe_requ_state {
	GENWQE_REQU_NEW      = 0,
	GENWQE_REQU_ENQUEUED = 1,
	GENWQE_REQU_TAPPED   = 2,
	GENWQE_REQU_FINISHED = 3,
	GENWQE_REQU_STATE_MAX,
};

/**
 * struct genwqe_sgl - Scatter gather list describing user-space memory
 * @sgl:            scatter-gather list, needs to be 128 byte aligned
 * @sgl_dma_addr:   dma address of the sgl
 * @sgl_size:       size of the area used for the sgl
 * @user_addr:      user-space address of the memory area
 * @user_size:      size of the user-space memory area
 * @fpage:          buffer for a partial first page, if needed
 * @fpage_dma_addr: dma address of the partial first page
 * @lpage:          buffer for a partial last page, if needed
 * @lpage_dma_addr: dma address of the partial last page
 */
struct genwqe_sgl {
	dma_addr_t sgl_dma_addr;
	struct sg_entry *sgl;
	size_t sgl_size;		/* size of sgl */

	void __user *user_addr;		/* user-space base-address */
	size_t user_size;		/* size of memory area */

	unsigned long nr_pages;
	unsigned long fpage_offs;
	size_t fpage_size;
	size_t lpage_size;

	void *fpage;
	dma_addr_t fpage_dma_addr;

	void *lpage;
	dma_addr_t lpage_dma_addr;
};

int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size);

int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list);

int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
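
/*
 * Illustrative lifecycle of a scatter-gather list (a sketch under the
 * assumption that 'cd', 'user_addr', 'user_size' and 'dma_list' exist
 * in the caller, e.g. dma_list taken from a vmapped struct dma_mapping;
 * error handling is trimmed):
 *
 *   struct genwqe_sgl sgl;
 *   int rc;
 *
 *   rc = genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size);
 *   if (rc == 0) {
 *           rc = genwqe_setup_sgl(cd, &sgl, dma_list);
 *           ... place sgl.sgl_dma_addr into the DDCB ...
 *           genwqe_free_sync_sgl(cd, &sgl);
 *   }
 */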

/**
 * struct ddcb_requ - Kernel internal representation of the DDCB request
 * @cmd: User space representation of the DDCB execution request
 */
struct ddcb_requ {
	/* kernel specific content */
	enum genwqe_requ_state req_state; /* request status */
	int num;			  /* ddcb_no for this request */
	struct ddcb_queue *queue;	  /* associated queue */

	struct dma_mapping dma_mappings[DDCB_FIXUPS];
	struct genwqe_sgl sgls[DDCB_FIXUPS];

	/* kernel/user shared content */
	struct genwqe_ddcb_cmd cmd;	/* user-space DDCB command */
	struct genwqe_debug_data debug_data;
};

/**
 * struct genwqe_file - Information for open GenWQE devices
 */
struct genwqe_file {
	struct genwqe_dev *cd;
	struct genwqe_driver *client;
	struct file *filp;

	struct fasync_struct *async_queue;
	struct task_struct *owner;
	struct list_head list;		/* entry in list of open files */

	spinlock_t map_lock;		/* lock for dma_mappings */
	struct list_head map_list;	/* list of dma_mappings */

	spinlock_t pin_lock;		/* lock for pinned memory */
	struct list_head pin_list;	/* list of pinned memory */
};

int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
int genwqe_finish_queue(struct genwqe_dev *cd);
int genwqe_release_service_layer(struct genwqe_dev *cd);

/**
 * genwqe_get_slu_id() - Read Service Layer Unit Id
 * Return: 0x00: Development code
 *         0x01: SLC1 (old)
 *         0x02: SLC2 (sept2012)
 *         0x03: SLC2 (feb2013, generic driver)
 */
static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
{
	return (int)((cd->slu_unitcfg >> 32) & 0xff);
}

int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);

u8 genwqe_card_type(struct genwqe_dev *cd);
int genwqe_card_reset(struct genwqe_dev *cd);
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);

int genwqe_device_create(struct genwqe_dev *cd);
int genwqe_device_remove(struct genwqe_dev *cd);

/* debugfs */
int genwqe_init_debugfs(struct genwqe_dev *cd);
void genqwe_exit_debugfs(struct genwqe_dev *cd);

int genwqe_read_softreset(struct genwqe_dev *cd);

/* Hardware Circumventions */
int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
int genwqe_flash_readback_fails(struct genwqe_dev *cd);

/**
 * genwqe_write_vreg() - Write register in VF window
 * @cd:   genwqe device
 * @reg:  register address
 * @val:  value to write
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);

/**
 * genwqe_read_vreg() - Read register in VF window
 * @cd:   genwqe device
 * @reg:  register address
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 *
 * Return: content of the register
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
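
/*
 * A hedged sketch of dumping one register offset for the PF and all
 * VFs ('reg' is a placeholder offset here, not a constant defined by
 * this driver):
 *
 *   int func;
 *   u64 val;
 *
 *   for (func = 0; func < GENWQE_MAX_FUNCS; func++) {
 *           val = genwqe_read_vreg(cd, reg, func);
 *           dev_info(&cd->pci_dev->dev, "func %d: %016llx\n", func, val);
 *   }
 */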

/* FFDC Buffer Management */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
			  struct genwqe_reg *regs, unsigned int max_regs);
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all);
int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
			 struct genwqe_reg *regs, unsigned int max_regs);
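
/*
 * FFDC capture is a two-step process: query the size, then read that
 * many registers. A sketch, assuming 'unit_id' names a valid unit and
 * allocation failure simply skips the dump:
 *
 *   struct genwqe_reg *regs;
 *   int entries = genwqe_ffdc_buff_size(cd, unit_id);
 *
 *   if (entries > 0) {
 *           regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL);
 *           if (regs) {
 *                   genwqe_ffdc_buff_read(cd, unit_id, regs, entries);
 *                   kfree(regs);
 *           }
 *   }
 */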

int genwqe_init_debug_data(struct genwqe_dev *cd,
			   struct genwqe_debug_data *d);

void genwqe_init_crc32(void);
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);

/* Memory allocation/deallocation; dma address handling */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
		     void *uaddr, unsigned long size,
		     struct ddcb_requ *req);

int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
		       struct ddcb_requ *req);

static inline bool dma_mapping_used(struct dma_mapping *m)
{
	if (!m)
		return false;
	return m->size != 0;
}

/**
 * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
 *
 * This function applies the address translation changes to the DDCB
 * according to the definitions required by the ATS field. It looks up
 * the memory allocation buffer or does vmap/vunmap for the respective
 * user-space buffers, including page pinning and scatter-gather list
 * buildup and teardown.
 */
int __genwqe_execute_ddcb(struct genwqe_dev *cd,
			  struct genwqe_ddcb_cmd *cmd, unsigned int f_flags);

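/*
 * A minimal, hedged call sketch (how genwqe_ddcb_cmd is filled in
 * depends entirely on the application; 'filp' and the ASIV/ATS setup
 * are assumptions for illustration):
 *
 *   struct genwqe_ddcb_cmd cmd;
 *   int rc;
 *
 *   memset(&cmd, 0, sizeof(cmd));
 *   ... fill in ASIV data and the ATS field for address translation ...
 *   rc = __genwqe_execute_ddcb(cd, &cmd, filp->f_flags);
 */
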
/**
 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
 *
 * This version will not do address translation or any modification of
 * the DDCB data. It is used e.g. for the MoveFlash DDCB, which is
 * entirely prepared by the driver itself. That means the appropriate
 * DMA addresses are already in the DDCB and do not need any
 * modification.
 */
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
			      struct genwqe_ddcb_cmd *cmd,
			      unsigned int f_flags);
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd,
			  struct ddcb_requ *req,
			  unsigned int f_flags);

int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);

/* register access */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
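
/*
 * Register access sketch, mirroring the privilege probe shown at the
 * bottom of this file (IO_SLU_BITSTREAM and IO_ILLEGAL_VALUE come from
 * the register definitions, not from this header):
 *
 *   u64 bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM);
 *
 *   if (bitstream == IO_ILLEGAL_VALUE)
 *           ... no privileged register access on this function ...
 */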

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle);
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
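
/*
 * Allocation sketch for a DMA-consistent buffer (the size and the
 * error handling are illustrative only):
 *
 *   dma_addr_t daddr;
 *   void *vaddr = __genwqe_alloc_consistent(cd, PAGE_SIZE, &daddr);
 *
 *   if (vaddr) {
 *           ... hand 'daddr' to the hardware, use 'vaddr' in-kernel ...
 *           __genwqe_free_consistent(cd, PAGE_SIZE, vaddr, daddr);
 *   }
 */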

/* Base clock frequency in MHz */
int genwqe_base_clock_frequency(struct genwqe_dev *cd);

/* Before FFDC is captured the traps should be stopped. */
void genwqe_stop_traps(struct genwqe_dev *cd);
void genwqe_start_traps(struct genwqe_dev *cd);

/* Hardware circumvention */
bool genwqe_need_err_masking(struct genwqe_dev *cd);

/**
 * genwqe_is_privileged() - Determine operation mode for PCI function
 *
 * On Intel with SRIOV support we see:
 *   PF: is_physfn = 1 is_virtfn = 0
 *   VF: is_physfn = 0 is_virtfn = 1
 *
 * On systems with no SRIOV support _and_ on virtualized systems we get:
 *   is_physfn = 0 is_virtfn = 0
 *
 * Other vendors have individual PCI device ids to distinguish between
 * virtual function drivers and physical function drivers. GenWQE
 * unfortunately has just one PCI device id for both, VFs and PF.
 *
 * The following code is used to distinguish if the card is running in
 * privileged mode, either as true PF or in a virtualized system with
 * full register access, e.g. currently on PowerPC:
 *
 *   if (pci_dev->is_virtfn)
 *           cd->is_privileged = 0;
 *   else
 *           cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
 *                                != IO_ILLEGAL_VALUE);
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
	return cd->is_privileged;
}

#endif	/* __CARD_BASE_H__ */