Jan-Bernd Themann | 7a29108 | 2006-09-13 17:44:31 +0200 | [diff] [blame] | 1 | /* |
| 2 | * linux/drivers/net/ehea/ehea.h |
| 3 | * |
| 4 | * eHEA ethernet device driver for IBM eServer System p |
| 5 | * |
| 6 | * (C) Copyright IBM Corp. 2006 |
| 7 | * |
| 8 | * Authors: |
| 9 | * Christoph Raisch <raisch@de.ibm.com> |
| 10 | * Jan-Bernd Themann <themann@de.ibm.com> |
| 11 | * Thomas Klein <tklein@de.ibm.com> |
| 12 | * |
| 13 | * |
| 14 | * This program is free software; you can redistribute it and/or modify |
| 15 | * it under the terms of the GNU General Public License as published by |
| 16 | * the Free Software Foundation; either version 2, or (at your option) |
| 17 | * any later version. |
| 18 | * |
| 19 | * This program is distributed in the hope that it will be useful, |
| 20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 22 | * GNU General Public License for more details. |
| 23 | * |
| 24 | * You should have received a copy of the GNU General Public License |
| 25 | * along with this program; if not, write to the Free Software |
| 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 27 | */ |
| 28 | |
| 29 | #ifndef __EHEA_H__ |
| 30 | #define __EHEA_H__ |
| 31 | |
| 32 | #include <linux/module.h> |
| 33 | #include <linux/ethtool.h> |
| 34 | #include <linux/vmalloc.h> |
| 35 | #include <linux/if_vlan.h> |
| 36 | |
| 37 | #include <asm/ibmebus.h> |
| 38 | #include <asm/abs_addr.h> |
| 39 | #include <asm/io.h> |
| 40 | |
| 41 | #define DRV_NAME "ehea" |
Jan-Bernd Themann | bff0a55 | 2006-10-05 16:53:14 +0200 | [diff] [blame^] | 42 | #define DRV_VERSION "EHEA_0034" |
Jan-Bernd Themann | 7a29108 | 2006-09-13 17:44:31 +0200 | [diff] [blame] | 43 | |
/* Default netif message level: log link, timer and RX/TX error events */
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Upper/lower bounds on queue entry counts (all 2^n - 1 values) */
#define EHEA_MAX_ENTRIES_RQ1 32767
#define EHEA_MAX_ENTRIES_RQ2 16383
#define EHEA_MAX_ENTRIES_RQ3 16383
#define EHEA_MAX_ENTRIES_SQ  32767
#define EHEA_MIN_ENTRIES_QP  127
| 52 | |
/* Compile-time switch selecting the smaller default queue sizes below */
#define EHEA_SMALL_QUEUES
#define EHEA_NUM_TX_QP 1

#ifdef EHEA_SMALL_QUEUES
/* Default CQ/queue entry counts when EHEA_SMALL_QUEUES is defined */
#define EHEA_MAX_CQE_COUNT     1023
#define EHEA_DEF_ENTRIES_SQ    1023
#define EHEA_DEF_ENTRIES_RQ1   4095
#define EHEA_DEF_ENTRIES_RQ2   1023
#define EHEA_DEF_ENTRIES_RQ3   1023
#else
/* Larger defaults used when EHEA_SMALL_QUEUES is not defined */
#define EHEA_MAX_CQE_COUNT     4080
#define EHEA_DEF_ENTRIES_SQ    4080
#define EHEA_DEF_ENTRIES_RQ1   8160
#define EHEA_DEF_ENTRIES_RQ2   2040
#define EHEA_DEF_ENTRIES_RQ3   2040
#endif
| 69 | |
#define EHEA_MAX_ENTRIES_EQ 20

/* Scatter/gather elements per WQE for each queue type */
#define EHEA_SG_SQ  2
#define EHEA_SG_RQ1 1
#define EHEA_SG_RQ2 0
#define EHEA_SG_RQ3 0

/* Packet size limits in bytes */
#define EHEA_MAX_PACKET_SIZE   9022	/* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE      1522
#define EHEA_L_PKT_SIZE        256	/* low latency */

/* Max receive WQEs processed in one poll pass */
#define EHEA_POLL_MAX_RWQE     1000

/* Send completion signaling */
#define EHEA_SIG_IV_LONG       1

/* Protection Domain Identifier */
#define EHEA_PD_ID             0xaabcdeff

/* Receive queue selection thresholds (see qp_init_attr rq2/rq3_threshold) */
#define EHEA_RQ2_THRESHOLD     1
#define EHEA_RQ3_THRESHOLD     9	/* use RQ3 threshold of 1522 bytes */

/* Link speeds in Mbps; 0 selects autonegotiation */
#define EHEA_SPEED_10G         10000
#define EHEA_SPEED_1G          1000
#define EHEA_SPEED_100M        100
#define EHEA_SPEED_10M         10
#define EHEA_SPEED_AUTONEG     0

/* Broadcast/Multicast registration types (bit flags, OR-able) */
#define EHEA_BCMC_SCOPE_ALL    0x08
#define EHEA_BCMC_SCOPE_SINGLE 0x00
#define EHEA_BCMC_MULTICAST    0x04
#define EHEA_BCMC_BROADCAST    0x00
#define EHEA_BCMC_UNTAGGED     0x02
#define EHEA_BCMC_TAGGED       0x00
#define EHEA_BCMC_VLANID_ALL   0x01
#define EHEA_BCMC_VLANID_SINGLE 0x00

/* Use this define to kmallocate pHYP control blocks */
#define H_CB_ALIGNMENT         4096

#define EHEA_CACHE_LINE        128

/* Memory Regions */
#define EHEA_MR_MAX_TX_PAGES   20
#define EHEA_MR_TX_DATA_PN     3
#define EHEA_MR_ACC_CTRL       0x00800000
#define EHEA_RWQES_PER_MR_RQ2  10
#define EHEA_RWQES_PER_MR_RQ3  10
/*
 * Watchdog timeout in jiffies (10 seconds).  Parenthesized so the macro
 * expands safely inside larger expressions: the unparenthesized form
 * "10*HZ" would make "x % EHEA_WATCH_DOG_TIMEOUT" expand to
 * "x % 10 * HZ", which binds incorrectly.
 */
#define EHEA_WATCH_DOG_TIMEOUT (10 * HZ)
| 121 | |
/* utility functions */

/* Log an informational message prefixed with the driver name */
#define ehea_info(fmt, args...) \
	printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)

/* Log an error message, including the calling function's name */
#define ehea_error(fmt, args...) \
	printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)

/* Debug logging: compiles to a no-op unless DEBUG is defined */
#ifdef DEBUG
#define ehea_debug(fmt, args...) \
	printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
#else
#define ehea_debug(fmt, args...) do {} while (0)
#endif

/* Dump 'len' bytes starting at 'adr', tagged with 'msg' (implemented
 * elsewhere in the driver) */
void ehea_dump(void *adr, int len, char *msg);
| 138 | |
/*
 * Bit-field helper macros using IBM bit numbering (bit 0 is the MSB of
 * a 64-bit word).  A "mask" value encodes a field descriptor as
 * (shift_position << 16) + field_length_in_bits.
 */

/* Build a field descriptor from an explicit shift position and length */
#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

/*
 * Build a field descriptor from IBM-style bit numbers 'from' (MSB end)
 * to 'to' (LSB end), inclusive.  Both arguments are fully parenthesized:
 * the original "63 - to" mis-expanded for expression arguments such as
 * EHEA_BMASK_IBM(0, a + b) ("63 - a + b").
 */
#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))

/* Extract the shift position encoded in a field descriptor */
#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

/* Unshifted all-ones mask of the field's width */
#define EHEA_BMASK_MASK(mask) \
	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

/* Place 'value' into the field described by 'mask' */
#define EHEA_BMASK_SET(mask, value) \
	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

/* Extract the field described by 'mask' out of 'value' */
#define EHEA_BMASK_GET(mask, value) \
	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
| 153 | |
/*
 * Generic ehea page: one hardware page's worth of raw queue entries.
 */
struct ehea_page {
	u8 entries[PAGE_SIZE];
};
| 160 | |
/*
 * Generic queue in linux kernel virtual memory: a ring of fixed-size
 * entries stored in an array of pages, walked via current_q_offset.
 */
struct hw_queue {
	u64 current_q_offset;		/* current queue entry */
	struct ehea_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;			/* queue entry size */
	u32 queue_length;		/* queue length allocated in bytes */
	u32 pagesize;			/* page size in bytes */
	u32 toggle_state;		/* toggle flag - per page */
	u32 reserved;			/* 64 bit alignment */
};
| 173 | |
/*
 * For pSeries this is a 64bit memory address where
 * I/O memory is mapped into CPU address space
 */
struct h_epa {
	void __iomem *addr;	/* kernel-mapped I/O address */
};

/* User-space variant: the mapping address as a plain 64-bit value */
struct h_epa_user {
	u64 addr;
};

/* Kernel- and user-space access paths to the same hardware resource */
struct h_epas {
	struct h_epa kernel;	/* kernel space accessible resource,
				   set to 0 if unused */
	struct h_epa_user user;	/* user space accessible resource
				   set to 0 if unused */
};
| 192 | |
/* Forward declarations */
struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;
| 198 | |
/*
 * Queue attributes passed to ehea_create_qp()
 */
struct ehea_qp_init_attr {
	/* input parameter */
	u32 qp_token;		/* queue token */
	u8 low_lat_rq1;
	u8 signalingtype;	/* cqe generation flag */
	u8 rq_count;		/* num of receive queues */
	u8 eqe_gen;		/* eqe generation flag */
	u16 max_nr_send_wqes;	/* max number of send wqes */
	u16 max_nr_rwqes_rq1;	/* max number of receive wqes */
	u16 max_nr_rwqes_rq2;
	u16 max_nr_rwqes_rq3;
	/* encoded WQE sizes per queue */
	u8 wqe_size_enc_sq;
	u8 wqe_size_enc_rq1;
	u8 wqe_size_enc_rq2;
	u8 wqe_size_enc_rq3;
	u8 swqe_imm_data_len;	/* immediate data length for swqes */
	u16 port_nr;
	u16 rq2_threshold;
	u16 rq3_threshold;
	u64 send_cq_handle;
	u64 recv_cq_handle;
	u64 aff_eq_handle;

	/* output parameter (actual values granted at QP creation) */
	u32 qp_nr;
	u16 act_nr_send_wqes;
	u16 act_nr_rwqes_rq1;
	u16 act_nr_rwqes_rq2;
	u16 act_nr_rwqes_rq3;
	u8 act_wqe_size_enc_sq;
	u8 act_wqe_size_enc_rq1;
	u8 act_wqe_size_enc_rq2;
	u8 act_wqe_size_enc_rq3;
	u32 nr_sq_pages;
	u32 nr_rq1_pages;
	u32 nr_rq2_pages;
	u32 nr_rq3_pages;
	/* liobn values per queue — NOTE(review): presumably I/O bus numbers
	   for the hypervisor mapping; verify against firmware interface */
	u32 liobn_sq;
	u32 liobn_rq1;
	u32 liobn_rq2;
	u32 liobn_rq3;
};
| 244 | |
/*
 * Event Queue attributes, passed as parameter
 */
struct ehea_eq_attr {
	u32 type;
	u32 max_nr_of_eqes;
	u8 eqe_gen;		/* generate eqe flag */
	u64 eq_handle;
	u32 act_nr_of_eqes;	/* actual number of entries granted */
	u32 nr_pages;
	u32 ist1;		/* Interrupt service token */
	u32 ist2;
	u32 ist3;
	u32 ist4;
};
| 260 | |
| 261 | |
/*
 * Event Queue
 */
struct ehea_eq {
	struct ehea_adapter *adapter;	/* owning adapter */
	struct hw_queue hw_queue;	/* backing queue memory */
	u64 fw_handle;			/* EQ handle for firmware calls */
	struct h_epas epas;
	spinlock_t spinlock;
	struct ehea_eq_attr attr;	/* attributes used at creation */
};
| 273 | |
/*
 * HEA Queues: one send queue plus up to three receive queues
 */
struct ehea_qp {
	struct ehea_adapter *adapter;	/* owning adapter */
	u64 fw_handle;			/* QP handle for firmware calls */
	struct hw_queue hw_squeue;	/* send queue */
	struct hw_queue hw_rqueue1;	/* receive queues 1..3 */
	struct hw_queue hw_rqueue2;
	struct hw_queue hw_rqueue3;
	struct h_epas epas;
	struct ehea_qp_init_attr init_attr;	/* attributes used at creation */
};
| 287 | |
/*
 * Completion Queue attributes
 */
struct ehea_cq_attr {
	/* input parameter */
	u32 max_nr_of_cqes;
	u32 cq_token;
	u64 eq_handle;

	/* output parameter */
	u32 act_nr_of_cqes;	/* actual number of entries granted */
	u32 nr_pages;
};

/*
 * Completion Queue
 */
struct ehea_cq {
	struct ehea_adapter *adapter;	/* owning adapter */
	u64 fw_handle;			/* CQ handle for firmware calls */
	struct hw_queue hw_queue;	/* backing queue memory */
	struct h_epas epas;
	struct ehea_cq_attr attr;	/* attributes used at creation */
};
| 312 | |
/*
 * Memory Region
 */
struct ehea_mr {
	u64 handle;	/* MR handle for firmware calls */
	u64 vaddr;	/* virtual start address of the region */
	u32 lkey;	/* local access key */
};
| 321 | |
/*
 * Port state information: diagnostic counters kept per port resource.
 * NOTE(review): exact semantics of individual counters are defined by
 * their users elsewhere in the driver — verify there before relying on
 * any one of them.
 */
struct port_state {
	int poll_max_processed;
	int poll_receive_errors;
	int ehea_poll;
	int queue_stopped;
	int min_swqe_avail;
	u64 sqc_stop_sum;
	int pkt_send;
	int pkt_xmit;
	int send_tasklet;
	int nwqe;
};
| 337 | |
/* Buffer size for the per-IRQ name strings below */
#define EHEA_IRQ_NAME_SIZE 20

/*
 * Queue SKB Array: tracks the sk_buffs posted to a queue by index
 */
struct ehea_q_skb_arr {
	struct sk_buff **arr;		/* skb array for queue */
	int len;			/* array length */
	int index;			/* array index */
	int os_skbs;			/* rq2/rq3 only: outstanding skbs */
};
| 349 | |
/*
 * Port resources: the QP, CQs, EQs, memory regions and skb bookkeeping
 * that together form one send/receive path of a port
 */
struct ehea_port_res {
	struct ehea_mr send_mr;		/* send memory region */
	struct ehea_mr recv_mr;		/* receive memory region */
	spinlock_t xmit_lock;
	struct ehea_port *port;		/* back pointer to owning port */
	char int_recv_name[EHEA_IRQ_NAME_SIZE];
	char int_send_name[EHEA_IRQ_NAME_SIZE];
	struct ehea_qp *qp;
	struct ehea_cq *send_cq;
	struct ehea_cq *recv_cq;
	struct ehea_eq *send_eq;
	struct ehea_eq *recv_eq;
	spinlock_t send_lock;
	struct ehea_q_skb_arr rq1_skba;	/* skb bookkeeping per queue */
	struct ehea_q_skb_arr rq2_skba;
	struct ehea_q_skb_arr rq3_skba;
	struct ehea_q_skb_arr sq_skba;
	spinlock_t netif_queue;
	int queue_stopped;
	int swqe_refill_th;		/* send WQE refill threshold */
	atomic_t swqe_avail;		/* available send WQEs */
	int swqe_ll_count;
	int swqe_count;
	u32 swqe_id_counter;
	u64 tx_packets;
	struct tasklet_struct send_comp_task;
	spinlock_t recv_lock;
	struct port_state p_state;	/* diagnostic counters */
	u64 rx_packets;
	u32 poll_counter;
};
| 384 | |

/* One eHEA adapter instance and the ports/resources it owns */
struct ehea_adapter {
	u64 handle;
	u8 num_ports;
	struct ehea_port *port[16];	/* ports belonging to this adapter */
	struct ehea_eq *neq;		/* notification event queue */
	struct workqueue_struct *ehea_wq;
	struct tasklet_struct neq_tasklet;
	struct ehea_mr mr;
	u32 pd;				/* protection domain */
	u64 max_mc_mac;			/* max number of multicast mac addresses */
};
| 397 | |
| 398 | |
/* Entry in a port's linked list of multicast MAC addresses */
struct ehea_mc_list {
	struct list_head list;
	u64 macaddr;
};
| 403 | |
/* Port administrative state */
#define EHEA_PORT_UP 1
#define EHEA_PORT_DOWN 0
/* Max port resource sets per port */
#define EHEA_MAX_PORT_RES 16

/* One logical eHEA port, backing one net_device */
struct ehea_port {
	struct ehea_adapter *adapter;	/* adapter that owns this port */
	struct net_device *netdev;
	struct net_device_stats stats;
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
	struct device_node *of_dev_node; /* Open Firmware Device Node */
	struct ehea_mc_list *mc_list;	 /* Multicast MAC addresses */
	struct vlan_group *vgrp;
	struct ehea_eq *qp_eq;
	struct work_struct reset_task;
	struct semaphore port_lock;
	char int_aff_name[EHEA_IRQ_NAME_SIZE];
	int allmulti;			 /* Indicates IFF_ALLMULTI state */
	int promisc;			 /* Indicates IFF_PROMISC state */
	int num_add_tx_qps;
	int resets;			 /* number of port resets performed */
	u64 mac_addr;
	u32 logical_port_id;
	u32 port_speed;			 /* one of the EHEA_SPEED_* values */
	u32 msg_enable;
	u32 sig_comp_iv;
	u32 state;			 /* EHEA_PORT_UP / EHEA_PORT_DOWN */
	u8 full_duplex;
	u8 autoneg;
	u8 num_def_qps;
};
| 433 | |
/* Queue sizing configuration for a set of port resources */
struct port_res_cfg {
	int max_entries_rcq;	/* receive completion queue */
	int max_entries_scq;	/* send completion queue */
	int max_entries_sq;	/* send queue */
	int max_entries_rq1;	/* receive queues 1..3 */
	int max_entries_rq2;
	int max_entries_rq3;
};
| 442 | |

/* Driver entry points implemented in the driver's .c files */
void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
| 447 | |
| 448 | #endif /* __EHEA_H__ */ |