Heiko J Schick | fab9722 | 2006-09-22 15:22:22 -0700 | [diff] [blame] | 1 | /* |
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER |
| 3 | * |
| 4 | * internal queue handling |
| 5 | * |
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> |
| 7 | * Reinhard Ernst <rernst@de.ibm.com> |
| 8 | * Christoph Raisch <raisch@de.ibm.com> |
| 9 | * |
| 10 | * Copyright (c) 2005 IBM Corporation |
| 11 | * |
| 12 | * All rights reserved. |
| 13 | * |
| 14 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB |
| 15 | * BSD. |
| 16 | * |
| 17 | * OpenIB BSD License |
| 18 | * |
| 19 | * Redistribution and use in source and binary forms, with or without |
| 20 | * modification, are permitted provided that the following conditions are met: |
| 21 | * |
| 22 | * Redistributions of source code must retain the above copyright notice, this |
| 23 | * list of conditions and the following disclaimer. |
| 24 | * |
| 25 | * Redistributions in binary form must reproduce the above copyright notice, |
| 26 | * this list of conditions and the following disclaimer in the documentation |
| 27 | * and/or other materials |
| 28 | * provided with the distribution. |
| 29 | * |
| 30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| 31 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 32 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 33 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
| 34 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 35 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 36 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR |
| 37 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER |
| 38 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 39 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 40 | * POSSIBILITY OF SUCH DAMAGE. |
| 41 | */ |
| 42 | |
#ifndef __IPZ_PT_FN_H__
#define __IPZ_PT_FN_H__

/*
 * eHCA hardware works on fixed 4 KB pages regardless of the kernel's
 * PAGE_SIZE; these constants describe that hardware page geometry.
 */
#define EHCA_PAGESHIFT 12
#define EHCA_PAGESIZE 4096UL
#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
/* 512 u64 page-table entries == exactly one 4 KB eHCA page (see struct ipz_pt) */
#define EHCA_PT_ENTRIES 512UL

#include "ehca_tools.h"
#include "ehca_qes.h"
/* struct generic ehca page: one raw hardware page of EHCA_PAGESIZE bytes */
struct ipz_page {
	u8 entries[EHCA_PAGESIZE];
};
| 58 | |
/*
 * struct generic queue in linux kernel virtual memory (kv)
 *
 * A ring buffer of fixed-size queue entries, stored as an array of
 * non-contiguous eHCA pages; iteration state lives in current_q_offset.
 */
struct ipz_queue {
	u64 current_q_offset;	/* current queue entry, as byte offset into the queue */

	struct ipz_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;		/* queue entry size in bytes */
	u32 act_nr_of_sg;	/* NOTE(review): presumably actual nr of scatter/gather entries — confirm */
	u32 queue_length;	/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;	/* toggle flag - per page; flipped on every wraparound */
	u32 dummy3;		/* 64 bit alignment */
};
| 71 | |
| 72 | /* |
| 73 | * return current Queue Entry for a certain q_offset |
| 74 | * returns address (kv) of Queue Entry |
| 75 | */ |
| 76 | static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset) |
| 77 | { |
| 78 | struct ipz_page *current_page; |
| 79 | if (q_offset >= queue->queue_length) |
| 80 | return NULL; |
| 81 | current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT]; |
Hoang-Nam Nguyen | 78d8d5f | 2007-02-15 17:06:33 +0100 | [diff] [blame] | 82 | return ¤t_page->entries[q_offset & (EHCA_PAGESIZE - 1)]; |
Heiko J Schick | fab9722 | 2006-09-22 15:22:22 -0700 | [diff] [blame] | 83 | } |
| 84 | |
| 85 | /* |
| 86 | * return current Queue Entry |
| 87 | * returns address (kv) of Queue Entry |
| 88 | */ |
| 89 | static inline void *ipz_qeit_get(struct ipz_queue *queue) |
| 90 | { |
| 91 | return ipz_qeit_calc(queue, queue->current_q_offset); |
| 92 | } |
| 93 | |
/*
 * return current Queue Page , increment Queue Page iterator from
 * page to page in struct ipz_queue, last increment will return 0! and
 * NOT wrap
 * returns address (kv) of Queue Page
 * warning don't use in parallel with ipz_QE_get_inc()
 */
void *ipz_qpageit_get_inc(struct ipz_queue *queue);
| 102 | |
| 103 | /* |
| 104 | * return current Queue Entry, increment Queue Entry iterator by one |
| 105 | * step in struct ipz_queue, will wrap in ringbuffer |
| 106 | * returns address (kv) of Queue Entry BEFORE increment |
| 107 | * warning don't use in parallel with ipz_qpageit_get_inc() |
| 108 | * warning unpredictable results may occur if steps>act_nr_of_queue_entries |
| 109 | */ |
| 110 | static inline void *ipz_qeit_get_inc(struct ipz_queue *queue) |
| 111 | { |
| 112 | void *ret = ipz_qeit_get(queue); |
| 113 | queue->current_q_offset += queue->qe_size; |
| 114 | if (queue->current_q_offset >= queue->queue_length) { |
| 115 | queue->current_q_offset = 0; |
| 116 | /* toggle the valid flag */ |
| 117 | queue->toggle_state = (~queue->toggle_state) & 1; |
| 118 | } |
| 119 | |
| 120 | return ret; |
| 121 | } |
| 122 | |
| 123 | /* |
| 124 | * return current Queue Entry, increment Queue Entry iterator by one |
| 125 | * step in struct ipz_queue, will wrap in ringbuffer |
| 126 | * returns address (kv) of Queue Entry BEFORE increment |
| 127 | * returns 0 and does not increment, if wrong valid state |
| 128 | * warning don't use in parallel with ipz_qpageit_get_inc() |
| 129 | * warning unpredictable results may occur if steps>act_nr_of_queue_entries |
| 130 | */ |
| 131 | static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue) |
| 132 | { |
| 133 | struct ehca_cqe *cqe = ipz_qeit_get(queue); |
| 134 | u32 cqe_flags = cqe->cqe_flags; |
| 135 | |
| 136 | if ((cqe_flags >> 7) != (queue->toggle_state & 1)) |
| 137 | return NULL; |
| 138 | |
| 139 | ipz_qeit_get_inc(queue); |
| 140 | return cqe; |
| 141 | } |
| 142 | |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame^] | 143 | static inline int ipz_qeit_is_valid(struct ipz_queue *queue) |
| 144 | { |
| 145 | struct ehca_cqe *cqe = ipz_qeit_get(queue); |
| 146 | u32 cqe_flags = cqe->cqe_flags; |
| 147 | |
| 148 | return cqe_flags >> 7 == (queue->toggle_state & 1); |
| 149 | } |
| 150 | |
Heiko J Schick | fab9722 | 2006-09-22 15:22:22 -0700 | [diff] [blame] | 151 | /* |
| 152 | * returns and resets Queue Entry iterator |
| 153 | * returns address (kv) of first Queue Entry |
| 154 | */ |
| 155 | static inline void *ipz_qeit_reset(struct ipz_queue *queue) |
| 156 | { |
| 157 | queue->current_q_offset = 0; |
| 158 | return ipz_qeit_get(queue); |
| 159 | } |
| 160 | |
/*
 * return the q_offset corresponding to an absolute address
 * NOTE(review): presumably returns 0 on success, nonzero if addr is not
 * within the queue — confirm against the definition in the .c file
 */
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
| 165 | |
| 166 | /* |
| 167 | * return the next queue offset. don't modify the queue. |
| 168 | */ |
| 169 | static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset) |
| 170 | { |
| 171 | offset += queue->qe_size; |
| 172 | if (offset >= queue->queue_length) offset = 0; |
| 173 | return offset; |
| 174 | } |
| 175 | |
/* struct generic page table: one eHCA page's worth of 64-bit PT entries */
struct ipz_pt {
	u64 entries[EHCA_PT_ENTRIES];
};
| 180 | |
/* struct page table for a queue, only to be used in pf */
struct ipz_qpt {
	/* queue page tables (kv), use u64 because we know the element length */
	u64 *qpts;
	u32 n_qpts;		/* number of queue page tables */
	u32 n_ptes;		/* number of page table entries */
	u64 *current_pte_addr;	/* NOTE(review): looks like a PTE iterator — confirm in .c */
};
| 189 | |
/*
 * constructor for a ipz_queue_t, placement new for ipz_queue_t,
 * new for all dependent datastructors
 * all QP Tables are the same
 * flow:
 * allocate+pin queue
 * see ipz_qpt_ctor()
 * returns true if ok, false if out of memory
 */
int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages,
		   const u32 pagesize, const u32 qe_size,
		   const u32 nr_of_sg);
| 202 | |
/*
 * destructor for a ipz_queue_t
 * -# free queue
 * see ipz_queue_ctor()
 * returns true if ok, false if queue was NULL-ptr of free failed
 */
int ipz_queue_dtor(struct ipz_queue *queue);
| 210 | |
/*
 * constructor for a ipz_qpt_t,
 * placement new for struct ipz_queue, new for all dependent datastructors
 * all QP Tables are the same,
 * flow:
 * -# allocate+pin queue
 * -# initialise ptcb
 * -# allocate+pin PTs
 * -# link PTs to a ring, according to HCA Arch, set bit62 id needed
 * -# the ring must have room for exactly nr_of_PTEs
 * see ipz_qpt_ctor()
 * actual entry/page counts are returned through the two out-parameters
 */
void ipz_qpt_ctor(struct ipz_qpt *qpt,
		  const u32 nr_of_qes,
		  const u32 pagesize,
		  const u32 qe_size,
		  const u8 lowbyte, const u8 toggle,
		  u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
| 229 | |
/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * warning don't use in parallel with ipz_qpageit_get_inc()
 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
 * fix EQ page problems
 */
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
| 239 | |
| 240 | /* |
| 241 | * return current Event Queue Entry, increment Queue Entry iterator |
| 242 | * by one step in struct ipz_queue if valid, will wrap in ringbuffer |
| 243 | * returns address (kv) of Queue Entry BEFORE increment |
| 244 | * returns 0 and does not increment, if wrong valid state |
| 245 | * warning don't use in parallel with ipz_queue_QPageit_get_inc() |
| 246 | * warning unpredictable results may occur if steps>act_nr_of_queue_entries |
| 247 | */ |
| 248 | static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue) |
| 249 | { |
| 250 | void *ret = ipz_qeit_get(queue); |
| 251 | u32 qe = *(u8 *) ret; |
| 252 | if ((qe >> 7) != (queue->toggle_state & 1)) |
| 253 | return NULL; |
| 254 | ipz_qeit_eq_get_inc(queue); /* this is a good one */ |
| 255 | return ret; |
| 256 | } |
| 257 | |
Hoang-Nam Nguyen | 78d8d5f | 2007-02-15 17:06:33 +0100 | [diff] [blame] | 258 | static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue) |
| 259 | { |
| 260 | void *ret = ipz_qeit_get(queue); |
| 261 | u32 qe = *(u8 *) ret; |
| 262 | if ((qe >> 7) != (queue->toggle_state & 1)) |
| 263 | return NULL; |
| 264 | return ret; |
| 265 | } |
| 266 | |
Heiko J Schick | fab9722 | 2006-09-22 15:22:22 -0700 | [diff] [blame] | 267 | /* returns address (GX) of first queue entry */ |
| 268 | static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt) |
| 269 | { |
| 270 | return be64_to_cpu(qpt->qpts[0]); |
| 271 | } |
| 272 | |
| 273 | /* returns address (kv) of first page of queue page table */ |
| 274 | static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt) |
| 275 | { |
| 276 | return qpt->qpts; |
| 277 | } |
| 278 | |
| 279 | #endif /* __IPZ_PT_FN_H__ */ |