/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

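/*
 * A slot_map tracks which shared-memory buffer descriptors are in use.
 * "count" is the number of slots, "map" the bitmap backing them, and
 * "c" the number of free slots; c is -1 while no map is installed and
 * climbs back up to -1 once a dying map has drained.  All fields are
 * protected by the wait queue's own spinlock, q.lock.
 */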
struct slot_map {
	int c;
	wait_queue_head_t q;
	int count;
	unsigned long *map;
};

static struct slot_map rw_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};

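/*
 * Publish a freshly allocated bitmap: every slot starts out free, so
 * wake anything already sleeping in wait_for_free().
 */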
static void install(struct slot_map *m, int count, unsigned long *map)
{
	spin_lock(&m->q.lock);
	m->c = m->count = count;
	m->map = map;
	wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

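/*
 * Start tearing a map down: drop c by count + 1 so that no new slots
 * can be claimed and c only returns to -1 once every outstanding slot
 * has been released by put().
 */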
static void mark_killed(struct slot_map *m)
{
	spin_lock(&m->q.lock);
	m->c -= m->count + 1;
	spin_unlock(&m->q.lock);
}

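/*
 * Wait (uninterruptibly) for all outstanding slots to be put back,
 * i.e. for c to climb back to -1, then detach the bitmap.
 */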
static void run_down(struct slot_map *m)
{
	DEFINE_WAIT(wait);
	spin_lock(&m->q.lock);
	if (m->c != -1) {
		for (;;) {
			if (likely(list_empty(&wait.task_list)))
				__add_wait_queue_tail(&m->q, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);

			if (m->c == -1)
				break;

			spin_unlock(&m->q.lock);
			schedule();
			spin_lock(&m->q.lock);
		}
		__remove_wait_queue(&m->q, &wait);
		__set_current_state(TASK_RUNNING);
	}
	m->map = NULL;
	spin_unlock(&m->q.lock);
}

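/*
 * Release a slot.  Only two transitions need a wakeup: 0 -> 1 free
 * slots (a get() may be sleeping) and the final put of a dying map
 * (run_down() is waiting for c to reach -1).
 */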
static void put(struct slot_map *m, int slot)
{
	int v;
	spin_lock(&m->q.lock);
	__clear_bit(slot, m->map);
	v = ++m->c;
	if (unlikely(v == 1))	/* no free slots -> one free slot */
		wake_up_locked(&m->q);
	else if (unlikely(v == -1))	/* finished dying */
		wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

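/*
 * Called with m->q.lock held; sleeps (dropping the lock) until a slot
 * is free.  Returns 0 on success, -EINTR on signal, or -ETIMEDOUT once
 * the slot_timeout_secs budget is spent (the wait for a not-yet-installed
 * map is additionally capped at ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS).
 */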
static int wait_for_free(struct slot_map *m)
{
	long left = slot_timeout_secs * HZ;
	DEFINE_WAIT(wait);

	do {
		long n = left, t;
		if (likely(list_empty(&wait.task_list)))
			__add_wait_queue_tail_exclusive(&m->q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (m->c > 0)
			break;

		if (m->c < 0) {
			/* we are waiting for map to be installed */
			/* it had better be there soon, or we give up */
			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
		}
		spin_unlock(&m->q.lock);
		t = schedule_timeout(n);
		spin_lock(&m->q.lock);
		if (unlikely(!t) && n != left && m->c < 0)
			left = t;
		else
			left = t + (left - n);
		if (unlikely(signal_pending(current)))
			left = -EINTR;
	} while (left > 0);

	if (!list_empty(&wait.task_list))
		list_del(&wait.task_list);
	else if (left <= 0 && waitqueue_active(&m->q))
		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
	__set_current_state(TASK_RUNNING);

	if (likely(left > 0))
		return 0;

	return left < 0 ? -EINTR : -ETIMEDOUT;
}

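/*
 * Claim a free slot, sleeping in wait_for_free() if none is available.
 * Returns the slot number on success or a negative errno on failure.
 */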
static int get(struct slot_map *m)
{
	int res = 0;
	spin_lock(&m->q.lock);
	if (unlikely(m->c <= 0))
		res = wait_for_free(m);
	if (likely(!res)) {
		m->c--;
		res = find_first_zero_bit(m->map, m->count);
		__set_bit(res, m->map);
	}
	spin_unlock(&m->q.lock);
	return res;
}

/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
	void *uaddr;			/* user space address pointer */
	struct page **page_array;	/* array of mapped pages */
	int array_count;		/* size of above arrays */
	struct list_head list_link;
};

static struct orangefs_bufmap {
	int desc_size;
	int desc_shift;
	int desc_count;
	int total_size;
	int page_count;

	struct page **page_array;
	struct orangefs_bufmap_desc *desc_array;

	/* array to track usage of buffer descriptors */
	unsigned long *buffer_index_array;

	/* array to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
	unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;

static DEFINE_SPINLOCK(orangefs_bufmap_lock);

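/* drop the page references taken by get_user_pages_fast() */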
static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
	int i;

	for (i = 0; i < bufmap->page_count; i++)
		page_cache_release(bufmap->page_array[i]);
}

static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
	kfree(bufmap->page_array);
	kfree(bufmap->desc_array);
	kfree(bufmap->buffer_index_array);
	kfree(bufmap);
}

/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
	struct orangefs_bufmap *bufmap;
	int size = 0;
	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		size = bufmap->desc_size;
	spin_unlock(&orangefs_bufmap_lock);
	return size;
}

int orangefs_bufmap_shift_query(void)
{
	struct orangefs_bufmap *bufmap;
	int shift = 0;
	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		shift = bufmap->desc_shift;
	spin_unlock(&orangefs_bufmap_lock);
	return shift;
}

static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);

/*
 * orangefs_get_bufmap_init
 *
 * If bufmap_init is 1, then the shared memory system, including the
 * buffer_index_array, is available. Otherwise, it is not.
 *
 * returns the value of bufmap_init
 */
int orangefs_get_bufmap_init(void)
{
	return __orangefs_bufmap ? 1 : 0;
}

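/*
 * Allocate the bufmap bookkeeping (slot bitmap, descriptor array and
 * page array) sized from the client-core's ORANGEFS_dev_map_desc.
 * Returns NULL on allocation failure; no user pages are pinned here.
 */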
static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;

	bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
	if (!bufmap)
		goto out;

	bufmap->total_size = user_desc->total_size;
	bufmap->desc_count = user_desc->count;
	bufmap->desc_size = user_desc->size;
	bufmap->desc_shift = ilog2(bufmap->desc_size);

	/* one bit per descriptor; kzalloc takes a size in bytes */
	bufmap->buffer_index_array =
		kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG) *
			sizeof(unsigned long), GFP_KERNEL);
	if (!bufmap->buffer_index_array) {
		gossip_err("orangefs: could not allocate %d buffer indices\n",
			   bufmap->desc_count);
		goto out_free_bufmap;
	}

	bufmap->desc_array =
		kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
			GFP_KERNEL);
	if (!bufmap->desc_array) {
		gossip_err("orangefs: could not allocate %d descriptors\n",
			   bufmap->desc_count);
		goto out_free_index_array;
	}

	bufmap->page_count = bufmap->total_size / PAGE_SIZE;

	/* allocate storage to track our page mappings */
	bufmap->page_array =
		kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
	if (!bufmap->page_array)
		goto out_free_desc_array;

	return bufmap;

out_free_desc_array:
	kfree(bufmap->desc_array);
out_free_index_array:
	kfree(bufmap->buffer_index_array);
out_free_bufmap:
	kfree(bufmap);
out:
	return NULL;
}

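/*
 * Pin the client-core's shared buffer with get_user_pages_fast() and
 * carve the resulting page array into desc_count descriptors of
 * pages_per_desc pages each.
 */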
static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
		struct ORANGEFS_dev_map_desc *user_desc)
{
	int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
	int offset = 0, ret, i;

	/* map the pages */
	ret = get_user_pages_fast((unsigned long)user_desc->ptr,
				  bufmap->page_count, 1, bufmap->page_array);

	if (ret < 0)
		return ret;

	if (ret != bufmap->page_count) {
		gossip_err("orangefs error: asked for %d pages, only got %d.\n",
			   bufmap->page_count, ret);

		for (i = 0; i < ret; i++) {
			SetPageError(bufmap->page_array[i]);
			page_cache_release(bufmap->page_array[i]);
		}
		return -ENOMEM;
	}

	/*
	 * ideally we want to get kernel space pointers for each page, but
	 * we can't kmap that many pages at once if highmem is being used.
	 * so instead, we just kmap/kunmap the page address each time the
	 * kaddr is needed.
	 */
	for (i = 0; i < bufmap->page_count; i++)
		flush_dcache_page(bufmap->page_array[i]);

	/* build a list of available descriptors */
	for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
		bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
		bufmap->desc_array[i].array_count = pages_per_desc;
		bufmap->desc_array[i].uaddr =
			(user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
		offset += pages_per_desc;
	}

	return 0;
}

/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: called (ptr (%p) sz (%d) cnt(%d).\n",
		     user_desc->ptr,
		     user_desc->size,
		     user_desc->count);

	/*
	 * sanity check alignment and size of buffer that caller wants to
	 * work with
	 */
	if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
	    (unsigned long)user_desc->ptr) {
		gossip_err("orangefs error: memory alignment (front). %p\n",
			   user_desc->ptr);
		goto out;
	}

	if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
	    != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
		gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
			   user_desc->ptr,
			   user_desc->total_size);
		goto out;
	}

	if (user_desc->total_size != (user_desc->size * user_desc->count)) {
		gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
			   user_desc->total_size,
			   user_desc->size,
			   user_desc->count);
		goto out;
	}

	if ((user_desc->size % PAGE_SIZE) != 0) {
		gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
			   user_desc->size);
		goto out;
	}

	ret = -ENOMEM;
	bufmap = orangefs_bufmap_alloc(user_desc);
	if (!bufmap)
		goto out;

	ret = orangefs_bufmap_map(bufmap, user_desc);
	if (ret)
		goto out_free_bufmap;

	spin_lock(&orangefs_bufmap_lock);
	if (__orangefs_bufmap) {
		spin_unlock(&orangefs_bufmap_lock);
		gossip_err("orangefs: error: bufmap already initialized.\n");
		ret = -EINVAL;
		goto out_unmap_bufmap;
	}
	__orangefs_bufmap = bufmap;
	install(&rw_map,
		bufmap->desc_count,
		bufmap->buffer_index_array);
	install(&readdir_map,
		ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
		bufmap->readdir_index_array);
	spin_unlock(&orangefs_bufmap_lock);

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: exiting normally\n");
	return 0;

out_unmap_bufmap:
	orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
	orangefs_bufmap_free(bufmap);
out:
	return ret;
}

/*
 * orangefs_bufmap_finalize()
 *
 * shuts down the mapped buffer interface and releases any resources
 * associated with it
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;
	if (!bufmap)
		return;
	gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
	mark_killed(&rw_map);
	mark_killed(&readdir_map);
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_finalize: exiting normally\n");
}

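/*
 * Wait until both slot maps, marked killed by orangefs_bufmap_finalize(),
 * have drained, then detach the bufmap, release the pinned pages and
 * free it.
 */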
void orangefs_bufmap_run_down(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;
	if (!bufmap)
		return;
	run_down(&rw_map);
	run_down(&readdir_map);
	spin_lock(&orangefs_bufmap_lock);
	__orangefs_bufmap = NULL;
	spin_unlock(&orangefs_bufmap_lock);
	orangefs_bufmap_unmap(bufmap);
	orangefs_bufmap_free(bufmap);
}

/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_get(struct orangefs_bufmap **mapp, int *buffer_index)
{
	int ret = get(&rw_map);
	if (ret >= 0) {
		*mapp = __orangefs_bufmap;
		*buffer_index = ret;
		ret = 0;
	}
	return ret;
}

/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
	put(&rw_map, buffer_index);
}

/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space
 * we could do that at a later point in time. Regardless, these
 * indices are used by the client-core.
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_readdir_index_get(struct orangefs_bufmap **mapp, int *buffer_index)
{
	int ret = get(&readdir_map);
	if (ret >= 0) {
		*mapp = __orangefs_bufmap;
		*buffer_index = ret;
		ret = 0;
	}
	return ret;
}

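/*
 * orangefs_readdir_index_put()
 *
 * returns a readdir descriptor index to the collection
 *
 * no return value
 */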
void orangefs_readdir_index_put(int buffer_index)
{
	put(&readdir_map, buffer_index);
}

/*
 * we've been handed an iovec, we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct orangefs_bufmap *bufmap,
				    struct iov_iter *iter,
				    int buffer_index,
				    size_t size)
{
	struct orangefs_bufmap_desc *to = &bufmap->desc_array[buffer_index];
	int i;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	for (i = 0; size; i++) {
		struct page *page = to->page_array[i];
		size_t n = size;
		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_from_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}

/*
 * we've been handed an iovec, we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct orangefs_bufmap *bufmap,
				  struct iov_iter *iter,
				  int buffer_index,
				  size_t size)
{
	struct orangefs_bufmap_desc *from = &bufmap->desc_array[buffer_index];
	int i;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	for (i = 0; size; i++) {
		struct page *page = from->page_array[i];
		size_t n = size;
		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_to_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}