/*
 * Copyright (C) 2017 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "dm-background-tracker"

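/*
 * A single tracked piece of background work.  Each bt_work sits on either
 * the issued or the queued list and, for its whole lifetime, in the
 * pending rbtree keyed by the origin block it refers to.
 */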
struct bt_work {
	struct list_head list;
	struct rb_node node;
	struct policy_work work;
};

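/*
 * Tracks background work (promotions, demotions and writebacks) on behalf
 * of a cache policy.  Work items are deduplicated by origin block via the
 * pending rbtree, counted per operation type, and move through two lists:
 *
 *	btracker_queue()    - copy the work in and put it on 'queued'
 *	                      (or straight onto 'issued' if the caller
 *	                      takes it immediately via *pwork)
 *	btracker_issue()    - hand the oldest queued item to the caller,
 *	                      moving it to 'issued'
 *	btracker_complete() - drop the item and update the counters
 *
 * A rough usage sketch (do_work() is a hypothetical caller-side helper;
 * error handling omitted):
 *
 *	struct policy_work *pw;
 *
 *	btracker_queue(b, &work, NULL);
 *	...
 *	while (!btracker_issue(b, &pw))
 *		do_work(pw);
 *	...
 *	btracker_complete(b, pw);
 */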
struct background_tracker {
	unsigned max_work;
	atomic_t pending_promotes;
	atomic_t pending_writebacks;
	atomic_t pending_demotes;

	struct list_head issued;
	struct list_head queued;
	struct rb_root pending;

	struct kmem_cache *work_cache;
};

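/*
 * Create a tracker that will accept up to max_work outstanding items.
 * Returns NULL if either the tracker or its slab cache can't be allocated.
 */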
struct background_tracker *btracker_create(unsigned max_work)
{
	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);

	/* kmalloc() can fail; don't dereference a NULL tracker. */
	if (!b)
		return NULL;

	b->max_work = max_work;
	atomic_set(&b->pending_promotes, 0);
	atomic_set(&b->pending_writebacks, 0);
	atomic_set(&b->pending_demotes, 0);

	INIT_LIST_HEAD(&b->issued);
	INIT_LIST_HEAD(&b->queued);

	b->pending = RB_ROOT;
	b->work_cache = KMEM_CACHE(bt_work, 0);
	if (!b->work_cache) {
		DMERR("couldn't create slab cache for background work items");
		kfree(b);
		b = NULL;
	}

	return b;
}
EXPORT_SYMBOL_GPL(btracker_create);

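/*
 * Tear down the tracker.  Note this only frees the tracker itself and its
 * slab cache; any bt_work items still on the issued or queued lists are
 * assumed to have been completed (and hence freed) by the caller already.
 */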
void btracker_destroy(struct background_tracker *b)
{
	kmem_cache_destroy(b->work_cache);
	kfree(b);
}
EXPORT_SYMBOL_GPL(btracker_destroy);

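/*
 * Three way comparison of origin blocks, used to order the pending rbtree.
 */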
static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
{
	if (from_oblock(lhs) < from_oblock(rhs))
		return -1;

	if (from_oblock(rhs) < from_oblock(lhs))
		return 1;

	return 0;
}

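/*
 * Link a new work item into the pending rbtree, keyed by origin block.
 * Returns false, without inserting, if work for that block is already
 * present.
 */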
static bool __insert_pending(struct background_tracker *b,
			     struct bt_work *nw)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node, *parent = NULL;

	while (*new) {
		w = container_of(*new, struct bt_work, node);

		parent = *new;
		cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			/* already present */
			return false;
	}

	rb_link_node(&nw->node, parent, new);
	rb_insert_color(&nw->node, &b->pending);

	return true;
}

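/*
 * Look up pending work by origin block.  Returns NULL if there's nothing
 * pending for that block.
 */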
static struct bt_work *__find_pending(struct background_tracker *b,
				      dm_oblock_t oblock)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node;

	while (*new) {
		w = container_of(*new, struct bt_work, node);

		cmp = cmp_oblock(w->work.oblock, oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			break;
	}

	return *new ? w : NULL;
}

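/*
 * Adjust the per-operation pending counter by delta: +1 when work is
 * queued, -1 when it completes.
 */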
static void update_stats(struct background_tracker *b, struct policy_work *w, int delta)
{
	switch (w->op) {
	case POLICY_PROMOTE:
		atomic_add(delta, &b->pending_promotes);
		break;

	case POLICY_DEMOTE:
		atomic_add(delta, &b->pending_demotes);
		break;

	case POLICY_WRITEBACK:
		atomic_add(delta, &b->pending_writebacks);
		break;
	}
}

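/*
 * Accessors for the pending counters.  They count every tracked item of
 * the given type, whether it's still queued or already issued.
 */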
unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);

unsigned btracker_nr_demotions_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_demotes);
}
EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);

static bool max_work_reached(struct background_tracker *b)
{
	/* Cap the total amount of work being tracked at max_work. */
	return atomic_read(&b->pending_promotes) +
		atomic_read(&b->pending_writebacks) +
		atomic_read(&b->pending_demotes) >= b->max_work;
}

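/*
 * Queue some work within the tracker.  The policy_work is copied, so the
 * caller's copy may be reused.  If pwork is not NULL the new work is
 * considered issued immediately and a pointer to the tracker-owned copy is
 * returned through it; otherwise it's held on the queued list until
 * btracker_issue() hands it out.
 *
 * Returns -ENOMEM if the tracker has hit its max_work limit or allocation
 * fails, and -EINVAL if work for the same origin block is already being
 * tracked.
 */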
int btracker_queue(struct background_tracker *b,
		   struct policy_work *work,
		   struct policy_work **pwork)
{
	struct bt_work *w;

	if (pwork)
		*pwork = NULL;

	if (max_work_reached(b))
		return -ENOMEM;

	w = kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
	if (!w)
		return -ENOMEM;

	memcpy(&w->work, work, sizeof(*work));

	if (!__insert_pending(b, w)) {
		/*
		 * There was a race, we'll just ignore this second
		 * bit of work for the same oblock.
		 */
		kmem_cache_free(b->work_cache, w);
		return -EINVAL;
	}

	if (pwork) {
		*pwork = &w->work;
		list_add(&w->list, &b->issued);
	} else
		list_add(&w->list, &b->queued);
	update_stats(b, &w->work, 1);

	return 0;
}
EXPORT_SYMBOL_GPL(btracker_queue);

/*
 * Hand the oldest queued work over to the caller and move it to the
 * issued list.  Returns -ENODATA if there's no queued work.
 */
int btracker_issue(struct background_tracker *b, struct policy_work **work)
{
	struct bt_work *w;

	if (list_empty(&b->queued))
		return -ENODATA;

	w = list_first_entry(&b->queued, struct bt_work, list);
	list_move(&w->list, &b->issued);
	*work = &w->work;

	return 0;
}
EXPORT_SYMBOL_GPL(btracker_issue);

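/*
 * The caller is finished with op, which must be a pointer previously
 * handed out by btracker_queue() or btracker_issue().  Drop it from the
 * rbtree and its list, fix up the counters and free it.
 */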
void btracker_complete(struct background_tracker *b,
		       struct policy_work *op)
{
	struct bt_work *w = container_of(op, struct bt_work, work);

	update_stats(b, &w->work, -1);
	rb_erase(&w->node, &b->pending);
	list_del(&w->list);
	kmem_cache_free(b->work_cache, w);
}
EXPORT_SYMBOL_GPL(btracker_complete);

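/*
 * Despite the name, this reports whether *any* work, not just a promotion,
 * is already being tracked for the given origin block.
 */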
bool btracker_promotion_already_present(struct background_tracker *b,
					dm_oblock_t oblock)
{
	return __find_pending(b, oblock) != NULL;
}
EXPORT_SYMBOL_GPL(btracker_promotion_already_present);

/*----------------------------------------------------------------*/