/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

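/*
 * Both parameters are read-only at runtime (S_IRUGO), so as an
 * illustration they would be set at module load time, e.g.:
 *
 *	modprobe scm_block nr_requests=128 nr_requests_per_io=16
 *
 * (module name assumed from KMSG_COMPONENT; adjust to whatever name the
 * build actually produces).
 */
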
MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

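/*
 * Pool layout, as set up below: each scm_request owns one zeroed page
 * holding its aob. The msb array sits at the start of that page, and the
 * space behind the last usable msb slot is handed out as aidaw entries;
 * requests needing more aidaws than fit there draw extra pages from
 * aidaw_pool.
 */
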
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	__scm_free_rq_cluster(scmrq);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	if (__scm_alloc_rq_cluster(scmrq))
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

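/* Reject writes while the device is in the SCM_WR_PROHIBIT state. */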
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

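/*
 * Number of I/O bytes the aidaw entries between @aidaw and the end of its
 * page can describe; each remaining entry addresses one 4K page of data.
 * E.g., assuming the 16-byte aidaw format from asm/eadm.h, an @aidaw
 * sitting 64 bytes before the page end leaves room for 4 entries, i.e. 16K.
 */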
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

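/*
 * Reuse the tail of the current aidaw page when it still covers @bytes;
 * otherwise hand out a freshly zeroed page from the mempool.
 */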
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

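/*
 * Fill the next free msb slot from the block layer request attached to
 * it: blk_rq_pos() counts 512-byte sectors (hence the shift by 9 to get
 * a byte offset) while blk_count is kept in 4K blocks (hence the shift
 * by 12), and every bio segment contributes one aidaw entry.
 */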
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
	scm_request_cluster_init(scmrq);
}

static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_requeue_request(bdev->rq, scmrq->request[i]);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_end_request_all(scmrq->request[i], scmrq->error);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

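/*
 * Hand the aob over to the EADM subchannel. An aob without a single msb
 * and a failed start are both answered by requeueing the attached
 * requests.
 */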
static int scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int ret;

	atomic_inc(&bdev->queued_reqs);
	if (!scmrq->aob->request.msb_count) {
		scm_request_requeue(scmrq);
		return -EINVAL;
	}

	ret = eadm_start_aob(scmrq->aob);
	if (ret) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
	return ret;
}

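/*
 * Request queue callback: batch up to nr_requests_per_io block layer
 * requests into one aob before starting the I/O. Non-fs requests are
 * failed with -EIO, cluster requests are started on their own, and
 * whatever cannot be handled right now stays queued for a later restart.
 */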
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq = NULL;
	struct request *req;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS) {
			blk_start_request(req);
			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
			blk_end_request_all(req, -EIO);
			continue;
		}

		if (!scm_permit_request(bdev, req))
			goto out;

		if (!scmrq) {
			scmrq = scm_request_fetch();
			if (!scmrq) {
				SCM_LOG(5, "no request");
				goto out;
			}
			scm_request_init(bdev, scmrq);
		}
		scm_request_set(scmrq, req);

		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_set(scmrq, NULL);
			if (scmrq->aob->request.msb_count)
				goto out;

			scm_request_done(scmrq);
			return;
		}

		if (scm_need_cluster_request(scmrq)) {
			if (scmrq->aob->request.msb_count) {
				/* Start cluster requests separately. */
				scm_request_set(scmrq, NULL);
				if (scm_request_start(scmrq))
					return;
			} else {
				atomic_inc(&bdev->queued_reqs);
				blk_start_request(req);
				scm_initiate_cluster_request(scmrq);
			}
			scmrq = NULL;
			continue;
		}

		if (scm_request_prepare(scmrq)) {
			SCM_LOG(5, "aidaw alloc failed");
			scm_request_set(scmrq, NULL);
			goto out;
		}
		blk_start_request(req);

		if (scmrq->aob->request.msb_count < nr_requests_per_io)
			continue;

		if (scm_request_start(scmrq))
			return;

		scmrq = NULL;
	}
out:
	if (scmrq)
		scm_request_start(scmrq);
	else
		scm_ensure_queue_restart(bdev);
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

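/*
 * Only -EIO arrives with a valid response block: a reported
 * EQC_WR_PROHIBIT suspends write access and requeues the request; every
 * other error restarts the aob, falling back to requeueing when the
 * restart itself fails.
 */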
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != -EIO)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	spin_lock_irqsave(&bdev->rq_lock, flags);
	scm_request_requeue(scmrq);
	spin_unlock_irqrestore(&bdev->rq_lock, flags);
}

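/*
 * Completion handling in tasklet context: retry failed requests (up to
 * scmrq->retries times), hand cluster requests back to the cluster code,
 * finish the rest and then poke the queue for more work.
 */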
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);

			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

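/*
 * Per-device setup: one request queue and gendisk per scm device. Disks
 * are named scma..scmz, then scmaa..scmzz (702 devices at most), and the
 * capacity is reported in 512-byte sectors.
 */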
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return scm_cluster_size_valid();
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);