/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	free_page((unsigned long) scmrq->aidaw);
	__scm_free_rq_cluster(scmrq);
	kfree(aobrq);
}

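/*
 * Free all requests on the inactive list, including the AOB and AIDAW
 * pages owned by each of them.
 */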
static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);
}

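/*
 * Allocate one request together with a zeroed page each for its AOB and
 * AIDAW list, and park it on the inactive list.
 */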
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob || !scmrq->aidaw) {
		__scm_free_rq(scmrq);
		return -ENOMEM;
	}

	if (__scm_alloc_rq_cluster(scmrq)) {
		__scm_free_rq(scmrq);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

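/*
 * Take a preallocated request off the inactive list; returns NULL when
 * all of them are in flight. scm_request_done() puts it back.
 */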
static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static int scm_open(struct block_device *blkdev, fmode_t mode)
{
	return scm_get_ref();
}

static void scm_release(struct gendisk *gendisk, fmode_t mode)
{
	scm_put_ref();
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
	.open = scm_open,
	.release = scm_release,
};

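/* Reject writes while the device is in the SCM_WR_PROHIBIT state. */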
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

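/*
 * Fill the first msb of the AOB with the transfer parameters and build
 * the indirect address list, one aidaw entry per 4K segment.
 */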
static void scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct aidaw *aidaw = scmrq->aidaw;
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct bio_vec *bv;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count = 1;
	msb->scm_addr = scmdev->address +
		((u64) blk_rq_pos(scmrq->request) << 9);
	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
		MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, scmrq->request, iter) {
		WARN_ON(bv->bv_offset);
		msb->blk_count += bv->bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv->bv_page);
		aidaw++;
	}
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq,
				    struct request *req)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(aob, 0, sizeof(*aob));
	memset(scmrq->aidaw, 0, PAGE_SIZE);
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->request = req;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	scm_request_cluster_init(scmrq);
}

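/*
 * Make sure the queue is run again when no new request could be started:
 * if requests are still in flight the next interrupt triggers the
 * restart, otherwise schedule a delayed queue run.
 */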
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	scm_release_cluster(scmrq);
	blk_requeue_request(bdev->rq, scmrq->request);
	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	scm_release_cluster(scmrq);
	blk_end_request_all(scmrq->request, scmrq->error);
	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

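/*
 * The request function of the block layer queue: translate each request
 * into an AOB and start the I/O. Called with the queue lock held.
 */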
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq;
	struct request *req;
	int ret;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS) {
			blk_start_request(req);
			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
			blk_end_request_all(req, -EIO);
			continue;
		}

		if (!scm_permit_request(bdev, req)) {
			scm_ensure_queue_restart(bdev);
			return;
		}
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			scm_ensure_queue_restart(bdev);
			return;
		}
		scm_request_init(bdev, scmrq, req);
		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_done(scmrq);
			return;
		}
		if (scm_need_cluster_request(scmrq)) {
			atomic_inc(&bdev->queued_reqs);
			blk_start_request(req);
			scm_initiate_cluster_request(scmrq);
			return;
		}
		scm_request_prepare(scmrq);
		atomic_inc(&bdev->queued_reqs);
		blk_start_request(req);

		ret = scm_start_aob(scmrq->aob);
		if (ret) {
			SCM_LOG(5, "no subchannel");
			scm_request_requeue(scmrq);
			return;
		}
	}
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

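/*
 * Completion callback, called in interrupt context: just move the
 * request to the finished list and let the tasklet do the real work.
 */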
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

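/*
 * Evaluate the response block of a failed request: a write-prohibit
 * condition suspends writes and requeues the request, anything else is
 * restarted (or requeued if the restart fails).
 */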
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != -EIO)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!scm_start_aob(scmrq->aob))
		return;

requeue:
	spin_lock_irqsave(&bdev->rq_lock, flags);
	scm_request_requeue(scmrq);
	spin_unlock_irqrestore(&bdev->rq_lock, flags);
}

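/*
 * Tasklet doing the completion work: handle errors and retries, finish
 * successful requests and run the queue again for more work.
 */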
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);

			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}

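/*
 * Set up request queue and gendisk for one SCM increment. Disk names are
 * scma..scmz followed by scmaa..scmzz, limiting the driver to 702 disks.
 */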
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

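/* Allow writes again once a write-prohibit condition has been resolved. */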
void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_cluster_size_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);