/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

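/*
 * The driver works on a pool of preallocated requests: nr_requests of
 * them are set up at module load. Each request carries one page for
 * the AOB and one page for its aidaw list, both allocated with
 * GFP_DMA. The pool is protected by list_lock.
 */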
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	free_page((unsigned long) scmrq->aidaw);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);
}

static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob || !scmrq->aidaw) {
		__scm_free_rq(scmrq);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

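/*
 * Take a request from the pool. Returns NULL when the pool is
 * exhausted; the caller then delays and restarts the queue.
 */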
static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static int scm_open(struct block_device *blkdev, fmode_t mode)
{
	return scm_get_ref();
}

static int scm_release(struct gendisk *gendisk, fmode_t mode)
{
	scm_put_ref();
	return 0;
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
	.open = scm_open,
	.release = scm_release,
};

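/*
 * Translate a block layer request into a single msb within the AOB:
 * the start address on the scm device is derived from the request's
 * 512 byte sector offset, and every bio segment contributes one
 * indirect data address word (aidaw) describing a 4K block.
 */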
static void scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct aidaw *aidaw = scmrq->aidaw;
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct bio_vec *bv;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count = 1;
	msb->scm_addr = scmdev->address +
		((u64) blk_rq_pos(scmrq->request) << 9);
	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
		MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, scmrq->request, iter) {
		WARN_ON(bv->bv_offset);
		msb->blk_count += bv->bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv->bv_page);
		aidaw++;
	}
}

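/* Reset a pooled request and tie it to a block layer request. */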
static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq,
				    struct request *req)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(aob, 0, sizeof(*aob));
	memset(scmrq->aidaw, 0, PAGE_SIZE);
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->request = req;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
}

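/*
 * Make sure the queue is looked at again: while requests are in
 * flight, the completion interrupt (via the tasklet) runs the queue;
 * otherwise restart it after a short delay.
 */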
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	blk_requeue_request(bdev->rq, scmrq->request);
	/* Drop the count taken in scm_blk_request() so that the queue
	 * restart logic above does not wait for an interrupt that will
	 * never come. */
	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	blk_end_request_all(scmrq->request, scmrq->error);
	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

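/*
 * The request function, called by the block layer with the queue lock
 * (bdev->rq_lock) held.
 */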
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq;
	struct request *req;
	int ret;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS) {
			/*
			 * Dequeue and fail the request: a bare "continue"
			 * would peek the same request again and loop
			 * forever.
			 */
			blk_start_request(req);
			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
			blk_end_request_all(req, -EIO);
			continue;
		}

		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			scm_ensure_queue_restart(bdev);
			return;
		}
		scm_request_init(bdev, scmrq, req);
		scm_request_prepare(scmrq);
		/*
		 * Count the request before starting it: once
		 * scm_start_aob() succeeds the interrupt may fire at any
		 * time, and both requeue and finish drop the count again.
		 */
		atomic_inc(&bdev->queued_reqs);
		blk_start_request(req);

		ret = scm_start_aob(scmrq->aob);
		if (ret) {
			SCM_LOG(5, "no subchannel");
			scm_request_requeue(scmrq);
			return;
		}
	}
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

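/*
 * Completion callback for an AOB, called in interrupt context: record
 * the result and defer all further processing to the tasklet.
 */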
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

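/*
 * Process finished requests: retry failed ones (up to scmrq->retries
 * times), complete the rest, then run the queue again.
 */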
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			if (scm_start_aob(scmrq->aob)) {
				spin_lock_irqsave(&bdev->rq_lock, flags);
				scm_request_requeue(scmrq);
				spin_unlock_irqrestore(&bdev->rq_lock, flags);
			}
			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}
		/* queued_reqs is dropped inside scm_request_finish(). */
		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}

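/*
 * Set up the request queue and gendisk for a new scm device. Disks
 * are named scma..scmz, then scmaa..scmzz, allowing up to 702 devices.
 */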
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

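/* Register the major number, fill the request pool, set up the debug feature. */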
static int __init scm_blk_init(void)
{
	int ret;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	/*
	 * Propagate the error code instead of returning the (positive)
	 * major number, and free any partially allocated requests.
	 */
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);