/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	__scm_free_rq_cluster(scmrq);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

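/*
 * Allocate one scm_request together with its AOB and park it on the
 * inactive list. The AOB lives in a separate zeroed page allocated with
 * GFP_DMA, presumably because the hardware needs a page-aligned address
 * below 2 GB, rather than in the aobrq allocation itself.
 */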
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob) {
		__scm_free_rq(scmrq);
		return -ENOMEM;
	}

	if (__scm_alloc_rq_cluster(scmrq)) {
		__scm_free_rq(scmrq);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
}

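/*
 * Preallocate nrqs requests and create the aidaw mempool. The pool is
 * sized to one page per eight requests (at least one), apparently on
 * the assumption that most requests get by with the aidaws embedded in
 * the AOB page and rarely need an extra page.
 */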
static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

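/*
 * Return a request to the inactive list. If the first msb points to a
 * page-aligned aidaw list, that page came from aidaw_pool via
 * scm_aidaw_fetch() (aidaws placed behind the msbs inside the AOB page
 * are never page aligned) and is handed back to the pool here.
 */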
static void scm_request_done(struct scm_request *scmrq)
{
	struct msb *msb = &scmrq->aob->msb[0];
	u64 aidaw = msb->data_addr;
	unsigned long flags;

	if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
	    IS_ALIGNED(aidaw, PAGE_SIZE))
		mempool_free(virt_to_page(aidaw), aidaw_pool);

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

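/* Writes are rejected while the device is in the write-prohibited state. */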
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

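/*
 * How many bytes of request data the aidaws from this address up to the
 * end of the containing page can describe. Each aidaw entry maps one 4K
 * page of data, and the list is never extended across a page boundary,
 * so a page-aligned aidaw pointer yields 0 and forces a fresh page.
 */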
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

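/*
 * Return an aidaw list able to describe bytes worth of data: reuse the
 * remainder of the current page via next_aidaw if it suffices, otherwise
 * fall back to a freshly zeroed page from the mempool.
 */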
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

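/*
 * Translate the block layer request into a single msb: 4K block size,
 * the scm address derived from the 512 byte sector position, and one
 * aidaw per bio segment. Fails with -ENOMEM only if no aidaw space
 * could be obtained.
 */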
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(scmrq->request));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count = 1;
	msb->scm_addr = scmdev->address +
		((u64) blk_rq_pos(scmrq->request) << 9);
	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
		MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, scmrq->request, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	return 0;
}

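/*
 * Reset a fetched request for reuse: clear the AOB, set up the move
 * command, and let next_aidaw point right behind the first msb so that
 * small requests need no extra aidaw page (only msb[0] is ever used).
 */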
static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq,
				    struct request *req)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->request = req;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[1];
	scm_request_cluster_init(scmrq);
}

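/*
 * Make sure the queue is looked at again: if requests are still in
 * flight, the next completion interrupt restarts it; otherwise schedule
 * a delayed queue run.
 */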
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	scm_release_cluster(scmrq);
	blk_requeue_request(bdev->rq, scmrq->request);
	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	scm_release_cluster(scmrq);
	blk_end_request_all(scmrq->request, scmrq->error);
	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

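/*
 * Strategy function of the request queue, called with the queue lock
 * held. Non-fs requests are failed with -EIO; everything else is wired
 * up to a free scm_request and started via eadm_start_aob(). On any
 * resource shortage the loop bails out and arranges for a restart.
 */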
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq;
	struct request *req;
	int ret;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS) {
			blk_start_request(req);
			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
			blk_end_request_all(req, -EIO);
			continue;
		}

		if (!scm_permit_request(bdev, req)) {
			scm_ensure_queue_restart(bdev);
			return;
		}
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			scm_ensure_queue_restart(bdev);
			return;
		}
		scm_request_init(bdev, scmrq, req);
		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_done(scmrq);
			return;
		}
		if (scm_need_cluster_request(scmrq)) {
			atomic_inc(&bdev->queued_reqs);
			blk_start_request(req);
			scm_initiate_cluster_request(scmrq);
			return;
		}

		if (scm_request_prepare(scmrq)) {
			SCM_LOG(5, "no aidaw");
			scm_release_cluster(scmrq);
			scm_request_done(scmrq);
			scm_ensure_queue_restart(bdev);
			return;
		}

		atomic_inc(&bdev->queued_reqs);
		blk_start_request(req);

		ret = eadm_start_aob(scmrq->aob);
		if (ret) {
			SCM_LOG(5, "no subchannel");
			scm_request_requeue(scmrq);
			return;
		}
	}
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

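/*
 * Completion callback, invoked in interrupt context: only move the
 * finished request onto the per-device list and defer the real
 * completion work to the tasklet.
 */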
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

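/*
 * Error recovery for a failed request that still has retries left. For
 * -EIO the response block is valid and is inspected: EQC_WR_PROHIBIT
 * puts the device into the write-prohibited state and requeues the
 * request; everything else is simply restarted, and only if the restart
 * fails is the request requeued under the queue lock.
 */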
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != -EIO)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	spin_lock_irqsave(&bdev->rq_lock, flags);
	scm_request_requeue(scmrq);
	spin_unlock_irqrestore(&bdev->rq_lock, flags);
}

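/*
 * Per-device tasklet: walk the finished list, retry failed requests
 * while retries remain, let the cluster code handle its own requests,
 * complete the rest, and finally kick the queue again.
 */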
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);

			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

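/*
 * Set up the request queue and gendisk for one scm device. Disk names
 * run scma..scmz, then scmaa..scmzz, which caps the driver at 702
 * devices. The queue uses a 4K logical block size, and the segment
 * limit keeps a request's aidaw list within a single page.
 */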
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

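/*
 * Module init: verify the configured cluster size, register the block
 * major, preallocate requests, and set up the s390 debug feature before
 * registering the scm driver. Error paths unwind in reverse order.
 */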
static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_cluster_size_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);