blob: 7ac6bad919efc35cec5025aa35bf3b28f3d97a71 [file] [log] [blame]
Sebastian Ottf30664e2012-08-28 16:50:38 +02001#ifndef SCM_BLK_H
2#define SCM_BLK_H
3
4#include <linux/interrupt.h>
5#include <linux/spinlock.h>
6#include <linux/blkdev.h>
7#include <linux/genhd.h>
8#include <linux/list.h>
9
10#include <asm/debug.h>
11#include <asm/eadm.h>
12
#define SCM_NR_PARTS 8		/* minor numbers reserved per gendisk (partitions) */
#define SCM_QUEUE_DELAY 5	/* requeue/retry delay; units not visible here - TODO confirm (jiffies vs msecs) */
15
/*
 * Per-device state of an SCM block device: the block-layer request
 * queue and gendisk it is registered with, plus a tasklet that handles
 * deferred completion work.
 */
struct scm_blk_dev {
	struct tasklet_struct tasklet;		/* deferred (non-IRQ) completion processing */
	struct request_queue *rq;		/* block-layer request queue */
	struct gendisk *gendisk;
	struct scm_device *scmdev;		/* underlying storage-class-memory device */
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;			/* count of in-flight requests - presumably for throttling; confirm in scm_blk.c */
	struct list_head finished_requests;	/* completed requests awaiting final handling */
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;		/* clusters reserved by this device (see cluster API below) */
#endif
};
29
/*
 * State of a single request issued to an SCM device.  Lives in the
 * data area of an aob_rq_header (see to_aobrq() below).
 */
struct scm_request {
	struct scm_blk_dev *bdev;	/* owning block device */
	struct request *request;	/* block-layer request being serviced */
	struct aidaw *aidaw;		/* data address list for the transfer */
	struct aob *aob;		/* asynchronous operation block driving the I/O */
	struct list_head list;
	u8 retries;			/* remaining retry budget */
	int error;			/* completion status of the request */
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	/* cluster-write bookkeeping; only built with CONFIG_SCM_BLOCK_CLUSTER_WRITE */
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;
		void **buf;		/* buffers used for clustered writes - presumably read-modify-write scratch; confirm in scm_blk_cluster.c */
	} cluster;
#endif
};
46
/* Map an scm_request pointer back to its enclosing aob_rq_header. */
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
48
49int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
50void scm_blk_dev_cleanup(struct scm_blk_dev *);
51void scm_blk_irq(struct scm_device *, void *, int);
52
Sebastian Ott0d804b22012-08-28 16:51:19 +020053void scm_request_finish(struct scm_request *);
54void scm_request_requeue(struct scm_request *);
55
Sebastian Ottf30664e2012-08-28 16:50:38 +020056int scm_drv_init(void);
57void scm_drv_cleanup(void);
58
Sebastian Ott0d804b22012-08-28 16:51:19 +020059#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
60void __scm_free_rq_cluster(struct scm_request *);
61int __scm_alloc_rq_cluster(struct scm_request *);
62void scm_request_cluster_init(struct scm_request *);
63bool scm_reserve_cluster(struct scm_request *);
64void scm_release_cluster(struct scm_request *);
65void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
66bool scm_need_cluster_request(struct scm_request *);
67void scm_initiate_cluster_request(struct scm_request *);
68void scm_cluster_request_irq(struct scm_request *);
69bool scm_test_cluster_request(struct scm_request *);
70bool scm_cluster_size_valid(void);
71#else
72#define __scm_free_rq_cluster(scmrq) {}
73#define __scm_alloc_rq_cluster(scmrq) 0
74#define scm_request_cluster_init(scmrq) {}
75#define scm_reserve_cluster(scmrq) true
76#define scm_release_cluster(scmrq) {}
77#define scm_blk_dev_cluster_setup(bdev) {}
78#define scm_need_cluster_request(scmrq) false
79#define scm_initiate_cluster_request(scmrq) {}
80#define scm_cluster_request_irq(scmrq) {}
81#define scm_test_cluster_request(scmrq) false
82#define scm_cluster_size_valid() true
83#endif
Sebastian Ottf30664e2012-08-28 16:50:38 +020084
/* Driver-wide s390 debug feature area - defined in a .c file of this driver. */
extern debug_info_t *scm_debug;

/* Log a short text event at importance level @imp to the scm debug area. */
#define SCM_LOG(imp, txt) do { \
		debug_text_event(scm_debug, imp, txt); \
	} while (0)
90
91static inline void SCM_LOG_HEX(int level, void *data, int length)
92{
93 if (level > scm_debug->level)
94 return;
95 while (length > 0) {
96 debug_event(scm_debug, level, data, length);
97 length -= scm_debug->buf_size;
98 data += scm_debug->buf_size;
99 }
100}
101
/*
 * Log a packed snapshot of the device's address, operational state and
 * rank as a hex event (via SCM_LOG_HEX) at the given debug level.
 */
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
	/* __packed so the logged bytes have a fixed, gap-free layout */
	struct {
		u64 address;
		u8 oper_state;
		u8 rank;
	} __packed data = {
		.address = scmdev->address,
		.oper_state = scmdev->attrs.oper_state,
		.rank = scmdev->attrs.rank,
	};

	SCM_LOG_HEX(level, &data, sizeof(data));
}
116
117#endif /* SCM_BLK_H */