#ifndef SCM_BLK_H
#define SCM_BLK_H

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

#include <asm/debug.h>
#include <asm/eadm.h>

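/*
 * SCM_NR_PARTS bounds the number of minors (and hence partitions) per
 * gendisk; SCM_QUEUE_DELAY is the requeue delay, presumably in
 * milliseconds, applied when a request has to be retried later.
 */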
#define SCM_NR_PARTS 8
#define SCM_QUEUE_DELAY 5

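/*
 * Per-device state for one SCM increment exposed as a block device:
 * the interrupt handler presumably moves completed requests onto
 * finished_requests and schedules the tasklet to drain them;
 * queued_reqs counts requests issued but not yet completed, and state
 * switches to SCM_WR_PROHIBIT while writes must be suspended.
 */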
struct scm_blk_dev {
        struct tasklet_struct tasklet;
        struct request_queue *rq;
        struct gendisk *gendisk;
        struct scm_device *scmdev;
        spinlock_t rq_lock;     /* guard the request queue */
        spinlock_t lock;        /* guard the rest of the blockdev */
        atomic_t queued_reqs;
        enum {SCM_OPER, SCM_WR_PROHIBIT} state;
        struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
        struct list_head cluster_list;
#endif
};

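/*
 * One in-flight request: the block layer request together with the
 * eadm AOB and aidaw buffers describing it to the hardware, a retry
 * counter, and (with CONFIG_SCM_BLOCK_CLUSTER_WRITE) cluster state,
 * presumably for read-modify-write handling of partial cluster writes.
 */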
struct scm_request {
        struct scm_blk_dev *bdev;
        struct request *request;
        struct aidaw *aidaw;
        struct aob *aob;
        struct list_head list;
        u8 retries;
        int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
        struct {
                enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
                struct list_head list;
                void **buf;
        } cluster;
#endif
};

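/*
 * A struct scm_request is carried in the data area of a struct
 * aob_rq_header; to_aobrq() recovers the enclosing header from the
 * request pointer, so the whole allocation can be freed with, e.g.,
 * kfree(to_aobrq(scmrq)).
 */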
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)

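/*
 * Setup/teardown of the block device; scm_blk_irq() is presumably the
 * completion callback invoked by the eadm subchannel driver once an
 * AOB has finished, and scm_blk_set_available() likely lifts a
 * previous SCM_WR_PROHIBIT.
 */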
int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_set_available(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);

int scm_drv_init(void);
void scm_drv_cleanup(void);

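/*
 * Cluster-write interface. When CONFIG_SCM_BLOCK_CLUSTER_WRITE is not
 * set, the static inline stubs below keep the call sites in the common
 * code compilable while letting the compiler optimize them away.
 */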
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
        return 0;
}
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
        return true;
}
static inline void scm_release_cluster(struct scm_request *scmrq) {}
static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
        return false;
}
static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
static inline bool scm_test_cluster_request(struct scm_request *scmrq)
{
        return false;
}
static inline bool scm_cluster_size_valid(void)
{
        return true;
}
#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */

extern debug_info_t *scm_debug;

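/*
 * SCM_LOG() records a text event at importance level imp in the
 * scm_debug area of the s390 debug facility, e.g. (hypothetical
 * message) SCM_LOG(2, "dev setup failed").
 */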
#define SCM_LOG(imp, txt) do {                                  \
                debug_text_event(scm_debug, imp, txt);          \
        } while (0)

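/*
 * A single debug event holds at most buf_size bytes, so larger buffers
 * are logged as consecutive buf_size-sized chunks.
 */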
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
        if (!debug_level_enabled(scm_debug, level))
                return;
        while (length > 0) {
                debug_event(scm_debug, level, data, length);
                length -= scm_debug->buf_size;
                data += scm_debug->buf_size;
        }
}

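/*
 * Snapshot the device's address, operational state and rank into a
 * packed struct and log it as one hex event.
 */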
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
        struct {
                u64 address;
                u8 oper_state;
                u8 rank;
        } __packed data = {
                .address = scmdev->address,
                .oper_state = scmdev->attrs.oper_state,
                .rank = scmdev->attrs.rank,
        };

        SCM_LOG_HEX(level, &data, sizeof(data));
}

#endif /* SCM_BLK_H */