blob: a31bc4c36cca8f4680579f8ea4ec65b23c737224 [file] [log] [blame]
Oren Weil3ce72722011-05-15 13:43:43 +03001/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
Tomas Winkler733ba912012-02-09 19:25:53 +02004 * Copyright (c) 2003-2012, Intel Corporation.
Oren Weil3ce72722011-05-15 13:43:43 +03005 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
Tomas Winkler06ecd642013-02-06 14:06:42 +020018
19#include <linux/kthread.h>
20#include <linux/interrupt.h>
Tomas Winkler47a73802012-12-25 19:06:03 +020021
22#include "mei_dev.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020023#include "hbm.h"
24
Tomas Winkler6e4cd272014-03-11 14:49:23 +020025#include "hw-me.h"
26#include "hw-me-regs.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020027
/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * returns register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
			       unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}
Oren Weil3ce72722011-05-15 13:43:43 +030041
42
/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				 unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}
55
/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * returns ME_CB_RW register value (u32)
 */
static u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @hw: the me hardware structure
 *
 * returns ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, ME_CSR_HA);
}
79
/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @hw: the me hardware structure
 *
 * returns H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, H_CSR);
}
91
/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @hw: the me hardware structure
 * @hcsr: new register value to write
 */
static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
{
	/* H_IS is write-one-to-clear: mask it out so a plain CSR update
	 * cannot accidentally ack a pending interrupt */
	hcsr &= ~H_IS;
	mei_me_reg_write(hw, H_CSR, hcsr);
}
103
Tomas Winklere7e0c232013-01-08 23:07:31 +0200104
105/**
Masanari Iida393b1482013-04-05 01:05:05 +0900106 * mei_me_hw_config - configure hw dependent settings
Tomas Winklere7e0c232013-01-08 23:07:31 +0200107 *
108 * @dev: mei device
109 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200110static void mei_me_hw_config(struct mei_device *dev)
Tomas Winklere7e0c232013-01-08 23:07:31 +0200111{
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200112 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler52c34562013-02-06 14:06:40 +0200113 u32 hcsr = mei_hcsr_read(to_me_hw(dev));
Tomas Winklere7e0c232013-01-08 23:07:31 +0200114 /* Doesn't change in runtime */
115 dev->hbuf_depth = (hcsr & H_CBD) >> 24;
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200116
117 hw->pg_state = MEI_PG_OFF;
Tomas Winklere7e0c232013-01-08 23:07:31 +0200118}
Tomas Winkler964a2332014-03-18 22:51:59 +0200119
/**
 * mei_me_pg_state - translate internal pg state
 *   to the mei power gating state
 *
 * @dev: the device structure
 *
 * returns: the cached power gating state (MEI_PG_OFF / MEI_PG_ON)
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	return hw->pg_state;
}
132
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);
	/* H_IS is write-one-to-clear: writing the read value back acks
	 * the pending interrupt */
	if ((hcsr & H_IS) == H_IS)
		mei_me_reg_write(hw, H_CSR, hcsr);
}
/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);
	hcsr |= H_IE;
	mei_hcsr_set(hw, hcsr);
}
157
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);
	hcsr &= ~H_IE;
	mei_hcsr_set(hw, hcsr);
}
170
/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	/* drop H_RST and generate an interrupt so the FW notices */
	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(hw, hcsr);
}
185/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200186 * mei_me_hw_reset - resets fw via mei csr register.
Tomas Winkleradfba322013-01-08 23:07:27 +0200187 *
188 * @dev: the device structure
Masanari Iida393b1482013-04-05 01:05:05 +0900189 * @intr_enable: if interrupt should be enabled after reset.
Tomas Winkleradfba322013-01-08 23:07:27 +0200190 */
Tomas Winklerc20c68d2013-06-23 10:42:49 +0300191static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
Tomas Winkleradfba322013-01-08 23:07:27 +0200192{
Tomas Winkler52c34562013-02-06 14:06:40 +0200193 struct mei_me_hw *hw = to_me_hw(dev);
194 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkleradfba322013-01-08 23:07:27 +0200195
Tomas Winklerff960662013-07-30 14:11:51 +0300196 hcsr |= H_RST | H_IG | H_IS;
Tomas Winkleradfba322013-01-08 23:07:27 +0200197
198 if (intr_enable)
199 hcsr |= H_IE;
200 else
Tomas Winklerff960662013-07-30 14:11:51 +0300201 hcsr &= ~H_IE;
Tomas Winkleradfba322013-01-08 23:07:27 +0200202
Tomas Winklerff960662013-07-30 14:11:51 +0300203 mei_me_reg_write(hw, H_CSR, hcsr);
Tomas Winkleradfba322013-01-08 23:07:27 +0200204
Tomas Winkler33ec0822014-01-12 00:36:09 +0200205 if (intr_enable == false)
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200206 mei_me_hw_reset_release(dev);
Tomas Winkleradfba322013-01-08 23:07:27 +0200207
Tomas Winklerc20c68d2013-06-23 10:42:49 +0300208 return 0;
Tomas Winkleradfba322013-01-08 23:07:27 +0200209}
210
/**
 * mei_me_host_set_ready - enable device
 *
 * @dev - mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	/* announce host readiness: interrupts on, interrupt generate, ready */
	hw->host_hw_state |= H_IE | H_IG | H_RDY;
	mei_hcsr_set(hw, hw->host_hw_state);
}
/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev - mei device
 * returns bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	hw->host_hw_state = mei_hcsr_read(hw);
	return (hw->host_hw_state & H_RDY) == H_RDY;
}
236
/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev - mei device
 * returns bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	hw->me_hw_state = mei_me_mecsr_read(hw);
	return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200249
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200250static int mei_me_hw_ready_wait(struct mei_device *dev)
251{
252 int err;
253 if (mei_me_hw_is_ready(dev))
254 return 0;
255
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300256 dev->recvd_hw_ready = false;
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200257 mutex_unlock(&dev->device_lock);
258 err = wait_event_interruptible_timeout(dev->wait_hw_ready,
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300259 dev->recvd_hw_ready,
Tomas Winkler7d93e582014-01-14 23:10:10 +0200260 mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200261 mutex_lock(&dev->device_lock);
262 if (!err && !dev->recvd_hw_ready) {
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300263 if (!err)
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200264 err = -ETIME;
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200265 dev_err(&dev->pdev->dev,
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300266 "wait hw ready failed. status = %d\n", err);
267 return err;
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200268 }
269
270 dev->recvd_hw_ready = false;
271 return 0;
272}
273
/**
 * mei_me_hw_start - wait for the hw to become ready and
 *  then announce host readiness
 *
 * @dev: mei device
 *
 * returns 0 on success, an error code otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
	int ret = mei_me_hw_ready_wait(dev);
	if (ret)
		return ret;
	dev_dbg(&dev->pdev->dev, "hw is ready\n");

	mei_me_host_set_ready(dev);
	return ret;
}
284
285
/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * returns number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;

	hw->host_hw_state = mei_hcsr_read(hw);

	/* circular buffer pointers live in bits 8-15 / 16-23 of H_CSR */
	read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
	write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);

	/* unsigned wrap-around yields the filled count even after rollover */
	return (unsigned char) (write_ptr - read_ptr);
}
305
/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * returns true if empty, false - otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
	return mei_hbuf_filled_slots(dev) == 0;
}
317
/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * returns -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	empty_slots = dev->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > dev->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}
338
/**
 * mei_me_hbuf_max_len - returns size of hw buffer in bytes,
 *  minus the space taken by the message header
 *
 * @dev: the device structure
 *
 * returns maximum payload length in bytes
 */
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}
343
344
Oren Weil3ce72722011-05-15 13:43:43 +0300345/**
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200346 * mei_me_write_message - writes a message to mei device.
Oren Weil3ce72722011-05-15 13:43:43 +0300347 *
348 * @dev: the device structure
Sedat Dilek7353f852013-01-17 19:54:15 +0100349 * @header: mei HECI header of message
Tomas Winkler438763f2012-12-25 19:05:59 +0200350 * @buf: message payload will be written
Oren Weil3ce72722011-05-15 13:43:43 +0300351 *
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200352 * This function returns -EIO if write has failed
Oren Weil3ce72722011-05-15 13:43:43 +0300353 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200354static int mei_me_write_message(struct mei_device *dev,
355 struct mei_msg_hdr *header,
356 unsigned char *buf)
Oren Weil3ce72722011-05-15 13:43:43 +0300357{
Tomas Winkler52c34562013-02-06 14:06:40 +0200358 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200359 unsigned long rem;
Tomas Winkler438763f2012-12-25 19:05:59 +0200360 unsigned long length = header->length;
Tomas Winkler169d1332012-06-19 09:13:35 +0300361 u32 *reg_buf = (u32 *)buf;
Tomas Winkler88eb99f2013-01-08 23:07:30 +0200362 u32 hcsr;
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200363 u32 dw_cnt;
Tomas Winkler169d1332012-06-19 09:13:35 +0300364 int i;
365 int empty_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300366
Tomas Winkler15d4acc2012-12-25 19:06:00 +0200367 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
Oren Weil3ce72722011-05-15 13:43:43 +0300368
Tomas Winkler726917f2012-06-25 23:46:28 +0300369 empty_slots = mei_hbuf_empty_slots(dev);
Tomas Winkler169d1332012-06-19 09:13:35 +0300370 dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
Oren Weil3ce72722011-05-15 13:43:43 +0300371
Tomas Winkler7bdf72d2012-07-04 19:24:52 +0300372 dw_cnt = mei_data2slots(length);
Tomas Winkler169d1332012-06-19 09:13:35 +0300373 if (empty_slots < 0 || dw_cnt > empty_slots)
Tomas Winkler9d098192014-02-19 17:35:48 +0200374 return -EMSGSIZE;
Oren Weil3ce72722011-05-15 13:43:43 +0300375
Tomas Winklerb68301e2013-03-27 16:58:29 +0200376 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
Oren Weil3ce72722011-05-15 13:43:43 +0300377
Tomas Winkler169d1332012-06-19 09:13:35 +0300378 for (i = 0; i < length / 4; i++)
Tomas Winklerb68301e2013-03-27 16:58:29 +0200379 mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
Tomas Winkler169d1332012-06-19 09:13:35 +0300380
381 rem = length & 0x3;
382 if (rem > 0) {
383 u32 reg = 0;
384 memcpy(&reg, &buf[length - rem], rem);
Tomas Winklerb68301e2013-03-27 16:58:29 +0200385 mei_me_reg_write(hw, H_CB_WW, reg);
Oren Weil3ce72722011-05-15 13:43:43 +0300386 }
387
Tomas Winkler52c34562013-02-06 14:06:40 +0200388 hcsr = mei_hcsr_read(hw) | H_IG;
389 mei_hcsr_set(hw, hcsr);
Tomas Winkler827eef52013-02-06 14:06:41 +0200390 if (!mei_me_hw_is_ready(dev))
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200391 return -EIO;
Oren Weil3ce72722011-05-15 13:43:43 +0300392
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200393 return 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300394}
395
/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * returns -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	hw->me_hw_state = mei_me_mecsr_read(hw);
	/* depth and circular pointers are packed into the ME CSR */
	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
422
/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * returns 0 (always succeeds)
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
		    unsigned long buffer_length)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 *reg_buf = (u32 *)buffer;
	u32 hcsr;

	/* drain whole dwords from the read window */
	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = mei_me_mecbrw_read(dev);

	/* copy only the remaining 1-3 bytes of the final dword */
	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);
		memcpy(reg_buf, &reg, buffer_length);
	}

	/* ack the read by generating an interrupt towards the device */
	hcsr = mei_hcsr_read(hw) | H_IG;
	mei_hcsr_set(hw, hcsr);
	return 0;
}
449
/**
 * mei_me_pg_enter - write pg enter register to mei device.
 *
 * @dev: the device structure
 */
static void mei_me_pg_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
	reg |= H_HPG_CSR_PGI;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
462
/**
 * mei_me_pg_exit - write pg exit register to mei device.
 *
 * @dev: the device structure
 */
static void mei_me_pg_exit(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);

	/* exiting PG only makes sense if PG entry was set before */
	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
478
/**
 * mei_me_pg_set_sync - perform pg entry procedure
 *
 * @dev: the device structure
 *
 * returns 0 on success an error code otherwise
 */
int mei_me_pg_set_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* ask the FW to enter power gating isolation */
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* drop the device lock while sleeping so the interrupt thread
	 * can deliver the PG response and set pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_enter(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* NOTE(review): pg_state is set to MEI_PG_ON even on timeout —
	 * presumably intentional so the exit path is still taken; confirm */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
515
/**
 * mei_me_pg_unset_sync - perform pg exit procedure
 *
 * @dev: the device structure
 *
 * returns 0 on success an error code otherwise
 */
int mei_me_pg_unset_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* the FW may have already requested the exit; skip the handshake */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_exit(dev);

	/* drop the device lock so the interrupt thread can record the
	 * FW's PG exit request while we sleep */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	else
		ret = -ETIME;

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
552
/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * returns: true is pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, ME_CSR_HA);

	/* hardware capability bit must be set ... */
	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	/* ... and the negotiated HBM protocol version must be >= the
	 * first version that introduced power gating isolation */
	if (dev->version.major_version < HBM_MAJOR_VERSION_PGI)
		goto notsupported;

	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
	    dev->version.minor_version < HBM_MINOR_VERSION_PGI)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}
587
/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns irqreturn_t
 */

irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 csr_reg = mei_hcsr_read(hw);

	/* not our interrupt on a shared line */
	if ((csr_reg & H_IS) != H_IS)
		return IRQ_NONE;

	/* clear H_IS bit in H_CSR */
	mei_me_reg_write(hw, H_CSR, csr_reg);

	return IRQ_WAKE_THREAD;
}
611
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(dev->pdev))
		mei_clear_interrupts(dev);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			/* FW just became ready: wake up the waiter in
			 * mei_me_hw_ready_wait() */
			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");

			dev->recvd_hw_ready = true;
			wake_up_interruptible(&dev->wait_hw_ready);
		} else {

			dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
			mei_me_hw_reset_release(dev);
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg state is not idle
	 */
	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
		rets = mei_irq_write_handler(dev, &complete_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
Alexander Usyskin04dd3662014-03-31 17:59:23 +0300699
/**
 * mei_me_fw_status - retrieve fw status from the pci config space
 *
 * @dev: the device structure
 * @fw_status: fw status registers storage
 *
 * returns 0 on success an error code otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
			struct mei_fw_status *fw_status)
{
	const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2};
	int i;

	if (!fw_status)
		return -EINVAL;

	/* the number of valid FW status registers depends on the chipset */
	switch (dev->pdev->device) {
	case MEI_DEV_ID_IBXPK_1:
	case MEI_DEV_ID_IBXPK_2:
	case MEI_DEV_ID_CPT_1:
	case MEI_DEV_ID_PBG_1:
	case MEI_DEV_ID_PPT_1:
	case MEI_DEV_ID_PPT_2:
	case MEI_DEV_ID_PPT_3:
	case MEI_DEV_ID_LPT_H:
	case MEI_DEV_ID_LPT_W:
	case MEI_DEV_ID_LPT_LP:
	case MEI_DEV_ID_LPT_HR:
	case MEI_DEV_ID_WPT_LP:
		fw_status->count = 2;
		break;
	case MEI_DEV_ID_ICH10_1:
	case MEI_DEV_ID_ICH10_2:
	case MEI_DEV_ID_ICH10_3:
	case MEI_DEV_ID_ICH10_4:
		fw_status->count = 1;
		break;
	default:
		fw_status->count = 0;
		break;
	}

	for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
		int ret;
		ret = pci_read_config_dword(dev->pdev,
			pci_cfg_reg[i], &fw_status->status[i]);
		if (ret)
			return ret;
	}
	return 0;
}
752
/* ME hardware ops vtable wired into the generic MEI core via dev->ops */
static const struct mei_hw_ops mei_me_hw_ops = {

	.pg_state = mei_me_pg_state,

	.fw_status = mei_me_fw_status,
	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
781
/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 *
 * returns The mei_device_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
{
	struct mei_device *dev;

	/* the me hw private area lives right behind the generic device
	 * structure in one allocation (see to_me_hw()) */
	dev = kzalloc(sizeof(struct mei_device) +
		sizeof(struct mei_me_hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	mei_device_init(dev);

	dev->ops = &mei_me_hw_ops;

	dev->pdev = pdev;
	return dev;
}
Tomas Winkler06ecd642013-02-06 14:06:42 +0200805