blob: 031efcfed39e322faf2c2873829191e04323466b [file] [log] [blame]
Oren Weil3ce72722011-05-15 13:43:43 +03001/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
Tomas Winkler733ba912012-02-09 19:25:53 +02004 * Copyright (c) 2003-2012, Intel Corporation.
Oren Weil3ce72722011-05-15 13:43:43 +03005 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
Tomas Winkler06ecd642013-02-06 14:06:42 +020018
19#include <linux/kthread.h>
20#include <linux/interrupt.h>
Tomas Winkler47a73802012-12-25 19:06:03 +020021
22#include "mei_dev.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020023#include "hbm.h"
24
Tomas Winkler6e4cd272014-03-11 14:49:23 +020025#include "hw-me.h"
26#include "hw-me-regs.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020027
/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * returns register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
			       unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}
Oren Weil3ce72722011-05-15 13:43:43 +030041
42
/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				 unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}
55
/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * returns ME_CB_RW register value (u32)
 */
static u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @hw: the me hardware structure
 *
 * returns ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, ME_CSR_HA);
}
79
/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @hw: the me hardware structure
 *
 * returns H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, H_CSR);
}
91
/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @hw: the me hardware structure
 * @hcsr: new register value
 */
static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
{
	/* H_IS is write-one-to-clear: mask it so a read-modify-write
	 * does not accidentally acknowledge a pending interrupt */
	hcsr &= ~H_IS;
	mei_me_reg_write(hw, H_CSR, hcsr);
}
103
Tomas Winklere7e0c232013-01-08 23:07:31 +0200104
105/**
Masanari Iida393b1482013-04-05 01:05:05 +0900106 * mei_me_hw_config - configure hw dependent settings
Tomas Winklere7e0c232013-01-08 23:07:31 +0200107 *
108 * @dev: mei device
109 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200110static void mei_me_hw_config(struct mei_device *dev)
Tomas Winklere7e0c232013-01-08 23:07:31 +0200111{
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200112 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler52c34562013-02-06 14:06:40 +0200113 u32 hcsr = mei_hcsr_read(to_me_hw(dev));
Tomas Winklere7e0c232013-01-08 23:07:31 +0200114 /* Doesn't change in runtime */
115 dev->hbuf_depth = (hcsr & H_CBD) >> 24;
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200116
117 hw->pg_state = MEI_PG_OFF;
Tomas Winklere7e0c232013-01-08 23:07:31 +0200118}
Tomas Winkler964a2332014-03-18 22:51:59 +0200119
/**
 * mei_me_pg_state - translate internal pg state
 *   to the mei power gating state
 *
 * @dev: the device structure
 *
 * returns: the power gating state cached by the driver in hw->pg_state
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	return hw->pg_state;
}
132
Oren Weil3ce72722011-05-15 13:43:43 +0300133/**
Tomas Winklerd0252842013-01-08 23:07:24 +0200134 * mei_clear_interrupts - clear and stop interrupts
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200135 *
136 * @dev: the device structure
137 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200138static void mei_me_intr_clear(struct mei_device *dev)
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200139{
Tomas Winkler52c34562013-02-06 14:06:40 +0200140 struct mei_me_hw *hw = to_me_hw(dev);
141 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkler9ea73dd2013-01-08 23:07:28 +0200142 if ((hcsr & H_IS) == H_IS)
Tomas Winklerb68301e2013-03-27 16:58:29 +0200143 mei_me_reg_write(hw, H_CSR, hcsr);
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200144}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200145/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200146 * mei_me_intr_enable - enables mei device interrupts
Oren Weil3ce72722011-05-15 13:43:43 +0300147 *
148 * @dev: the device structure
149 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200150static void mei_me_intr_enable(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300151{
Tomas Winkler52c34562013-02-06 14:06:40 +0200152 struct mei_me_hw *hw = to_me_hw(dev);
153 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkler9ea73dd2013-01-08 23:07:28 +0200154 hcsr |= H_IE;
Tomas Winkler52c34562013-02-06 14:06:40 +0200155 mei_hcsr_set(hw, hcsr);
Oren Weil3ce72722011-05-15 13:43:43 +0300156}
157
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);
	hcsr &= ~H_IE;
	mei_hcsr_set(hw, hcsr);
}
170
Tomas Winkleradfba322013-01-08 23:07:27 +0200171/**
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200172 * mei_me_hw_reset_release - release device from the reset
173 *
174 * @dev: the device structure
175 */
176static void mei_me_hw_reset_release(struct mei_device *dev)
177{
178 struct mei_me_hw *hw = to_me_hw(dev);
179 u32 hcsr = mei_hcsr_read(hw);
180
181 hcsr |= H_IG;
182 hcsr &= ~H_RST;
183 mei_hcsr_set(hw, hcsr);
Tomas Winklerb04ada92014-05-12 12:19:39 +0300184
185 /* complete this write before we set host ready on another CPU */
186 mmiowb();
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200187}
188/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200189 * mei_me_hw_reset - resets fw via mei csr register.
Tomas Winkleradfba322013-01-08 23:07:27 +0200190 *
191 * @dev: the device structure
Masanari Iida393b1482013-04-05 01:05:05 +0900192 * @intr_enable: if interrupt should be enabled after reset.
Tomas Winkleradfba322013-01-08 23:07:27 +0200193 */
Tomas Winklerc20c68d2013-06-23 10:42:49 +0300194static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
Tomas Winkleradfba322013-01-08 23:07:27 +0200195{
Tomas Winkler52c34562013-02-06 14:06:40 +0200196 struct mei_me_hw *hw = to_me_hw(dev);
197 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkleradfba322013-01-08 23:07:27 +0200198
Tomas Winklerff960662013-07-30 14:11:51 +0300199 hcsr |= H_RST | H_IG | H_IS;
Tomas Winkleradfba322013-01-08 23:07:27 +0200200
201 if (intr_enable)
202 hcsr |= H_IE;
203 else
Tomas Winklerff960662013-07-30 14:11:51 +0300204 hcsr &= ~H_IE;
Tomas Winkleradfba322013-01-08 23:07:27 +0200205
Tomas Winkler07cd7be2014-05-12 12:19:40 +0300206 dev->recvd_hw_ready = false;
Tomas Winklerff960662013-07-30 14:11:51 +0300207 mei_me_reg_write(hw, H_CSR, hcsr);
Tomas Winkleradfba322013-01-08 23:07:27 +0200208
Tomas Winkler33ec0822014-01-12 00:36:09 +0200209 if (intr_enable == false)
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200210 mei_me_hw_reset_release(dev);
Tomas Winkleradfba322013-01-08 23:07:27 +0200211
Tomas Winklerc20c68d2013-06-23 10:42:49 +0300212 return 0;
Tomas Winkleradfba322013-01-08 23:07:27 +0200213}
214
Tomas Winkler115ba282013-01-08 23:07:29 +0200215/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200216 * mei_me_host_set_ready - enable device
Tomas Winkler115ba282013-01-08 23:07:29 +0200217 *
218 * @dev - mei device
219 * returns bool
220 */
221
Tomas Winkler827eef52013-02-06 14:06:41 +0200222static void mei_me_host_set_ready(struct mei_device *dev)
Tomas Winkler115ba282013-01-08 23:07:29 +0200223{
Tomas Winkler52c34562013-02-06 14:06:40 +0200224 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winklerb04ada92014-05-12 12:19:39 +0300225 hw->host_hw_state = mei_hcsr_read(hw);
Tomas Winkler52c34562013-02-06 14:06:40 +0200226 hw->host_hw_state |= H_IE | H_IG | H_RDY;
227 mei_hcsr_set(hw, hw->host_hw_state);
Tomas Winkler115ba282013-01-08 23:07:29 +0200228}
/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 * returns bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	hw->host_hw_state = mei_hcsr_read(hw);
	return (hw->host_hw_state & H_RDY) == H_RDY;
}
241
/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 * returns bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	hw->me_hw_state = mei_me_mecsr_read(hw);
	return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200254
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200255static int mei_me_hw_ready_wait(struct mei_device *dev)
256{
257 int err;
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200258
259 mutex_unlock(&dev->device_lock);
260 err = wait_event_interruptible_timeout(dev->wait_hw_ready,
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300261 dev->recvd_hw_ready,
Tomas Winkler7d93e582014-01-14 23:10:10 +0200262 mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200263 mutex_lock(&dev->device_lock);
264 if (!err && !dev->recvd_hw_ready) {
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300265 if (!err)
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200266 err = -ETIME;
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200267 dev_err(&dev->pdev->dev,
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300268 "wait hw ready failed. status = %d\n", err);
269 return err;
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200270 }
271
272 dev->recvd_hw_ready = false;
273 return 0;
274}
275
276static int mei_me_hw_start(struct mei_device *dev)
277{
278 int ret = mei_me_hw_ready_wait(dev);
279 if (ret)
280 return ret;
281 dev_dbg(&dev->pdev->dev, "hw is ready\n");
282
283 mei_me_host_set_ready(dev);
284 return ret;
285}
286
287
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200288/**
Tomas Winkler726917f2012-06-25 23:46:28 +0300289 * mei_hbuf_filled_slots - gets number of device filled buffer slots
Oren Weil3ce72722011-05-15 13:43:43 +0300290 *
Sedat Dilek7353f852013-01-17 19:54:15 +0100291 * @dev: the device structure
Oren Weil3ce72722011-05-15 13:43:43 +0300292 *
293 * returns number of filled slots
294 */
Tomas Winkler726917f2012-06-25 23:46:28 +0300295static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300296{
Tomas Winkler52c34562013-02-06 14:06:40 +0200297 struct mei_me_hw *hw = to_me_hw(dev);
Oren Weil3ce72722011-05-15 13:43:43 +0300298 char read_ptr, write_ptr;
299
Tomas Winkler52c34562013-02-06 14:06:40 +0200300 hw->host_hw_state = mei_hcsr_read(hw);
Tomas Winkler726917f2012-06-25 23:46:28 +0300301
Tomas Winkler52c34562013-02-06 14:06:40 +0200302 read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
303 write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
Oren Weil3ce72722011-05-15 13:43:43 +0300304
305 return (unsigned char) (write_ptr - read_ptr);
306}
307
308/**
Masanari Iida393b1482013-04-05 01:05:05 +0900309 * mei_me_hbuf_is_empty - checks if host buffer is empty.
Oren Weil3ce72722011-05-15 13:43:43 +0300310 *
311 * @dev: the device structure
312 *
Tomas Winkler726917f2012-06-25 23:46:28 +0300313 * returns true if empty, false - otherwise.
Oren Weil3ce72722011-05-15 13:43:43 +0300314 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200315static bool mei_me_hbuf_is_empty(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300316{
Tomas Winkler726917f2012-06-25 23:46:28 +0300317 return mei_hbuf_filled_slots(dev) == 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300318}
319
/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * returns -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	empty_slots = dev->hbuf_depth - filled_slots;

	/* check for overflow: more filled slots than the buffer holds
	 * means the hw state is corrupted */
	if (filled_slots > dev->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}
340
/**
 * mei_me_hbuf_max_len - returns the maximum payload size that fits
 *  into the host buffer (buffer size minus one slot for the header)
 *
 * @dev: the device structure
 *
 * returns maximum message payload length in bytes
 */
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}
345
346
Oren Weil3ce72722011-05-15 13:43:43 +0300347/**
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200348 * mei_me_write_message - writes a message to mei device.
Oren Weil3ce72722011-05-15 13:43:43 +0300349 *
350 * @dev: the device structure
Sedat Dilek7353f852013-01-17 19:54:15 +0100351 * @header: mei HECI header of message
Tomas Winkler438763f2012-12-25 19:05:59 +0200352 * @buf: message payload will be written
Oren Weil3ce72722011-05-15 13:43:43 +0300353 *
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200354 * This function returns -EIO if write has failed
Oren Weil3ce72722011-05-15 13:43:43 +0300355 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200356static int mei_me_write_message(struct mei_device *dev,
357 struct mei_msg_hdr *header,
358 unsigned char *buf)
Oren Weil3ce72722011-05-15 13:43:43 +0300359{
Tomas Winkler52c34562013-02-06 14:06:40 +0200360 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200361 unsigned long rem;
Tomas Winkler438763f2012-12-25 19:05:59 +0200362 unsigned long length = header->length;
Tomas Winkler169d1332012-06-19 09:13:35 +0300363 u32 *reg_buf = (u32 *)buf;
Tomas Winkler88eb99f2013-01-08 23:07:30 +0200364 u32 hcsr;
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200365 u32 dw_cnt;
Tomas Winkler169d1332012-06-19 09:13:35 +0300366 int i;
367 int empty_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300368
Tomas Winkler15d4acc2012-12-25 19:06:00 +0200369 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
Oren Weil3ce72722011-05-15 13:43:43 +0300370
Tomas Winkler726917f2012-06-25 23:46:28 +0300371 empty_slots = mei_hbuf_empty_slots(dev);
Tomas Winkler169d1332012-06-19 09:13:35 +0300372 dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
Oren Weil3ce72722011-05-15 13:43:43 +0300373
Tomas Winkler7bdf72d2012-07-04 19:24:52 +0300374 dw_cnt = mei_data2slots(length);
Tomas Winkler169d1332012-06-19 09:13:35 +0300375 if (empty_slots < 0 || dw_cnt > empty_slots)
Tomas Winkler9d098192014-02-19 17:35:48 +0200376 return -EMSGSIZE;
Oren Weil3ce72722011-05-15 13:43:43 +0300377
Tomas Winklerb68301e2013-03-27 16:58:29 +0200378 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
Oren Weil3ce72722011-05-15 13:43:43 +0300379
Tomas Winkler169d1332012-06-19 09:13:35 +0300380 for (i = 0; i < length / 4; i++)
Tomas Winklerb68301e2013-03-27 16:58:29 +0200381 mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
Tomas Winkler169d1332012-06-19 09:13:35 +0300382
383 rem = length & 0x3;
384 if (rem > 0) {
385 u32 reg = 0;
386 memcpy(&reg, &buf[length - rem], rem);
Tomas Winklerb68301e2013-03-27 16:58:29 +0200387 mei_me_reg_write(hw, H_CB_WW, reg);
Oren Weil3ce72722011-05-15 13:43:43 +0300388 }
389
Tomas Winkler52c34562013-02-06 14:06:40 +0200390 hcsr = mei_hcsr_read(hw) | H_IG;
391 mei_hcsr_set(hw, hcsr);
Tomas Winkler827eef52013-02-06 14:06:41 +0200392 if (!mei_me_hw_is_ready(dev))
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200393 return -EIO;
Oren Weil3ce72722011-05-15 13:43:43 +0300394
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200395 return 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300396}
397
/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * returns -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	/* depth and circular buffer pointers are packed in ME_CSR_HA */
	hw->me_hw_state = mei_me_mecsr_read(hw);
	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
424
/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * returns 0 always (kept for the read ops signature)
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
		    unsigned long buffer_length)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 *reg_buf = (u32 *)buffer;
	u32 hcsr;

	/* drain whole dwords from the read window register */
	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = mei_me_mecbrw_read(dev);

	/* copy only the remaining 1-3 bytes from the last dword */
	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);
		memcpy(reg_buf, &reg, buffer_length);
	}

	/* set interrupt generation to signal the fw slots were consumed */
	hcsr = mei_hcsr_read(hw) | H_IG;
	mei_hcsr_set(hw, hcsr);
	return 0;
}
451
Tomas Winkler06ecd642013-02-06 14:06:42 +0200452/**
Tomas Winklerb16c3572014-03-18 22:51:57 +0200453 * mei_me_pg_enter - write pg enter register to mei device.
454 *
455 * @dev: the device structure
456 */
457static void mei_me_pg_enter(struct mei_device *dev)
458{
459 struct mei_me_hw *hw = to_me_hw(dev);
460 u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
461 reg |= H_HPG_CSR_PGI;
462 mei_me_reg_write(hw, H_HPG_CSR, reg);
463}
464
/**
 * mei_me_pg_exit - write pg exit register to mei device.
 * (the original header was a copy-paste of mei_me_pg_enter)
 *
 * @dev: the device structure
 */
static void mei_me_pg_exit(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);

	/* exiting pg only makes sense while isolation is entered */
	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
480
/**
 * mei_me_pg_set_sync - perform pg entry procedure
 *
 * @dev: the device structure
 *
 * returns 0 on success an error code otherwise
 */
int mei_me_pg_set_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* request pg isolation entry from the fw via hbm */
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* drop the lock while sleeping; the irq thread takes it to
	 * deliver the fw response and set pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		/* fw acknowledged: complete entry via the pg register */
		mei_me_pg_enter(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* pg state is recorded as ON even on timeout - NOTE(review):
	 * presumably the reset path recovers from this; verify */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
517
/**
 * mei_me_pg_unset_sync - perform pg exit procedure
 *
 * @dev: the device structure
 *
 * returns 0 on success an error code otherwise
 */
int mei_me_pg_unset_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* if the fw already requested the exit we can reply immediately */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* kick the hw out of isolation, then wait for the fw event */
	mei_me_pg_exit(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	else
		ret = -ETIME;

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
554
/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * returns: true is pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, ME_CSR_HA);

	/* pg requires both the hw capability bit and an hbm protocol
	 * version of at least HBM_MAJOR_VERSION_PGI.HBM_MINOR_VERSION_PGI */
	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (dev->version.major_version < HBM_MAJOR_VERSION_PGI)
		goto notsupported;

	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
	    dev->version.minor_version < HBM_MINOR_VERSION_PGI)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}
589
/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns IRQ_NONE if the interrupt is not ours, IRQ_WAKE_THREAD to
 * defer processing to mei_me_irq_thread_handler
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 csr_reg = mei_hcsr_read(hw);

	if ((csr_reg & H_IS) != H_IS)
		return IRQ_NONE;

	/* clear H_IS bit in H_CSR (write-one-to-clear) */
	mei_me_reg_write(hw, H_CSR, csr_reg);

	return IRQ_WAKE_THREAD;
}
613
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns IRQ_HANDLED
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(dev->pdev))
		mei_clear_interrupts(dev);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
		/* the reset runs from a workqueue, not from irq context */
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			mei_me_hw_reset_release(dev);
			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");

			/* wake mei_me_hw_ready_wait() sleeping on wait_hw_ready */
			dev->recvd_hw_ready = true;
			wake_up_interruptible(&dev->wait_hw_ready);
		} else {
			dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
						rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg state is not idle
	 */
	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
		rets = mei_irq_write_handler(dev, &complete_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
Alexander Usyskin04dd3662014-03-31 17:59:23 +0300700
/**
 * mei_me_fw_status - retrieve fw status from the pci config space
 *
 * @dev: the device structure
 * @fw_status: fw status registers storage
 *
 * returns 0 on success an error code otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2};
	int i;

	if (!fw_status)
		return -EINVAL;

	/* the number of fw status registers depends on the hw generation */
	switch (dev->pdev->device) {
	case MEI_DEV_ID_IBXPK_1:
	case MEI_DEV_ID_IBXPK_2:
	case MEI_DEV_ID_CPT_1:
	case MEI_DEV_ID_PBG_1:
	case MEI_DEV_ID_PPT_1:
	case MEI_DEV_ID_PPT_2:
	case MEI_DEV_ID_PPT_3:
	case MEI_DEV_ID_LPT_H:
	case MEI_DEV_ID_LPT_W:
	case MEI_DEV_ID_LPT_LP:
	case MEI_DEV_ID_LPT_HR:
	case MEI_DEV_ID_WPT_LP:
		fw_status->count = 2;
		break;
	case MEI_DEV_ID_ICH10_1:
	case MEI_DEV_ID_ICH10_2:
	case MEI_DEV_ID_ICH10_3:
	case MEI_DEV_ID_ICH10_4:
		fw_status->count = 1;
		break;
	default:
		fw_status->count = 0;
		break;
	}

	/* bounded by both the per-device count and the storage capacity */
	for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
		int ret;
		ret = pci_read_config_dword(dev->pdev,
			pci_cfg_reg[i], &fw_status->status[i]);
		if (ret)
			return ret;
	}
	return 0;
}
753
/* hw ops vtable binding the generic mei core to the ME hw back end */
static const struct mei_hw_ops mei_me_hw_ops = {

	.pg_state = mei_me_pg_state,

	.fw_status = mei_me_fw_status,
	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
782
/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 *
 * returns The mei_device_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
{
	struct mei_device *dev;

	/* single allocation: the me hw struct is placed right after the
	 * generic device struct (accessed via to_me_hw()) */
	dev = kzalloc(sizeof(struct mei_device) +
			 sizeof(struct mei_me_hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	mei_device_init(dev);

	dev->ops = &mei_me_hw_ops;

	dev->pdev = pdev;
	return dev;
}
Tomas Winkler06ecd642013-02-06 14:06:42 +0200806