blob: caf0da33599448c46886909f9d3bb94db5fd9190 [file] [log] [blame]
Oren Weil3ce72722011-05-15 13:43:43 +03001/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
Tomas Winkler733ba912012-02-09 19:25:53 +02004 * Copyright (c) 2003-2012, Intel Corporation.
Oren Weil3ce72722011-05-15 13:43:43 +03005 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
Tomas Winkler06ecd642013-02-06 14:06:42 +020018
19#include <linux/kthread.h>
20#include <linux/interrupt.h>
Tomas Winkler47a73802012-12-25 19:06:03 +020021
22#include "mei_dev.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020023#include "hbm.h"
24
Tomas Winkler6e4cd272014-03-11 14:49:23 +020025#include "hw-me.h"
26#include "hw-me-regs.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020027
/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * returns register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
			       unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}
Oren Weil3ce72722011-05-15 13:43:43 +030041
42
/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset to which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				 unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}
55
/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * returns ME_CB_RW register value (u32)
 */
static u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @hw: the me hardware structure
 *
 * returns ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, ME_CSR_HA);
}
79
/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @hw: the me hardware structure
 *
 * returns H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, H_CSR);
}
91
/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @hw: the me hardware structure
 * @hcsr: new register value
 */
static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
{
	/* H_IS is write-one-to-clear: never write it back as set */
	hcsr &= ~H_IS;
	mei_me_reg_write(hw, H_CSR, hcsr);
}
103
Tomas Winklere7e0c232013-01-08 23:07:31 +0200104
105/**
Masanari Iida393b1482013-04-05 01:05:05 +0900106 * mei_me_hw_config - configure hw dependent settings
Tomas Winklere7e0c232013-01-08 23:07:31 +0200107 *
108 * @dev: mei device
109 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200110static void mei_me_hw_config(struct mei_device *dev)
Tomas Winklere7e0c232013-01-08 23:07:31 +0200111{
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200112 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler52c34562013-02-06 14:06:40 +0200113 u32 hcsr = mei_hcsr_read(to_me_hw(dev));
Tomas Winklere7e0c232013-01-08 23:07:31 +0200114 /* Doesn't change in runtime */
115 dev->hbuf_depth = (hcsr & H_CBD) >> 24;
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200116
117 hw->pg_state = MEI_PG_OFF;
Tomas Winklere7e0c232013-01-08 23:07:31 +0200118}
Tomas Winkler964a2332014-03-18 22:51:59 +0200119
/**
 * mei_me_pg_state - translate internal pg state
 * to the mei power gating state
 *
 * @dev: mei device
 *
 * returns: cached hw->pg_state (MEI_PG_OFF or MEI_PG_ON)
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	return hw->pg_state;
}
132
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);
	/* write H_IS back as-is: the bit is write-one-to-clear */
	if ((hcsr & H_IS) == H_IS)
		mei_me_reg_write(hw, H_CSR, hcsr);
}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200145/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200146 * mei_me_intr_enable - enables mei device interrupts
Oren Weil3ce72722011-05-15 13:43:43 +0300147 *
148 * @dev: the device structure
149 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200150static void mei_me_intr_enable(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300151{
Tomas Winkler52c34562013-02-06 14:06:40 +0200152 struct mei_me_hw *hw = to_me_hw(dev);
153 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkler9ea73dd2013-01-08 23:07:28 +0200154 hcsr |= H_IE;
Tomas Winkler52c34562013-02-06 14:06:40 +0200155 mei_hcsr_set(hw, hcsr);
Oren Weil3ce72722011-05-15 13:43:43 +0300156}
157
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);
	hcsr &= ~H_IE;
	mei_hcsr_set(hw, hcsr);
}
170
/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	/* set interrupt generation and drop the reset bit */
	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(hw, hcsr);

	/* complete this write before we set host ready on another CPU */
	mmiowb();
}
188/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200189 * mei_me_hw_reset - resets fw via mei csr register.
Tomas Winkleradfba322013-01-08 23:07:27 +0200190 *
191 * @dev: the device structure
Masanari Iida393b1482013-04-05 01:05:05 +0900192 * @intr_enable: if interrupt should be enabled after reset.
Tomas Winkleradfba322013-01-08 23:07:27 +0200193 */
Tomas Winklerc20c68d2013-06-23 10:42:49 +0300194static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
Tomas Winkleradfba322013-01-08 23:07:27 +0200195{
Tomas Winkler52c34562013-02-06 14:06:40 +0200196 struct mei_me_hw *hw = to_me_hw(dev);
197 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkleradfba322013-01-08 23:07:27 +0200198
Tomas Winklerff960662013-07-30 14:11:51 +0300199 hcsr |= H_RST | H_IG | H_IS;
Tomas Winkleradfba322013-01-08 23:07:27 +0200200
201 if (intr_enable)
202 hcsr |= H_IE;
203 else
Tomas Winklerff960662013-07-30 14:11:51 +0300204 hcsr &= ~H_IE;
Tomas Winkleradfba322013-01-08 23:07:27 +0200205
Tomas Winkler07cd7be2014-05-12 12:19:40 +0300206 dev->recvd_hw_ready = false;
Tomas Winklerff960662013-07-30 14:11:51 +0300207 mei_me_reg_write(hw, H_CSR, hcsr);
Tomas Winkleradfba322013-01-08 23:07:27 +0200208
Tomas Winklerc40765d2014-05-12 12:19:41 +0300209 /*
210 * Host reads the H_CSR once to ensure that the
211 * posted write to H_CSR completes.
212 */
213 hcsr = mei_hcsr_read(hw);
214
215 if ((hcsr & H_RST) == 0)
216 dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr);
217
218 if ((hcsr & H_RDY) == H_RDY)
219 dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr);
220
Tomas Winkler33ec0822014-01-12 00:36:09 +0200221 if (intr_enable == false)
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200222 mei_me_hw_reset_release(dev);
Tomas Winkleradfba322013-01-08 23:07:27 +0200223
Tomas Winklerc20c68d2013-06-23 10:42:49 +0300224 return 0;
Tomas Winkleradfba322013-01-08 23:07:27 +0200225}
226
/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 *
 * Sets H_RDY (together with interrupt enable/generation) in H_CSR.
 */

static void mei_me_host_set_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	hw->host_hw_state = mei_hcsr_read(hw);
	hw->host_hw_state |= H_IE | H_IG | H_RDY;
	mei_hcsr_set(hw, hw->host_hw_state);
}
/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 * returns bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	hw->host_hw_state = mei_hcsr_read(hw);
	return (hw->host_hw_state & H_RDY) == H_RDY;
}
253
/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 * returns bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	hw->me_hw_state = mei_me_mecsr_read(hw);
	return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200266
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 *
 * returns 0 on success, -ETIME on timeout
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop the device lock while sleeping; the interrupt thread
	 * sets recvd_hw_ready and wakes wait_hw_ready */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(&dev->pdev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	/* consume the ready event */
	dev->recvd_hw_ready = false;
	return 0;
}
282
283static int mei_me_hw_start(struct mei_device *dev)
284{
285 int ret = mei_me_hw_ready_wait(dev);
286 if (ret)
287 return ret;
288 dev_dbg(&dev->pdev->dev, "hw is ready\n");
289
290 mei_me_host_set_ready(dev);
291 return ret;
292}
293
294
/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * returns number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;

	hw->host_hw_state = mei_hcsr_read(hw);

	/* circular buffer pointers live in H_CSR bit fields */
	read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
	write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);

	/* unsigned wrap-around gives the correct distance */
	return (unsigned char) (write_ptr - read_ptr);
}
314
/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * returns true if empty, false - otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
	return mei_hbuf_filled_slots(dev) == 0;
}
326
327/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200328 * mei_me_hbuf_empty_slots - counts write empty slots.
Oren Weil3ce72722011-05-15 13:43:43 +0300329 *
330 * @dev: the device structure
331 *
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200332 * returns -EOVERFLOW if overflow, otherwise empty slots count
Oren Weil3ce72722011-05-15 13:43:43 +0300333 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200334static int mei_me_hbuf_empty_slots(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300335{
Tomas Winkler24aadc82012-06-25 23:46:27 +0300336 unsigned char filled_slots, empty_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300337
Tomas Winkler726917f2012-06-25 23:46:28 +0300338 filled_slots = mei_hbuf_filled_slots(dev);
Tomas Winkler24aadc82012-06-25 23:46:27 +0300339 empty_slots = dev->hbuf_depth - filled_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300340
341 /* check for overflow */
Tomas Winkler24aadc82012-06-25 23:46:27 +0300342 if (filled_slots > dev->hbuf_depth)
Oren Weil3ce72722011-05-15 13:43:43 +0300343 return -EOVERFLOW;
344
345 return empty_slots;
346}
347
/**
 * mei_me_hbuf_max_len - maximum payload size the host buffer can carry;
 *  the buffer depth in bytes minus one slot reserved for the message header.
 *
 * @dev: the device structure
 */
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}
352
353
Oren Weil3ce72722011-05-15 13:43:43 +0300354/**
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200355 * mei_me_write_message - writes a message to mei device.
Oren Weil3ce72722011-05-15 13:43:43 +0300356 *
357 * @dev: the device structure
Sedat Dilek7353f852013-01-17 19:54:15 +0100358 * @header: mei HECI header of message
Tomas Winkler438763f2012-12-25 19:05:59 +0200359 * @buf: message payload will be written
Oren Weil3ce72722011-05-15 13:43:43 +0300360 *
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200361 * This function returns -EIO if write has failed
Oren Weil3ce72722011-05-15 13:43:43 +0300362 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200363static int mei_me_write_message(struct mei_device *dev,
364 struct mei_msg_hdr *header,
365 unsigned char *buf)
Oren Weil3ce72722011-05-15 13:43:43 +0300366{
Tomas Winkler52c34562013-02-06 14:06:40 +0200367 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200368 unsigned long rem;
Tomas Winkler438763f2012-12-25 19:05:59 +0200369 unsigned long length = header->length;
Tomas Winkler169d1332012-06-19 09:13:35 +0300370 u32 *reg_buf = (u32 *)buf;
Tomas Winkler88eb99f2013-01-08 23:07:30 +0200371 u32 hcsr;
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200372 u32 dw_cnt;
Tomas Winkler169d1332012-06-19 09:13:35 +0300373 int i;
374 int empty_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300375
Tomas Winkler15d4acc2012-12-25 19:06:00 +0200376 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
Oren Weil3ce72722011-05-15 13:43:43 +0300377
Tomas Winkler726917f2012-06-25 23:46:28 +0300378 empty_slots = mei_hbuf_empty_slots(dev);
Tomas Winkler169d1332012-06-19 09:13:35 +0300379 dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
Oren Weil3ce72722011-05-15 13:43:43 +0300380
Tomas Winkler7bdf72d2012-07-04 19:24:52 +0300381 dw_cnt = mei_data2slots(length);
Tomas Winkler169d1332012-06-19 09:13:35 +0300382 if (empty_slots < 0 || dw_cnt > empty_slots)
Tomas Winkler9d098192014-02-19 17:35:48 +0200383 return -EMSGSIZE;
Oren Weil3ce72722011-05-15 13:43:43 +0300384
Tomas Winklerb68301e2013-03-27 16:58:29 +0200385 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
Oren Weil3ce72722011-05-15 13:43:43 +0300386
Tomas Winkler169d1332012-06-19 09:13:35 +0300387 for (i = 0; i < length / 4; i++)
Tomas Winklerb68301e2013-03-27 16:58:29 +0200388 mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
Tomas Winkler169d1332012-06-19 09:13:35 +0300389
390 rem = length & 0x3;
391 if (rem > 0) {
392 u32 reg = 0;
393 memcpy(&reg, &buf[length - rem], rem);
Tomas Winklerb68301e2013-03-27 16:58:29 +0200394 mei_me_reg_write(hw, H_CB_WW, reg);
Oren Weil3ce72722011-05-15 13:43:43 +0300395 }
396
Tomas Winkler52c34562013-02-06 14:06:40 +0200397 hcsr = mei_hcsr_read(hw) | H_IG;
398 mei_hcsr_set(hw, hcsr);
Tomas Winkler827eef52013-02-06 14:06:41 +0200399 if (!mei_me_hw_is_ready(dev))
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200400 return -EIO;
Oren Weil3ce72722011-05-15 13:43:43 +0300401
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200402 return 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300403}
404
/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * returns -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	/* depth and pointers are bit fields of the ME CSR */
	hw->me_hw_state = mei_me_mecsr_read(hw);
	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
431
/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * returns 0 always (the read window has no failure indication)
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
		    unsigned long buffer_length)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 *reg_buf = (u32 *)buffer;
	u32 hcsr;

	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = mei_me_mecbrw_read(dev);

	/* trailing 1-3 bytes come from one last full dword read */
	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);
		memcpy(reg_buf, &reg, buffer_length);
	}

	/* signal the me side that the slots were consumed */
	hcsr = mei_hcsr_read(hw) | H_IG;
	mei_hcsr_set(hw, hcsr);
	return 0;
}
458
Tomas Winkler06ecd642013-02-06 14:06:42 +0200459/**
Tomas Winklerb16c3572014-03-18 22:51:57 +0200460 * mei_me_pg_enter - write pg enter register to mei device.
461 *
462 * @dev: the device structure
463 */
464static void mei_me_pg_enter(struct mei_device *dev)
465{
466 struct mei_me_hw *hw = to_me_hw(dev);
467 u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
468 reg |= H_HPG_CSR_PGI;
469 mei_me_reg_write(hw, H_HPG_CSR, reg);
470}
/**
 * mei_me_pg_exit - write pg exit register to mei device.
 *
 * @dev: the device structure
 */
static void mei_me_pg_exit(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);

	/* exiting PG only makes sense if PG isolation was entered */
	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
487
/**
 * mei_me_pg_set_sync - perform pg entry procedure
 *
 * @dev: the device structure
 *
 * returns 0 on success an error code otherwise
 */
int mei_me_pg_set_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* ask the fw to enter power gating isolation via HBM */
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* wait for the fw response; the interrupt path flips pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_enter(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* pg_state is set to ON even on timeout — mirrors the original flow;
	 * NOTE(review): verify this is the intended behavior on -ETIME */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
/**
 * mei_me_pg_unset_sync - perform pg exit procedure
 *
 * @dev: the device structure
 *
 * returns 0 on success an error code otherwise
 */
int mei_me_pg_unset_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* the fw may have already signalled the exit event */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_exit(dev);

	/* wait for the fw exit event; set from the interrupt path */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	else
		ret = -ETIME;

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
561
/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * returns: true is pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, ME_CSR_HA);

	/* hardware must advertise PG isolation capability */
	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	/* and the negotiated HBM protocol version must support PGI */
	if (dev->version.major_version < HBM_MAJOR_VERSION_PGI)
		goto notsupported;

	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
	    dev->version.minor_version < HBM_MINOR_VERSION_PGI)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}
/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns IRQ_NONE if the interrupt is not ours,
 * IRQ_WAKE_THREAD to run mei_me_irq_thread_handler otherwise
 */

irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 csr_reg = mei_hcsr_read(hw);

	if ((csr_reg & H_IS) != H_IS)
		return IRQ_NONE;

	/* clear H_IS bit in H_CSR */
	mei_me_reg_write(hw, H_CSR, csr_reg);

	return IRQ_WAKE_THREAD;
}
620
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(dev->pdev))
		mei_clear_interrupts(dev);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			/* release the fw from reset and wake the
			 * hw-start waiter (mei_me_hw_ready_wait) */
			mei_me_hw_reset_release(dev);
			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");

			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
					rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg state is not idle
	 */
	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
		rets = mei_irq_write_handler(dev, &complete_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
Alexander Usyskin04dd3662014-03-31 17:59:23 +0300707
/* hardware ops vector for ME devices, wired into struct mei_device */
static const struct mei_hw_ops mei_me_hw_ops = {

	.pg_state = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
735
/**
 * mei_me_fw_type_nm - check whether the device runs Node Manager firmware
 *
 * @pdev: pci device
 *
 * returns true if PCI_CFG_HFS_2 reports NM firmware
 */
static bool mei_me_fw_type_nm(struct pci_dev *pdev)
{
	u32 reg;
	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
	return (reg & 0x600) == 0x200;
}

/* probe quirk: skip the device when Node Manager firmware owns it */
#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm
746
/**
 * mei_me_fw_type_sps - check whether the device runs SPS firmware
 *
 * @pdev: pci device
 *
 * returns true if PCI_CFG_HFS_1 reports SPS firmware
 */
static bool mei_me_fw_type_sps(struct pci_dev *pdev)
{
	u32 reg;
	/* Read ME FW Status check for SPS Firmware */
	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
	/* if bits [19:16] = 15, running SPS Firmware */
	return (reg & 0xf0000) == 0xf0000;
}

/* probe quirk: skip the device when SPS firmware owns it */
#define MEI_CFG_FW_SPS                          \
	.quirk_probe = mei_me_fw_type_sps
758
759
/* no fw status registers available (ICH legacy) */
#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

/* one fw status register, HFS_1, in PCI config space (ICH) */
#define MEI_CFG_ICH_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* two fw status registers, HFS_1 and HFS_2 (PCH) */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2


/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};


/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_lpt_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_SPS,
};
800
Tomas Winkler52c34562013-02-06 14:06:40 +0200801/**
Masanari Iida393b1482013-04-05 01:05:05 +0900802 * mei_me_dev_init - allocates and initializes the mei device structure
Tomas Winkler52c34562013-02-06 14:06:40 +0200803 *
804 * @pdev: The pci device structure
Alexander Usyskin8d929d42014-05-13 01:30:53 +0300805 * @cfg: per device generation config
Tomas Winkler52c34562013-02-06 14:06:40 +0200806 *
807 * returns The mei_device_device pointer on success, NULL on failure.
808 */
Alexander Usyskin8d929d42014-05-13 01:30:53 +0300809struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
810 const struct mei_cfg *cfg)
Tomas Winkler52c34562013-02-06 14:06:40 +0200811{
812 struct mei_device *dev;
813
814 dev = kzalloc(sizeof(struct mei_device) +
815 sizeof(struct mei_me_hw), GFP_KERNEL);
816 if (!dev)
817 return NULL;
818
Alexander Usyskin8d929d42014-05-13 01:30:53 +0300819 mei_device_init(dev, cfg);
Tomas Winkler52c34562013-02-06 14:06:40 +0200820
Tomas Winkler827eef52013-02-06 14:06:41 +0200821 dev->ops = &mei_me_hw_ops;
822
Tomas Winkler52c34562013-02-06 14:06:40 +0200823 dev->pdev = pdev;
824 return dev;
825}
Tomas Winkler06ecd642013-02-06 14:06:42 +0200826