/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "hw-me.h"
#include "client.h"

/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, &compl_list->list, list) {
		cl = cb->cl;
		list_del(&cb->list);
		if (!cl)
			continue;

		dev_dbg(&dev->pdev->dev, "completing call back.\n");
		if (cl == &dev->iamthif_cl)
			mei_amthif_complete(dev, cb);
		else
			mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * returns true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
			struct mei_msg_hdr *mei_hdr)
{
	return cl->host_client_id == mei_hdr->host_addr &&
		cl->me_client_id == mei_hdr->me_addr;
}
/**
 * mei_cl_is_reading - checks if the client
 *	is the one to read this message
 *
 * @cl: mei client
 * @mei_hdr: header of mei message
 *
 * returns true on match and false otherwise
 */
static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
{
	return mei_cl_hbm_equal(cl, mei_hdr) &&
		cl->state == MEI_FILE_CONNECTED &&
		cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
			       struct mei_msg_hdr *mei_hdr,
			       struct mei_cl_cb *complete_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	unsigned char *buffer = NULL;

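	/* find a pending read cb whose client this message is addressed to */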
	list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
		cl = cb->cl;
		if (!cl || !mei_cl_is_reading(cl, mei_hdr))
			continue;

		cl->reading_state = MEI_READING;

		if (cb->response_buffer.size == 0 ||
		    cb->response_buffer.data == NULL) {
			cl_err(dev, cl, "response buffer is not allocated.\n");
			list_del(&cb->list);
			return -ENOMEM;
		}

		if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
			cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
				cb->response_buffer.size,
				mei_hdr->length, cb->buf_idx);
			buffer = krealloc(cb->response_buffer.data,
					  mei_hdr->length + cb->buf_idx,
					  GFP_KERNEL);

			if (!buffer) {
				cl_err(dev, cl, "allocation failed.\n");
				list_del(&cb->list);
				return -ENOMEM;
			}
			cb->response_buffer.data = buffer;
			cb->response_buffer.size =
				mei_hdr->length + cb->buf_idx;
		}

		buffer = cb->response_buffer.data + cb->buf_idx;
		mei_read_slots(dev, buffer, mei_hdr->length);

		cb->buf_idx += mei_hdr->length;
		if (mei_hdr->msg_complete) {
			cl->status = 0;
			list_del(&cb->list);
			cl_dbg(dev, cl, "completed read length = %lu\n",
				cb->buf_idx);
			list_add_tail(&cb->list, &complete_list->list);
		}
		break;
	}

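	/* no matching read cb: drain the payload into the scratch buffer and discard it */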
	dev_dbg(&dev->pdev->dev, "message read\n");
	if (!buffer) {
		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
		dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
				MEI_HDR_PRM(mei_hdr));
	}

	return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
			struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

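	/* the disconnect response must fit into the free host buffer slots */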
	slots = mei_hbuf_empty_slots(dev);
	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);

	cl->state = MEI_FILE_DISCONNECTED;
	cl->status = 0;
	list_del(&cb->list);
	mei_io_cb_free(cb);

	return ret;
}


/**
 * mei_cl_irq_close - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
			struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;

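	/* the disconnect request must fit into the free host buffer slots */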
	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	if (mei_hbm_cl_disconnect_req(dev, cl)) {
		cl->status = 0;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return -EIO;
	}

	cl->state = MEI_FILE_DISCONNECTING;
	cl->status = 0;
	cb->buf_idx = 0;
	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;

	return 0;
}

/**
 * mei_cl_irq_read - processes client read related operation from the
 *	interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return ret;
	}

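	/* flow control sent: queue the cb on the read list to wait for the incoming message */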
	list_move_tail(&cb->list, &dev->read_list.list);

	return 0;
}


/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
			      struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

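	/* defer the request while another client connection is still in progress */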
	if (mei_cl_is_other_connecting(cl))
		return 0;

	if (slots < msg_slots)
		return -EMSGSIZE;

	cl->state = MEI_FILE_CONNECTING;

	ret = mei_hbm_cl_connect_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_del(&cb->list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	return 0;
}


/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

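	/* a set reserved field or an all-zero stored header indicates corruption */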
	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(&dev->pdev->dev, "less data available than length=%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n",
					ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

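	/* messages for the connected iamthif client in reading state go to the
	 * amthif handler, all other client messages to the regular read path */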
	if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
	    MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
	    dev->iamthif_state == MEI_IAMTHIF_READING) {

		ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret) {
			dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n",
					ret);
			goto end;
		}
	} else {
		ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret) {
			dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n",
					ret);
			goto end;
		}
	}

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);


/**
 * mei_irq_write_handler - dispatch write requests
 *	after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;

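	/* nothing to do if the host buffer is not available for writing */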
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(cb, next, &list->list, list) {
		cl = cb->cl;
		if (cl == NULL)
			continue;

		cl->status = 0;
		list_del(&cb->list);
		if (MEI_WRITING == cl->writing_state &&
		    cb->fop_type == MEI_FOP_WRITE &&
		    cl != &dev->iamthif_cl) {
			cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&cb->list, &cmpl_list->list);
		}
		if (cl == &dev->iamthif_cl) {
			cl_dbg(dev, cl, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);
				if (ret)
					return ret;
			}
		}
	}

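	/* wake anyone waiting for the watchdog stop to complete */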
	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up(&dev->wait_stop_wd);
	}

	if (mei_cl_is_connected(&dev->wd_cl)) {
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			ret = mei_wd_send(dev);
			if (ret)
				return ret;
			dev->wd_pending = false;
		}
	}

	/* complete control write list CB */
	dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
		cl = cb->cl;
		if (!cl) {
			list_del(&cb->list);
			return -ENODEV;
		}
		switch (cb->fop_type) {
		case MEI_FOP_CLOSE:
			/* send disconnect message */
			ret = mei_cl_irq_close(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		cl = cb->cl;
		if (cl == NULL)
			continue;
		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write(cl, cb, cmpl_list);
		else
			ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);



/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);


	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n");
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (!mei_cl_is_connected(&dev->iamthif_cl))
		goto out;

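	/* an amthif transaction that stalled past its timeout forces a reset */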
	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(&dev->pdev->dev, "timer: amthif hanged.\n");
			mei_reset(dev);
			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
		dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */

			dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");

			list_for_each_entry_safe(cb_pos, cb_next,
				&dev->amthif_rd_complete_list.list, list) {

				cl = cb_pos->file_object->private_data;

				/* Finding the AMTHI entry. */
				if (cl == &dev->iamthif_cl)
					list_del(&cb_pos->list);
			}
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

		}
	}
out:
	if (dev->dev_state != MEI_DEV_DISABLED)
		schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}