/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "hw-me.h"
#include "client.h"

/**
 * mei_irq_compl_handler - dispatch complete handlers
 *      for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
        struct mei_cl_cb *cb, *next;
        struct mei_cl *cl;

        list_for_each_entry_safe(cb, next, &compl_list->list, list) {
                cl = cb->cl;
                list_del(&cb->list);
                if (!cl)
                        continue;

                dev_dbg(&dev->pdev->dev, "completing call back.\n");
                if (cl == &dev->iamthif_cl)
                        mei_amthif_complete(dev, cb);
                else
                        mei_cl_complete(cl, cb);
        }
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * returns true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
                        struct mei_msg_hdr *mei_hdr)
{
        return cl->host_client_id == mei_hdr->host_addr &&
                cl->me_client_id == mei_hdr->me_addr;
}

/**
 * mei_cl_is_reading - checks if the client
 *      is the one to read this message
 *
 * @cl: mei client
 * @mei_hdr: header of mei message
 *
 * returns true on match and false otherwise
 */
static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
{
        return mei_cl_hbm_equal(cl, mei_hdr) &&
                cl->state == MEI_FILE_CONNECTED &&
                cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: list of completed callbacks
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
                               struct mei_msg_hdr *mei_hdr,
                               struct mei_cl_cb *complete_list)
{
        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
        unsigned char *buffer = NULL;

        list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
                cl = cb->cl;
                if (!cl || !mei_cl_is_reading(cl, mei_hdr))
                        continue;

                cl->reading_state = MEI_READING;

                if (cb->response_buffer.size == 0 ||
                    cb->response_buffer.data == NULL) {
                        dev_err(&dev->pdev->dev, "response buffer is not allocated.\n");
                        list_del(&cb->list);
                        return -ENOMEM;
                }

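                /*
                 * A client message may arrive split over several interrupts;
                 * grow the response buffer so the new fragment fits after the
                 * data already accumulated at buf_idx.
                 */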
                if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
                        dev_dbg(&dev->pdev->dev, "message overflow. size %d len %d idx %ld\n",
                                cb->response_buffer.size,
                                mei_hdr->length, cb->buf_idx);
                        buffer = krealloc(cb->response_buffer.data,
                                          mei_hdr->length + cb->buf_idx,
                                          GFP_KERNEL);

                        if (!buffer) {
                                dev_err(&dev->pdev->dev, "allocation failed.\n");
                                list_del(&cb->list);
                                return -ENOMEM;
                        }
                        cb->response_buffer.data = buffer;
                        cb->response_buffer.size =
                                mei_hdr->length + cb->buf_idx;
                }

                buffer = cb->response_buffer.data + cb->buf_idx;
                mei_read_slots(dev, buffer, mei_hdr->length);

                cb->buf_idx += mei_hdr->length;
                if (mei_hdr->msg_complete) {
                        cl->status = 0;
                        list_del(&cb->list);
                        dev_dbg(&dev->pdev->dev, "completed read H cl = %d, ME cl = %d, length = %lu\n",
                                cl->host_client_id,
                                cl->me_client_id,
                                cb->buf_idx);
                        list_add_tail(&cb->list, &complete_list->list);
                }
                break;
        }

        dev_dbg(&dev->pdev->dev, "message read\n");
        if (!buffer) {
                mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
                dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
                                MEI_HDR_PRM(mei_hdr));
        }

        return 0;
}

/**
 * mei_cl_irq_close - processes close related operation from
 *      interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @slots: free slots.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
                        s32 *slots, struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;

        u32 msg_slots =
                mei_data2slots(sizeof(struct hbm_client_connect_request));

        if (*slots < msg_slots)
                return -EMSGSIZE;

        *slots -= msg_slots;

        if (mei_hbm_cl_disconnect_req(dev, cl)) {
                cl->status = 0;
                cb->buf_idx = 0;
                list_move_tail(&cb->list, &cmpl_list->list);
                return -EIO;
        }

        cl->state = MEI_FILE_DISCONNECTING;
        cl->status = 0;
        cb->buf_idx = 0;
        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;

        return 0;
}

/**
 * mei_cl_irq_read - processes client read related operation from the
 *      interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @slots: free slots.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
                           s32 *slots, struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;

        u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));

        if (*slots < msg_slots) {
                /* return the cancel routine */
                list_del(&cb->list);
                return -EMSGSIZE;
        }

        *slots -= msg_slots;

        if (mei_hbm_cl_flow_control_req(dev, cl)) {
                cl->status = -ENODEV;
                cb->buf_idx = 0;
                list_move_tail(&cb->list, &cmpl_list->list);
                return -ENODEV;
        }
        list_move_tail(&cb->list, &dev->read_list.list);

        return 0;
}


/**
 * mei_cl_irq_ioctl - processes client ioctl related operation from the
 *      interrupt thread context - send connection request
 *
 * @cl: client
 * @cb: callback block.
 * @slots: free slots.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_ioctl(struct mei_cl *cl, struct mei_cl_cb *cb,
                           s32 *slots, struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;

        u32 msg_slots =
                mei_data2slots(sizeof(struct hbm_client_connect_request));

        if (*slots < msg_slots) {
                /* return the cancel routine */
                list_del(&cb->list);
                return -EMSGSIZE;
        }

        *slots -= msg_slots;

        cl->state = MEI_FILE_CONNECTING;

        if (mei_hbm_cl_connect_req(dev, cl)) {
                cl->status = -ENODEV;
                cb->buf_idx = 0;
                list_del(&cb->list);
                return -ENODEV;
        }

        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;
        return 0;
}

/**
 * mei_cl_irq_write_complete - write messages to device.
 *
 * @cl: client
 * @cb: callback block.
 * @slots: free slots.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
                                     s32 *slots, struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        struct mei_msg_hdr mei_hdr;
        size_t len = cb->request_buffer.size - cb->buf_idx;
        u32 msg_slots = mei_data2slots(len);

        mei_hdr.host_addr = cl->host_client_id;
        mei_hdr.me_addr = cl->me_client_id;
        mei_hdr.reserved = 0;

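        /*
         * Three cases: the remaining payload fits in the free slots and is
         * sent as a complete message; the host buffer is entirely empty
         * (*slots == hbuf_depth), so as large a partial fragment as fits is
         * sent and marked incomplete; otherwise nothing is sent and we wait
         * for the buffer to drain.
         */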
        if (*slots >= msg_slots) {
                mei_hdr.length = len;
                mei_hdr.msg_complete = 1;
        /* Split the message only if we can write the whole host buffer */
        } else if (*slots == dev->hbuf_depth) {
                msg_slots = *slots;
                len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
                mei_hdr.length = len;
                mei_hdr.msg_complete = 0;
        } else {
                /* wait for next time the host buffer is empty */
                return 0;
        }

        dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
                        cb->request_buffer.size, cb->buf_idx);
        dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));

        *slots -= msg_slots;
        if (mei_write_message(dev, &mei_hdr,
                        cb->request_buffer.data + cb->buf_idx)) {
                cl->status = -ENODEV;
                list_move_tail(&cb->list, &cmpl_list->list);
                return -ENODEV;
        }

        cl->status = 0;
        cb->buf_idx += mei_hdr.length;
        if (mei_hdr.msg_complete) {
                if (mei_cl_flow_ctrl_reduce(cl))
                        return -ENODEV;
                list_move_tail(&cb->list, &dev->write_waiting_list.list);
        }

        return 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
                struct mei_cl_cb *cmpl_list, s32 *slots)
{
        struct mei_msg_hdr *mei_hdr;
        struct mei_cl *cl_pos = NULL;
        struct mei_cl *cl_next = NULL;
        int ret = 0;

        if (!dev->rd_msg_hdr) {
                dev->rd_msg_hdr = mei_read_hdr(dev);
                dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
                (*slots)--;
                dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
        }
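        /*
         * The header just read is cached in dev->rd_msg_hdr so it is not
         * fetched from the hardware again on a later pass; it is cleared
         * further down once the message body has been consumed.
         */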
        mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
        dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

        if (mei_hdr->reserved || !dev->rd_msg_hdr) {
                dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
                ret = -EBADMSG;
                goto end;
        }

        if (mei_hdr->host_addr || mei_hdr->me_addr) {
                list_for_each_entry_safe(cl_pos, cl_next,
                                        &dev->file_list, link) {
                        dev_dbg(&dev->pdev->dev,
                                        "list_for_each_entry_safe read host"
                                        " client = %d, ME client = %d\n",
                                        cl_pos->host_client_id,
                                        cl_pos->me_client_id);
                        if (mei_cl_hbm_equal(cl_pos, mei_hdr))
                                break;
                }

                if (&cl_pos->link == &dev->file_list) {
                        dev_dbg(&dev->pdev->dev, "corrupted message header\n");
                        ret = -EBADMSG;
                        goto end;
                }
        }
        if (((*slots) * sizeof(u32)) < mei_hdr->length) {
                dev_err(&dev->pdev->dev,
                                "we can't read the message slots =%08x.\n",
                                *slots);
                /* we can't read the message */
                ret = -ERANGE;
                goto end;
        }

        /* decide where to read the message to */
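        /*
         * host_addr 0 addresses the HBM (bus) protocol itself, the amthif
         * client in the READING state is handled by its dedicated routine,
         * and everything else is routed to a regular client read.
         */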
        if (!mei_hdr->host_addr) {
                dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
                mei_hbm_dispatch(dev, mei_hdr);
                dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
        } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
                   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
                   (dev->iamthif_state == MEI_IAMTHIF_READING)) {

                dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
                dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

                ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
                if (ret)
                        goto end;
        } else {
                dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n");
                dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
                ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
                if (ret)
                        goto end;
        }

        /* reset the number of slots and header */
        *slots = mei_count_full_read_slots(dev);
        dev->rd_msg_hdr = 0;

        if (*slots == -EOVERFLOW) {
                /* overflow - reset */
                dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n");
                /* set the event since message has been read */
                ret = -ERANGE;
                goto end;
        }
end:
        return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);

/**
 * mei_irq_write_handler - dispatch write requests
 *  after irq received
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
        struct mei_cl_cb *list;
        s32 slots;
        int ret;

        if (!mei_hbuf_is_ready(dev)) {
                dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
                return 0;
        }
        slots = mei_hbuf_empty_slots(dev);
        if (slots <= 0)
                return -EMSGSIZE;

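        /*
         * Pending work is served in order: write-waiting callbacks are
         * completed first, then any deferred extra message and the watchdog,
         * then the control write list (disconnect, flow control, connect),
         * and finally the pending data writes.
         */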
        /* complete all waiting for write CB */
        dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

        list = &dev->write_waiting_list;
        list_for_each_entry_safe(cb, next, &list->list, list) {
                cl = cb->cl;
                if (cl == NULL)
                        continue;

                cl->status = 0;
                list_del(&cb->list);
                if (MEI_WRITING == cl->writing_state &&
                    cb->fop_type == MEI_FOP_WRITE &&
                    cl != &dev->iamthif_cl) {
                        dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
                        cl->writing_state = MEI_WRITE_COMPLETE;
                        list_add_tail(&cb->list, &cmpl_list->list);
                }
                if (cl == &dev->iamthif_cl) {
                        dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
                        if (dev->iamthif_flow_control_pending) {
                                ret = mei_amthif_irq_read(dev, &slots);
                                if (ret)
                                        return ret;
                        }
                }
        }

        if (dev->wd_state == MEI_WD_STOPPING) {
                dev->wd_state = MEI_WD_IDLE;
                wake_up_interruptible(&dev->wait_stop_wd);
        }

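        /*
         * dev->wr_ext_msg appears to hold a single message prepared outside
         * this handler (e.g. an HBM reply); send it now that host buffer
         * space is known to be available and account for the slots it uses.
         */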
        if (dev->wr_ext_msg.hdr.length) {
                mei_write_message(dev, &dev->wr_ext_msg.hdr,
                                dev->wr_ext_msg.data);
                slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
                dev->wr_ext_msg.hdr.length = 0;
        }
        if (dev->dev_state == MEI_DEV_ENABLED) {
                if (dev->wd_pending &&
                    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
                        if (mei_wd_send(dev))
                                dev_dbg(&dev->pdev->dev, "wd send failed.\n");
                        else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
                                return -ENODEV;

                        dev->wd_pending = false;

                        if (dev->wd_state == MEI_WD_RUNNING)
                                slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
                        else
                                slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
                }
        }

        /* complete control write list CB */
        dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
                cl = cb->cl;
                if (!cl) {
                        list_del(&cb->list);
                        return -ENODEV;
                }
                switch (cb->fop_type) {
                case MEI_FOP_CLOSE:
                        /* send disconnect message */
                        ret = mei_cl_irq_close(cl, cb, &slots, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_READ:
                        /* send flow control message */
                        ret = mei_cl_irq_read(cl, cb, &slots, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_IOCTL:
                        /* connect message */
                        if (mei_cl_is_other_connecting(cl))
                                continue;
                        ret = mei_cl_irq_ioctl(cl, cb, &slots, cmpl_list);
                        if (ret)
                                return ret;

                        break;

                default:
                        BUG();
                }

        }
        /* complete write list CB */
        dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
                cl = cb->cl;
                if (cl == NULL)
                        continue;
                if (mei_cl_flow_ctrl_creds(cl) <= 0) {
                        dev_dbg(&dev->pdev->dev,
                                "No flow control credentials for client %d, not sending.\n",
                                cl->host_client_id);
                        continue;
                }

                if (cl == &dev->iamthif_cl)
                        ret = mei_amthif_irq_write_complete(cl, cb,
                                                &slots, cmpl_list);
                else
                        ret = mei_cl_irq_write_complete(cl, cb,
                                                &slots, cmpl_list);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);


/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 * NOTE: This function is called by timer interrupt work
 */
void mei_timer(struct work_struct *work)
{
        unsigned long timeout;
        struct mei_cl *cl_pos = NULL;
        struct mei_cl *cl_next = NULL;
        struct mei_cl_cb *cb_pos = NULL;
        struct mei_cl_cb *cb_next = NULL;

        struct mei_device *dev = container_of(work,
                                        struct mei_device, timer_work.work);

        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
                        if (dev->init_clients_timer) {
                                if (--dev->init_clients_timer == 0) {
                                        dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n",
                                                dev->hbm_state);
                                        mei_reset(dev, 1);
                                }
                        }
                }
                goto out;
        }
        /*** connect/disconnect timeouts ***/
        list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
                if (cl_pos->timer_count) {
                        if (--cl_pos->timer_count == 0) {
                                dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n");
                                mei_reset(dev, 1);
                                goto out;
                        }
                }
        }

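        /*
         * If an amthif request has been stalled for too long, reset the
         * device and clear the amthif bookkeeping so the next queued
         * command can be started.
         */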
        if (dev->iamthif_stall_timer) {
                if (--dev->iamthif_stall_timer == 0) {
                        dev_err(&dev->pdev->dev, "reset: amthif hung.\n");
                        mei_reset(dev, 1);
                        dev->iamthif_msg_buf_size = 0;
                        dev->iamthif_msg_buf_index = 0;
                        dev->iamthif_canceled = false;
                        dev->iamthif_ioctl = true;
                        dev->iamthif_state = MEI_IAMTHIF_IDLE;
                        dev->iamthif_timer = 0;

                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;

                        dev->iamthif_file_object = NULL;
                        mei_amthif_run_next_cmd(dev);
                }
        }

        if (dev->iamthif_timer) {

                timeout = dev->iamthif_timer +
                        mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

                dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
                                dev->iamthif_timer);
                dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
                dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
                if (time_after(jiffies, timeout)) {
                        /*
                         * User didn't read the AMTHI data on time (15sec)
                         * freeing AMTHI for other requests
                         */

                        dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");

                        list_for_each_entry_safe(cb_pos, cb_next,
                                &dev->amthif_rd_complete_list.list, list) {

                                cl_pos = cb_pos->file_object->private_data;

                                /* Finding the AMTHI entry. */
                                if (cl_pos == &dev->iamthif_cl)
                                        list_del(&cb_pos->list);
                        }
                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;

                        dev->iamthif_file_object->private_data = NULL;
                        dev->iamthif_file_object = NULL;
                        dev->iamthif_timer = 0;
                        mei_amthif_run_next_cmd(dev);

                }
        }
out:
        schedule_delayed_work(&dev->timer_work, 2 * HZ);
        mutex_unlock(&dev->device_lock);
}