/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */


#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "interface.h"

/**
 * _mei_cmpl - processes completed operation.
 *
 * @cl: private data of the file object.
 * @cb_pos: callback block.
 */
static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
{
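	/*
	 * Called from the bottom half for a cb that has reached the
	 * completion list: a finished write cb is freed and any writer
	 * blocked on tx_wait is woken; a finished read marks the client
	 * READ_COMPLETE and wakes readers blocked on rx_wait.
	 */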
	if (cb_pos->fop_type == MEI_FOP_WRITE) {
		mei_io_cb_free(cb_pos);
		cb_pos = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb_pos->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);

	}
}

/**
 * _mei_irq_thread_state_ok - checks if mei header matches file private data
 *
 * @cl: private data of the file object
 * @mei_hdr: header of mei client message
 *
 * returns !=0 if matches, 0 if no match.
 */
static int _mei_irq_thread_state_ok(struct mei_cl *cl,
				struct mei_msg_hdr *mei_hdr)
{
	return (cl->host_client_id == mei_hdr->host_addr &&
		cl->me_client_id == mei_hdr->me_addr &&
		cl->state == MEI_FILE_CONNECTED &&
		MEI_READ_COMPLETE != cl->reading_state);
}

/**
 * mei_irq_thread_read_client_message - bottom half read routine after ISR to
 * handle the read mei client message data processing.
 *
 * @complete_list: An instance of our list structure
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_irq_thread_read_client_message(struct mei_cl_cb *complete_list,
		struct mei_device *dev,
		struct mei_msg_hdr *mei_hdr)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
	unsigned char *buffer = NULL;

	dev_dbg(&dev->pdev->dev, "start client msg\n");
	if (list_empty(&dev->read_list.list))
		goto quit;

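	/*
	 * Walk the pending read list for the client whose host/ME address
	 * pair matches this header; the payload is appended to its
	 * response buffer at the current buf_idx offset.
	 */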
	list_for_each_entry_safe(cb_pos, cb_next, &dev->read_list.list, list) {
		cl = cb_pos->cl;
		if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
			cl->reading_state = MEI_READING;
			buffer = cb_pos->response_buffer.data + cb_pos->buf_idx;

			if (cb_pos->response_buffer.size <
					mei_hdr->length + cb_pos->buf_idx) {
				dev_dbg(&dev->pdev->dev, "message overflow.\n");
				list_del(&cb_pos->list);
				return -ENOMEM;
			}
			if (buffer)
				mei_read_slots(dev, buffer, mei_hdr->length);

			cb_pos->buf_idx += mei_hdr->length;
			if (mei_hdr->msg_complete) {
				cl->status = 0;
				list_del(&cb_pos->list);
				dev_dbg(&dev->pdev->dev,
					"completed read H cl = %d, ME cl = %d, length = %lu\n",
					cl->host_client_id,
					cl->me_client_id,
					cb_pos->buf_idx);

				list_add_tail(&cb_pos->list,
						&complete_list->list);
			}

			break;
		}

	}

quit:
	dev_dbg(&dev->pdev->dev, "message read\n");
	if (!buffer) {
		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
		dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
				MEI_HDR_PRM(mei_hdr));
	}

	return 0;
}

/**
 * _mei_irq_thread_close - processes close related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
				struct mei_cl_cb *cb_pos,
				struct mei_cl *cl,
				struct mei_cl_cb *cmpl_list)
{
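	/*
	 * Slot accounting note (assumption): *slots counts free 4-byte
	 * slots in the host buffer, and mei_data2slots() converts a
	 * payload size (message header included) into that slot count.
	 * If the disconnect request cannot fit, give up with -EBADMSG.
	 */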
	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
			sizeof(struct hbm_client_connect_request)))
		return -EBADMSG;

	*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));

	if (mei_hbm_cl_disconnect_req(dev, cl)) {
		cl->status = 0;
		cb_pos->buf_idx = 0;
		list_move_tail(&cb_pos->list, &cmpl_list->list);
		return -EMSGSIZE;
	} else {
		cl->state = MEI_FILE_DISCONNECTING;
		cl->status = 0;
		cb_pos->buf_idx = 0;
		list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
		cl->timer_count = MEI_CONNECT_TIMEOUT;
	}

	return 0;
}


/**
 * _mei_irq_thread_read - processes read related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb_pos,
			struct mei_cl *cl,
			struct mei_cl_cb *cmpl_list)
{
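	/*
	 * A host-initiated read is started by sending the peer a flow
	 * control message, which grants the ME client credit to transmit;
	 * the cb is then parked on dev->read_list until the data arrives.
	 */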
	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
			sizeof(struct hbm_flow_control))) {
		/* return the cancel routine */
		list_del(&cb_pos->list);
		return -EBADMSG;
	}

	*slots -= mei_data2slots(sizeof(struct hbm_flow_control));

	if (mei_hbm_cl_flow_control_req(dev, cl)) {
		cl->status = -ENODEV;
		cb_pos->buf_idx = 0;
		list_move_tail(&cb_pos->list, &cmpl_list->list);
		return -ENODEV;
	}
	list_move_tail(&cb_pos->list, &dev->read_list.list);

	return 0;
}


/**
 * _mei_irq_thread_ioctl - processes ioctl related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb_pos,
			struct mei_cl *cl,
			struct mei_cl_cb *cmpl_list)
{
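	/*
	 * MEI_FOP_IOCTL carries the connect request issued from the ioctl
	 * path: send the connect message, mark the client CONNECTING and
	 * queue the cb on the control read list to wait for the response.
	 */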
	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
			sizeof(struct hbm_client_connect_request))) {
		/* return the cancel routine */
		list_del(&cb_pos->list);
		return -EBADMSG;
	}

	cl->state = MEI_FILE_CONNECTING;
	*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
	if (mei_hbm_cl_connect_req(dev, cl)) {
		cl->status = -ENODEV;
		cb_pos->buf_idx = 0;
		list_del(&cb_pos->list);
		return -ENODEV;
	} else {
		list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
		cl->timer_count = MEI_CONNECT_TIMEOUT;
	}
	return 0;
}

/**
 * mei_irq_thread_write_complete - write messages to device.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
	struct mei_msg_hdr mei_hdr;
	struct mei_cl *cl = cb->cl;
	size_t len = cb->request_buffer.size - cb->buf_idx;
	size_t msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

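	/*
	 * Decide how much of the request can be sent now: if the whole
	 * remainder fits into the free slots, send it as a complete
	 * message; if the host buffer is entirely empty but still too
	 * small, send a partial message that fills it; otherwise wait
	 * for the next buffer-empty interrupt.
	 */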
	if (*slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (*slots == dev->hbuf_depth) {
		msg_slots = *slots;
		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));

	*slots -= msg_slots;
	if (mei_write_message(dev, &mei_hdr,
			cb->request_buffer.data + cb->buf_idx)) {
		cl->status = -ENODEV;
		list_move_tail(&cb->list, &cmpl_list->list);
		return -ENODEV;
	}

	if (mei_flow_ctrl_reduce(dev, cl))
		return -ENODEV;

	cl->status = 0;
	cb->buf_idx += mei_hdr.length;
	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list.list);

	return 0;
}

/**
 * mei_irq_thread_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @cmpl_list: An instance of our list structure
 * @dev: the device structure
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
		struct mei_device *dev,
		s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	int ret = 0;

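	/*
	 * The 32-bit message header is read once per message and cached in
	 * dev->rd_msg_hdr, so a header already consumed from the hardware
	 * is not lost if the payload cannot be read in this pass.
	 */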
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_mecbrw_read(dev);
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
		(*slots)--;
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
		ret = -EBADMSG;
		goto end;
	}

	if (mei_hdr->host_addr || mei_hdr->me_addr) {
		list_for_each_entry_safe(cl_pos, cl_next,
					&dev->file_list, link) {
			dev_dbg(&dev->pdev->dev,
					"list_for_each_entry_safe read host"
					" client = %d, ME client = %d\n",
					cl_pos->host_client_id,
					cl_pos->me_client_id);
			if (cl_pos->host_client_id == mei_hdr->host_addr &&
			    cl_pos->me_client_id == mei_hdr->me_addr)
				break;
		}

		if (&cl_pos->link == &dev->file_list) {
			dev_dbg(&dev->pdev->dev, "corrupted message header\n");
			ret = -EBADMSG;
			goto end;
		}
	}
	if (((*slots) * sizeof(u32)) < mei_hdr->length) {
		dev_dbg(&dev->pdev->dev,
				"we can't read the message slots =%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ERANGE;
		goto end;
	}

	/* decide where to read the message to */
	if (!mei_hdr->host_addr) {
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
		mei_hbm_dispatch(dev, mei_hdr);
		dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
	} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
		   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
		   (dev->iamthif_state == MEI_IAMTHIF_READING)) {
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");

		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

		ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
		if (ret)
			goto end;
	} else {
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
		ret = mei_irq_thread_read_client_message(cmpl_list,
							dev, mei_hdr);
		if (ret)
			goto end;

	}

	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_dbg(&dev->pdev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}


/**
 * mei_irq_thread_write_handler - bottom half write routine after
 * ISR to handle the write processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_irq_thread_write_handler(struct mei_device *dev,
				struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *pos = NULL, *next = NULL;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;

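	/*
	 * Order of service once the host buffer has drained: complete cbs
	 * waiting for a write, handle watchdog stop and the urgent
	 * wr_ext_msg, then the control list (close/read/connect requests),
	 * and finally regular client writes, for as long as slots remain.
	 */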
	if (!mei_hbuf_is_empty(dev)) {
		dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
		return 0;
	}
	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(pos, next, &list->list, list) {
		cl = pos->cl;
		if (cl == NULL)
			continue;

		cl->status = 0;
		list_del(&pos->list);
		if (MEI_WRITING == cl->writing_state &&
		    pos->fop_type == MEI_FOP_WRITE &&
		    cl != &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&pos->list, &cmpl_list->list);
		}
		if (cl == &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);
				if (ret)
					return ret;
			}
		}
	}

	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up_interruptible(&dev->wait_stop_wd);
	}

	if (dev->wr_ext_msg.hdr.length) {
		mei_write_message(dev, &dev->wr_ext_msg.hdr,
				dev->wr_ext_msg.data);
		slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
		dev->wr_ext_msg.hdr.length = 0;
	}
	if (dev->dev_state == MEI_DEV_ENABLED) {
		if (dev->wd_pending &&
		    mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
			if (mei_wd_send(dev))
				dev_dbg(&dev->pdev->dev, "wd send failed.\n");
			else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
				return -ENODEV;

			dev->wd_pending = false;

			if (dev->wd_state == MEI_WD_RUNNING)
				slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
			else
				slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
		}
	}

	/* complete control write list CB */
	dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->ctrl_wr_list.list, list) {
		cl = pos->cl;
		if (!cl) {
			list_del(&pos->list);
			return -ENODEV;
		}
		switch (pos->fop_type) {
		case MEI_FOP_CLOSE:
			/* send disconnect message */
			ret = _mei_irq_thread_close(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = _mei_irq_thread_read(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_IOCTL:
			/* connect message */
			if (mei_other_client_is_connecting(dev, cl))
				continue;
			ret = _mei_irq_thread_ioctl(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;

		default:
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->write_list.list, list) {
		cl = pos->cl;
		if (cl == NULL)
			continue;
		if (mei_flow_ctrl_creds(dev, cl) <= 0) {
			dev_dbg(&dev->pdev->dev,
				"No flow control credentials for client %d, not sending.\n",
				cl->host_client_id);
			continue;
		}

		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write_complete(dev, &slots,
						pos, cmpl_list);
		else
			ret = mei_irq_thread_write_complete(dev, &slots, pos,
						cmpl_list);
		if (ret)
			return ret;

	}
	return 0;
}



/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 * NOTE: This function is called by timer interrupt work
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;

	struct mei_device *dev = container_of(work,
				struct mei_device, timer_work.work);

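	/*
	 * Runs from the periodic timer work (rescheduled every 2 * HZ at
	 * the end of this function) and polices init, connect/disconnect
	 * and AMTHI timeouts, resetting the device when one expires.
	 */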
	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
			if (dev->init_clients_timer) {
				if (--dev->init_clients_timer == 0) {
					dev_dbg(&dev->pdev->dev, "IMEI reset due to init clients timeout ,init clients state = %d.\n",
						dev->init_clients_state);
					mei_reset(dev, 1);
				}
			}
		}
		goto out;
	}
	/*** connect/disconnect timeouts ***/
	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
		if (cl_pos->timer_count) {
			if (--cl_pos->timer_count == 0) {
				dev_dbg(&dev->pdev->dev, "HECI reset due to connect/disconnect timeout.\n");
				mei_reset(dev, 1);
				goto out;
			}
		}
	}

	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_dbg(&dev->pdev->dev, "resetting because of hang to amthi.\n");
			mei_reset(dev, 1);
			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
		dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */

			dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");

			list_for_each_entry_safe(cb_pos, cb_next,
					&dev->amthif_rd_complete_list.list, list) {

				cl_pos = cb_pos->file_object->private_data;

				/* Finding the AMTHI entry. */
				if (cl_pos == &dev->iamthif_cl)
					list_del(&cb_pos->list);
			}
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

		}
	}
out:
	schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}

/**
 * mei_interrupt_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns irqreturn_t
 *
 */
irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
	struct mei_cl *cl;
	s32 slots;
	int rets;
	bool bus_message_received;

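	/*
	 * Threaded (bottom half) part of the interrupt handler: ack the
	 * interrupt when MSI is enabled, react to ME reset/start requests,
	 * drain the read slots, run the write handler, and finally
	 * complete the callbacks gathered on the local complete_list.
	 */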
	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);
	dev->host_hw_state = mei_hcsr_read(dev);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(dev->pdev))
		mei_clear_interrupts(dev);

	dev->me_hw_state = mei_mecsr_read(dev);

	/* check if ME wants a reset */
	if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
	    dev->dev_state != MEI_DEV_RESETING &&
	    dev->dev_state != MEI_DEV_INITIALIZING) {
		dev_dbg(&dev->pdev->dev, "FW not ready.\n");
		mei_reset(dev, 1);
		mutex_unlock(&dev->device_lock);
		return IRQ_HANDLED;
	}

	/* check if we need to start the dev */
	if ((dev->host_hw_state & H_RDY) == 0) {
		if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
			dev->host_hw_state |= (H_IE | H_IG | H_RDY);
			mei_hcsr_set(dev);
			dev->dev_state = MEI_DEV_INIT_CLIENTS;
			dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
			/* link is established
			 * start sending messages.
			 */
			mei_hbm_start_req(dev);
			mutex_unlock(&dev->device_lock);
			return IRQ_HANDLED;
		} else {
			dev_dbg(&dev->pdev->dev, "FW not ready.\n");
			mutex_unlock(&dev->device_lock);
			return IRQ_HANDLED;
		}
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		/* we have urgent data to send so break the read */
		if (dev->wr_ext_msg.hdr.length)
			break;
		dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
		rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
		if (rets)
			goto end;
	}
	rets = mei_irq_thread_write_handler(dev, &complete_list);
end:
	dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
	dev->host_hw_state = mei_hcsr_read(dev);
	dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);

	bus_message_received = false;
	if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
		dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
		bus_message_received = true;
	}
	mutex_unlock(&dev->device_lock);
	if (bus_message_received) {
		dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
		wake_up_interruptible(&dev->wait_recvd_msg);
		bus_message_received = false;
	}
	if (list_empty(&complete_list.list))
		return IRQ_HANDLED;


	list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
		cl = cb_pos->cl;
		list_del(&cb_pos->list);
		if (cl) {
			if (cl != &dev->iamthif_cl) {
				dev_dbg(&dev->pdev->dev, "completing call back.\n");
				_mei_cmpl(cl, cb_pos);
				cb_pos = NULL;
			} else if (cl == &dev->iamthif_cl) {
				mei_amthif_complete(dev, cb_pos);
			}
		}
	}
	return IRQ_HANDLED;
}