blob: 539e861abc1e7a38d8ae5926a98daca5736e5ed0 [file] [log] [blame]
Tomas Winkler9ca90502013-01-08 23:07:13 +02001/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
18#include <linux/sched.h>
19#include <linux/wait.h>
20#include <linux/delay.h>
21
22#include <linux/mei.h>
23
24#include "mei_dev.h"
25#include "hbm.h"
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020026#include "client.h"
27
28/**
29 * mei_me_cl_by_uuid - locate index of me client
30 *
31 * @dev: mei device
Alexander Usyskina27a76d2014-02-17 15:13:22 +020032 *
33 * Locking: called under "dev->device_lock" lock
34 *
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020035 * returns me client index or -ENOENT if not found
36 */
37int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
38{
Alexander Usyskina27a76d2014-02-17 15:13:22 +020039 int i;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020040
41 for (i = 0; i < dev->me_clients_num; ++i)
42 if (uuid_le_cmp(*uuid,
Alexander Usyskina27a76d2014-02-17 15:13:22 +020043 dev->me_clients[i].props.protocol_name) == 0)
44 return i;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020045
Alexander Usyskina27a76d2014-02-17 15:13:22 +020046 return -ENOENT;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020047}
48
49
50/**
51 * mei_me_cl_by_id return index to me_clients for client_id
52 *
53 * @dev: the device structure
54 * @client_id: me client id
55 *
56 * Locking: called under "dev->device_lock" lock
57 *
58 * returns index on success, -ENOENT on failure.
59 */
60
61int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
62{
63 int i;
Alexander Usyskina27a76d2014-02-17 15:13:22 +020064
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020065 for (i = 0; i < dev->me_clients_num; i++)
66 if (dev->me_clients[i].client_id == client_id)
Alexander Usyskina27a76d2014-02-17 15:13:22 +020067 return i;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020068
Alexander Usyskina27a76d2014-02-17 15:13:22 +020069 return -ENOENT;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020070}
Tomas Winkler9ca90502013-01-08 23:07:13 +020071
72
/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 *
 * Unlinks (does not free) every callback on @list whose client id
 * matches @cl; the _safe iterator is required because entries are
 * deleted while walking.
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	list_for_each_entry_safe(cb, next, &list->list, list) {
		/* cb->cl may be NULL for some control cbs; skip those */
		if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
			list_del(&cb->list);
	}
}
89
90/**
91 * mei_io_cb_free - free mei_cb_private related memory
92 *
93 * @cb: mei callback struct
94 */
95void mei_io_cb_free(struct mei_cl_cb *cb)
96{
97 if (cb == NULL)
98 return;
99
100 kfree(cb->request_buffer.data);
101 kfree(cb->response_buffer.data);
102 kfree(cb);
103}
104
105/**
106 * mei_io_cb_init - allocate and initialize io callback
107 *
108 * @cl - mei client
Masanari Iida393b1482013-04-05 01:05:05 +0900109 * @fp: pointer to file structure
Tomas Winkler9ca90502013-01-08 23:07:13 +0200110 *
111 * returns mei_cl_cb pointer or NULL;
112 */
113struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
114{
115 struct mei_cl_cb *cb;
116
117 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
118 if (!cb)
119 return NULL;
120
121 mei_io_list_init(cb);
122
123 cb->file_object = fp;
124 cb->cl = cl;
125 cb->buf_idx = 0;
126 return cb;
127}
128
129/**
130 * mei_io_cb_alloc_req_buf - allocate request buffer
131 *
Masanari Iida393b1482013-04-05 01:05:05 +0900132 * @cb: io callback structure
133 * @length: size of the buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200134 *
135 * returns 0 on success
136 * -EINVAL if cb is NULL
137 * -ENOMEM if allocation failed
138 */
139int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
140{
141 if (!cb)
142 return -EINVAL;
143
144 if (length == 0)
145 return 0;
146
147 cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
148 if (!cb->request_buffer.data)
149 return -ENOMEM;
150 cb->request_buffer.size = length;
151 return 0;
152}
153/**
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200154 * mei_io_cb_alloc_resp_buf - allocate response buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200155 *
Masanari Iida393b1482013-04-05 01:05:05 +0900156 * @cb: io callback structure
157 * @length: size of the buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200158 *
159 * returns 0 on success
160 * -EINVAL if cb is NULL
161 * -ENOMEM if allocation failed
162 */
163int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
164{
165 if (!cb)
166 return -EINVAL;
167
168 if (length == 0)
169 return 0;
170
171 cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
172 if (!cb->response_buffer.data)
173 return -ENOMEM;
174 cb->response_buffer.size = length;
175 return 0;
176}
177
178
179
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 *
 * returns 0 on success, -EINVAL if @cl or its device is NULL
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	/* drop this client's entries from every pending I/O queue */
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_flush(&cl->dev->write_list, cl);
	mei_io_list_flush(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}
204
Tomas Winkler9ca90502013-01-08 23:07:13 +0200205
206/**
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200207 * mei_cl_init - initializes cl.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200208 *
209 * @cl: host client to be initialized
210 * @dev: mei device
211 */
212void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
213{
214 memset(cl, 0, sizeof(struct mei_cl));
215 init_waitqueue_head(&cl->wait);
216 init_waitqueue_head(&cl->rx_wait);
217 init_waitqueue_head(&cl->tx_wait);
218 INIT_LIST_HEAD(&cl->link);
Samuel Ortiza7b71bc2013-03-27 17:29:56 +0200219 INIT_LIST_HEAD(&cl->device_link);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200220 cl->reading_state = MEI_IDLE;
221 cl->writing_state = MEI_IDLE;
222 cl->dev = dev;
223}
224
225/**
226 * mei_cl_allocate - allocates cl structure and sets it up.
227 *
228 * @dev: mei device
229 * returns The allocated file or NULL on failure
230 */
231struct mei_cl *mei_cl_allocate(struct mei_device *dev)
232{
233 struct mei_cl *cl;
234
235 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
236 if (!cl)
237 return NULL;
238
239 mei_cl_init(cl, dev);
240
241 return cl;
242}
243
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200244/**
245 * mei_cl_find_read_cb - find this cl's callback in the read list
246 *
Masanari Iida393b1482013-04-05 01:05:05 +0900247 * @cl: host client
248 *
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200249 * returns cb on success, NULL on error
250 */
251struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
252{
253 struct mei_device *dev = cl->dev;
254 struct mei_cl_cb *cb = NULL;
255 struct mei_cl_cb *next = NULL;
256
257 list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
258 if (mei_cl_cmp_id(cl, cb->cl))
259 return cb;
260 return NULL;
261}
262
/** mei_cl_link: allocate host id in the host map
 *
 * @cl - host client
 * @id - fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE when the id map or the open handle count is exhausted
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one*/
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	/* find_first_zero_bit returns MEI_CLIENTS_MAX when the map is full */
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	/* amthif handles are accounted separately; include them in the cap */
	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
Tomas Winkler781d0d82013-01-08 23:07:22 +0200311
Tomas Winkler9ca90502013-01-08 23:07:13 +0200312/**
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200313 * mei_cl_unlink - remove me_cl from the list
Tomas Winkler9ca90502013-01-08 23:07:13 +0200314 *
Masanari Iida393b1482013-04-05 01:05:05 +0900315 * @cl: host client
Tomas Winkler9ca90502013-01-08 23:07:13 +0200316 */
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200317int mei_cl_unlink(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200318{
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200319 struct mei_device *dev;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200320
Tomas Winkler781d0d82013-01-08 23:07:22 +0200321 /* don't shout on error exit path */
322 if (!cl)
323 return 0;
324
Tomas Winkler8e9a4a92013-01-10 17:32:14 +0200325 /* wd and amthif might not be initialized */
326 if (!cl->dev)
327 return 0;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200328
329 dev = cl->dev;
330
Tomas Winklera14c44d2013-09-16 23:44:45 +0300331 cl_dbg(dev, cl, "unlink client");
332
Tomas Winkler22f96a02013-09-16 23:44:47 +0300333 if (dev->open_handle_count > 0)
334 dev->open_handle_count--;
335
336 /* never clear the 0 bit */
337 if (cl->host_client_id)
338 clear_bit(cl->host_client_id, dev->host_clients_map);
339
340 list_del_init(&cl->link);
341
342 cl->state = MEI_FILE_INITIALIZING;
343
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200344 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200345}
346
347
/**
 * mei_host_client_init - work handler that starts the fixed host clients
 * (amthif, watchdog, nfc) whose protocol uuids appear in the me client
 * properties table, then marks the device enabled.
 *
 * @work: embedded in mei_device as init_work
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

		/* uuid_le_cmp returns 0 on match */
		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);

	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);
}
374
375
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 * (the lock is dropped while waiting for the FW response)
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets, err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	/* only a client already marked for disconnect needs the handshake */
	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	cb->fop_type = MEI_FOP_CLOSE;
	if (dev->hbuf_is_ready) {
		/* host buffer free: send the disconnect request now */
		dev->hbuf_is_ready = false;
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* defer: the interrupt handler will send the request */
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	}
	mutex_unlock(&dev->device_lock);

	/* wait unlocked for FW confirmation or timeout */
	err = wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);
	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		rets = -ENODEV;
		if (MEI_FILE_DISCONNECTED != cl->state)
			cl_err(dev, cl, "wrong status client disconnect.\n");

		/* err == 0 means the wait timed out */
		if (err)
			cl_dbg(dev, cl, "wait failed disconnect err=%08x\n",
					err);

		cl_err(dev, cl, "failed to disconnect from FW client.\n");
	}

	/* drop any control cbs still queued for this client */
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	mei_io_cb_free(cb);
	return rets;
}
448
449
450/**
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200451 * mei_cl_is_other_connecting - checks if other
452 * client with the same me client id is connecting
Tomas Winkler9ca90502013-01-08 23:07:13 +0200453 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200454 * @cl: private data of the file object
455 *
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200456 * returns true if other client is connected, false - otherwise.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200457 */
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200458bool mei_cl_is_other_connecting(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200459{
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200460 struct mei_device *dev;
461 struct mei_cl *pos;
462 struct mei_cl *next;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200463
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200464 if (WARN_ON(!cl || !cl->dev))
465 return false;
466
467 dev = cl->dev;
468
469 list_for_each_entry_safe(pos, next, &dev->file_list, link) {
470 if ((pos->state == MEI_FILE_CONNECTING) &&
471 (pos != cl) && cl->me_client_id == pos->me_client_id)
472 return true;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200473
474 }
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200475
476 return false;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200477}
478
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure associated with the request
 *
 * Locking: called under "dev->device_lock" lock
 * (the lock is dropped while waiting for the FW response)
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* send now only when the host buffer is free and no other client
	 * with the same me id is mid-connect; otherwise queue the request */
	if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
		dev->hbuf_is_ready = false;

		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		/* remove the now-stale control cbs for this client */
		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	mei_io_cb_free(cb);
	return rets;
}
542
/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return 0;

	/* per-connection credits on the host client */
	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	/* otherwise look for shared credits on the matching me client;
	 * those are only valid with a single receive buffer */
	for (i = 0; i < dev->me_clients_num; i++) {
		struct mei_me_client *me_cl = &dev->me_clients[i];
		if (me_cl->client_id == cl->me_client_id) {
			if (me_cl->mei_flow_ctrl_creds) {
				if (WARN_ON(me_cl->props.single_recv_buf == 0))
					return -EINVAL;
				return 1;
			} else {
				return 0;
			}
		}
	}
	return -ENOENT;
}
582
/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * @returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return -ENOENT;

	for (i = 0; i < dev->me_clients_num; i++) {
		struct mei_me_client *me_cl = &dev->me_clients[i];
		if (me_cl->client_id == cl->me_client_id) {
			if (me_cl->props.single_recv_buf != 0) {
				/* shared receive buffer: the credit is
				 * accounted on the me client */
				if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
					return -EINVAL;
				dev->me_clients[i].mei_flow_ctrl_creds--;
			} else {
				/* per-connection credit on the host client */
				if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
					return -EINVAL;
				cl->mei_flow_ctrl_creds--;
			}
			return 0;
		}
	}
	return -ENOENT;
}
623
Tomas Winkler9ca90502013-01-08 23:07:13 +0200624/**
Masanari Iida393b1482013-04-05 01:05:05 +0900625 * mei_cl_read_start - the start read client message function.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200626 *
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200627 * @cl: host client
Tomas Winkler9ca90502013-01-08 23:07:13 +0200628 *
629 * returns 0 on success, <0 on failure.
630 */
Tomas Winklerfcb136e2013-04-19 22:01:35 +0300631int mei_cl_read_start(struct mei_cl *cl, size_t length)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200632{
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200633 struct mei_device *dev;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200634 struct mei_cl_cb *cb;
635 int rets;
636 int i;
637
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200638 if (WARN_ON(!cl || !cl->dev))
639 return -ENODEV;
640
641 dev = cl->dev;
642
Tomas Winklerb950ac12013-07-25 20:15:53 +0300643 if (!mei_cl_is_connected(cl))
Tomas Winkler9ca90502013-01-08 23:07:13 +0200644 return -ENODEV;
645
Tomas Winklerd91aaed2013-01-08 23:07:18 +0200646 if (cl->read_cb) {
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300647 cl_dbg(dev, cl, "read is pending.\n");
Tomas Winkler9ca90502013-01-08 23:07:13 +0200648 return -EBUSY;
649 }
650 i = mei_me_cl_by_id(dev, cl->me_client_id);
651 if (i < 0) {
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300652 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200653 return -ENODEV;
654 }
655
656 cb = mei_io_cb_init(cl, NULL);
657 if (!cb)
658 return -ENOMEM;
659
Tomas Winklerfcb136e2013-04-19 22:01:35 +0300660 /* always allocate at least client max message */
661 length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
662 rets = mei_io_cb_alloc_resp_buf(cb, length);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200663 if (rets)
664 goto err;
665
666 cb->fop_type = MEI_FOP_READ;
667 cl->read_cb = cb;
Tomas Winkler330dd7d2013-02-06 14:06:43 +0200668 if (dev->hbuf_is_ready) {
669 dev->hbuf_is_ready = false;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200670 if (mei_hbm_cl_flow_control_req(dev, cl)) {
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300671 cl_err(dev, cl, "flow control send failed\n");
Tomas Winkler9ca90502013-01-08 23:07:13 +0200672 rets = -ENODEV;
673 goto err;
674 }
675 list_add_tail(&cb->list, &dev->read_list.list);
676 } else {
677 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
678 }
679 return rets;
680err:
681 mei_io_cb_free(cb);
682 return rets;
683}
684
/**
 * mei_cl_irq_write_complete - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @slots: free slots.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise error.
 */
int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
				     s32 *slots, struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	/* payload still to send and the slots needed to carry it */
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (*slots >= msg_slots) {
		/* remainder fits: send it as the final fragment */
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (*slots == dev->hbuf_depth) {
		msg_slots = *slots;
		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);

	*slots -= msg_slots;
	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		/* record the failure and hand the cb to the completion list */
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		/* whole message out: consume one flow control credit */
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}
768
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: true to sleep until the write has completed
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);


	cb->fop_type = MEI_FOP_WRITE;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* Host buffer is not ready, we queue the request */
	if (rets == 0 || !dev->hbuf_is_ready) {
		cb->buf_idx = 0;
		/* unseting complete will enqueue the cb for write */
		mei_hdr.msg_complete = 0;
		rets = buf->size;
		goto out;
	}

	dev->hbuf_is_ready = false;

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		/* first fragment only; the rest goes via the irq path */
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;


	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

	rets = buf->size;
out:
	if (mei_hdr.msg_complete) {
		/* whole message out: consume a credit, await completion */
		if (mei_cl_flow_ctrl_reduce(cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		/* remainder is sent by the interrupt handler */
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		/* drop the lock while sleeping; completion path takes it */
		mutex_unlock(&dev->device_lock);
		if (wait_event_interruptible(cl->tx_wait,
			cl->writing_state == MEI_WRITE_COMPLETE)) {
			if (signal_pending(current))
				rets = -EINTR;
			else
				rets = -ERESTARTSYS;
		}
		mutex_lock(&dev->device_lock);
	}
err:
	return rets;
}
867
868
/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		/* write cb is no longer needed once the data is out */
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		/* wake a blocked reader, otherwise notify the bus layer */
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			mei_cl_bus_rx_event(cl);

	}
}
894
Tomas Winkler4234a6d2013-04-08 21:56:37 +0300895
896/**
Tomas Winkler074b4c02013-02-06 14:06:44 +0200897 * mei_cl_all_disconnect - disconnect forcefully all connected clients
898 *
899 * @dev - mei device
900 */
901
902void mei_cl_all_disconnect(struct mei_device *dev)
903{
904 struct mei_cl *cl, *next;
905
906 list_for_each_entry_safe(cl, next, &dev->file_list, link) {
907 cl->state = MEI_FILE_DISCONNECTED;
908 cl->mei_flow_ctrl_creds = 0;
Tomas Winkler074b4c02013-02-06 14:06:44 +0200909 cl->timer_count = 0;
910 }
911}
912
913
/**
 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
 *
 * @dev - mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl, *next;
	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}
933
934/**
935 * mei_cl_all_write_clear - clear all pending writes
936
937 * @dev - mei device
938 */
939void mei_cl_all_write_clear(struct mei_device *dev)
940{
941 struct mei_cl_cb *cb, *next;
Alexander Usyskin30c54df2014-01-27 22:27:23 +0200942 struct list_head *list;
Tomas Winkler074b4c02013-02-06 14:06:44 +0200943
Alexander Usyskin30c54df2014-01-27 22:27:23 +0200944 list = &dev->write_list.list;
945 list_for_each_entry_safe(cb, next, list, list) {
946 list_del(&cb->list);
947 mei_io_cb_free(cb);
948 }
949
950 list = &dev->write_waiting_list.list;
951 list_for_each_entry_safe(cb, next, list, list) {
Tomas Winkler074b4c02013-02-06 14:06:44 +0200952 list_del(&cb->list);
953 mei_io_cb_free(cb);
954 }
955}
956
957