blob: 2b0f99955ba6a71babc3d6e2c27df09be661d729 [file] [log] [blame]
Tomas Winkler9ca90502013-01-08 23:07:13 +02001/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
18#include <linux/sched.h>
19#include <linux/wait.h>
20#include <linux/delay.h>
21
22#include <linux/mei.h>
23
24#include "mei_dev.h"
25#include "hbm.h"
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020026#include "client.h"
27
28/**
29 * mei_me_cl_by_uuid - locate index of me client
30 *
31 * @dev: mei device
Alexander Usyskina27a76d2014-02-17 15:13:22 +020032 *
33 * Locking: called under "dev->device_lock" lock
34 *
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020035 * returns me client index or -ENOENT if not found
36 */
37int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
38{
Alexander Usyskina27a76d2014-02-17 15:13:22 +020039 int i;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020040
41 for (i = 0; i < dev->me_clients_num; ++i)
42 if (uuid_le_cmp(*uuid,
Alexander Usyskina27a76d2014-02-17 15:13:22 +020043 dev->me_clients[i].props.protocol_name) == 0)
44 return i;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020045
Alexander Usyskina27a76d2014-02-17 15:13:22 +020046 return -ENOENT;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020047}
48
49
50/**
51 * mei_me_cl_by_id return index to me_clients for client_id
52 *
53 * @dev: the device structure
54 * @client_id: me client id
55 *
56 * Locking: called under "dev->device_lock" lock
57 *
58 * returns index on success, -ENOENT on failure.
59 */
60
61int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
62{
63 int i;
Alexander Usyskina27a76d2014-02-17 15:13:22 +020064
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020065 for (i = 0; i < dev->me_clients_num; i++)
66 if (dev->me_clients[i].client_id == client_id)
Alexander Usyskina27a76d2014-02-17 15:13:22 +020067 return i;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020068
Alexander Usyskina27a76d2014-02-17 15:13:22 +020069 return -ENOENT;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +020070}
Tomas Winkler9ca90502013-01-08 23:07:13 +020071
72
73/**
74 * mei_io_list_flush - removes list entry belonging to cl.
75 *
76 * @list: An instance of our list structure
77 * @cl: host client
78 */
79void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
80{
81 struct mei_cl_cb *cb;
82 struct mei_cl_cb *next;
83
84 list_for_each_entry_safe(cb, next, &list->list, list) {
85 if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
86 list_del(&cb->list);
87 }
88}
89
90/**
91 * mei_io_cb_free - free mei_cb_private related memory
92 *
93 * @cb: mei callback struct
94 */
95void mei_io_cb_free(struct mei_cl_cb *cb)
96{
97 if (cb == NULL)
98 return;
99
100 kfree(cb->request_buffer.data);
101 kfree(cb->response_buffer.data);
102 kfree(cb);
103}
104
105/**
106 * mei_io_cb_init - allocate and initialize io callback
107 *
108 * @cl - mei client
Masanari Iida393b1482013-04-05 01:05:05 +0900109 * @fp: pointer to file structure
Tomas Winkler9ca90502013-01-08 23:07:13 +0200110 *
111 * returns mei_cl_cb pointer or NULL;
112 */
113struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
114{
115 struct mei_cl_cb *cb;
116
117 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
118 if (!cb)
119 return NULL;
120
121 mei_io_list_init(cb);
122
123 cb->file_object = fp;
124 cb->cl = cl;
125 cb->buf_idx = 0;
126 return cb;
127}
128
129/**
130 * mei_io_cb_alloc_req_buf - allocate request buffer
131 *
Masanari Iida393b1482013-04-05 01:05:05 +0900132 * @cb: io callback structure
133 * @length: size of the buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200134 *
135 * returns 0 on success
136 * -EINVAL if cb is NULL
137 * -ENOMEM if allocation failed
138 */
139int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
140{
141 if (!cb)
142 return -EINVAL;
143
144 if (length == 0)
145 return 0;
146
147 cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
148 if (!cb->request_buffer.data)
149 return -ENOMEM;
150 cb->request_buffer.size = length;
151 return 0;
152}
153/**
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200154 * mei_io_cb_alloc_resp_buf - allocate response buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200155 *
Masanari Iida393b1482013-04-05 01:05:05 +0900156 * @cb: io callback structure
157 * @length: size of the buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200158 *
159 * returns 0 on success
160 * -EINVAL if cb is NULL
161 * -ENOMEM if allocation failed
162 */
163int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
164{
165 if (!cb)
166 return -EINVAL;
167
168 if (length == 0)
169 return 0;
170
171 cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
172 if (!cb->response_buffer.data)
173 return -ENOMEM;
174 cb->response_buffer.size = length;
175 return 0;
176}
177
178
179
180/**
181 * mei_cl_flush_queues - flushes queue lists belonging to cl.
182 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200183 * @cl: host client
184 */
185int mei_cl_flush_queues(struct mei_cl *cl)
186{
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300187 struct mei_device *dev;
188
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200189 if (WARN_ON(!cl || !cl->dev))
Tomas Winkler9ca90502013-01-08 23:07:13 +0200190 return -EINVAL;
191
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300192 dev = cl->dev;
193
194 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
Tomas Winkler9ca90502013-01-08 23:07:13 +0200195 mei_io_list_flush(&cl->dev->read_list, cl);
196 mei_io_list_flush(&cl->dev->write_list, cl);
197 mei_io_list_flush(&cl->dev->write_waiting_list, cl);
198 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
199 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
200 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
201 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
202 return 0;
203}
204
Tomas Winkler9ca90502013-01-08 23:07:13 +0200205
206/**
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200207 * mei_cl_init - initializes cl.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200208 *
209 * @cl: host client to be initialized
210 * @dev: mei device
211 */
212void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
213{
214 memset(cl, 0, sizeof(struct mei_cl));
215 init_waitqueue_head(&cl->wait);
216 init_waitqueue_head(&cl->rx_wait);
217 init_waitqueue_head(&cl->tx_wait);
218 INIT_LIST_HEAD(&cl->link);
Samuel Ortiza7b71bc2013-03-27 17:29:56 +0200219 INIT_LIST_HEAD(&cl->device_link);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200220 cl->reading_state = MEI_IDLE;
221 cl->writing_state = MEI_IDLE;
222 cl->dev = dev;
223}
224
225/**
226 * mei_cl_allocate - allocates cl structure and sets it up.
227 *
228 * @dev: mei device
229 * returns The allocated file or NULL on failure
230 */
231struct mei_cl *mei_cl_allocate(struct mei_device *dev)
232{
233 struct mei_cl *cl;
234
235 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
236 if (!cl)
237 return NULL;
238
239 mei_cl_init(cl, dev);
240
241 return cl;
242}
243
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200244/**
245 * mei_cl_find_read_cb - find this cl's callback in the read list
246 *
Masanari Iida393b1482013-04-05 01:05:05 +0900247 * @cl: host client
248 *
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200249 * returns cb on success, NULL on error
250 */
251struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
252{
253 struct mei_device *dev = cl->dev;
Tomas Winkler31f88f52014-02-17 15:13:25 +0200254 struct mei_cl_cb *cb;
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200255
Tomas Winkler31f88f52014-02-17 15:13:25 +0200256 list_for_each_entry(cb, &dev->read_list.list, list)
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200257 if (mei_cl_cmp_id(cl, cb->cl))
258 return cb;
259 return NULL;
260}
261
/**
 * mei_cl_link - allocate a host id and link the client into the file list
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for a generic one
 *
 * Locking: presumably called under "dev->device_lock" (it mutates
 * host_clients_map and file_list) -- NOTE(review): confirm at call sites.
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if the id space or open handle budget is exhausted
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one */
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	/* also covers find_first_zero_bit finding no free bit */
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	/* amthif handles are accounted separately but share the budget */
	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	/* reserve the id only after all failure checks passed */
	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
Tomas Winkler781d0d82013-01-08 23:07:22 +0200310
/**
 * mei_cl_unlink - remove the host client from the device file list
 *	and release its host id
 *
 * @cl: host client, may be NULL or not yet linked
 *
 * returns 0 (always succeeds; NULL/uninitialized clients are ignored)
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	/* list_del_init so a repeated unlink is harmless */
	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
345
346
/**
 * mei_host_client_init - initialize the special host clients
 *	(amthif, watchdog, nfc) after ME client enumeration
 *
 * @work: the init_work member embedded in the mei device
 *
 * Runs from the workqueue; takes device_lock for the whole scan and
 * finally marks the device enabled and clears the reset counter.
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

		/* uuid_le_cmp returns 0 on match */
		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);

	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);
}
373
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200374/**
375 * mei_hbuf_acquire: try to acquire host buffer
376 *
377 * @dev: the device structure
378 * returns true if host buffer was acquired
379 */
380bool mei_hbuf_acquire(struct mei_device *dev)
381{
382 if (!dev->hbuf_is_ready) {
383 dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
384 return false;
385 }
386
387 dev->hbuf_is_ready = false;
388
389 return true;
390}
Tomas Winkler9ca90502013-01-08 23:07:13 +0200391
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * while waiting for the firmware response and reacquired afterwards.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets, err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	/* only a client already marked DISCONNECTING is processed */
	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	cb->fop_type = MEI_FOP_CLOSE;
	if (mei_hbuf_acquire(dev)) {
		/* buffer available: send the request now, park cb on the
		 * control-read list to await the firmware response */
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* no buffer: queue the request for the irq path to send */
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	}
	/* drop the device lock while sleeping on the response */
	mutex_unlock(&dev->device_lock);

	err = wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);
	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		rets = -ENODEV;
		/* NOTE(review): this inner test is always true in the else
		 * branch -- the state check is redundant */
		if (MEI_FILE_DISCONNECTED != cl->state)
			cl_err(dev, cl, "wrong status client disconnect.\n");

		if (err)
			cl_dbg(dev, cl, "wait failed disconnect err=%08x\n",
					err);

		cl_err(dev, cl, "failed to disconnect from FW client.\n");
	}

	/* remove stale control entries for this client in either case */
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	mei_io_cb_free(cb);
	return rets;
}
463
464
465/**
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200466 * mei_cl_is_other_connecting - checks if other
467 * client with the same me client id is connecting
Tomas Winkler9ca90502013-01-08 23:07:13 +0200468 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200469 * @cl: private data of the file object
470 *
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200471 * returns true if other client is connected, false - otherwise.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200472 */
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200473bool mei_cl_is_other_connecting(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200474{
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200475 struct mei_device *dev;
Tomas Winkler31f88f52014-02-17 15:13:25 +0200476 struct mei_cl *ocl; /* the other client */
Tomas Winkler9ca90502013-01-08 23:07:13 +0200477
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200478 if (WARN_ON(!cl || !cl->dev))
479 return false;
480
481 dev = cl->dev;
482
Tomas Winkler31f88f52014-02-17 15:13:25 +0200483 list_for_each_entry(ocl, &dev->file_list, link) {
484 if (ocl->state == MEI_FILE_CONNECTING &&
485 ocl != cl &&
486 cl->me_client_id == ocl->me_client_id)
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200487 return true;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200488
489 }
Tomas Winkler90e0b5f12013-01-08 23:07:14 +0200490
491 return false;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200492}
493
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to the file structure originating the request, may be NULL
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * while waiting for the firmware response and reacquired afterwards.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		/* send the connect request immediately */
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* defer: the irq path will issue the request later */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	/* cl->status carries the result set by the response handler */
	rets = cl->status;

out:
	mei_io_cb_free(cb);
	return rets;
}
556
/**
 * mei_cl_flow_ctrl_creds - checks flow control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if credits are available, 0 - otherwise.
 *	-ENOENT if me client is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* no ME clients enumerated yet -- nothing to credit against */
	if (!dev->me_clients_num)
		return 0;

	/* per-connection credits granted to this host client */
	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	/* shared credits live on the ME client only when it uses a
	 * single receive buffer */
	me_cl = &dev->me_clients[id];
	if (me_cl->mei_flow_ctrl_creds) {
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			return -EINVAL;
		return 1;
	}
	return 0;
}
597
/**
 * mei_cl_flow_ctrl_reduce - consume one flow control credit for cl.
 *
 * @cl: private data of the file object
 *
 * returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
	if (me_cl->props.single_recv_buf != 0) {
		/* shared-buffer clients keep the credit on the ME side */
		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		me_cl->mei_flow_ctrl_creds--;
	} else {
		/* otherwise the credit is per host client connection */
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->mei_flow_ctrl_creds--;
	}
	return 0;
}
637
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: requested read length; rounded up to the ME client's
 *	maximum message length
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* only one outstanding read per client */
	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENODEV;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto err;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		/* grant the firmware a credit to send us the data now */
		if (mei_hbm_cl_flow_control_req(dev, cl)) {
			cl_err(dev, cl, "flow control send failed\n");
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		/* defer the flow control request to the irq path */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

	return rets;
err:
	mei_io_cb_free(cb);
	return rets;
}
699
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	/* remaining payload of a possibly partially-sent message */
	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		/* whole remainder fits in the host buffer */
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		/* hand the failed cb over to the completion list */
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		/* one credit is consumed per complete message */
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}
782
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: when true, sleep until the write completes (the device
 *	lock is dropped for the duration of the wait)
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);


	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* no credits or no buffer: queue the cb and report full size --
	 * the irq path will transmit it later */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

	rets = buf->size;
out:
	if (mei_hdr.msg_complete) {
		/* fully sent: consume a credit, await firmware completion */
		if (mei_cl_flow_ctrl_reduce(cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		/* partial/deferred: the irq path continues from buf_idx */
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		/* drop the device lock while sleeping for completion */
		mutex_unlock(&dev->device_lock);
		if (wait_event_interruptible(cl->tx_wait,
			cl->writing_state == MEI_WRITE_COMPLETE)) {
			if (signal_pending(current))
				rets = -EINTR;
			else
				rets = -ERESTARTSYS;
		}
		mutex_lock(&dev->device_lock);
	}
err:
	return rets;
}
883
884
Tomas Winklerdb086fa2013-05-12 15:34:45 +0300885/**
886 * mei_cl_complete - processes completed operation for a client
887 *
888 * @cl: private data of the file object.
889 * @cb: callback block.
890 */
891void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
892{
893 if (cb->fop_type == MEI_FOP_WRITE) {
894 mei_io_cb_free(cb);
895 cb = NULL;
896 cl->writing_state = MEI_WRITE_COMPLETE;
897 if (waitqueue_active(&cl->tx_wait))
898 wake_up_interruptible(&cl->tx_wait);
899
900 } else if (cb->fop_type == MEI_FOP_READ &&
901 MEI_READING == cl->reading_state) {
902 cl->reading_state = MEI_READ_COMPLETE;
903 if (waitqueue_active(&cl->rx_wait))
904 wake_up_interruptible(&cl->rx_wait);
905 else
906 mei_cl_bus_rx_event(cl);
907
908 }
909}
910
Tomas Winkler4234a6d2013-04-08 21:56:37 +0300911
912/**
Tomas Winkler074b4c02013-02-06 14:06:44 +0200913 * mei_cl_all_disconnect - disconnect forcefully all connected clients
914 *
915 * @dev - mei device
916 */
917
918void mei_cl_all_disconnect(struct mei_device *dev)
919{
Tomas Winkler31f88f52014-02-17 15:13:25 +0200920 struct mei_cl *cl;
Tomas Winkler074b4c02013-02-06 14:06:44 +0200921
Tomas Winkler31f88f52014-02-17 15:13:25 +0200922 list_for_each_entry(cl, &dev->file_list, link) {
Tomas Winkler074b4c02013-02-06 14:06:44 +0200923 cl->state = MEI_FILE_DISCONNECTED;
924 cl->mei_flow_ctrl_creds = 0;
Tomas Winkler074b4c02013-02-06 14:06:44 +0200925 cl->timer_count = 0;
926 }
927}
928
929
930/**
Tomas Winkler52908012013-07-24 16:22:57 +0300931 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
Tomas Winkler074b4c02013-02-06 14:06:44 +0200932 *
933 * @dev - mei device
934 */
Tomas Winkler52908012013-07-24 16:22:57 +0300935void mei_cl_all_wakeup(struct mei_device *dev)
Tomas Winkler074b4c02013-02-06 14:06:44 +0200936{
Tomas Winkler31f88f52014-02-17 15:13:25 +0200937 struct mei_cl *cl;
938 list_for_each_entry(cl, &dev->file_list, link) {
Tomas Winkler074b4c02013-02-06 14:06:44 +0200939 if (waitqueue_active(&cl->rx_wait)) {
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300940 cl_dbg(dev, cl, "Waking up reading client!\n");
Tomas Winkler074b4c02013-02-06 14:06:44 +0200941 wake_up_interruptible(&cl->rx_wait);
942 }
Tomas Winkler52908012013-07-24 16:22:57 +0300943 if (waitqueue_active(&cl->tx_wait)) {
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300944 cl_dbg(dev, cl, "Waking up writing client!\n");
Tomas Winkler52908012013-07-24 16:22:57 +0300945 wake_up_interruptible(&cl->tx_wait);
946 }
Tomas Winkler074b4c02013-02-06 14:06:44 +0200947 }
948}
949
950/**
951 * mei_cl_all_write_clear - clear all pending writes
952
953 * @dev - mei device
954 */
955void mei_cl_all_write_clear(struct mei_device *dev)
956{
957 struct mei_cl_cb *cb, *next;
Alexander Usyskin30c54df2014-01-27 22:27:23 +0200958 struct list_head *list;
Tomas Winkler074b4c02013-02-06 14:06:44 +0200959
Alexander Usyskin30c54df2014-01-27 22:27:23 +0200960 list = &dev->write_list.list;
961 list_for_each_entry_safe(cb, next, list, list) {
962 list_del(&cb->list);
963 mei_io_cb_free(cb);
964 }
965
966 list = &dev->write_waiting_list.list;
967 list_for_each_entry_safe(cb, next, list, list) {
Tomas Winkler074b4c02013-02-06 14:06:44 +0200968 list_del(&cb->list);
969 mei_io_cb_free(cb);
970 }
971}
972
973