/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_get(&me_cl->refcnt);

	return me_cl;
}

/**
 * mei_me_cl_release - unlink and free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);
	list_del(&me_cl->list);
	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	list_for_each_entry(me_cl, &dev->me_clients, list)
		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
			return mei_me_cl_get(me_cl);

	return NULL;
}
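
/*
 * Illustrative usage sketch (not part of the driver): the lookup helpers
 * above return a referenced me client, so a caller holding
 * dev->device_lock is expected to balance the lookup with mei_me_cl_put(),
 * for example:
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (me_cl) {
 *		... use me_cl->props ...
 *		mei_me_cl_put(me_cl);
 *	}
 */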

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *me_cl;

	list_for_each_entry(me_cl, &dev->me_clients, list)
		if (me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	list_for_each_entry(me_cl, &dev->me_clients, list)
		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);

	return NULL;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl, *next;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
			mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching uuid and client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl, *next;
	const uuid_le *pn;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (me_cl->client_id == id && uuid_le_cmp(*uuid, *pn) == 0)
			mei_me_cl_put(me_cl);
	}
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		mei_me_cl_put(me_cl);
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * Return: true - if the clients have the same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				 const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(cl1->me_client_id == cl2->me_client_id);
}

/**
 * __mei_io_list_flush - removes and optionally frees cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
			list_del(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}

/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	kfree(cb->request_buffer.data);
	kfree(cb->response_buffer.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * Return: 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}

/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * Return: 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}
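
/*
 * Illustrative sketch (not part of the driver): a typical callback is
 * created, given a buffer and eventually released with mei_io_cb_free(),
 * which also frees both of its buffers:
 *
 *	cb = mei_io_cb_init(cl, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	ret = mei_io_cb_alloc_req_buf(cb, length);
 *	if (ret) {
 *		mei_io_cb_free(cb);
 *		return ret;
 *	}
 *	... queue cb on one of the device lists ...
 */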

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 *
 * Return: the allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * Return: cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one */
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					 MEI_CLIENTS_MAX);

	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
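
/*
 * Illustrative sketch (not part of the driver): a host client created in
 * an open() path is typically allocated, linked into the host map under
 * dev->device_lock, and later unlinked and freed on close:
 *
 *	cl = mei_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *	ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
 *	if (ret) {
 *		kfree(cl);
 *		return ret;
 *	}
 *	...
 *	mei_cl_unlink(cl);
 *	kfree(cl);
 */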

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
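/**
 * mei_host_client_init - initialize the host side of the special me
 *	clients (amthif, wd, nfc) after client enumeration and mark the
 *	device as enabled
 *
 * @work: init_work work item embedded in the device structure
 */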
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					      struct mei_device, init_work);
	struct mei_me_client *me_cl;
	struct mei_client_properties *props;

	mutex_lock(&dev->device_lock);

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		props = &me_cl->props;

		if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);
	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 *
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    dev->pg_event == MEI_PG_EVENT_WAIT) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}
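
/*
 * Illustrative sketch (not part of the driver): callers that cannot
 * acquire the host buffer queue their request on the control write list
 * and let the interrupt thread send it later, mirroring the pattern used
 * by mei_cl_disconnect() and mei_cl_connect() below:
 *
 *	if (mei_hbuf_acquire(dev)) {
 *		... write the request now ...
 *		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
 *	} else {
 *		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 *	}
 */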

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto free;
	}

	cb->fop_type = MEI_FOP_DISCONNECT;

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	mutex_unlock(&dev->device_lock);

	wait_event_timeout(cl->wait,
			   MEI_FILE_DISCONNECTED == cl->state,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);

	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_is_other_connecting - checks if other
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client with the same me client id is connecting,
 *	false otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *ocl; /* the other client */

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;

	list_for_each_entry(ocl, &dev->file_list, link) {
		if (ocl->state == MEI_FILE_CONNECTING &&
		    ocl != cl &&
		    cl->me_client_id == ocl->me_client_id)
			return true;
	}

	return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl->state = MEI_FILE_INITIALIZING;
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTED;
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
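
/*
 * Illustrative sketch (not part of the driver): a character-device open
 * path typically binds the host client to an me client and connects,
 * while release sets MEI_FILE_DISCONNECTING and calls mei_cl_disconnect():
 *
 *	mutex_lock(&dev->device_lock);
 *	cl->me_client_id = me_cl_id;
 *	cl->cl_uuid = uuid;
 *	ret = mei_cl_connect(cl, file);
 *	mutex_unlock(&dev->device_lock);
 */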

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOENT;
	}

	if (me_cl->mei_flow_ctrl_creds > 0) {
		rets = 1;
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			rets = -EINVAL;
	}
	mei_me_cl_put(me_cl);
	return rets;
}

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * Return:
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOENT;
	}

	if (me_cl->props.single_recv_buf) {
		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) {
			rets = -EINVAL;
			goto out;
		}
		me_cl->mei_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) {
			rets = -EINVAL;
			goto out;
		}
		cl->mei_flow_ctrl_creds--;
	}
	rets = 0;
out:
	mei_me_cl_put(me_cl);
	return rets;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	struct mei_me_client *me_cl;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOTTY;
	}
	/* always allocate at least client max message */
	length = max_t(size_t, length, me_cl->props.max_msg_length);
	mei_me_cl_put(me_cl);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
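
/*
 * Illustrative sketch (not part of the driver): a blocking reader starts
 * the read, drops the device lock and waits for the completion path to
 * flip reading_state before consuming cl->read_cb:
 *
 *	ret = mei_cl_read_start(cl, length);
 *	if (ret && ret != -EBUSY)
 *		goto out;
 *	mutex_unlock(&dev->device_lock);
 *	wait_event_interruptible(cl->rx_wait,
 *				 MEI_READ_COMPLETE == cl->reading_state);
 *	mutex_lock(&dev->device_lock);
 *	cb = cl->read_cb;
 *	... copy data out of cb->response_buffer ...
 */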

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
	       cb->request_buffer.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}
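
/*
 * Worked example for the slot math above (assuming mei_data2slots()
 * rounds the header plus payload up to whole 4-byte slots, per its
 * definition in mei_dev.h): a 100 byte payload with a 4 byte mei_msg_hdr
 * needs DIV_ROUND_UP(4 + 100, 4) = 26 slots, so it goes out in a single
 * fragment only when at least 26 slots are empty; otherwise, if the
 * buffer is completely empty (slots == dev->hbuf_depth), the first
 * hbuf_depth * 4 - 4 bytes are sent and the remainder stays queued.
 */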

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until completed
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "size=%d\n", buf->size);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}

	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
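
/*
 * Illustrative sketch (not part of the driver): callers such as the bus
 * layer fill a request buffer and submit it under the device lock,
 * blocking until MEI_WRITE_COMPLETE when the data must be on the wire
 * before returning:
 *
 *	cb = mei_io_cb_init(cl, NULL);
 *	if (!cb)
 *		return -ENOMEM;
 *	if (mei_io_cb_alloc_req_buf(cb, length)) {
 *		mei_io_cb_free(cb);
 *		return -ENOMEM;
 *	}
 *	memcpy(cb->request_buffer.data, buf, length);
 *	mutex_lock(&dev->device_lock);
 *	ret = mei_cl_write(cl, cb, true);
 *	mutex_unlock(&dev->device_lock);
 */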

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
		   MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			mei_cl_bus_rx_event(cl);
	}
}

/**
 * mei_cl_all_disconnect - forcefully disconnect all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->timer_count = 0;
	}
}

/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}