blob: 324e1de936871952c726d42a33826c19a827cdb8 [file] [log] [blame]
Tomas Winkler9ca90502013-01-08 23:07:13 +02001/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
18#include <linux/sched.h>
19#include <linux/wait.h>
20#include <linux/delay.h>
Tomas Winkler04bb1392014-03-18 22:52:04 +020021#include <linux/pm_runtime.h>
Tomas Winkler9ca90502013-01-08 23:07:13 +020022
23#include <linux/mei.h>
24
25#include "mei_dev.h"
26#include "hbm.h"
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020027#include "client.h"
28
29/**
30 * mei_me_cl_by_uuid - locate index of me client
31 *
32 * @dev: mei device
Alexander Usyskina27a76d2014-02-17 15:13:22 +020033 *
34 * Locking: called under "dev->device_lock" lock
35 *
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020036 * returns me client index or -ENOENT if not found
37 */
38int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
39{
Alexander Usyskina27a76d2014-02-17 15:13:22 +020040 int i;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020041
42 for (i = 0; i < dev->me_clients_num; ++i)
43 if (uuid_le_cmp(*uuid,
Alexander Usyskina27a76d2014-02-17 15:13:22 +020044 dev->me_clients[i].props.protocol_name) == 0)
45 return i;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020046
Alexander Usyskina27a76d2014-02-17 15:13:22 +020047 return -ENOENT;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020048}
49
50
51/**
52 * mei_me_cl_by_id return index to me_clients for client_id
53 *
54 * @dev: the device structure
55 * @client_id: me client id
56 *
57 * Locking: called under "dev->device_lock" lock
58 *
59 * returns index on success, -ENOENT on failure.
60 */
61
62int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
63{
64 int i;
Alexander Usyskina27a76d2014-02-17 15:13:22 +020065
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020066 for (i = 0; i < dev->me_clients_num; i++)
67 if (dev->me_clients[i].client_id == client_id)
Alexander Usyskina27a76d2014-02-17 15:13:22 +020068 return i;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020069
Alexander Usyskina27a76d2014-02-17 15:13:22 +020070 return -ENOENT;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020071}
Tomas Winkler9ca90502013-01-08 23:07:13 +020072
73
74/**
Tomas Winklercc99ecf2014-03-10 15:10:40 +020075 * mei_cl_cmp_id - tells if the clients are the same
76 *
77 * @cl1: host client 1
78 * @cl2: host client 2
79 *
80 * returns true - if the clients has same host and me ids
81 * false - otherwise
82 */
83static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
84 const struct mei_cl *cl2)
85{
86 return cl1 && cl2 &&
87 (cl1->host_client_id == cl2->host_client_id) &&
88 (cl1->me_client_id == cl2->me_client_id);
89}
90
91/**
92 * mei_io_list_flush - removes cbs belonging to cl.
93 *
94 * @list: an instance of our list structure
95 * @cl: host client, can be NULL for flushing the whole list
96 * @free: whether to free the cbs
97 */
98static void __mei_io_list_flush(struct mei_cl_cb *list,
99 struct mei_cl *cl, bool free)
100{
101 struct mei_cl_cb *cb;
102 struct mei_cl_cb *next;
103
104 /* enable removing everything if no cl is specified */
105 list_for_each_entry_safe(cb, next, &list->list, list) {
106 if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
107 list_del(&cb->list);
108 if (free)
109 mei_io_cb_free(cb);
110 }
111 }
112}
113
/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * Detaches the client's callbacks from @list without freeing them;
 * use mei_io_list_free() when the callbacks must also be released.
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}
Tomas Winkler9ca90502013-01-08 23:07:13 +0200124
Tomas Winklercc99ecf2014-03-10 15:10:40 +0200125
/**
 * mei_io_list_free - removes cb belonging to cl and free them
 *
 * Detaches the client's callbacks from @list and releases their memory
 * via mei_io_cb_free().
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}
136
137/**
138 * mei_io_cb_free - free mei_cb_private related memory
139 *
140 * @cb: mei callback struct
141 */
142void mei_io_cb_free(struct mei_cl_cb *cb)
143{
144 if (cb == NULL)
145 return;
146
147 kfree(cb->request_buffer.data);
148 kfree(cb->response_buffer.data);
149 kfree(cb);
150}
151
152/**
153 * mei_io_cb_init - allocate and initialize io callback
154 *
155 * @cl - mei client
Masanari Iida393b1482013-04-05 01:05:05 +0900156 * @fp: pointer to file structure
Tomas Winkler9ca90502013-01-08 23:07:13 +0200157 *
158 * returns mei_cl_cb pointer or NULL;
159 */
160struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
161{
162 struct mei_cl_cb *cb;
163
164 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
165 if (!cb)
166 return NULL;
167
168 mei_io_list_init(cb);
169
170 cb->file_object = fp;
171 cb->cl = cl;
172 cb->buf_idx = 0;
173 return cb;
174}
175
176/**
177 * mei_io_cb_alloc_req_buf - allocate request buffer
178 *
Masanari Iida393b1482013-04-05 01:05:05 +0900179 * @cb: io callback structure
180 * @length: size of the buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200181 *
182 * returns 0 on success
183 * -EINVAL if cb is NULL
184 * -ENOMEM if allocation failed
185 */
186int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
187{
188 if (!cb)
189 return -EINVAL;
190
191 if (length == 0)
192 return 0;
193
194 cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
195 if (!cb->request_buffer.data)
196 return -ENOMEM;
197 cb->request_buffer.size = length;
198 return 0;
199}
200/**
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200201 * mei_io_cb_alloc_resp_buf - allocate response buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200202 *
Masanari Iida393b1482013-04-05 01:05:05 +0900203 * @cb: io callback structure
204 * @length: size of the buffer
Tomas Winkler9ca90502013-01-08 23:07:13 +0200205 *
206 * returns 0 on success
207 * -EINVAL if cb is NULL
208 * -ENOMEM if allocation failed
209 */
210int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
211{
212 if (!cb)
213 return -EINVAL;
214
215 if (length == 0)
216 return 0;
217
218 cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
219 if (!cb->response_buffer.data)
220 return -ENOMEM;
221 cb->response_buffer.size = length;
222 return 0;
223}
224
225
226
227/**
228 * mei_cl_flush_queues - flushes queue lists belonging to cl.
229 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200230 * @cl: host client
231 */
232int mei_cl_flush_queues(struct mei_cl *cl)
233{
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300234 struct mei_device *dev;
235
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200236 if (WARN_ON(!cl || !cl->dev))
Tomas Winkler9ca90502013-01-08 23:07:13 +0200237 return -EINVAL;
238
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300239 dev = cl->dev;
240
241 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
Tomas Winkler9ca90502013-01-08 23:07:13 +0200242 mei_io_list_flush(&cl->dev->read_list, cl);
Tomas Winklercc99ecf2014-03-10 15:10:40 +0200243 mei_io_list_free(&cl->dev->write_list, cl);
244 mei_io_list_free(&cl->dev->write_waiting_list, cl);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200245 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
246 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
247 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
248 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
249 return 0;
250}
251
Tomas Winkler9ca90502013-01-08 23:07:13 +0200252
253/**
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200254 * mei_cl_init - initializes cl.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200255 *
256 * @cl: host client to be initialized
257 * @dev: mei device
258 */
259void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
260{
261 memset(cl, 0, sizeof(struct mei_cl));
262 init_waitqueue_head(&cl->wait);
263 init_waitqueue_head(&cl->rx_wait);
264 init_waitqueue_head(&cl->tx_wait);
265 INIT_LIST_HEAD(&cl->link);
Samuel Ortiza7b71bc2013-03-27 17:29:56 +0200266 INIT_LIST_HEAD(&cl->device_link);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200267 cl->reading_state = MEI_IDLE;
268 cl->writing_state = MEI_IDLE;
269 cl->dev = dev;
270}
271
272/**
273 * mei_cl_allocate - allocates cl structure and sets it up.
274 *
275 * @dev: mei device
276 * returns The allocated file or NULL on failure
277 */
278struct mei_cl *mei_cl_allocate(struct mei_device *dev)
279{
280 struct mei_cl *cl;
281
282 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
283 if (!cl)
284 return NULL;
285
286 mei_cl_init(cl, dev);
287
288 return cl;
289}
290
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200291/**
292 * mei_cl_find_read_cb - find this cl's callback in the read list
293 *
Masanari Iida393b1482013-04-05 01:05:05 +0900294 * @cl: host client
295 *
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200296 * returns cb on success, NULL on error
297 */
298struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
299{
300 struct mei_device *dev = cl->dev;
Tomas Winkler31f88f52014-02-17 15:13:25 +0200301 struct mei_cl_cb *cb;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200302
Tomas Winkler31f88f52014-02-17 15:13:25 +0200303 list_for_each_entry(cb, &dev->read_list.list, list)
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200304 if (mei_cl_cmp_id(cl, cb->cl))
305 return cb;
306 return NULL;
307}
308
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200309/** mei_cl_link: allocate host id in the host map
Tomas Winkler9ca90502013-01-08 23:07:13 +0200310 *
Tomas Winkler781d0d82013-01-08 23:07:22 +0200311 * @cl - host client
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200312 * @id - fixed host id or -1 for generic one
Masanari Iida393b1482013-04-05 01:05:05 +0900313 *
Tomas Winkler781d0d82013-01-08 23:07:22 +0200314 * returns 0 on success
Tomas Winkler9ca90502013-01-08 23:07:13 +0200315 * -EINVAL on incorrect values
316 * -ENONET if client not found
317 */
Tomas Winkler781d0d82013-01-08 23:07:22 +0200318int mei_cl_link(struct mei_cl *cl, int id)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200319{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200320 struct mei_device *dev;
Tomas Winkler22f96a02013-09-16 23:44:47 +0300321 long open_handle_count;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200322
Tomas Winkler781d0d82013-01-08 23:07:22 +0200323 if (WARN_ON(!cl || !cl->dev))
Tomas Winkler9ca90502013-01-08 23:07:13 +0200324 return -EINVAL;
325
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200326 dev = cl->dev;
327
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200328 /* If Id is not assigned get one*/
Tomas Winkler781d0d82013-01-08 23:07:22 +0200329 if (id == MEI_HOST_CLIENT_ID_ANY)
330 id = find_first_zero_bit(dev->host_clients_map,
331 MEI_CLIENTS_MAX);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200332
Tomas Winkler781d0d82013-01-08 23:07:22 +0200333 if (id >= MEI_CLIENTS_MAX) {
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200334 dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
Tomas Winklere036cc52013-09-16 23:44:46 +0300335 return -EMFILE;
336 }
337
Tomas Winkler22f96a02013-09-16 23:44:47 +0300338 open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
339 if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200340 dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
Tomas Winklere036cc52013-09-16 23:44:46 +0300341 MEI_MAX_OPEN_HANDLE_COUNT);
342 return -EMFILE;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200343 }
344
Tomas Winkler781d0d82013-01-08 23:07:22 +0200345 dev->open_handle_count++;
346
347 cl->host_client_id = id;
348 list_add_tail(&cl->link, &dev->file_list);
349
350 set_bit(id, dev->host_clients_map);
351
352 cl->state = MEI_FILE_INITIALIZING;
353
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300354 cl_dbg(dev, cl, "link cl\n");
Tomas Winkler781d0d82013-01-08 23:07:22 +0200355 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200356}
Tomas Winkler781d0d82013-01-08 23:07:22 +0200357
Tomas Winkler9ca90502013-01-08 23:07:13 +0200358/**
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200359 * mei_cl_unlink - remove me_cl from the list
Tomas Winkler9ca90502013-01-08 23:07:13 +0200360 *
Masanari Iida393b1482013-04-05 01:05:05 +0900361 * @cl: host client
Tomas Winkler9ca90502013-01-08 23:07:13 +0200362 */
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200363int mei_cl_unlink(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200364{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200365 struct mei_device *dev;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200366
Tomas Winkler781d0d82013-01-08 23:07:22 +0200367 /* don't shout on error exit path */
368 if (!cl)
369 return 0;
370
Tomas Winkler8e9a4a92013-01-10 17:32:14 +0200371 /* wd and amthif might not be initialized */
372 if (!cl->dev)
373 return 0;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200374
375 dev = cl->dev;
376
Tomas Winklera14c44d2013-09-16 23:44:45 +0300377 cl_dbg(dev, cl, "unlink client");
378
Tomas Winkler22f96a02013-09-16 23:44:47 +0300379 if (dev->open_handle_count > 0)
380 dev->open_handle_count--;
381
382 /* never clear the 0 bit */
383 if (cl->host_client_id)
384 clear_bit(cl->host_client_id, dev->host_clients_map);
385
386 list_del_init(&cl->link);
387
388 cl->state = MEI_FILE_INITIALIZING;
389
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200390 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200391}
392
393
394void mei_host_client_init(struct work_struct *work)
395{
396 struct mei_device *dev = container_of(work,
397 struct mei_device, init_work);
398 struct mei_client_properties *client_props;
399 int i;
400
401 mutex_lock(&dev->device_lock);
402
Tomas Winkler9ca90502013-01-08 23:07:13 +0200403 for (i = 0; i < dev->me_clients_num; i++) {
404 client_props = &dev->me_clients[i].props;
405
Tomas Winkler1a1aca42013-01-08 23:07:21 +0200406 if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
Tomas Winkler9ca90502013-01-08 23:07:13 +0200407 mei_amthif_host_init(dev);
408 else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
409 mei_wd_host_init(dev);
Samuel Ortiz59fcd7c2013-04-11 03:03:29 +0200410 else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
411 mei_nfc_host_init(dev);
412
Tomas Winkler9ca90502013-01-08 23:07:13 +0200413 }
414
415 dev->dev_state = MEI_DEV_ENABLED;
Tomas Winkler6adb8ef2014-01-12 00:36:10 +0200416 dev->reset_count = 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200417
418 mutex_unlock(&dev->device_lock);
Tomas Winkler04bb1392014-03-18 22:52:04 +0200419
420 pm_runtime_mark_last_busy(&dev->pdev->dev);
421 dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
422 pm_runtime_autosuspend(&dev->pdev->dev);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200423}
424
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200425/**
426 * mei_hbuf_acquire: try to acquire host buffer
427 *
428 * @dev: the device structure
429 * returns true if host buffer was acquired
430 */
431bool mei_hbuf_acquire(struct mei_device *dev)
432{
Tomas Winkler04bb1392014-03-18 22:52:04 +0200433 if (mei_pg_state(dev) == MEI_PG_ON ||
434 dev->pg_event == MEI_PG_EVENT_WAIT) {
435 dev_dbg(&dev->pdev->dev, "device is in pg\n");
436 return false;
437 }
438
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200439 if (!dev->hbuf_is_ready) {
440 dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
441 return false;
442 }
443
444 dev->hbuf_is_ready = false;
445
446 return true;
447}
Tomas Winkler9ca90502013-01-08 23:07:13 +0200448
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * while waiting for the firmware's disconnect response and reacquired
 * afterwards.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	/* only a client already marked for disconnect is processed */
	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	/* -EINPROGRESS means an async resume is underway; proceed anyway */
	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		/* cb is NULL; mei_io_cb_free() at 'free' tolerates that */
		goto free;
	}

	cb->fop_type = MEI_FOP_CLOSE;
	if (mei_hbuf_acquire(dev)) {
		/* buffer available: send the disconnect request now and
		 * park the cb on the control-read list to await the reply */
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* no buffer: defer the request via the control-write list */
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	}
	mutex_unlock(&dev->device_lock);

	wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);

	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

	/* drop any control entries still queued for this client */
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}
528
529
530/**
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200531 * mei_cl_is_other_connecting - checks if other
532 * client with the same me client id is connecting
Tomas Winkler9ca90502013-01-08 23:07:13 +0200533 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200534 * @cl: private data of the file object
535 *
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200536 * returns true if other client is connected, false - otherwise.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200537 */
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200538bool mei_cl_is_other_connecting(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200539{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200540 struct mei_device *dev;
Tomas Winkler31f88f52014-02-17 15:13:25 +0200541 struct mei_cl *ocl; /* the other client */
Tomas Winkler9ca90502013-01-08 23:07:13 +0200542
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200543 if (WARN_ON(!cl || !cl->dev))
544 return false;
545
546 dev = cl->dev;
547
Tomas Winkler31f88f52014-02-17 15:13:25 +0200548 list_for_each_entry(ocl, &dev->file_list, link) {
549 if (ocl->state == MEI_FILE_CONNECTING &&
550 ocl != cl &&
551 cl->me_client_id == ocl->me_client_id)
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200552 return true;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200553
554 }
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200555
556 return false;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200557}
558
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure, stored in the connect callback
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * while waiting for the firmware's connect response and reacquired
 * afterwards.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	/* -EINPROGRESS means an async resume is underway; proceed anyway */
	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* another client is connecting to the same me client, or no
		 * host buffer: queue the request on the control-write list */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTED;
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		/* drop any control entries still queued for this client */
		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}
634
635/**
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200636 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200637 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200638 * @cl: private data of the file object
639 *
640 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
641 * -ENOENT if mei_cl is not present
642 * -EINVAL if single_recv_buf == 0
643 */
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200644int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200645{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200646 struct mei_device *dev;
Alexander Usyskin12d00662014-02-17 15:13:23 +0200647 struct mei_me_client *me_cl;
648 int id;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200649
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200650 if (WARN_ON(!cl || !cl->dev))
651 return -EINVAL;
652
653 dev = cl->dev;
654
Tomas Winkler9ca90502013-01-08 23:07:13 +0200655 if (!dev->me_clients_num)
656 return 0;
657
658 if (cl->mei_flow_ctrl_creds > 0)
659 return 1;
660
Alexander Usyskin12d00662014-02-17 15:13:23 +0200661 id = mei_me_cl_by_id(dev, cl->me_client_id);
662 if (id < 0) {
663 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
664 return id;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200665 }
Alexander Usyskin12d00662014-02-17 15:13:23 +0200666
667 me_cl = &dev->me_clients[id];
668 if (me_cl->mei_flow_ctrl_creds) {
669 if (WARN_ON(me_cl->props.single_recv_buf == 0))
670 return -EINVAL;
671 return 1;
672 }
673 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200674}
675
676/**
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200677 * mei_cl_flow_ctrl_reduce - reduces flow_control.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200678 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200679 * @cl: private data of the file object
Masanari Iida393b1482013-04-05 01:05:05 +0900680 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200681 * @returns
682 * 0 on success
683 * -ENOENT when me client is not found
684 * -EINVAL when ctrl credits are <= 0
685 */
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200686int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200687{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200688 struct mei_device *dev;
Alexander Usyskin12d00662014-02-17 15:13:23 +0200689 struct mei_me_client *me_cl;
690 int id;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200691
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200692 if (WARN_ON(!cl || !cl->dev))
693 return -EINVAL;
694
695 dev = cl->dev;
696
Alexander Usyskin12d00662014-02-17 15:13:23 +0200697 id = mei_me_cl_by_id(dev, cl->me_client_id);
698 if (id < 0) {
699 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
700 return id;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200701 }
Alexander Usyskin12d00662014-02-17 15:13:23 +0200702
703 me_cl = &dev->me_clients[id];
704 if (me_cl->props.single_recv_buf != 0) {
705 if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
706 return -EINVAL;
707 me_cl->mei_flow_ctrl_creds--;
708 } else {
709 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
710 return -EINVAL;
711 cl->mei_flow_ctrl_creds--;
712 }
713 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200714}
715
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: requested read size; grown to the me client's maximum
 *	message length if smaller
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* only one outstanding read per client */
	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOTTY;
	}

	/* -EINPROGRESS means an async resume is underway; proceed anyway */
	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		/* buffer available: grant the firmware a flow-control
		 * credit now and queue the cb to receive the message */
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		/* no buffer: defer the flow-control request */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	/* on failure the cb was never queued, so it is safe to free */
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
790
Tomas Winkler074b4c02013-02-06 14:06:44 +0200791/**
Tomas Winkler9d098192014-02-19 17:35:48 +0200792 * mei_cl_irq_write - write a message to device
Tomas Winkler21767542013-06-23 09:36:59 +0300793 * from the interrupt thread context
794 *
795 * @cl: client
796 * @cb: callback block.
Tomas Winkler21767542013-06-23 09:36:59 +0300797 * @cmpl_list: complete list.
798 *
799 * returns 0, OK; otherwise error.
800 */
Tomas Winkler9d098192014-02-19 17:35:48 +0200801int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
802 struct mei_cl_cb *cmpl_list)
Tomas Winkler21767542013-06-23 09:36:59 +0300803{
Tomas Winkler136698e2013-09-16 23:44:44 +0300804 struct mei_device *dev;
805 struct mei_msg_data *buf;
Tomas Winkler21767542013-06-23 09:36:59 +0300806 struct mei_msg_hdr mei_hdr;
Tomas Winkler136698e2013-09-16 23:44:44 +0300807 size_t len;
808 u32 msg_slots;
Tomas Winkler9d098192014-02-19 17:35:48 +0200809 int slots;
Tomas Winkler2ebf8c92013-09-16 23:44:43 +0300810 int rets;
Tomas Winkler21767542013-06-23 09:36:59 +0300811
Tomas Winkler136698e2013-09-16 23:44:44 +0300812 if (WARN_ON(!cl || !cl->dev))
813 return -ENODEV;
814
815 dev = cl->dev;
816
817 buf = &cb->request_buffer;
818
819 rets = mei_cl_flow_ctrl_creds(cl);
820 if (rets < 0)
821 return rets;
822
823 if (rets == 0) {
Tomas Winkler04bb1392014-03-18 22:52:04 +0200824 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
Tomas Winkler136698e2013-09-16 23:44:44 +0300825 return 0;
826 }
827
Tomas Winkler9d098192014-02-19 17:35:48 +0200828 slots = mei_hbuf_empty_slots(dev);
Tomas Winkler136698e2013-09-16 23:44:44 +0300829 len = buf->size - cb->buf_idx;
830 msg_slots = mei_data2slots(len);
831
Tomas Winkler21767542013-06-23 09:36:59 +0300832 mei_hdr.host_addr = cl->host_client_id;
833 mei_hdr.me_addr = cl->me_client_id;
834 mei_hdr.reserved = 0;
Tomas Winkler479327f2013-12-17 15:56:56 +0200835 mei_hdr.internal = cb->internal;
Tomas Winkler21767542013-06-23 09:36:59 +0300836
Tomas Winkler9d098192014-02-19 17:35:48 +0200837 if (slots >= msg_slots) {
Tomas Winkler21767542013-06-23 09:36:59 +0300838 mei_hdr.length = len;
839 mei_hdr.msg_complete = 1;
840 /* Split the message only if we can write the whole host buffer */
Tomas Winkler9d098192014-02-19 17:35:48 +0200841 } else if (slots == dev->hbuf_depth) {
842 msg_slots = slots;
843 len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
Tomas Winkler21767542013-06-23 09:36:59 +0300844 mei_hdr.length = len;
845 mei_hdr.msg_complete = 0;
846 } else {
847 /* wait for next time the host buffer is empty */
848 return 0;
849 }
850
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300851 cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
Tomas Winkler21767542013-06-23 09:36:59 +0300852 cb->request_buffer.size, cb->buf_idx);
Tomas Winkler21767542013-06-23 09:36:59 +0300853
Tomas Winkler136698e2013-09-16 23:44:44 +0300854 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
Tomas Winkler2ebf8c92013-09-16 23:44:43 +0300855 if (rets) {
856 cl->status = rets;
Tomas Winkler21767542013-06-23 09:36:59 +0300857 list_move_tail(&cb->list, &cmpl_list->list);
Tomas Winkler2ebf8c92013-09-16 23:44:43 +0300858 return rets;
Tomas Winkler21767542013-06-23 09:36:59 +0300859 }
860
861 cl->status = 0;
Tomas Winkler4dfaa9f2013-06-23 09:37:00 +0300862 cl->writing_state = MEI_WRITING;
Tomas Winkler21767542013-06-23 09:36:59 +0300863 cb->buf_idx += mei_hdr.length;
Tomas Winkler4dfaa9f2013-06-23 09:37:00 +0300864
Tomas Winkler21767542013-06-23 09:36:59 +0300865 if (mei_hdr.msg_complete) {
866 if (mei_cl_flow_ctrl_reduce(cl))
Tomas Winkler2ebf8c92013-09-16 23:44:43 +0300867 return -EIO;
Tomas Winkler21767542013-06-23 09:36:59 +0300868 list_move_tail(&cb->list, &dev->write_waiting_list.list);
869 }
870
871 return 0;
872}
873
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: wait until the write is acknowledged by the firmware
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

	/* keep the device resumed while the write is in flight;
	 * every exit path below goes through the autosuspend put */
	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* no credits or no room now: queue the cb to be sent later
	 * from the interrupt thread, and report the size as accepted */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	/* a complete message waits for the FW ack; a partial one is
	 * requeued so the interrupt thread sends the remainder */
	if (mei_hdr.msg_complete) {
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		/* drop the device lock while sleeping; completion is
		 * signalled from mei_cl_complete via cl->tx_wait */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	return rets;
}
986
987
Tomas Winklerdb086fa2013-05-12 15:34:45 +0300988/**
989 * mei_cl_complete - processes completed operation for a client
990 *
991 * @cl: private data of the file object.
992 * @cb: callback block.
993 */
994void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
995{
996 if (cb->fop_type == MEI_FOP_WRITE) {
997 mei_io_cb_free(cb);
998 cb = NULL;
999 cl->writing_state = MEI_WRITE_COMPLETE;
1000 if (waitqueue_active(&cl->tx_wait))
1001 wake_up_interruptible(&cl->tx_wait);
1002
1003 } else if (cb->fop_type == MEI_FOP_READ &&
1004 MEI_READING == cl->reading_state) {
1005 cl->reading_state = MEI_READ_COMPLETE;
1006 if (waitqueue_active(&cl->rx_wait))
1007 wake_up_interruptible(&cl->rx_wait);
1008 else
1009 mei_cl_bus_rx_event(cl);
1010
1011 }
1012}
1013
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001014
1015/**
Tomas Winkler074b4c02013-02-06 14:06:44 +02001016 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1017 *
1018 * @dev - mei device
1019 */
1020
1021void mei_cl_all_disconnect(struct mei_device *dev)
1022{
Tomas Winkler31f88f52014-02-17 15:13:25 +02001023 struct mei_cl *cl;
Tomas Winkler074b4c02013-02-06 14:06:44 +02001024
Tomas Winkler31f88f52014-02-17 15:13:25 +02001025 list_for_each_entry(cl, &dev->file_list, link) {
Tomas Winkler074b4c02013-02-06 14:06:44 +02001026 cl->state = MEI_FILE_DISCONNECTED;
1027 cl->mei_flow_ctrl_creds = 0;
Tomas Winkler074b4c02013-02-06 14:06:44 +02001028 cl->timer_count = 0;
1029 }
1030}
1031
1032
1033/**
Tomas Winkler52908012013-07-24 16:22:57 +03001034 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
Tomas Winkler074b4c02013-02-06 14:06:44 +02001035 *
1036 * @dev - mei device
1037 */
Tomas Winkler52908012013-07-24 16:22:57 +03001038void mei_cl_all_wakeup(struct mei_device *dev)
Tomas Winkler074b4c02013-02-06 14:06:44 +02001039{
Tomas Winkler31f88f52014-02-17 15:13:25 +02001040 struct mei_cl *cl;
1041 list_for_each_entry(cl, &dev->file_list, link) {
Tomas Winkler074b4c02013-02-06 14:06:44 +02001042 if (waitqueue_active(&cl->rx_wait)) {
Alexander Usyskinc0abffb2013-09-15 18:11:07 +03001043 cl_dbg(dev, cl, "Waking up reading client!\n");
Tomas Winkler074b4c02013-02-06 14:06:44 +02001044 wake_up_interruptible(&cl->rx_wait);
1045 }
Tomas Winkler52908012013-07-24 16:22:57 +03001046 if (waitqueue_active(&cl->tx_wait)) {
Alexander Usyskinc0abffb2013-09-15 18:11:07 +03001047 cl_dbg(dev, cl, "Waking up writing client!\n");
Tomas Winkler52908012013-07-24 16:22:57 +03001048 wake_up_interruptible(&cl->tx_wait);
1049 }
Tomas Winkler074b4c02013-02-06 14:06:44 +02001050 }
1051}
1052
/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev - mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	/* NULL client: free every queued cb, both unsent and in-flight */
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}
1063
1064