/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
                                        const uuid_le *uuid)
{
        struct mei_me_client *me_cl;

        list_for_each_entry(me_cl, &dev->me_clients, list)
                if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
                        return me_cl;

        return NULL;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client or NULL if not found
 */

struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{

        struct mei_me_client *me_cl;

        list_for_each_entry(me_cl, &dev->me_clients, list)
                if (me_cl->client_id == client_id)
                        return me_cl;
        return NULL;
}

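/**
 * mei_me_cl_by_uuid_id - locate me client matching both uuid and client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client or NULL if not found
 */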
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
                                           const uuid_le *uuid, u8 client_id)
{
        struct mei_me_client *me_cl;

        list_for_each_entry(me_cl, &dev->me_clients, list)
                if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
                    me_cl->client_id == client_id)
                        return me_cl;
        return NULL;
}

/**
 * mei_me_cl_remove - remove me client matching uuid and client_id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client address
 */
void mei_me_cl_remove(struct mei_device *dev, const uuid_le *uuid, u8 client_id)
{
        struct mei_me_client *me_cl, *next;

        list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
                if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
                    me_cl->client_id == client_id) {
                        list_del(&me_cl->list);
                        kfree(me_cl);
                        break;
                }
        }
}


/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * returns true  - if the clients have same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
                                 const struct mei_cl *cl2)
{
        return cl1 && cl2 &&
                (cl1->host_client_id == cl2->host_client_id) &&
                (cl1->me_client_id == cl2->me_client_id);
}

/**
 * __mei_io_list_flush - removes and optionally frees cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
                                struct mei_cl *cl, bool free)
{
        struct mei_cl_cb *cb;
        struct mei_cl_cb *next;

        /* enable removing everything if no cl is specified */
        list_for_each_entry_safe(cb, next, &list->list, list) {
                if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
                        list_del(&cb->list);
                        if (free)
                                mei_io_cb_free(cb);
                }
        }
}

/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
        __mei_io_list_flush(list, cl, false);
}


/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
        __mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
        if (cb == NULL)
                return;

        kfree(cb->request_buffer.data);
        kfree(cb->response_buffer.data);
        kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL;
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
        struct mei_cl_cb *cb;

        cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        mei_io_list_init(cb);

        cb->file_object = fp;
        cb->cl = cl;
        cb->buf_idx = 0;
        return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
        if (!cb)
                return -EINVAL;

        if (length == 0)
                return 0;

        cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
        if (!cb->request_buffer.data)
                return -ENOMEM;
        cb->request_buffer.size = length;
        return 0;
}
/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
        if (!cb)
                return -EINVAL;

        if (length == 0)
                return 0;

        cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
        if (!cb->response_buffer.data)
                return -ENOMEM;
        cb->response_buffer.size = length;
        return 0;
}



/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
        struct mei_device *dev;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        cl_dbg(dev, cl, "remove list entry belonging to cl\n");
        mei_io_list_flush(&cl->dev->read_list, cl);
        mei_io_list_free(&cl->dev->write_list, cl);
        mei_io_list_free(&cl->dev->write_waiting_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
        return 0;
}


/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
        memset(cl, 0, sizeof(struct mei_cl));
        init_waitqueue_head(&cl->wait);
        init_waitqueue_head(&cl->rx_wait);
        init_waitqueue_head(&cl->tx_wait);
        INIT_LIST_HEAD(&cl->link);
        INIT_LIST_HEAD(&cl->device_link);
        cl->reading_state = MEI_IDLE;
        cl->writing_state = MEI_IDLE;
        cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * returns The allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
        struct mei_cl *cl;

        cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
        if (!cl)
                return NULL;

        mei_cl_init(cl, dev);

        return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;

        list_for_each_entry(cb, &dev->read_list.list, list)
                if (mei_cl_cmp_id(cl, cb->cl))
                        return cb;
        return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * returns 0 on success
 *      -EINVAL on incorrect values
 *      -EMFILE if open count exceeded
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
        struct mei_device *dev;
        long open_handle_count;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        /* If Id is not assigned get one*/
        if (id == MEI_HOST_CLIENT_ID_ANY)
                id = find_first_zero_bit(dev->host_clients_map,
                                         MEI_CLIENTS_MAX);

        if (id >= MEI_CLIENTS_MAX) {
                dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
                return -EMFILE;
        }

        open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
        if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
                dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
                        MEI_MAX_OPEN_HANDLE_COUNT);
                return -EMFILE;
        }

        dev->open_handle_count++;

        cl->host_client_id = id;
        list_add_tail(&cl->link, &dev->file_list);

        set_bit(id, dev->host_clients_map);

        cl->state = MEI_FILE_INITIALIZING;

        cl_dbg(dev, cl, "link cl\n");
        return 0;
}

/**
 * mei_cl_unlink - remove me_cl from the list
 *
 * @cl: host client
 */
int mei_cl_unlink(struct mei_cl *cl)
{
        struct mei_device *dev;

        /* don't shout on error exit path */
        if (!cl)
                return 0;

        /* wd and amthif might not be initialized */
        if (!cl->dev)
                return 0;

        dev = cl->dev;

        cl_dbg(dev, cl, "unlink client");

        if (dev->open_handle_count > 0)
                dev->open_handle_count--;

        /* never clear the 0 bit */
        if (cl->host_client_id)
                clear_bit(cl->host_client_id, dev->host_clients_map);

        list_del_init(&cl->link);

        cl->state = MEI_FILE_INITIALIZING;

        return 0;
}


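/**
 * mei_host_client_init - initialize the host side of the special me clients
 *      (amthif, watchdog and nfc) and mark the device as enabled
 *
 * @work: the init_work item embedded in the mei device
 */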
void mei_host_client_init(struct work_struct *work)
{
        struct mei_device *dev = container_of(work,
                                              struct mei_device, init_work);
        struct mei_me_client *me_cl;
        struct mei_client_properties *props;

        mutex_lock(&dev->device_lock);

        list_for_each_entry(me_cl, &dev->me_clients, list) {
                props = &me_cl->props;

                if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid))
                        mei_amthif_host_init(dev);
                else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid))
                        mei_wd_host_init(dev);
                else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid))
                        mei_nfc_host_init(dev);

        }

        dev->dev_state = MEI_DEV_ENABLED;
        dev->reset_count = 0;

        mutex_unlock(&dev->device_lock);

        pm_runtime_mark_last_busy(&dev->pdev->dev);
        dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
        pm_runtime_autosuspend(&dev->pdev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * returns true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
        if (mei_pg_state(dev) == MEI_PG_ON ||
            dev->pg_event == MEI_PG_EVENT_WAIT) {
                dev_dbg(&dev->pdev->dev, "device is in pg\n");
                return false;
        }

        if (!dev->hbuf_is_ready) {
                dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
                return false;
        }

        dev->hbuf_is_ready = false;

        return true;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        cl_dbg(dev, cl, "disconnecting");

        if (cl->state != MEI_FILE_DISCONNECTING)
                return 0;

        rets = pm_runtime_get(&dev->pdev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(&dev->pdev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                return rets;
        }

        cb = mei_io_cb_init(cl, NULL);
        if (!cb) {
                rets = -ENOMEM;
                goto free;
        }

        cb->fop_type = MEI_FOP_CLOSE;
        if (mei_hbuf_acquire(dev)) {
                if (mei_hbm_cl_disconnect_req(dev, cl)) {
                        rets = -ENODEV;
                        cl_err(dev, cl, "failed to disconnect.\n");
                        goto free;
                }
                cl->timer_count = MEI_CONNECT_TIMEOUT;
                mdelay(10); /* Wait for hardware disconnection ready */
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
                cl_dbg(dev, cl, "add disconnect cb to control write list\n");
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

        }
        mutex_unlock(&dev->device_lock);

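        /* wait for the FW disconnect response or for the timeout to expire */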
        wait_event_timeout(dev->wait_recvd_msg,
                        MEI_FILE_DISCONNECTED == cl->state,
                        mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

        mutex_lock(&dev->device_lock);

        if (MEI_FILE_DISCONNECTED == cl->state) {
                rets = 0;
                cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
        } else {
                cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
                rets = -ETIME;
        }

        mei_io_list_flush(&dev->ctrl_rd_list, cl);
        mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(&dev->pdev->dev);
        pm_runtime_put_autosuspend(&dev->pdev->dev);

        mei_io_cb_free(cb);
        return rets;
}


/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if other client is connected, false - otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_cl *ocl; /* the other client */

        if (WARN_ON(!cl || !cl->dev))
                return false;

        dev = cl->dev;

        list_for_each_entry(ocl, &dev->file_list, link) {
                if (ocl->state == MEI_FILE_CONNECTING &&
                    ocl != cl &&
                    cl->me_client_id == ocl->me_client_id)
                        return true;

        }

        return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        rets = pm_runtime_get(&dev->pdev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(&dev->pdev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                return rets;
        }

        cb = mei_io_cb_init(cl, file);
        if (!cb) {
                rets = -ENOMEM;
                goto out;
        }

        cb->fop_type = MEI_FOP_CONNECT;

        /* run hbuf acquire last so we don't have to undo */
        if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
                cl->state = MEI_FILE_CONNECTING;
                if (mei_hbm_cl_connect_req(dev, cl)) {
                        rets = -ENODEV;
                        goto out;
                }
                cl->timer_count = MEI_CONNECT_TIMEOUT;
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
                cl->state = MEI_FILE_INITIALIZING;
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_recvd_msg,
                        (cl->state == MEI_FILE_CONNECTED ||
                         cl->state == MEI_FILE_DISCONNECTED),
                        mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);

        if (cl->state != MEI_FILE_CONNECTED) {
                cl->state = MEI_FILE_DISCONNECTED;
                /* something went really wrong */
                if (!cl->status)
                        cl->status = -EFAULT;

                mei_io_list_flush(&dev->ctrl_rd_list, cl);
                mei_io_list_flush(&dev->ctrl_wr_list, cl);
        }

        rets = cl->status;

out:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(&dev->pdev->dev);
        pm_runtime_put_autosuspend(&dev->pdev->dev);

        mei_io_cb_free(cb);
        return rets;
}

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 *      -ENOENT if mei_cl is not present
 *      -EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_me_client *me_cl;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        if (cl->mei_flow_ctrl_creds > 0)
                return 1;

        me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
        if (!me_cl) {
                cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
                return -ENOENT;
        }

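        /* me client credits are only used by single receive buffer clients */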
        if (me_cl->mei_flow_ctrl_creds) {
                if (WARN_ON(me_cl->props.single_recv_buf == 0))
                        return -EINVAL;
                return 1;
        }
        return 0;
}

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * @returns
 *      0 on success
 *      -ENOENT when me client is not found
 *      -EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_me_client *me_cl;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
        if (!me_cl) {
                cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
                return -ENOENT;
        }

        if (me_cl->props.single_recv_buf) {
                if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
                        return -EINVAL;
                me_cl->mei_flow_ctrl_creds--;
        } else {
                if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
                        return -EINVAL;
                cl->mei_flow_ctrl_creds--;
        }
        return 0;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        struct mei_me_client *me_cl;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (!mei_cl_is_connected(cl))
                return -ENODEV;

        if (cl->read_cb) {
                cl_dbg(dev, cl, "read is pending.\n");
                return -EBUSY;
        }
        me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
        if (!me_cl) {
                cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
                return -ENOTTY;
        }

        rets = pm_runtime_get(&dev->pdev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(&dev->pdev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                return rets;
        }

        cb = mei_io_cb_init(cl, NULL);
        if (!cb) {
                rets = -ENOMEM;
                goto out;
        }

        /* always allocate at least client max message */
        length = max_t(size_t, length, me_cl->props.max_msg_length);
        rets = mei_io_cb_alloc_resp_buf(cb, length);
        if (rets)
                goto out;

        cb->fop_type = MEI_FOP_READ;
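        /* if the host buffer is free send the flow control request now,
         * otherwise queue the cb and let the interrupt thread send it */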
        if (mei_hbuf_acquire(dev)) {
                rets = mei_hbm_cl_flow_control_req(dev, cl);
                if (rets < 0)
                        goto out;

                list_add_tail(&cb->list, &dev->read_list.list);
        } else {
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }

        cl->read_cb = cb;

out:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(&dev->pdev->dev);
        pm_runtime_put_autosuspend(&dev->pdev->dev);

        if (rets)
                mei_io_cb_free(cb);

        return rets;
}

/**
 * mei_cl_irq_write - write a message to device
 *      from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                     struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev;
        struct mei_msg_data *buf;
        struct mei_msg_hdr mei_hdr;
        size_t len;
        u32 msg_slots;
        int slots;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        buf = &cb->request_buffer;

        rets = mei_cl_flow_ctrl_creds(cl);
        if (rets < 0)
                return rets;

        if (rets == 0) {
                cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
                return 0;
        }

        slots = mei_hbuf_empty_slots(dev);
        len = buf->size - cb->buf_idx;
        msg_slots = mei_data2slots(len);

        mei_hdr.host_addr = cl->host_client_id;
        mei_hdr.me_addr = cl->me_client_id;
        mei_hdr.reserved = 0;
        mei_hdr.internal = cb->internal;

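        /*
         * A message that fits in the free slots is sent whole; it is split
         * only when the complete host buffer is available, otherwise the
         * write waits until the buffer has drained.
         */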
        if (slots >= msg_slots) {
                mei_hdr.length = len;
                mei_hdr.msg_complete = 1;
        /* Split the message only if we can write the whole host buffer */
        } else if (slots == dev->hbuf_depth) {
                msg_slots = slots;
                len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
                mei_hdr.length = len;
                mei_hdr.msg_complete = 0;
        } else {
                /* wait for next time the host buffer is empty */
                return 0;
        }

        cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
                        cb->request_buffer.size, cb->buf_idx);

        rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
        if (rets) {
                cl->status = rets;
                list_move_tail(&cb->list, &cmpl_list->list);
                return rets;
        }

        cl->status = 0;
        cl->writing_state = MEI_WRITING;
        cb->buf_idx += mei_hdr.length;

        if (mei_hdr.msg_complete) {
                if (mei_cl_flow_ctrl_reduce(cl))
                        return -EIO;
                list_move_tail(&cb->list, &dev->write_waiting_list.list);
        }

        return 0;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *      assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until the write completes
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
        struct mei_device *dev;
        struct mei_msg_data *buf;
        struct mei_msg_hdr mei_hdr;
        int rets;


        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        if (WARN_ON(!cb))
                return -EINVAL;

        dev = cl->dev;


        buf = &cb->request_buffer;

        cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

        rets = pm_runtime_get(&dev->pdev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(&dev->pdev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                return rets;
        }

        cb->fop_type = MEI_FOP_WRITE;
        cb->buf_idx = 0;
        cl->writing_state = MEI_IDLE;

        mei_hdr.host_addr = cl->host_client_id;
        mei_hdr.me_addr = cl->me_client_id;
        mei_hdr.reserved = 0;
        mei_hdr.msg_complete = 0;
        mei_hdr.internal = cb->internal;

        rets = mei_cl_flow_ctrl_creds(cl);
        if (rets < 0)
                goto err;

        if (rets == 0) {
                cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
                rets = buf->size;
                goto out;
        }
        if (!mei_hbuf_acquire(dev)) {
                cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
                rets = buf->size;
                goto out;
        }

        /* Check for a maximum length */
        if (buf->size > mei_hbuf_max_len(dev)) {
                mei_hdr.length = mei_hbuf_max_len(dev);
                mei_hdr.msg_complete = 0;
        } else {
                mei_hdr.length = buf->size;
                mei_hdr.msg_complete = 1;
        }

        rets = mei_write_message(dev, &mei_hdr, buf->data);
        if (rets)
                goto err;

        cl->writing_state = MEI_WRITING;
        cb->buf_idx = mei_hdr.length;

out:
        if (mei_hdr.msg_complete) {
                rets = mei_cl_flow_ctrl_reduce(cl);
                if (rets < 0)
                        goto err;

                list_add_tail(&cb->list, &dev->write_waiting_list.list);
        } else {
                list_add_tail(&cb->list, &dev->write_list.list);
        }


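        /* in blocking mode wait until the write is marked MEI_WRITE_COMPLETE */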
        if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

                mutex_unlock(&dev->device_lock);
                rets = wait_event_interruptible(cl->tx_wait,
                                cl->writing_state == MEI_WRITE_COMPLETE);
                mutex_lock(&dev->device_lock);
                /* wait_event_interruptible returns -ERESTARTSYS */
                if (rets) {
                        if (signal_pending(current))
                                rets = -EINTR;
                        goto err;
                }
        }

        rets = buf->size;
err:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(&dev->pdev->dev);
        pm_runtime_put_autosuspend(&dev->pdev->dev);

        return rets;
}


/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
        if (cb->fop_type == MEI_FOP_WRITE) {
                mei_io_cb_free(cb);
                cb = NULL;
                cl->writing_state = MEI_WRITE_COMPLETE;
                if (waitqueue_active(&cl->tx_wait))
                        wake_up_interruptible(&cl->tx_wait);

        } else if (cb->fop_type == MEI_FOP_READ &&
                        MEI_READING == cl->reading_state) {
                cl->reading_state = MEI_READ_COMPLETE;
                if (waitqueue_active(&cl->rx_wait))
                        wake_up_interruptible(&cl->rx_wait);
                else
                        mei_cl_bus_rx_event(cl);

        }
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev - mei device
 */

void mei_cl_all_disconnect(struct mei_device *dev)
{
        struct mei_cl *cl;

        list_for_each_entry(cl, &dev->file_list, link) {
                cl->state = MEI_FILE_DISCONNECTED;
                cl->mei_flow_ctrl_creds = 0;
                cl->timer_count = 0;
        }
}


/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
 *
 * @dev - mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
        struct mei_cl *cl;
        list_for_each_entry(cl, &dev->file_list, link) {
                if (waitqueue_active(&cl->rx_wait)) {
                        cl_dbg(dev, cl, "Waking up reading client!\n");
                        wake_up_interruptible(&cl->rx_wait);
                }
                if (waitqueue_active(&cl->tx_wait)) {
                        cl_dbg(dev, cl, "Waking up writing client!\n");
                        wake_up_interruptible(&cl->tx_wait);
                }
        }
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev - mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
        mei_io_list_free(&dev->write_list, NULL);
        mei_io_list_free(&dev->write_waiting_list, NULL);
}

