/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"

/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);

/**
 * mei_clear_list - removes all callbacks associated with file
 *		from mei_cb_list
 *
 * @dev: device structure
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with a file
 * when the application closes the device node or is interrupted (e.g. Ctrl-C).
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
                struct file *file, struct list_head *mei_cb_list)
{
        struct mei_cl_cb *cb_pos = NULL;
        struct mei_cl_cb *cb_next = NULL;
        struct file *file_temp;
        bool removed = false;

        /* walk all list members */
        list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
                file_temp = (struct file *)cb_pos->file_object;
                /* check if this list member is associated with the file */
                if (file_temp == file) {
                        /* remove member from the list */
                        list_del(&cb_pos->cb_list);
                        /* check if cb is the current iamthif cb */
                        if (dev->iamthif_current_cb == cb_pos) {
                                dev->iamthif_current_cb = NULL;
                                /* send flow control to iamthif client */
                                mei_send_flow_control(dev, &dev->iamthif_cl);
                        }
                        /* free all allocated buffers */
                        mei_free_cb_private(cb_pos);
                        cb_pos = NULL;
                        removed = true;
                }
        }
        return removed;
}

/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with a file
 * when the application closes the device node or is interrupted (e.g. Ctrl-C).
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
        bool removed = false;

        /* remove callbacks associated with a file */
        mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
        if (mei_clear_list(dev, file,
                        &dev->amthi_read_complete_list.mei_cb.cb_list))
                removed = true;

        mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);

        if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
                removed = true;

        if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
                removed = true;

        if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
                removed = true;

        /* check if iamthif_current_cb is not NULL */
        if (dev->iamthif_current_cb && !removed) {
                /* check file and iamthif current cb association */
                if (dev->iamthif_current_cb->file_object == file) {
                        /* remove cb */
                        mei_free_cb_private(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;
                        removed = true;
                }
        }
        return removed;
}

/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
                struct mei_device *dev,
                struct mei_cl *cl)
{
        struct mei_cl_cb *pos = NULL;
        struct mei_cl_cb *next = NULL;

        dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
        list_for_each_entry_safe(pos, next,
                        &dev->read_list.mei_cb.cb_list, cb_list) {
                struct mei_cl *cl_temp;
                cl_temp = (struct mei_cl *)pos->file_private;

                if (mei_cl_cmp_id(cl, cl_temp))
                        return pos;
        }
        return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
        struct mei_cl *cl;
        struct mei_device *dev;
        unsigned long cl_id;
        int err;

        err = -ENODEV;
        if (!mei_pdev)
                goto out;

        dev = pci_get_drvdata(mei_pdev);
        if (!dev)
                goto out;

        mutex_lock(&dev->device_lock);
        err = -ENOMEM;
        cl = mei_cl_allocate(dev);
        if (!cl)
                goto out_unlock;

        err = -ENODEV;
        if (dev->dev_state != MEI_DEV_ENABLED) {
                dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
                        mei_dev_state_str(dev->dev_state));
                goto out_unlock;
        }
        err = -EMFILE;
        if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
                goto out_unlock;

        cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
        if (cl_id >= MEI_CLIENTS_MAX)
                goto out_unlock;

        cl->host_client_id = cl_id;

        dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

        dev->open_handle_count++;

        list_add_tail(&cl->link, &dev->file_list);

        set_bit(cl->host_client_id, dev->host_clients_map);
        cl->state = MEI_FILE_INITIALIZING;
        cl->sm_state = 0;

        file->private_data = cl;
        mutex_unlock(&dev->device_lock);

        return nonseekable_open(inode, file);

out_unlock:
        mutex_unlock(&dev->device_lock);
        kfree(cl);
out:
        return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *cb;
        struct mei_device *dev;
        int rets = 0;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);
        if (cl != &dev->iamthif_cl) {
                if (cl->state == MEI_FILE_CONNECTED) {
                        cl->state = MEI_FILE_DISCONNECTING;
                        dev_dbg(&dev->pdev->dev,
                                "disconnecting client host client = %d, "
                                "ME client = %d\n",
                                cl->host_client_id,
                                cl->me_client_id);
                        rets = mei_disconnect_host_client(dev, cl);
                }
                mei_cl_flush_queues(cl);
                dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
                        cl->host_client_id,
                        cl->me_client_id);

                if (dev->open_handle_count > 0) {
                        clear_bit(cl->host_client_id, dev->host_clients_map);
                        dev->open_handle_count--;
                }
                mei_remove_client_from_file_list(dev, cl->host_client_id);

                /* free read cb */
                cb = NULL;
                if (cl->read_cb) {
                        cb = find_read_list_entry(dev, cl);
                        /* Remove entry from read list */
                        if (cb)
                                list_del(&cb->cb_list);

                        cb = cl->read_cb;
                        cl->read_cb = NULL;
                }

                file->private_data = NULL;

                if (cb) {
                        mei_free_cb_private(cb);
                        cb = NULL;
                }

                kfree(cl);
        } else {
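                /*
                 * The iamthif (AMTHI) client is shared by all openers of the
                 * device node, so only this file's bookkeeping and pending
                 * callbacks are torn down here; the client itself stays
                 * connected.
                 */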
                if (dev->open_handle_count > 0)
                        dev->open_handle_count--;

                if (dev->iamthif_file_object == file &&
                    dev->iamthif_state != MEI_IAMTHIF_IDLE) {

                        dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
                                dev->iamthif_state);
                        dev->iamthif_canceled = true;
                        if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
                                dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
                                mei_run_next_iamthif_cmd(dev);
                        }
                }

                if (mei_clear_lists(dev, file))
                        dev->iamthif_state = MEI_IAMTHIF_IDLE;

        }
        mutex_unlock(&dev->device_lock);
        return rets;
}

/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
                        size_t length, loff_t *offset)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *cb_pos = NULL;
        struct mei_cl_cb *cb = NULL;
        struct mei_device *dev;
        int i;
        int rets;
        int err;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

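        /*
         * Reads from the watchdog ME client are normally rejected; they are
         * only allowed right after this client has written one of the
         * watchdog state-independence messages (see mei_write() below).
         */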
        if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
                /* Do not allow to read watchdog client */
                i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
                if (i >= 0) {
                        struct mei_me_client *me_client = &dev->me_clients[i];
                        if (cl->me_client_id == me_client->client_id) {
                                rets = -EBADF;
                                goto out;
                        }
                }
        } else {
                cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
        }

        if (cl == &dev->iamthif_cl) {
                rets = amthi_read(dev, file, ubuf, length, offset);
                goto out;
        }

        if (cl->read_cb && cl->read_cb->information > *offset) {
                cb = cl->read_cb;
                goto copy_buffer;
        } else if (cl->read_cb && cl->read_cb->information > 0 &&
                   cl->read_cb->information <= *offset) {
                cb = cl->read_cb;
                rets = 0;
                goto free;
        } else if ((!cl->read_cb || !cl->read_cb->information) &&
                   *offset > 0) {
                /* Offset needs to be cleaned for contiguous reads */
                *offset = 0;
                rets = 0;
                goto out;
        }

        err = mei_start_read(dev, cl);
        if (err && err != -EBUSY) {
                dev_dbg(&dev->pdev->dev,
                        "mei start read failure with status = %d\n", err);
                rets = err;
                goto out;
        }

        if (MEI_READ_COMPLETE != cl->reading_state &&
            !waitqueue_active(&cl->rx_wait)) {
                if (file->f_flags & O_NONBLOCK) {
                        rets = -EAGAIN;
                        goto out;
                }

                mutex_unlock(&dev->device_lock);

                if (wait_event_interruptible(cl->rx_wait,
                        (MEI_READ_COMPLETE == cl->reading_state ||
                         MEI_FILE_INITIALIZING == cl->state ||
                         MEI_FILE_DISCONNECTED == cl->state ||
                         MEI_FILE_DISCONNECTING == cl->state))) {
                        if (signal_pending(current))
                                return -EINTR;
                        return -ERESTARTSYS;
                }

                mutex_lock(&dev->device_lock);
                if (MEI_FILE_INITIALIZING == cl->state ||
                    MEI_FILE_DISCONNECTED == cl->state ||
                    MEI_FILE_DISCONNECTING == cl->state) {
                        rets = -EBUSY;
                        goto out;
                }
        }

        cb = cl->read_cb;

        if (!cb) {
                rets = -ENODEV;
                goto out;
        }
        if (cl->reading_state != MEI_READ_COMPLETE) {
                rets = 0;
                goto out;
        }
        /* now copy the data to user space */
copy_buffer:
        dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
                cb->response_buffer.size);
        dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
                cb->information);
        if (length == 0 || ubuf == NULL || *offset > cb->information) {
                rets = -EMSGSIZE;
                goto free;
        }

        /* length is being truncated to PAGE_SIZE,
         * however, the information size may be longer */
        length = min_t(size_t, length, (cb->information - *offset));

        if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
                rets = -EFAULT;
                goto free;
        }

        rets = length;
        *offset += length;
        if ((unsigned long)*offset < cb->information)
                goto out;

free:
        cb_pos = find_read_list_entry(dev, cl);
        /* Remove entry from read list */
        if (cb_pos)
                list_del(&cb_pos->cb_list);
        mei_free_cb_private(cb);
        cl->reading_state = MEI_IDLE;
        cl->read_cb = NULL;
        cl->read_pending = 0;
out:
        dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
        mutex_unlock(&dev->device_lock);
        return rets;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
                         size_t length, loff_t *offset)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *write_cb = NULL;
        struct mei_msg_hdr mei_hdr;
        struct mei_device *dev;
        unsigned long timeout = 0;
        int rets;
        int i;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED) {
                mutex_unlock(&dev->device_lock);
                return -ENODEV;
        }

        if (cl == &dev->iamthif_cl) {
                write_cb = find_amthi_read_list_entry(dev, file);

                if (write_cb) {
                        timeout = write_cb->read_time +
                                msecs_to_jiffies(IAMTHIF_READ_TIMER);

                        if (time_after(jiffies, timeout) ||
                            cl->reading_state == MEI_READ_COMPLETE) {
                                *offset = 0;
                                list_del(&write_cb->cb_list);
                                mei_free_cb_private(write_cb);
                                write_cb = NULL;
                        }
                }
        }

        /* free entry used in read */
        if (cl->reading_state == MEI_READ_COMPLETE) {
                *offset = 0;
                write_cb = find_read_list_entry(dev, cl);
                if (write_cb) {
                        list_del(&write_cb->cb_list);
                        mei_free_cb_private(write_cb);
                        write_cb = NULL;
                        cl->reading_state = MEI_IDLE;
                        cl->read_cb = NULL;
                        cl->read_pending = 0;
                }
        } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
                *offset = 0;

        write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
        if (!write_cb) {
                mutex_unlock(&dev->device_lock);
                return -ENOMEM;
        }

        write_cb->file_object = file;
        write_cb->file_private = cl;
        write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
        rets = -ENOMEM;
        if (!write_cb->request_buffer.data)
                goto unlock_dev;

        dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);

        rets = -EFAULT;
        if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
                goto unlock_dev;

        cl->sm_state = 0;
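        /*
         * A 4-byte message matching one of the watchdog state-independence
         * messages flags this client, which in turn permits a subsequent
         * read from the watchdog ME client (see mei_read() above).
         */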
        if (length == 4 &&
            ((memcmp(mei_wd_state_independence_msg[0],
                     write_cb->request_buffer.data, 4) == 0) ||
             (memcmp(mei_wd_state_independence_msg[1],
                     write_cb->request_buffer.data, 4) == 0) ||
             (memcmp(mei_wd_state_independence_msg[2],
                     write_cb->request_buffer.data, 4) == 0)))
                cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

        INIT_LIST_HEAD(&write_cb->cb_list);
        if (cl == &dev->iamthif_cl) {
                write_cb->response_buffer.data =
                        kmalloc(dev->iamthif_mtu, GFP_KERNEL);
                if (!write_cb->response_buffer.data) {
                        rets = -ENOMEM;
                        goto unlock_dev;
                }
                if (dev->dev_state != MEI_DEV_ENABLED) {
                        rets = -ENODEV;
                        goto unlock_dev;
                }
                i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
                if (i < 0) {
                        rets = -ENODEV;
                        goto unlock_dev;
                }
                if (length > dev->me_clients[i].props.max_msg_length ||
                    length <= 0) {
                        rets = -EMSGSIZE;
                        goto unlock_dev;
                }

                write_cb->response_buffer.size = dev->iamthif_mtu;
                write_cb->major_file_operations = MEI_IOCTL;
                write_cb->information = 0;
                write_cb->request_buffer.size = length;
                if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
                        rets = -ENODEV;
                        goto unlock_dev;
                }

                if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
                    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
                        dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
                                (int) dev->iamthif_state);
                        dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
                        list_add_tail(&write_cb->cb_list,
                                      &dev->amthi_cmd_list.mei_cb.cb_list);
                        rets = length;
                } else {
                        dev_dbg(&dev->pdev->dev, "call amthi write\n");
                        rets = amthi_write(dev, write_cb);

                        if (rets) {
                                dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
                                        rets);
                                goto unlock_dev;
                        }
                        rets = length;
                }
                mutex_unlock(&dev->device_lock);
                return rets;
        }

        write_cb->major_file_operations = MEI_WRITE;
        /* make sure information is zero before we start */
        write_cb->information = 0;
        write_cb->request_buffer.size = length;

        dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
                cl->host_client_id, cl->me_client_id);
        if (cl->state != MEI_FILE_CONNECTED) {
                rets = -ENODEV;
                dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
                        cl->host_client_id,
                        cl->me_client_id);
                goto unlock_dev;
        }
        i = mei_me_cl_by_id(dev, cl->me_client_id);
        if (i < 0) {
                rets = -ENODEV;
                goto unlock_dev;
        }
        if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
                rets = -EINVAL;
                goto unlock_dev;
        }
        write_cb->file_private = cl;

        rets = mei_flow_ctrl_creds(dev, cl);
        if (rets < 0)
                goto unlock_dev;

        if (rets && dev->mei_host_buffer_is_empty) {
                rets = 0;
                dev->mei_host_buffer_is_empty = false;
                if (length > mei_hbuf_max_data(dev)) {
                        mei_hdr.length = mei_hbuf_max_data(dev);
                        mei_hdr.msg_complete = 0;
                } else {
                        mei_hdr.length = length;
                        mei_hdr.msg_complete = 1;
                }
                mei_hdr.host_addr = cl->host_client_id;
                mei_hdr.me_addr = cl->me_client_id;
                mei_hdr.reserved = 0;
                dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
                        *((u32 *) &mei_hdr));
                if (mei_write_message(dev, &mei_hdr,
                        (unsigned char *) (write_cb->request_buffer.data),
                        mei_hdr.length)) {
                        rets = -ENODEV;
                        goto unlock_dev;
                }
                cl->writing_state = MEI_WRITING;
                write_cb->information = mei_hdr.length;
                if (mei_hdr.msg_complete) {
                        if (mei_flow_ctrl_reduce(dev, cl)) {
                                rets = -ENODEV;
                                goto unlock_dev;
                        }
                        list_add_tail(&write_cb->cb_list,
                                      &dev->write_waiting_list.mei_cb.cb_list);
                } else {
                        list_add_tail(&write_cb->cb_list,
                                      &dev->write_list.mei_cb.cb_list);
                }

        } else {

                write_cb->information = 0;
                cl->writing_state = MEI_WRITING;
                list_add_tail(&write_cb->cb_list,
                              &dev->write_list.mei_cb.cb_list);
        }
        mutex_unlock(&dev->device_lock);
        return length;

unlock_dev:
        mutex_unlock(&dev->device_lock);
        mei_free_cb_private(write_cb);
        return rets;
}

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
        struct mei_device *dev;
        struct mei_cl *cl = file->private_data;
        struct mei_connect_client_data *connect_data = NULL;
        int rets;

        if (cmd != IOCTL_MEI_CONNECT_CLIENT)
                return -EINVAL;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

        connect_data = kzalloc(sizeof(struct mei_connect_client_data),
                               GFP_KERNEL);
        if (!connect_data) {
                rets = -ENOMEM;
                goto out;
        }
        dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
        if (copy_from_user(connect_data, (char __user *)data,
                           sizeof(struct mei_connect_client_data))) {
                dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
                rets = -EFAULT;
                goto out;
        }
        rets = mei_ioctl_connect_client(file, connect_data);

        /* if all is OK, copy the data back to user space */
        if (rets)
                goto out;

        dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
        if (copy_to_user((char __user *)data, connect_data,
                         sizeof(struct mei_connect_client_data))) {
                dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
                rets = -EFAULT;
                goto out;
        }

out:
        kfree(connect_data);
        mutex_unlock(&dev->device_lock);
        return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
                        unsigned int cmd, unsigned long data)
{
        return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
        struct mei_cl *cl = file->private_data;
        struct mei_device *dev;
        unsigned int mask = 0;

        if (WARN_ON(!cl || !cl->dev))
                return mask;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED)
                goto out;

        if (cl == &dev->iamthif_cl) {
                mutex_unlock(&dev->device_lock);
                poll_wait(file, &dev->iamthif_cl.wait, wait);
                mutex_lock(&dev->device_lock);
                if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
                    dev->iamthif_file_object == file) {
                        mask |= (POLLIN | POLLRDNORM);
                        dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
                        mei_run_next_iamthif_cmd(dev);
                }
                goto out;
        }

        mutex_unlock(&dev->device_lock);
        poll_wait(file, &cl->tx_wait, wait);
        mutex_lock(&dev->device_lock);
        if (MEI_WRITE_COMPLETE == cl->writing_state)
                mask |= (POLLIN | POLLRDNORM);

out:
        mutex_unlock(&dev->device_lock);
        return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
        .owner = THIS_MODULE,
        .read = mei_read,
        .unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = mei_compat_ioctl,
#endif
        .open = mei_open,
        .release = mei_release,
        .write = mei_write,
        .poll = mei_poll,
        .llseek = no_llseek
};

/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
        .name = "mei",
        .fops = &mei_fops,
        .minor = MISC_DYNAMIC_MINOR,
};

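/*
 * Illustrative user-space usage sketch (not part of this driver; the
 * structure and field names below are assumed from <linux/mei.h>):
 * a client opens the character device, connects to an ME client by
 * UUID with IOCTL_MEI_CONNECT_CLIENT and then exchanges messages with
 * plain write()/read() calls, e.g.
 *
 *         int fd = open("/dev/mei", O_RDWR);
 *         struct mei_connect_client_data data = {};
 *
 *         data.in_client_uuid = some_client_uuid;  (hypothetical UUID)
 *         if (fd >= 0 && !ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data)) {
 *                 write(fd, req, req_len);
 *                 read(fd, rsp, data.out_client_properties.max_msg_length);
 *         }
 */
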
/**
 * mei_quirk_probe - probe for devices without a valid ME interface
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
                                      const struct pci_device_id *ent)
{
        u32 reg;

        if (ent->device == MEI_DEV_ID_PBG_1) {
                pci_read_config_dword(pdev, 0x48, &reg);
                /* if bit 9 is set and bit 10 is clear, the ME interface is not valid */
                if ((reg & 0x600) == 0x200) {
                        dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
                        return false;
                }
        }
        return true;
}

/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        struct mei_device *dev;
        int err;

        mutex_lock(&mei_mutex);

        if (!mei_quirk_probe(pdev, ent)) {
                err = -ENODEV;
                goto end;
        }

        if (mei_pdev) {
                err = -EEXIST;
                goto end;
        }
        /* enable pci dev */
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering */
        pci_set_master(pdev);
        /* pci request regions for mei driver */
        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto disable_device;
        }
        /* allocates and initializes the mei dev structure */
        dev = mei_device_init(pdev);
        if (!dev) {
                err = -ENOMEM;
                goto release_regions;
        }
        /* mapping IO device memory */
        dev->mem_addr = pci_iomap(pdev, 0, 0);
        if (!dev->mem_addr) {
                dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
                err = -ENOMEM;
                goto free_device;
        }
        pci_enable_msi(pdev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
                        mei_interrupt_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto disable_msi;
        }
        INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
        if (mei_hw_init(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        err = misc_register(&mei_misc_device);
        if (err)
                goto release_irq;

        mei_pdev = pdev;
        pci_set_drvdata(pdev, dev);

        schedule_delayed_work(&dev->timer_work, HZ);

        mutex_unlock(&mei_mutex);

        pr_debug("initialization successful.\n");

        return 0;

release_irq:
        /* disable interrupts */
        dev->host_hw_state = mei_hcsr_read(dev);
        mei_disable_interrupts(dev);
        flush_scheduled_work();
        free_irq(pdev->irq, dev);
disable_msi:
        pci_disable_msi(pdev);
        pci_iounmap(pdev, dev->mem_addr);
free_device:
        kfree(dev);
release_regions:
        pci_release_regions(pdev);
disable_device:
        pci_disable_device(pdev);
end:
        mutex_unlock(&mei_mutex);
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
        struct mei_device *dev;

        if (mei_pdev != pdev)
                return;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

        mutex_lock(&dev->device_lock);

        cancel_delayed_work(&dev->timer_work);

        mei_wd_stop(dev);

        mei_pdev = NULL;

        if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
                dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
                mei_disconnect_host_client(dev, &dev->iamthif_cl);
        }
        if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
                dev->wd_cl.state = MEI_FILE_DISCONNECTING;
                mei_disconnect_host_client(dev, &dev->wd_cl);
        }

        /* Unregistering watchdog device */
        mei_watchdog_unregister(dev);

        /* remove entry if already in list */
        dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
        mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
        mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

        dev->iamthif_current_cb = NULL;
        dev->me_clients_num = 0;

        mutex_unlock(&dev->device_lock);

        flush_scheduled_work();

        /* disable interrupts */
        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);
        pci_set_drvdata(pdev, NULL);

        if (dev->mem_addr)
                pci_iounmap(pdev, dev->mem_addr);

        kfree(dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);

        misc_deregister(&mei_misc_device);
}

#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);
        int err;

        if (!dev)
                return -ENODEV;
        mutex_lock(&dev->device_lock);

        cancel_delayed_work(&dev->timer_work);

        /* Stop watchdog if exists */
        err = mei_wd_stop(dev);
        /* Set new mei state */
        if (dev->dev_state == MEI_DEV_ENABLED ||
            dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
                dev->dev_state = MEI_DEV_POWER_DOWN;
                mei_reset(dev, 0);
        }
        mutex_unlock(&dev->device_lock);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        return err;
}

static int mei_pci_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int err;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        pci_enable_msi(pdev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
                        mei_interrupt_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                        pdev->irq);
                return err;
        }

        mutex_lock(&dev->device_lock);
        dev->dev_state = MEI_DEV_POWER_UP;
        mei_reset(dev, 1);
        mutex_unlock(&dev->device_lock);

        /* Start timer if stopped in suspend */
        schedule_delayed_work(&dev->timer_work, HZ);

        return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS (&mei_pm_ops)
#else
#define MEI_PM_OPS NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
        .name = KBUILD_MODNAME,
        .id_table = mei_pci_tbl,
        .probe = mei_probe,
        .remove = __devexit_p(mei_remove),
        .shutdown = __devexit_p(mei_remove),
        .driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");