/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

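/* prefix every pr_*() message from this file with the module name ("mei: ") */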
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"

static const char mei_driver_name[] = "mei";

/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
struct pci_dev *mei_device;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},

        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

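/* serializes mei_probe(); a second MEI PCI device is rejected with -EEXIST */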
static DEFINE_MUTEX(mei_mutex);


/**
 * mei_clear_list - removes all callbacks associated with file
 *        from mei_cb_list
 *
 * @dev: device structure.
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
                struct file *file, struct list_head *mei_cb_list)
{
        struct mei_cl_cb *cb_pos = NULL;
        struct mei_cl_cb *cb_next = NULL;
        struct file *file_temp;
        bool removed = false;

        /* list all list member */
        list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
                file_temp = (struct file *)cb_pos->file_object;
                /* check if list member associated with a file */
                if (file_temp == file) {
                        /* remove member from the list */
                        list_del(&cb_pos->cb_list);
                        /* check if cb equal to current iamthif cb */
                        if (dev->iamthif_current_cb == cb_pos) {
                                dev->iamthif_current_cb = NULL;
                                /* send flow control to iamthif client */
                                mei_send_flow_control(dev, &dev->iamthif_cl);
                        }
                        /* free all allocated buffers */
                        mei_free_cb_private(cb_pos);
                        cb_pos = NULL;
                        removed = true;
                }
        }
        return removed;
}

/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
        bool removed = false;

        /* remove callbacks associated with a file */
        mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
        if (mei_clear_list(dev, file,
                           &dev->amthi_read_complete_list.mei_cb.cb_list))
                removed = true;

        mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);

        if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
                removed = true;

        if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
                removed = true;

        if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
                removed = true;

        /* check if iamthif_current_cb not NULL */
        if (dev->iamthif_current_cb && !removed) {
                /* check file and iamthif current cb association */
                if (dev->iamthif_current_cb->file_object == file) {
                        /* remove cb */
                        mei_free_cb_private(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;
                        removed = true;
                }
        }
        return removed;
}

/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client to match against the read list entries
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
                struct mei_device *dev,
                struct mei_cl *cl)
{
        struct mei_cl_cb *pos = NULL;
        struct mei_cl_cb *next = NULL;

        dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
        list_for_each_entry_safe(pos, next,
                        &dev->read_list.mei_cb.cb_list, cb_list) {
                struct mei_cl *cl_temp;
                cl_temp = (struct mei_cl *)pos->file_private;

                if (mei_cl_cmp_id(cl, cl_temp))
                        return pos;
        }
        return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
        struct mei_cl *cl;
        struct mei_device *dev;
        unsigned long cl_id;
        int err;

        err = -ENODEV;
        if (!mei_device)
                goto out;

        dev = pci_get_drvdata(mei_device);
        if (!dev)
                goto out;

        mutex_lock(&dev->device_lock);
        err = -ENOMEM;
        cl = mei_cl_allocate(dev);
        if (!cl)
                goto out_unlock;

        err = -ENODEV;
        if (dev->mei_state != MEI_ENABLED) {
                dev_dbg(&dev->pdev->dev, "mei_state != MEI_ENABLED mei_state= %d\n",
                        dev->mei_state);
                goto out_unlock;
        }
        err = -EMFILE;
        if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
                goto out_unlock;

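        /* host client ids are allocated from a bitmap; a clear bit is a free id */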
        cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
        if (cl_id >= MEI_CLIENTS_MAX)
                goto out_unlock;

        cl->host_client_id = cl_id;

        dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

        dev->open_handle_count++;

        list_add_tail(&cl->link, &dev->file_list);

        set_bit(cl->host_client_id, dev->host_clients_map);
        cl->state = MEI_FILE_INITIALIZING;
        cl->sm_state = 0;

        file->private_data = cl;
        mutex_unlock(&dev->device_lock);

        return nonseekable_open(inode, file);

out_unlock:
        mutex_unlock(&dev->device_lock);
        kfree(cl);
out:
        return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *cb;
        struct mei_device *dev;
        int rets = 0;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);
        if (cl != &dev->iamthif_cl) {
                if (cl->state == MEI_FILE_CONNECTED) {
                        cl->state = MEI_FILE_DISCONNECTING;
                        dev_dbg(&dev->pdev->dev,
                                "disconnecting client host client = %d, "
                                "ME client = %d\n",
                                cl->host_client_id,
                                cl->me_client_id);
                        rets = mei_disconnect_host_client(dev, cl);
                }
                mei_cl_flush_queues(cl);
                dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
                        cl->host_client_id,
                        cl->me_client_id);

                if (dev->open_handle_count > 0) {
                        clear_bit(cl->host_client_id, dev->host_clients_map);
                        dev->open_handle_count--;
                }
                mei_remove_client_from_file_list(dev, cl->host_client_id);

                /* free read cb */
                cb = NULL;
                if (cl->read_cb) {
                        cb = find_read_list_entry(dev, cl);
                        /* Remove entry from read list */
                        if (cb)
                                list_del(&cb->cb_list);

                        cb = cl->read_cb;
                        cl->read_cb = NULL;
                }

                file->private_data = NULL;

                if (cb) {
                        mei_free_cb_private(cb);
                        cb = NULL;
                }

                kfree(cl);
        } else {
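                /* the iamthif (AMT) client is shared; only drop this file's bookkeeping */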
                if (dev->open_handle_count > 0)
                        dev->open_handle_count--;

                if (dev->iamthif_file_object == file &&
                    dev->iamthif_state != MEI_IAMTHIF_IDLE) {

                        dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
                                dev->iamthif_state);
                        dev->iamthif_canceled = true;
                        if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
                                dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
                                mei_run_next_iamthif_cmd(dev);
                        }
                }

                if (mei_clear_lists(dev, file))
                        dev->iamthif_state = MEI_IAMTHIF_IDLE;

        }
        mutex_unlock(&dev->device_lock);
        return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
                        size_t length, loff_t *offset)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *cb_pos = NULL;
        struct mei_cl_cb *cb = NULL;
        struct mei_device *dev;
        int i;
        int rets;
        int err;


        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);
        if (dev->mei_state != MEI_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
                /* Do not allow to read watchdog client */
                i = mei_find_me_client_index(dev, mei_wd_guid);
                if (i >= 0) {
                        struct mei_me_client *me_client = &dev->me_clients[i];

                        if (cl->me_client_id == me_client->client_id) {
                                rets = -EBADF;
                                goto out;
                        }
                }
        } else {
                cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
        }

        if (cl == &dev->iamthif_cl) {
                rets = amthi_read(dev, file, ubuf, length, offset);
                goto out;
        }

        if (cl->read_cb && cl->read_cb->information > *offset) {
                cb = cl->read_cb;
                goto copy_buffer;
        } else if (cl->read_cb && cl->read_cb->information > 0 &&
                   cl->read_cb->information <= *offset) {
                cb = cl->read_cb;
                rets = 0;
                goto free;
        } else if ((!cl->read_cb || !cl->read_cb->information) &&
                   *offset > 0) {
                /* Offset needs to be cleaned for contiguous reads */
                *offset = 0;
                rets = 0;
                goto out;
        }

        err = mei_start_read(dev, cl);
        if (err && err != -EBUSY) {
                dev_dbg(&dev->pdev->dev,
                        "mei start read failure with status = %d\n", err);
                rets = err;
                goto out;
        }

        if (MEI_READ_COMPLETE != cl->reading_state &&
            !waitqueue_active(&cl->rx_wait)) {
                if (file->f_flags & O_NONBLOCK) {
                        rets = -EAGAIN;
                        goto out;
                }

                mutex_unlock(&dev->device_lock);

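                /* sleep without the device lock; it is re-acquired after wake-up */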
                if (wait_event_interruptible(cl->rx_wait,
                        (MEI_READ_COMPLETE == cl->reading_state ||
                         MEI_FILE_INITIALIZING == cl->state ||
                         MEI_FILE_DISCONNECTED == cl->state ||
                         MEI_FILE_DISCONNECTING == cl->state))) {
                        if (signal_pending(current))
                                return -EINTR;
                        return -ERESTARTSYS;
                }

                mutex_lock(&dev->device_lock);
                if (MEI_FILE_INITIALIZING == cl->state ||
                    MEI_FILE_DISCONNECTED == cl->state ||
                    MEI_FILE_DISCONNECTING == cl->state) {
                        rets = -EBUSY;
                        goto out;
                }
        }

        cb = cl->read_cb;

        if (!cb) {
                rets = -ENODEV;
                goto out;
        }
        if (cl->reading_state != MEI_READ_COMPLETE) {
                rets = 0;
                goto out;
        }
        /* now copy the data to user space */
copy_buffer:
        dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
                cb->response_buffer.size);
        dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
                cb->information);
        if (length == 0 || ubuf == NULL || *offset > cb->information) {
                rets = -EMSGSIZE;
                goto free;
        }

        /* length is being truncated to PAGE_SIZE, however, */
        /* information size may be longer */
        length = min_t(size_t, length, (cb->information - *offset));

        if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
                rets = -EFAULT;
                goto free;
        }

        rets = length;
        *offset += length;
        if ((unsigned long)*offset < cb->information)
                goto out;

free:
        cb_pos = find_read_list_entry(dev, cl);
        /* Remove entry from read list */
        if (cb_pos)
                list_del(&cb_pos->cb_list);
        mei_free_cb_private(cb);
        cl->reading_state = MEI_IDLE;
        cl->read_cb = NULL;
        cl->read_pending = 0;
out:
        dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
        mutex_unlock(&dev->device_lock);
        return rets;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
                         size_t length, loff_t *offset)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *write_cb = NULL;
        struct mei_msg_hdr mei_hdr;
        struct mei_device *dev;
        unsigned long timeout = 0;
        int rets;
        int i;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->mei_state != MEI_ENABLED) {
                mutex_unlock(&dev->device_lock);
                return -ENODEV;
        }

        if (cl == &dev->iamthif_cl) {
                write_cb = find_amthi_read_list_entry(dev, file);

                if (write_cb) {
                        timeout = write_cb->read_time +
                                        msecs_to_jiffies(IAMTHIF_READ_TIMER);

                        if (time_after(jiffies, timeout) ||
                            cl->reading_state == MEI_READ_COMPLETE) {
                                *offset = 0;
                                list_del(&write_cb->cb_list);
                                mei_free_cb_private(write_cb);
                                write_cb = NULL;
                        }
                }
        }

        /* free entry used in read */
        if (cl->reading_state == MEI_READ_COMPLETE) {
                *offset = 0;
                write_cb = find_read_list_entry(dev, cl);
                if (write_cb) {
                        list_del(&write_cb->cb_list);
                        mei_free_cb_private(write_cb);
                        write_cb = NULL;
                        cl->reading_state = MEI_IDLE;
                        cl->read_cb = NULL;
                        cl->read_pending = 0;
                }
        } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
                *offset = 0;


        write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
        if (!write_cb) {
                mutex_unlock(&dev->device_lock);
                return -ENOMEM;
        }

        write_cb->file_object = file;
        write_cb->file_private = cl;
        write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
        rets = -ENOMEM;
        if (!write_cb->request_buffer.data)
                goto unlock_dev;

        dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);

        rets = -EFAULT;
        if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
                goto unlock_dev;

        cl->sm_state = 0;
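        /*
         * A 4-byte watchdog "independence" message marks the client so a
         * later read from the watchdog ME client is allowed (see mei_read).
         */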
        if (length == 4 &&
            ((memcmp(mei_wd_state_independence_msg[0],
                     write_cb->request_buffer.data, 4) == 0) ||
             (memcmp(mei_wd_state_independence_msg[1],
                     write_cb->request_buffer.data, 4) == 0) ||
             (memcmp(mei_wd_state_independence_msg[2],
                     write_cb->request_buffer.data, 4) == 0)))
                cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

        INIT_LIST_HEAD(&write_cb->cb_list);
        if (cl == &dev->iamthif_cl) {
                write_cb->response_buffer.data =
                        kmalloc(dev->iamthif_mtu, GFP_KERNEL);
                if (!write_cb->response_buffer.data) {
                        rets = -ENOMEM;
                        goto unlock_dev;
                }
                if (dev->mei_state != MEI_ENABLED) {
                        rets = -ENODEV;
                        goto unlock_dev;
                }
                for (i = 0; i < dev->me_clients_num; i++) {
                        if (dev->me_clients[i].client_id ==
                            dev->iamthif_cl.me_client_id)
                                break;
                }

                if (WARN_ON(dev->me_clients[i].client_id != cl->me_client_id)) {
                        rets = -ENODEV;
                        goto unlock_dev;
                }
                if (i == dev->me_clients_num ||
                    (dev->me_clients[i].client_id !=
                     dev->iamthif_cl.me_client_id)) {
                        rets = -ENODEV;
                        goto unlock_dev;
                } else if (length > dev->me_clients[i].props.max_msg_length ||
                           length <= 0) {
                        rets = -EMSGSIZE;
                        goto unlock_dev;
                }

                write_cb->response_buffer.size = dev->iamthif_mtu;
                write_cb->major_file_operations = MEI_IOCTL;
                write_cb->information = 0;
                write_cb->request_buffer.size = length;
                if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
                        rets = -ENODEV;
                        goto unlock_dev;
                }

                if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
                    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
                        dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
                                (int) dev->iamthif_state);
                        dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
                        list_add_tail(&write_cb->cb_list,
                                      &dev->amthi_cmd_list.mei_cb.cb_list);
                        rets = length;
                } else {
                        dev_dbg(&dev->pdev->dev, "call amthi write\n");
                        rets = amthi_write(dev, write_cb);

                        if (rets) {
                                dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
                                        rets);
                                goto unlock_dev;
                        }
                        rets = length;
                }
                mutex_unlock(&dev->device_lock);
                return rets;
        }

        write_cb->major_file_operations = MEI_WRITE;
        /* make sure information is zero before we start */

        write_cb->information = 0;
        write_cb->request_buffer.size = length;

        dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
                cl->host_client_id, cl->me_client_id);
        if (cl->state != MEI_FILE_CONNECTED) {
                rets = -ENODEV;
                dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
                        cl->host_client_id,
                        cl->me_client_id);
                goto unlock_dev;
        }
        for (i = 0; i < dev->me_clients_num; i++) {
                if (dev->me_clients[i].client_id ==
                    cl->me_client_id)
                        break;
        }
        if (WARN_ON(dev->me_clients[i].client_id != cl->me_client_id)) {
                rets = -ENODEV;
                goto unlock_dev;
        }
        if (i == dev->me_clients_num) {
                rets = -ENODEV;
                goto unlock_dev;
        }
        if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
                rets = -EINVAL;
                goto unlock_dev;
        }
        write_cb->file_private = cl;

        rets = mei_flow_ctrl_creds(dev, cl);
        if (rets < 0)
                goto unlock_dev;

        if (rets && dev->mei_host_buffer_is_empty) {
                rets = 0;
                dev->mei_host_buffer_is_empty = false;
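                /*
                 * H_CBD encodes the host circular buffer depth in 32-bit
                 * slots; a message larger than (depth * 4 - header) bytes is
                 * written now as a first chunk with msg_complete = 0, and the
                 * remainder stays queued on the write list.
                 */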
                if (length > ((((dev->host_hw_state & H_CBD) >> 24) *
                                sizeof(u32)) - sizeof(struct mei_msg_hdr))) {

                        mei_hdr.length =
                                (((dev->host_hw_state & H_CBD) >> 24) *
                                sizeof(u32)) -
                                sizeof(struct mei_msg_hdr);
                        mei_hdr.msg_complete = 0;
                } else {
                        mei_hdr.length = length;
                        mei_hdr.msg_complete = 1;
                }
                mei_hdr.host_addr = cl->host_client_id;
                mei_hdr.me_addr = cl->me_client_id;
                mei_hdr.reserved = 0;
                dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
                        *((u32 *) &mei_hdr));
                if (mei_write_message(dev, &mei_hdr,
                        (unsigned char *) (write_cb->request_buffer.data),
                        mei_hdr.length)) {
                        rets = -ENODEV;
                        goto unlock_dev;
                }
                cl->writing_state = MEI_WRITING;
                write_cb->information = mei_hdr.length;
                if (mei_hdr.msg_complete) {
                        if (mei_flow_ctrl_reduce(dev, cl)) {
                                rets = -ENODEV;
                                goto unlock_dev;
                        }
                        list_add_tail(&write_cb->cb_list,
                                      &dev->write_waiting_list.mei_cb.cb_list);
                } else {
                        list_add_tail(&write_cb->cb_list,
                                      &dev->write_list.mei_cb.cb_list);
                }

        } else {

                write_cb->information = 0;
                cl->writing_state = MEI_WRITING;
                list_add_tail(&write_cb->cb_list,
                              &dev->write_list.mei_cb.cb_list);
        }
        mutex_unlock(&dev->device_lock);
        return length;

unlock_dev:
        mutex_unlock(&dev->device_lock);
        mei_free_cb_private(write_cb);
        return rets;
}


/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
        struct mei_device *dev;
        struct mei_cl *cl = file->private_data;
        struct mei_connect_client_data *connect_data = NULL;
        int rets;

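        /* IOCTL_MEI_CONNECT_CLIENT is the only ioctl handled here */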
        if (cmd != IOCTL_MEI_CONNECT_CLIENT)
                return -EINVAL;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

        mutex_lock(&dev->device_lock);
        if (dev->mei_state != MEI_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

        connect_data = kzalloc(sizeof(struct mei_connect_client_data),
                               GFP_KERNEL);
        if (!connect_data) {
                rets = -ENOMEM;
                goto out;
        }
        dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
        if (copy_from_user(connect_data, (char __user *)data,
                           sizeof(struct mei_connect_client_data))) {
                dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
                rets = -EFAULT;
                goto out;
        }
        rets = mei_ioctl_connect_client(file, connect_data);

        /* if all is ok, copying the data back to user. */
        if (rets)
                goto out;

        dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
        if (copy_to_user((char __user *)data, connect_data,
                         sizeof(struct mei_connect_client_data))) {
                dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
                rets = -EFAULT;
                goto out;
        }

out:
        kfree(connect_data);
        mutex_unlock(&dev->device_lock);
        return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
                             unsigned int cmd, unsigned long data)
{
        return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
        struct mei_cl *cl = file->private_data;
        struct mei_device *dev;
        unsigned int mask = 0;

        if (WARN_ON(!cl || !cl->dev))
                return mask;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->mei_state != MEI_ENABLED)
                goto out;


        if (cl == &dev->iamthif_cl) {
                mutex_unlock(&dev->device_lock);
                poll_wait(file, &dev->iamthif_cl.wait, wait);
                mutex_lock(&dev->device_lock);
                if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
                    dev->iamthif_file_object == file) {
                        mask |= (POLLIN | POLLRDNORM);
                        dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
                        mei_run_next_iamthif_cmd(dev);
                }
                goto out;
        }

        mutex_unlock(&dev->device_lock);
        poll_wait(file, &cl->tx_wait, wait);
        mutex_lock(&dev->device_lock);
        if (MEI_WRITE_COMPLETE == cl->writing_state)
                mask |= (POLLIN | POLLRDNORM);

out:
        mutex_unlock(&dev->device_lock);
        return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
        .owner = THIS_MODULE,
        .read = mei_read,
        .unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = mei_compat_ioctl,
#endif
        .open = mei_open,
        .release = mei_release,
        .write = mei_write,
        .poll = mei_poll,
        .llseek = no_llseek
};


/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
        .name = "mei",
        .fops = &mei_fops,
        .minor = MISC_DYNAMIC_MINOR,
};

/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        struct mei_device *dev;
        int err;

        mutex_lock(&mei_mutex);
        if (mei_device) {
                err = -EEXIST;
                goto end;
        }
        /* enable pci dev */
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering */
        pci_set_master(pdev);
        /* pci request regions for mei driver */
        err = pci_request_regions(pdev, mei_driver_name);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto disable_device;
        }
        /* allocates and initializes the mei dev structure */
        dev = mei_device_init(pdev);
        if (!dev) {
                err = -ENOMEM;
                goto release_regions;
        }
        /* mapping IO device memory */
        dev->mem_addr = pci_iomap(pdev, 0, 0);
        if (!dev->mem_addr) {
                dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
                err = -ENOMEM;
                goto free_device;
        }
        pci_enable_msi(pdev);

        /* request and enable interrupt */
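        /* with MSI the line is exclusive, so no quick (hard-irq) handler is needed */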
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
                        0, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
                        mei_interrupt_thread_handler,
                        IRQF_SHARED, mei_driver_name, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto unmap_memory;
        }
        INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
        if (mei_hw_init(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        err = misc_register(&mei_misc_device);
        if (err)
                goto release_irq;

        mei_device = pdev;
        pci_set_drvdata(pdev, dev);


        schedule_delayed_work(&dev->timer_work, HZ);

        mutex_unlock(&mei_mutex);

        pr_debug("initialization successful.\n");

        return 0;

release_irq:
        /* disable interrupts */
        dev->host_hw_state = mei_hcsr_read(dev);
        mei_disable_interrupts(dev);
        flush_scheduled_work();
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);
unmap_memory:
        pci_iounmap(pdev, dev->mem_addr);
free_device:
        kfree(dev);
release_regions:
        pci_release_regions(pdev);
disable_device:
        pci_disable_device(pdev);
end:
        mutex_unlock(&mei_mutex);
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
        struct mei_device *dev;

        if (mei_device != pdev)
                return;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

        mutex_lock(&dev->device_lock);

        mei_wd_stop(dev, false);

        mei_device = NULL;

        if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
                dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
                mei_disconnect_host_client(dev, &dev->iamthif_cl);
        }
        if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
                dev->wd_cl.state = MEI_FILE_DISCONNECTING;
                mei_disconnect_host_client(dev, &dev->wd_cl);
        }

        /* Unregistering watchdog device */
        mei_watchdog_unregister(dev);

        /* remove entry if already in list */
        dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
        mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
        mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

        dev->iamthif_current_cb = NULL;
        dev->me_clients_num = 0;

        mutex_unlock(&dev->device_lock);

        flush_scheduled_work();

        /* disable interrupts */
        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);
        pci_set_drvdata(pdev, NULL);

        if (dev->mem_addr)
                pci_iounmap(pdev, dev->mem_addr);

        kfree(dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);
        int err;

        if (!dev)
                return -ENODEV;
        mutex_lock(&dev->device_lock);
        /* Stop watchdog if exists */
        err = mei_wd_stop(dev, true);
        /* Set new mei state */
        if (dev->mei_state == MEI_ENABLED ||
            dev->mei_state == MEI_RECOVERING_FROM_RESET) {
                dev->mei_state = MEI_POWER_DOWN;
                mei_reset(dev, 0);
        }
        mutex_unlock(&dev->device_lock);

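        /* the interrupt is released here and re-requested in mei_pci_resume() */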
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        return err;
}

static int mei_pci_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int err;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        pci_enable_msi(pdev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
                        0, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
                        mei_interrupt_thread_handler,
                        IRQF_SHARED, mei_driver_name, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                        pdev->irq);
                return err;
        }

        mutex_lock(&dev->device_lock);
        dev->mei_state = MEI_POWER_UP;
        mei_reset(dev, 1);
        mutex_unlock(&dev->device_lock);

        /* Start timer if stopped in suspend */
        schedule_delayed_work(&dev->timer_work, HZ);

        return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS (&mei_pm_ops)
#else
#define MEI_PM_OPS NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
        .name = mei_driver_name,
        .id_table = mei_pci_tbl,
        .probe = mei_probe,
        .remove = __devexit_p(mei_remove),
        .shutdown = __devexit_p(mei_remove),
        .driver.pm = MEI_PM_OPS,
};

/**
 * mei_init_module - Driver Registration Routine
 *
 * mei_init_module is the first routine called when the driver is
 * loaded. All it does is to register with the PCI subsystem.
 *
 * returns 0 on success, <0 on failure.
 */
static int __init mei_init_module(void)
{
        int ret;

        pr_debug("loading.\n");
        /* init pci module */
        ret = pci_register_driver(&mei_driver);
        if (ret < 0)
                pr_err("error registering driver.\n");

        return ret;
}

module_init(mei_init_module);

/**
 * mei_exit_module - Driver Exit Cleanup Routine
 *
 * mei_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit mei_exit_module(void)
{
        misc_deregister(&mei_misc_device);
        pci_unregister_driver(&mei_driver);

        pr_debug("unloaded successfully.\n");
}

module_exit(mei_exit_module);


MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");