/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

#ifdef CONFIG_PM_RUNTIME
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM_RUNTIME */

/**
 * mei_me_quirk_probe - probe for devices that do not have a valid ME interface
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * returns true if the ME interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	u32 reg;
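	/*
	 * The ME firmware reports its state through the Host Firmware
	 * Status (HFS) registers read from PCI config space below.
	 */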
	/* Cougar Point || Patsburg */
	if (ent->device == MEI_DEV_ID_CPT_1 ||
	    ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
		/* if bit 9 (NM) is set and bit 10 (DM) is clear, there is no MEI */
		if ((reg & 0x600) == 0x200)
			goto no_mei;
	}

	/* Lynx Point */
	if (ent->device == MEI_DEV_ID_LPT_H  ||
	    ent->device == MEI_DEV_ID_LPT_W  ||
	    ent->device == MEI_DEV_ID_LPT_HR) {
		/* read the ME FW Status register and check for SPS firmware */
		pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
		/* if bits [19:16] == 0xf, SPS firmware is running */
		if ((reg & 0xf0000) == 0xf0000)
			goto no_mei;
	}

	return true;

no_mei:
	dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
	return false;
}
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;


	if (!mei_me_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

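	/* try 64-bit DMA first; fall back to 32-bit if it is rejected */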
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}


	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
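	/*
	 * An MSI vector is exclusive to this device, so only the threaded
	 * handler is needed (IRQF_ONESHOT); a legacy line may be shared,
	 * so the quick handler first filters interrupts from other devices.
	 */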
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For hardware that is not wake capable the runtime PM framework
	 * cannot be used at the PCI device level.
	 * Use PM domain runtime callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

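	/*
	 * When power gating is supported, drop a runtime PM usage count
	 * reference (without invoking idle) so it does not block runtime
	 * suspend; mei_me_remove() takes the reference back.
	 */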
	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

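	/* balance the pm_runtime_put_noidle() done in mei_me_probe() */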
	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);


	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);


}
#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

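	/* the IRQ is released here and requested again in mei_me_pci_resume() */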
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
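	/*
	 * If no client writes are pending, schedule a delayed suspend.
	 * Returning -EBUSY keeps the runtime PM core from suspending the
	 * device synchronously from this idle callback.
	 */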
	if (mei_write_is_idle(dev))
		pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2);

	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_set_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_unset_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set the pm domain structure for the device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

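		/*
		 * Override only the runtime PM callbacks; the system
		 * sleep ops copied from the bus above are left as-is.
		 */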
		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		pdev->dev.pm_domain = &dev->pg_domain;
	}
}

/**
 * mei_me_unset_pm_domain - clear the pm domain structure for the device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev->pdev->dev.pm_domain = NULL;
}
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM
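/*
 * SET_SYSTEM_SLEEP_PM_OPS and SET_RUNTIME_PM_OPS expand to nothing unless
 * CONFIG_PM_SLEEP / CONFIG_PM_RUNTIME are enabled, so the table below is
 * valid with either option disabled.
 */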
static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_remove,
	.driver.pm = MEI_ME_PM_OPS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");