/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

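/*
 * pm domain helpers used in probe/remove below; without CONFIG_PM they
 * collapse to empty inline stubs.
 */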
#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
				const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

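	/* prefer 64-bit DMA, fall back to a 32-bit mask if that is rejected */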
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For non-wakeable HW the runtime pm framework
	 * can't be used on the pci device level.
	 * Use domain runtime pm callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

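	/*
	 * Power gating capable devices drop a runtime pm reference here so
	 * that runtime suspend becomes possible.
	 */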
	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

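	/* counterpart of probe: re-take the rpm reference on PG capable devices */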
	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

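/* runtime pm callbacks: move the ME device in and out of power gating */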
#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		pdev->dev.pm_domain = &dev->pg_domain;
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev->dev->pm_domain = NULL;
}

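/* system sleep and runtime pm entry points handed to the PCI core */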
static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_remove,
	.driver.pm = MEI_ME_PM_OPS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");