/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/clk.h>

#include <mach/msm_iomap.h>

#include "peripheral-loader.h"
#include "scm-pas.h"

#define PROXY_VOTE_TIMEOUT			10000

#define RIVA_PMU_A2XB_CFG			0xB8
#define RIVA_PMU_A2XB_CFG_EN			BIT(0)

#define RIVA_PMU_CFG				0x28
#define RIVA_PMU_CFG_WARM_BOOT			BIT(0)
#define RIVA_PMU_CFG_IRIS_XO_MODE		0x6
#define RIVA_PMU_CFG_IRIS_XO_MODE_48		(3 << 1)

#define RIVA_PMU_OVRD_EN			0x2C
#define RIVA_PMU_OVRD_EN_CCPU_RESET		BIT(0)
#define RIVA_PMU_OVRD_EN_CCPU_CLK		BIT(1)

#define RIVA_PMU_OVRD_VAL			0x30
#define RIVA_PMU_OVRD_VAL_CCPU_RESET		BIT(0)
#define RIVA_PMU_OVRD_VAL_CCPU_CLK		BIT(1)

#define RIVA_PMU_CCPU_CTL			0x9C
#define RIVA_PMU_CCPU_CTL_HIGH_IVT		BIT(0)
#define RIVA_PMU_CCPU_CTL_REMAP_EN		BIT(2)

#define RIVA_PMU_CCPU_BOOT_REMAP_ADDR		0xA0

#define RIVA_PLL_MODE				(MSM_CLK_CTL_BASE + 0x31A0)
#define PLL_MODE_OUTCTRL			BIT(0)
#define PLL_MODE_BYPASSNL			BIT(1)
#define PLL_MODE_RESET_N			BIT(2)
#define PLL_MODE_REF_XO_SEL			0x30
#define PLL_MODE_REF_XO_SEL_CXO			(2 << 4)
#define PLL_MODE_REF_XO_SEL_RF			(3 << 4)
#define RIVA_PLL_L_VAL				(MSM_CLK_CTL_BASE + 0x31A4)
#define RIVA_PLL_M_VAL				(MSM_CLK_CTL_BASE + 0x31A8)
#define RIVA_PLL_N_VAL				(MSM_CLK_CTL_BASE + 0x31AC)
#define RIVA_PLL_CONFIG				(MSM_CLK_CTL_BASE + 0x31B4)
#define RIVA_PLL_STATUS				(MSM_CLK_CTL_BASE + 0x31B8)
#define RIVA_RESET				(MSM_CLK_CTL_BASE + 0x35E0)

#define RIVA_PMU_ROOT_CLK_SEL			0xC8
#define RIVA_PMU_ROOT_CLK_SEL_3			BIT(2)

#define RIVA_PMU_CLK_ROOT3			0x78
#define RIVA_PMU_CLK_ROOT3_ENA			BIT(0)
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV		0x3C
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV_2		(1 << 2)
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL		0x1C0
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA	(1 << 6)
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV		0x1E00
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV_2		(1 << 9)
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL		0xE000
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA	(1 << 13)

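/*
 * Driver state for one RIVA (WCNSS) instance: PMU register base, firmware
 * entry address taken from the ELF header, CXO clock handle and whether it
 * must be held while the firmware boots, the delayed work that drops the
 * proxy votes, the PLL supply regulator and the registered PIL handle.
 */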
struct riva_data {
	void __iomem *base;
	unsigned long start_addr;
	struct clk *xo;
	bool use_cxo;
	struct delayed_work work;
	struct regulator *pll_supply;
	struct pil_device *pil;
};

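/*
 * Vote for the resources RIVA needs while it boots: the PLL supply and,
 * when the IRIS XO is not in 48 MHz mode, the CXO clock. The votes are
 * dropped by delayed work after PROXY_VOTE_TIMEOUT ms unless they are
 * flushed earlier by pil_riva_remove_proxy_votes_now().
 */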
static int pil_riva_make_proxy_votes(struct device *dev)
{
	struct riva_data *drv = dev_get_drvdata(dev);
	int ret;

	ret = regulator_enable(drv->pll_supply);
	if (ret) {
		dev_err(dev, "failed to enable pll supply\n");
		goto err;
	}
	if (drv->use_cxo) {
		ret = clk_prepare_enable(drv->xo);
		if (ret) {
			dev_err(dev, "failed to enable xo\n");
			goto err_clk;
		}
	}
	schedule_delayed_work(&drv->work, msecs_to_jiffies(PROXY_VOTE_TIMEOUT));
	return 0;
err_clk:
	regulator_disable(drv->pll_supply);
err:
	return ret;
}

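/* Delayed-work handler that drops the proxy votes taken at boot */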
static void pil_riva_remove_proxy_votes(struct work_struct *work)
{
	struct riva_data *drv = container_of(work, struct riva_data, work.work);
	regulator_disable(drv->pll_supply);
	if (drv->use_cxo)
		clk_disable_unprepare(drv->xo);
}

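/* Drop the proxy votes immediately by forcing the pending work to run */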
static void pil_riva_remove_proxy_votes_now(struct device *dev)
{
	struct riva_data *drv = dev_get_drvdata(dev);
	flush_delayed_work(&drv->work);
}

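/* No per-blob verification is needed, so always accept the segment */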
static int nop_verify_blob(struct pil_desc *pil, u32 phy_addr, size_t size)
{
	return 0;
}

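/* Record the firmware entry point from the ELF header for use at reset */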
static int pil_riva_init_image(struct pil_desc *pil, const u8 *metadata,
		size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	drv->start_addr = ehdr->e_entry;
	return 0;
}

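/* CXO must be voted on whenever the IRIS XO is not running in 48 MHz mode */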
static bool cxo_is_needed(struct riva_data *drv)
{
	u32 reg = readl_relaxed(drv->base + RIVA_PMU_CFG);
	return (reg & RIVA_PMU_CFG_IRIS_XO_MODE)
		!= RIVA_PMU_CFG_IRIS_XO_MODE_48;
}

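/*
 * Non-secure boot: enable the A2XB bridge, take the proxy votes, program
 * PLL 13, clock the cCPU at 240 MHz, point the boot remapper at the
 * firmware entry address and release the cCPU from reset.
 */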
static int pil_riva_reset(struct pil_desc *pil)
{
	u32 reg, sel;
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	void __iomem *base = drv->base;
	unsigned long start_addr = drv->start_addr;
	int ret;

	ret = clk_prepare_enable(drv->xo);
	if (ret)
		return ret;
	/* Enable A2XB bridge */
	reg = readl_relaxed(base + RIVA_PMU_A2XB_CFG);
	reg |= RIVA_PMU_A2XB_CFG_EN;
	writel_relaxed(reg, base + RIVA_PMU_A2XB_CFG);

	drv->use_cxo = cxo_is_needed(drv);
	ret = pil_riva_make_proxy_votes(pil->dev);
	if (ret) {
		reg &= ~RIVA_PMU_A2XB_CFG_EN;
		writel_relaxed(reg, base + RIVA_PMU_A2XB_CFG);
		mb();
		clk_disable_unprepare(drv->xo);
		return ret;
	}

	/* Program PLL 13 to 960 MHz */
	reg = readl_relaxed(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
	writel_relaxed(reg, RIVA_PLL_MODE);

	if (drv->use_cxo)
		writel_relaxed(0x40000C00 | 50, RIVA_PLL_L_VAL);
	else
		writel_relaxed(0x40000C00 | 40, RIVA_PLL_L_VAL);
	writel_relaxed(0, RIVA_PLL_M_VAL);
	writel_relaxed(1, RIVA_PLL_N_VAL);
	writel_relaxed(0x01495227, RIVA_PLL_CONFIG);

	reg = readl_relaxed(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_REF_XO_SEL);
	reg |= drv->use_cxo ? PLL_MODE_REF_XO_SEL_CXO : PLL_MODE_REF_XO_SEL_RF;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/* Enable PLL 13 */
	reg |= PLL_MODE_BYPASSNL;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	usleep_range(10, 20);

	reg |= PLL_MODE_RESET_N;
	writel_relaxed(reg, RIVA_PLL_MODE);
	reg |= PLL_MODE_OUTCTRL;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/* Wait for PLL to settle */
	mb();
	usleep_range(50, 100);

	/* Configure cCPU for 240 MHz */
	sel = readl_relaxed(base + RIVA_PMU_ROOT_CLK_SEL);
	reg = readl_relaxed(base + RIVA_PMU_CLK_ROOT3);
	if (sel & RIVA_PMU_ROOT_CLK_SEL_3) {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC0_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC0_DIV_2;
	} else {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC1_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC1_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
	}
	writel_relaxed(reg, base + RIVA_PMU_CLK_ROOT3);
	reg |= RIVA_PMU_CLK_ROOT3_ENA;
	writel_relaxed(reg, base + RIVA_PMU_CLK_ROOT3);
	reg = readl_relaxed(base + RIVA_PMU_ROOT_CLK_SEL);
	reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
	writel_relaxed(reg, base + RIVA_PMU_ROOT_CLK_SEL);

	/* Use the high vector table */
	reg = readl_relaxed(base + RIVA_PMU_CCPU_CTL);
	reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
	writel_relaxed(reg, base + RIVA_PMU_CCPU_CTL);

	/* Set base memory address */
	writel_relaxed(start_addr >> 16, base + RIVA_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Clear warmboot bit indicating this is a cold boot */
	reg = readl_relaxed(base + RIVA_PMU_CFG);
	reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
	writel_relaxed(reg, base + RIVA_PMU_CFG);

	/* Enable the cCPU clock */
	reg = readl_relaxed(base + RIVA_PMU_OVRD_VAL);
	reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
	writel_relaxed(reg, base + RIVA_PMU_OVRD_VAL);

	/* Take cCPU out of reset */
	reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
	writel_relaxed(reg, base + RIVA_PMU_OVRD_VAL);
	clk_disable_unprepare(drv->xo);

	return 0;
}

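/*
 * Shut down non-secure boot: put the cCPU and its clock back into reset,
 * pulse the RIVA block reset and drop any outstanding proxy votes.
 */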
static int pil_riva_shutdown(struct pil_desc *pil)
{
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(drv->xo);
	if (ret)
		return ret;
	/* Put cCPU and cCPU clock into reset */
	reg = readl_relaxed(drv->base + RIVA_PMU_OVRD_VAL);
	reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
	writel_relaxed(reg, drv->base + RIVA_PMU_OVRD_VAL);
	reg = readl_relaxed(drv->base + RIVA_PMU_OVRD_EN);
	reg |= RIVA_PMU_OVRD_EN_CCPU_RESET | RIVA_PMU_OVRD_EN_CCPU_CLK;
	writel_relaxed(reg, drv->base + RIVA_PMU_OVRD_EN);
	mb();

	/* Assert reset to Riva */
	writel_relaxed(1, RIVA_RESET);
	mb();
	usleep_range(1000, 2000);

	/* Deassert reset to Riva */
	writel_relaxed(0, RIVA_RESET);
	mb();

	clk_disable_unprepare(drv->xo);
	pil_riva_remove_proxy_votes_now(pil->dev);

	return 0;
}

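/* Boot/shutdown handlers used when TrustZone PAS support is not available */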
static struct pil_reset_ops pil_riva_ops = {
	.init_image = pil_riva_init_image,
	.verify_blob = nop_verify_blob,
	.auth_and_reset = pil_riva_reset,
	.shutdown = pil_riva_shutdown,
};

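/*
 * Trusted (secure) boot: image setup, authentication, reset and shutdown
 * are delegated to TrustZone through the PAS SCM interface.
 */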
static int pil_riva_init_image_trusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	return pas_init_image(PAS_RIVA, metadata, size);
}

static int pil_riva_reset_trusted(struct pil_desc *pil)
{
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	int ret;

	ret = clk_prepare_enable(drv->xo);
	if (ret)
		return ret;
	/* Proxy-vote for resources RIVA needs */
	ret = pil_riva_make_proxy_votes(pil->dev);
	if (!ret)
		ret = pas_auth_and_reset(PAS_RIVA);
	clk_disable_unprepare(drv->xo);
	return ret;
}

static int pil_riva_shutdown_trusted(struct pil_desc *pil)
{
	int ret;
	struct riva_data *drv = dev_get_drvdata(pil->dev);

	ret = clk_prepare_enable(drv->xo);
	if (ret)
		return ret;
	ret = pas_shutdown(PAS_RIVA);
	pil_riva_remove_proxy_votes_now(pil->dev);
	clk_disable_unprepare(drv->xo);

	return ret;
}

static struct pil_reset_ops pil_riva_ops_trusted = {
	.init_image = pil_riva_init_image_trusted,
	.verify_blob = nop_verify_blob,
	.auth_and_reset = pil_riva_reset_trusted,
	.shutdown = pil_riva_shutdown_trusted,
};

static int __devinit pil_riva_probe(struct platform_device *pdev)
{
	struct riva_data *drv;
	struct resource *res;
	struct pil_desc *desc;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	platform_set_drvdata(pdev, drv);

	drv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!drv->base)
		return -ENOMEM;

	desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drv->pll_supply = regulator_get(&pdev->dev, "pll_vdd");
	if (IS_ERR(drv->pll_supply)) {
		dev_err(&pdev->dev, "failed to get pll supply\n");
		return PTR_ERR(drv->pll_supply);
	}
	if (regulator_count_voltages(drv->pll_supply) > 0) {
		ret = regulator_set_voltage(drv->pll_supply, 1800000, 1800000);
		if (ret) {
			dev_err(&pdev->dev,
				"failed to set pll supply voltage\n");
			goto err;
		}

		ret = regulator_set_optimum_mode(drv->pll_supply, 100000);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"failed to set pll supply optimum mode\n");
			goto err;
		}
	}

	desc->name = "wcnss";
	desc->dev = &pdev->dev;
	desc->owner = THIS_MODULE;

	if (pas_supported(PAS_RIVA) > 0) {
		desc->ops = &pil_riva_ops_trusted;
		dev_info(&pdev->dev, "using secure boot\n");
	} else {
		desc->ops = &pil_riva_ops;
		dev_info(&pdev->dev, "using non-secure boot\n");
	}

	drv->xo = clk_get(&pdev->dev, "cxo");
	if (IS_ERR(drv->xo)) {
		ret = PTR_ERR(drv->xo);
		goto err;
	}
	INIT_DELAYED_WORK(&drv->work, pil_riva_remove_proxy_votes);

	drv->pil = msm_pil_register(desc);
	if (IS_ERR(drv->pil)) {
		ret = PTR_ERR(drv->pil);
		goto err_register;
	}
	return 0;
err_register:
	flush_delayed_work_sync(&drv->work);
	clk_put(drv->xo);
err:
	regulator_put(drv->pll_supply);
	return ret;
}

static int __devexit pil_riva_remove(struct platform_device *pdev)
{
	struct riva_data *drv = platform_get_drvdata(pdev);
	msm_pil_unregister(drv->pil);
	flush_delayed_work_sync(&drv->work);
	clk_put(drv->xo);
	regulator_put(drv->pll_supply);
	return 0;
}

static struct platform_driver pil_riva_driver = {
	.probe = pil_riva_probe,
	.remove = __devexit_p(pil_riva_remove),
	.driver = {
		.name = "pil_riva",
		.owner = THIS_MODULE,
	},
};

static int __init pil_riva_init(void)
{
	return platform_driver_register(&pil_riva_driver);
}
module_init(pil_riva_init);

static void __exit pil_riva_exit(void)
{
	platform_driver_unregister(&pil_riva_driver);
}
module_exit(pil_riva_exit);

MODULE_DESCRIPTION("Support for booting RIVA (WCNSS) processors");
MODULE_LICENSE("GPL v2");