Dhaval Patel020f7e122016-11-15 14:39:18 -08001/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "[sde_rsc:%s:%d]: " fmt, __func__, __LINE__
15
16#include <linux/kernel.h>
17#include <linux/debugfs.h>
18#include <linux/of.h>
19#include <linux/string.h>
20#include <linux/of_address.h>
21#include <linux/component.h>
22#include <linux/slab.h>
23#include <linux/mutex.h>
24#include <linux/of_platform.h>
25#include <linux/module.h>
26
27#include <soc/qcom/rpmh.h>
28#include <drm/drmP.h>
29#include <drm/drm_irq.h>
Dhaval Patel49ef6d72017-03-26 09:35:53 -070030#include "sde_rsc_priv.h"
Dhaval Patel824e9682017-05-01 23:31:22 -070031#include "sde_dbg.h"
Dhaval Patel020f7e122016-11-15 14:39:18 -080032
Dhaval Patela2430842017-06-15 14:32:36 -070033#define SDE_RSC_DRV_DBG_NAME "sde_rsc_drv"
34#define SDE_RSC_WRAPPER_DBG_NAME "sde_rsc_wrapper"
35
/* worst case time to execute one tcs vote (sleep/wake) - ~1ms */
37#define SINGLE_TCS_EXECUTION_TIME 1064000
Dhaval Patel020f7e122016-11-15 14:39:18 -080038
Dhaval Patel824e9682017-05-01 23:31:22 -070039/* this time is ~1ms - only wake tcs in any mode */
Dhaval Patel9e3bfe42017-05-25 22:32:55 -070040#define RSC_BACKOFF_TIME_NS (SINGLE_TCS_EXECUTION_TIME + 100)
Dhaval Patel020f7e122016-11-15 14:39:18 -080041
Dhaval Patel824e9682017-05-01 23:31:22 -070042/* this time is ~1ms - only wake TCS in mode-0 */
Dhaval Patelc063d1f2017-06-08 13:19:26 -070043#define RSC_MODE_THRESHOLD_TIME_IN_NS (SINGLE_TCS_EXECUTION_TIME + 100)
Dhaval Patel824e9682017-05-01 23:31:22 -070044
/* this time is ~2ms - sleep + wake TCS in mode-1 */
Dhaval Patel9e3bfe42017-05-25 22:32:55 -070046#define RSC_TIME_SLOT_0_NS ((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
Dhaval Patel020f7e122016-11-15 14:39:18 -080047
48#define DEFAULT_PANEL_FPS 60
Dhaval Patelf5cc5a32017-07-10 17:33:23 -070049#define DEFAULT_PANEL_JITTER_NUMERATOR 2
50#define DEFAULT_PANEL_JITTER_DENOMINATOR 1
51#define DEFAULT_PANEL_PREFILL_LINES 25
Dhaval Patel020f7e122016-11-15 14:39:18 -080052#define DEFAULT_PANEL_VTOTAL (480 + DEFAULT_PANEL_PREFILL_LINES)
53#define TICKS_IN_NANO_SECOND 1000000000
54
55#define MAX_BUFFER_SIZE 256
56
57#define TRY_CMD_MODE_SWITCH 0xFFFF
Dhaval Patela65b0f12017-03-16 00:36:55 -070058#define TRY_CLK_MODE_SWITCH 0xFFFE
59#define STATE_UPDATE_NOT_ALLOWED 0xFFFD
Dhaval Patel020f7e122016-11-15 14:39:18 -080060
/* Worst case primary panel VSYNC period in ms, assuming no less than 30fps */
62#define PRIMARY_VBLANK_WORST_CASE_MS 34
Lloyd Atkinson16147ea2017-07-17 10:16:30 -040063
Dhaval Patel020f7e122016-11-15 14:39:18 -080064static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
65
/**
 * sde_rsc_client_create() - create the client for sde rsc.
 * Different displays like DSI, HDMI, DP, WB, etc. should call this
 * API to register their vote for rpmh. They still need to vote for
 * the power handle to get the clocks.
 *
 * @rsc_index:	A client will be created on this RSC. As of now only
 *		SDE_RSC_INDEX is a valid rsc index.
 * @client_name: Caller needs to provide a valid string to identify
 *		the client. "primary", "dp" and "hdmi" are suggested names.
 * @is_primary_client: Caller needs to indicate whether the client is the
 *		primary client. Primary client votes will be redirected to
 *		the display rsc.
 *
 * Return: client node pointer.
 */
82struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *client_name,
83 bool is_primary_client)
84{
85 struct sde_rsc_client *client;
86 struct sde_rsc_priv *rsc;
Dhaval Patel824e9682017-05-01 23:31:22 -070087 static int id;
Dhaval Patel020f7e122016-11-15 14:39:18 -080088
89 if (!client_name) {
90 pr_err("client name is null- not supported\n");
91 return ERR_PTR(-EINVAL);
92 } else if (rsc_index >= MAX_RSC_COUNT) {
93 pr_err("invalid rsc index\n");
94 return ERR_PTR(-EINVAL);
95 } else if (!rsc_prv_list[rsc_index]) {
96 pr_err("rsc not probed yet or not available\n");
Dhaval Patel824e9682017-05-01 23:31:22 -070097 return NULL;
Dhaval Patel020f7e122016-11-15 14:39:18 -080098 }
99
100 rsc = rsc_prv_list[rsc_index];
101 client = kzalloc(sizeof(struct sde_rsc_client), GFP_KERNEL);
102 if (!client)
103 return ERR_PTR(-ENOMEM);
104
105 mutex_lock(&rsc->client_lock);
106 strlcpy(client->name, client_name, MAX_RSC_CLIENT_NAME_LEN);
107 client->current_state = SDE_RSC_IDLE_STATE;
108 client->rsc_index = rsc_index;
Dhaval Patel824e9682017-05-01 23:31:22 -0700109 client->id = id;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800110 if (is_primary_client)
111 rsc->primary_client = client;
Dhaval Patel82c8dbc2017-02-18 23:15:10 -0800112 pr_debug("client %s rsc index:%d primary:%d\n", client_name,
113 rsc_index, is_primary_client);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800114
115 list_add(&client->list, &rsc->client_list);
Dhaval Patel824e9682017-05-01 23:31:22 -0700116 id++;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800117 mutex_unlock(&rsc->client_lock);
118
119 return client;
120}
Dhaval Patel49ef6d72017-03-26 09:35:53 -0700121EXPORT_SYMBOL(sde_rsc_client_create);
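/*
 * Example usage (illustrative sketch only, not part of this driver): a
 * display interface driver such as DSI would typically create its RSC
 * client during probe. The variable names below are hypothetical.
 *
 *	struct sde_rsc_client *rsc_client;
 *
 *	rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, "primary", true);
 *	if (IS_ERR_OR_NULL(rsc_client))
 *		rsc_client = NULL;
 */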
Dhaval Patel020f7e122016-11-15 14:39:18 -0800122
123/**
124 * sde_rsc_client_destroy() - Destroy the sde rsc client.
125 *
126 * @client: Client pointer provided by sde_rsc_client_create().
127 *
128 * Return: none
129 */
130void sde_rsc_client_destroy(struct sde_rsc_client *client)
131{
132 struct sde_rsc_priv *rsc;
Dhaval Patelbd8bbfe2017-05-22 10:55:37 -0700133 enum sde_rsc_state state;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800134
135 if (!client) {
136 pr_debug("invalid client\n");
137 goto end;
138 } else if (client->rsc_index >= MAX_RSC_COUNT) {
139 pr_err("invalid rsc index\n");
140 goto end;
141 }
142
143 pr_debug("client %s destroyed\n", client->name);
144 rsc = rsc_prv_list[client->rsc_index];
145 if (!rsc)
146 goto end;
147
148 mutex_lock(&rsc->client_lock);
Dhaval Patelbd8bbfe2017-05-22 10:55:37 -0700149 state = client->current_state;
150 mutex_unlock(&rsc->client_lock);
151
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400152 if (state != SDE_RSC_IDLE_STATE) {
153 int wait_vblank_crtc_id;
154
155 sde_rsc_client_state_update(client, SDE_RSC_IDLE_STATE, NULL,
156 SDE_RSC_INVALID_CRTC_ID, &wait_vblank_crtc_id);
157
158 /* if vblank wait required at shutdown, use a simple sleep */
159 if (wait_vblank_crtc_id != SDE_RSC_INVALID_CRTC_ID) {
160 pr_err("unexpected sleep required on crtc %d at rsc client destroy\n",
161 wait_vblank_crtc_id);
162 SDE_EVT32(client->id, state, rsc->current_state,
163 client->crtc_id, wait_vblank_crtc_id,
164 SDE_EVTLOG_ERROR);
165 msleep(PRIMARY_VBLANK_WORST_CASE_MS);
166 }
167 }
Dhaval Patelbd8bbfe2017-05-22 10:55:37 -0700168 mutex_lock(&rsc->client_lock);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800169 list_del_init(&client->list);
170 mutex_unlock(&rsc->client_lock);
171
172 kfree(client);
173end:
174 return;
175}
Dhaval Patel49ef6d72017-03-26 09:35:53 -0700176EXPORT_SYMBOL(sde_rsc_client_destroy);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800177
Dhaval Patel031b4152017-03-16 13:03:18 -0700178struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
179 void (*cb_func)(uint32_t event_type, void *usr), void *usr)
180{
181 struct sde_rsc_event *evt;
182 struct sde_rsc_priv *rsc;
183
184 if (rsc_index >= MAX_RSC_COUNT) {
185 pr_err("invalid rsc index:%d\n", rsc_index);
186 return ERR_PTR(-EINVAL);
187 } else if (!rsc_prv_list[rsc_index]) {
188 pr_err("rsc idx:%d not probed yet or not available\n",
189 rsc_index);
190 return ERR_PTR(-EINVAL);
191 } else if (!cb_func || !event_type) {
192 pr_err("no event or cb func\n");
193 return ERR_PTR(-EINVAL);
194 }
195
196 rsc = rsc_prv_list[rsc_index];
197 evt = kzalloc(sizeof(struct sde_rsc_event), GFP_KERNEL);
198 if (!evt)
199 return ERR_PTR(-ENOMEM);
200
201 evt->event_type = event_type;
202 evt->rsc_index = rsc_index;
203 evt->usr = usr;
204 evt->cb_func = cb_func;
205 pr_debug("event register type:%d rsc index:%d\n",
206 event_type, rsc_index);
207
208 mutex_lock(&rsc->client_lock);
209 list_add(&evt->list, &rsc->event_list);
210 mutex_unlock(&rsc->client_lock);
211
212 return evt;
213}
Dhaval Patel49ef6d72017-03-26 09:35:53 -0700214EXPORT_SYMBOL(sde_rsc_register_event);
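/*
 * Example usage (illustrative sketch only): registering a notification
 * callback. The callback, event_type and priv_data below are hypothetical
 * placeholders; valid event types are defined by the sde rsc event
 * interface.
 *
 *	static void disp_rsc_event_cb(uint32_t event_type, void *usr)
 *	{
 *		pr_debug("rsc event %d received\n", event_type);
 *	}
 *
 *	evt = sde_rsc_register_event(SDE_RSC_INDEX, event_type,
 *				disp_rsc_event_cb, priv_data);
 *	if (IS_ERR_OR_NULL(evt))
 *		evt = NULL;
 */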
Dhaval Patel031b4152017-03-16 13:03:18 -0700215
216void sde_rsc_unregister_event(struct sde_rsc_event *event)
217{
218 struct sde_rsc_priv *rsc;
219
220 if (!event) {
221 pr_debug("invalid event client\n");
222 goto end;
223 } else if (event->rsc_index >= MAX_RSC_COUNT) {
224 pr_err("invalid rsc index\n");
225 goto end;
226 }
227
228 pr_debug("event client destroyed\n");
229 rsc = rsc_prv_list[event->rsc_index];
230 if (!rsc)
231 goto end;
232
233 mutex_lock(&rsc->client_lock);
234 list_del_init(&event->list);
235 mutex_unlock(&rsc->client_lock);
236
237 kfree(event);
238end:
239 return;
240}
Dhaval Patel49ef6d72017-03-26 09:35:53 -0700241EXPORT_SYMBOL(sde_rsc_unregister_event);
Dhaval Patel031b4152017-03-16 13:03:18 -0700242
Dhaval Pateldd2032a2017-04-11 10:50:36 -0700243bool is_sde_rsc_available(int rsc_index)
244{
245 if (rsc_index >= MAX_RSC_COUNT) {
246 pr_err("invalid rsc index:%d\n", rsc_index);
247 return false;
248 } else if (!rsc_prv_list[rsc_index]) {
249 pr_err("rsc idx:%d not probed yet or not available\n",
250 rsc_index);
251 return false;
252 }
253
254 return true;
255}
256EXPORT_SYMBOL(is_sde_rsc_available);
257
258enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
259{
260 struct sde_rsc_priv *rsc;
261
262 if (rsc_index >= MAX_RSC_COUNT) {
263 pr_err("invalid rsc index:%d\n", rsc_index);
264 return SDE_RSC_IDLE_STATE;
265 } else if (!rsc_prv_list[rsc_index]) {
266 pr_err("rsc idx:%d not probed yet or not available\n",
267 rsc_index);
268 return SDE_RSC_IDLE_STATE;
269 }
270
271 rsc = rsc_prv_list[rsc_index];
272 return rsc->current_state;
273}
274EXPORT_SYMBOL(get_sde_rsc_current_state);
275
Dhaval Patelfcd9e912017-03-16 00:54:09 -0700276static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
277 struct sde_power_client *pclient, bool enable)
278{
279 int rc = 0;
280 struct dss_module_power *mp;
281
282 if (!phandle || !pclient) {
283 pr_err("invalid input argument\n");
284 return -EINVAL;
285 }
286
287 mp = &phandle->mp;
288
289 if (enable)
290 pclient->refcount++;
291 else if (pclient->refcount)
292 pclient->refcount--;
293
294 if (pclient->refcount)
295 pclient->usecase_ndx = VOTE_INDEX_LOW;
296 else
297 pclient->usecase_ndx = VOTE_INDEX_DISABLE;
298
299 if (phandle->current_usecase_ndx == pclient->usecase_ndx)
300 goto end;
301
302 if (enable) {
303 rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
304 if (rc) {
305 pr_err("clock enable failed rc:%d\n", rc);
306 goto end;
307 }
308 } else {
309 msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
310 }
311
312 phandle->current_usecase_ndx = pclient->usecase_ndx;
313
314end:
315 return rc;
316}
317
Dhaval Patel020f7e122016-11-15 14:39:18 -0800318static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
Dhaval Patel82c8dbc2017-02-18 23:15:10 -0800319 struct sde_rsc_cmd_config *cmd_config)
Dhaval Patel020f7e122016-11-15 14:39:18 -0800320{
321 const u32 cxo_period_ns = 52;
322 u64 rsc_backoff_time_ns = RSC_BACKOFF_TIME_NS;
323 u64 rsc_mode_threshold_time_ns = RSC_MODE_THRESHOLD_TIME_IN_NS;
324 u64 rsc_time_slot_0_ns = RSC_TIME_SLOT_0_NS;
325 u64 rsc_time_slot_1_ns;
326 const u64 pdc_jitter = 20; /* 20% more */
327
328 u64 frame_time_ns, frame_jitter;
329 u64 line_time_ns, prefill_time_ns;
330 u64 pdc_backoff_time_ns;
331 s64 total;
Dhaval Patel82c8dbc2017-02-18 23:15:10 -0800332 int ret = 0;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800333
334 if (cmd_config)
335 memcpy(&rsc->cmd_config, cmd_config, sizeof(*cmd_config));
336
337 /* calculate for 640x480 60 fps resolution by default */
338 if (!rsc->cmd_config.fps)
339 rsc->cmd_config.fps = DEFAULT_PANEL_FPS;
Dhaval Patelf5cc5a32017-07-10 17:33:23 -0700340 if (!rsc->cmd_config.jitter_numer)
341 rsc->cmd_config.jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
342 if (!rsc->cmd_config.jitter_denom)
343 rsc->cmd_config.jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800344 if (!rsc->cmd_config.vtotal)
345 rsc->cmd_config.vtotal = DEFAULT_PANEL_VTOTAL;
346 if (!rsc->cmd_config.prefill_lines)
347 rsc->cmd_config.prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
Dhaval Patelf5cc5a32017-07-10 17:33:23 -0700348 pr_debug("frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
349 rsc->cmd_config.fps, rsc->cmd_config.jitter_numer,
350 rsc->cmd_config.jitter_denom, rsc->cmd_config.vtotal,
351 rsc->cmd_config.prefill_lines);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800352
	/* frame time in ns = one second (in ns) divided by the panel fps */
354 frame_time_ns = TICKS_IN_NANO_SECOND;
355 frame_time_ns = div_u64(frame_time_ns, rsc->cmd_config.fps);
356
Dhaval Patelf5cc5a32017-07-10 17:33:23 -0700357 frame_jitter = frame_time_ns * rsc->cmd_config.jitter_numer;
358 frame_jitter = div_u64(frame_jitter, rsc->cmd_config.jitter_denom);
	/* the jitter ratio is specified as a percentage, so divide by 100 */
360 frame_jitter = div_u64(frame_jitter, 100);
361
362 line_time_ns = frame_time_ns;
363 line_time_ns = div_u64(line_time_ns, rsc->cmd_config.vtotal);
364 prefill_time_ns = line_time_ns * rsc->cmd_config.prefill_lines;
365
366 total = frame_time_ns - frame_jitter - prefill_time_ns;
367 if (total < 0) {
		pr_err("invalid total time, frame time:%llu jitter_time:%llu prefill time:%llu\n",
			frame_time_ns, frame_jitter, prefill_time_ns);
370 total = 0;
371 }
372
373 total = div_u64(total, cxo_period_ns);
374 rsc->timer_config.static_wakeup_time_ns = total;
375
	pr_debug("frame time:%llu frame jitter_time:%llu\n",
			frame_time_ns, frame_jitter);
	pr_debug("line time:%llu prefill time ns:%llu\n",
			line_time_ns, prefill_time_ns);
380 pr_debug("static wakeup time:%lld cxo:%u\n", total, cxo_period_ns);
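	/*
	 * Worked example (illustrative only, using the 640x480@60 defaults
	 * above, vtotal = 505): frame time ~16.67ms, 2% jitter ~0.33ms,
	 * prefill of 25 lines ~0.83ms, leaving a static wakeup time of
	 * ~15.51ms, i.e. roughly 298,000 ticks of the 52ns CXO period.
	 */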
381
382 pdc_backoff_time_ns = rsc_backoff_time_ns;
383 rsc_backoff_time_ns = div_u64(rsc_backoff_time_ns, cxo_period_ns);
384 rsc->timer_config.rsc_backoff_time_ns = (u32) rsc_backoff_time_ns;
385
386 pdc_backoff_time_ns *= pdc_jitter;
387 pdc_backoff_time_ns = div_u64(pdc_backoff_time_ns, 100);
388 rsc->timer_config.pdc_backoff_time_ns = (u32) pdc_backoff_time_ns;
389
390 rsc_mode_threshold_time_ns =
391 div_u64(rsc_mode_threshold_time_ns, cxo_period_ns);
392 rsc->timer_config.rsc_mode_threshold_time_ns
393 = (u32) rsc_mode_threshold_time_ns;
394
395 /* time_slot_0 for mode0 latency */
396 rsc_time_slot_0_ns = div_u64(rsc_time_slot_0_ns, cxo_period_ns);
397 rsc->timer_config.rsc_time_slot_0_ns = (u32) rsc_time_slot_0_ns;
398
399 /* time_slot_1 for mode1 latency */
400 rsc_time_slot_1_ns = frame_time_ns;
401 rsc_time_slot_1_ns = div_u64(rsc_time_slot_1_ns, cxo_period_ns);
402 rsc->timer_config.rsc_time_slot_1_ns = (u32) rsc_time_slot_1_ns;
403
404 /* mode 2 is infinite */
405 rsc->timer_config.rsc_time_slot_2_ns = 0xFFFFFFFF;
Dhaval Patel82c8dbc2017-02-18 23:15:10 -0800406
Dhaval Patelf9c5c602017-08-01 12:32:04 -0700407 /* timer update should be called with client call */
408 if (cmd_config && rsc->hw_ops.timer_update) {
409 ret = rsc->hw_ops.timer_update(rsc);
410 if (ret)
411 pr_err("sde rsc: hw timer update failed ret:%d\n", ret);
412 /* rsc init should be called during rsc probe - one time only */
413 } else if (rsc->hw_ops.init) {
Dhaval Patel82c8dbc2017-02-18 23:15:10 -0800414 ret = rsc->hw_ops.init(rsc);
415 if (ret)
416 pr_err("sde rsc: hw init failed ret:%d\n", ret);
417 }
418
419 return ret;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800420}
421
422static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc)
423{
424 struct sde_rsc_client *client;
Dhaval Patela65b0f12017-03-16 00:36:55 -0700425 int rc = STATE_UPDATE_NOT_ALLOWED;
426 bool idle_switch = true;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800427
428 list_for_each_entry(client, &rsc->client_list, list)
Dhaval Patela65b0f12017-03-16 00:36:55 -0700429 if (client->current_state != SDE_RSC_IDLE_STATE) {
430 idle_switch = false;
431 break;
432 }
Dhaval Patel020f7e122016-11-15 14:39:18 -0800433
Dhaval Patela65b0f12017-03-16 00:36:55 -0700434 if (!idle_switch) {
		/*
		 * The following code must loop through each client because
		 * clients may be registered in any order; sorting is not
		 * possible, only a preference order is available.
		 */
440
441 /* first check if any vid client active */
442 list_for_each_entry(client, &rsc->client_list, list)
443 if (client->current_state == SDE_RSC_VID_STATE)
444 return rc;
445
446 /* now try cmd state switch */
447 list_for_each_entry(client, &rsc->client_list, list)
448 if (client->current_state == SDE_RSC_CMD_STATE)
449 return TRY_CMD_MODE_SWITCH;
450
451 /* now try clk state switch */
452 list_for_each_entry(client, &rsc->client_list, list)
453 if (client->current_state == SDE_RSC_CLK_STATE)
454 return TRY_CLK_MODE_SWITCH;
455
456 } else if (rsc->hw_ops.state_update) {
Dhaval Patel020f7e122016-11-15 14:39:18 -0800457 rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
Dhaval Patel52ec1192017-05-02 23:13:24 -0700458 if (!rc)
Dhaval Patel1b62c002017-09-22 12:19:11 -0700459 rpmh_mode_solver_set(rsc->disp_rsc, true);
Dhaval Patela65b0f12017-03-16 00:36:55 -0700460 }
Dhaval Patel020f7e122016-11-15 14:39:18 -0800461
462 return rc;
463}
464
Dhaval Patela65b0f12017-03-16 00:36:55 -0700465static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
Dhaval Patel82c8dbc2017-02-18 23:15:10 -0800466 struct sde_rsc_cmd_config *config,
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400467 struct sde_rsc_client *caller_client,
468 int *wait_vblank_crtc_id)
Dhaval Patel020f7e122016-11-15 14:39:18 -0800469{
470 struct sde_rsc_client *client;
Dhaval Patela65b0f12017-03-16 00:36:55 -0700471 int rc = STATE_UPDATE_NOT_ALLOWED;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800472
473 if (!rsc->primary_client) {
474 pr_err("primary client not available for cmd state switch\n");
475 rc = -EINVAL;
476 goto end;
477 } else if (caller_client != rsc->primary_client) {
478 pr_err("primary client state:%d not cmd state request\n",
479 rsc->primary_client->current_state);
480 rc = -EINVAL;
481 goto end;
482 }
483
Dhaval Patel82c8dbc2017-02-18 23:15:10 -0800484 /* update timers - might not be available at next switch */
485 if (config)
486 sde_rsc_timer_calculate(rsc, config);
487
	/*
	 * rsc clients can still send a config at any time. If a config is
	 * received during cmd_state, then vsync_wait will execute with the
	 * logic below. If a config is received while rsc is in AMC mode, a
	 * mode switch will do the vsync wait. The updated checks still
	 * support all cases for dynamic mode switch and inline rotation.
	 */
Dhaval Patel1c1397d2017-05-16 18:37:34 -0700495 if (rsc->current_state == SDE_RSC_CMD_STATE) {
496 rc = 0;
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400497 if (config)
498 goto vsync_wait;
499 else
500 goto end;
Dhaval Patel1c1397d2017-05-16 18:37:34 -0700501 }
502
Dhaval Patel020f7e122016-11-15 14:39:18 -0800503 /* any one client in video state blocks the cmd state switch */
504 list_for_each_entry(client, &rsc->client_list, list)
505 if (client->current_state == SDE_RSC_VID_STATE)
506 goto end;
507
Dhaval Patel52ec1192017-05-02 23:13:24 -0700508 if (rsc->hw_ops.state_update) {
Dhaval Patel020f7e122016-11-15 14:39:18 -0800509 rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
Dhaval Patel52ec1192017-05-02 23:13:24 -0700510 if (!rc)
511 rpmh_mode_solver_set(rsc->disp_rsc, true);
512 }
Dhaval Patel020f7e122016-11-15 14:39:18 -0800513
Dhaval Patel1c1397d2017-05-16 18:37:34 -0700514vsync_wait:
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400515 /* indicate wait for vsync for vid to cmd state switch & cfg update */
Dhaval Patel1c1397d2017-05-16 18:37:34 -0700516 if (!rc && (rsc->current_state == SDE_RSC_VID_STATE ||
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400517 rsc->current_state == SDE_RSC_CMD_STATE)) {
518 /* clear VSYNC timestamp for indication when update completes */
519 if (rsc->hw_ops.hw_vsync)
520 rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0);
521 if (!wait_vblank_crtc_id) {
522 pr_err("invalid crtc id wait pointer, client %d\n",
523 caller_client->id);
524 SDE_EVT32(caller_client->id, rsc->current_state,
525 caller_client->crtc_id,
526 wait_vblank_crtc_id, SDE_EVTLOG_ERROR);
527 msleep(PRIMARY_VBLANK_WORST_CASE_MS);
528 } else {
529 *wait_vblank_crtc_id = rsc->primary_client->crtc_id;
530 }
531 }
Dhaval Patel020f7e122016-11-15 14:39:18 -0800532end:
533 return rc;
534}
535
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400536static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc,
537 int *wait_vblank_crtc_id)
Dhaval Patela65b0f12017-03-16 00:36:55 -0700538{
539 struct sde_rsc_client *client;
540 int rc = STATE_UPDATE_NOT_ALLOWED;
541
542 list_for_each_entry(client, &rsc->client_list, list)
543 if ((client->current_state == SDE_RSC_VID_STATE) ||
544 (client->current_state == SDE_RSC_CMD_STATE))
545 goto end;
546
Dhaval Patel52ec1192017-05-02 23:13:24 -0700547 if (rsc->hw_ops.state_update) {
Dhaval Patelfbb11f02017-04-06 13:43:28 -0700548 rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE);
Dhaval Patel52ec1192017-05-02 23:13:24 -0700549 if (!rc)
550 rpmh_mode_solver_set(rsc->disp_rsc, false);
551 }
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700552
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400553 /* indicate wait for vsync for cmd to clk state switch */
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700554 if (!rc && rsc->primary_client &&
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400555 (rsc->current_state == SDE_RSC_CMD_STATE)) {
556 /* clear VSYNC timestamp for indication when update completes */
557 if (rsc->hw_ops.hw_vsync)
558 rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0);
559 if (!wait_vblank_crtc_id) {
560 pr_err("invalid crtc id wait pointer provided\n");
561 msleep(PRIMARY_VBLANK_WORST_CASE_MS);
562 } else {
563 *wait_vblank_crtc_id = rsc->primary_client->crtc_id;
Ingrid Gallardoe52302c2017-11-28 19:30:47 -0800564
565 /* increase refcount, so we wait for the next vsync */
566 atomic_inc(&rsc->rsc_vsync_wait);
567 SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait));
568 }
569 } else if (atomic_read(&rsc->rsc_vsync_wait)) {
570 SDE_EVT32(rsc->primary_client, rsc->current_state,
571 atomic_read(&rsc->rsc_vsync_wait));
572
573 /* Wait for the vsync, if the refcount is set */
574 rc = wait_event_timeout(rsc->rsc_vsync_waitq,
575 atomic_read(&rsc->rsc_vsync_wait) == 0,
576 msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
577 if (!rc) {
578 pr_err("Timeout waiting for vsync\n");
579 SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait),
580 SDE_EVTLOG_ERROR);
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400581 }
582 }
Dhaval Patela65b0f12017-03-16 00:36:55 -0700583end:
584 return rc;
585}
586
Clarence Ip56e33492017-05-18 13:43:45 -0400587static int sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
Dhaval Patel82c8dbc2017-02-18 23:15:10 -0800588 struct sde_rsc_cmd_config *config,
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400589 struct sde_rsc_client *caller_client,
590 int *wait_vblank_crtc_id)
Dhaval Patel020f7e122016-11-15 14:39:18 -0800591{
592 int rc = 0;
593
594 /* update timers - might not be available at next switch */
595 if (config && (caller_client == rsc->primary_client))
596 sde_rsc_timer_calculate(rsc, config);
597
Dhaval Patel1c1397d2017-05-16 18:37:34 -0700598 /* early exit without vsync wait for vid state */
599 if (rsc->current_state == SDE_RSC_VID_STATE)
600 goto end;
601
Dhaval Patel020f7e122016-11-15 14:39:18 -0800602 /* video state switch should be done immediately */
Dhaval Patel52ec1192017-05-02 23:13:24 -0700603 if (rsc->hw_ops.state_update) {
Dhaval Patel020f7e122016-11-15 14:39:18 -0800604 rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
Dhaval Patel52ec1192017-05-02 23:13:24 -0700605 if (!rc)
606 rpmh_mode_solver_set(rsc->disp_rsc, false);
607 }
Dhaval Patel020f7e122016-11-15 14:39:18 -0800608
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400609 /* indicate wait for vsync for cmd to vid state switch */
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700610 if (!rc && rsc->primary_client &&
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400611 (rsc->current_state == SDE_RSC_CMD_STATE)) {
612 /* clear VSYNC timestamp for indication when update completes */
613 if (rsc->hw_ops.hw_vsync)
614 rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0);
615 if (!wait_vblank_crtc_id) {
616 pr_err("invalid crtc id wait pointer provided\n");
617 msleep(PRIMARY_VBLANK_WORST_CASE_MS);
618 } else {
619 *wait_vblank_crtc_id = rsc->primary_client->crtc_id;
Ingrid Gallardoe52302c2017-11-28 19:30:47 -0800620
621 /* increase refcount, so we wait for the next vsync */
622 atomic_inc(&rsc->rsc_vsync_wait);
623 SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait));
624 }
625 } else if (atomic_read(&rsc->rsc_vsync_wait)) {
626 SDE_EVT32(rsc->primary_client, rsc->current_state,
627 atomic_read(&rsc->rsc_vsync_wait));
628
629 /* Wait for the vsync, if the refcount is set */
630 rc = wait_event_timeout(rsc->rsc_vsync_waitq,
631 atomic_read(&rsc->rsc_vsync_wait) == 0,
632 msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
633 if (!rc) {
634 pr_err("Timeout waiting for vsync\n");
635 SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait),
636 SDE_EVTLOG_ERROR);
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400637 }
638 }
Dhaval Patel1c1397d2017-05-16 18:37:34 -0700639
640end:
Dhaval Patel020f7e122016-11-15 14:39:18 -0800641 return rc;
642}
643
644/**
 * sde_rsc_client_get_vsync_refcount() - return the current vsync wait
 * refcount, so the caller can tell whether the refcounting logic needs
 * to be reset.
 * @caller_client: Client pointer provided by sde_rsc_client_create().
648 *
649 * Return: value of the vsync refcount.
650 */
651int sde_rsc_client_get_vsync_refcount(
652 struct sde_rsc_client *caller_client)
653{
654 struct sde_rsc_priv *rsc;
655
656 if (!caller_client) {
657 pr_err("invalid client for rsc state update\n");
658 return -EINVAL;
659 } else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
660 pr_err("invalid rsc index\n");
661 return -EINVAL;
662 }
663
664 rsc = rsc_prv_list[caller_client->rsc_index];
665 if (!rsc)
666 return 0;
667
668 return atomic_read(&rsc->rsc_vsync_wait);
669}
670
671/**
 * sde_rsc_client_reset_vsync_refcount() - decrement the vsync wait
 * refcount, if non-zero, and wake up any waiters.
 * @caller_client: Client pointer provided by sde_rsc_client_create().
675 *
676 * Return: zero if refcount was already zero.
677 */
678int sde_rsc_client_reset_vsync_refcount(
679 struct sde_rsc_client *caller_client)
680{
681 struct sde_rsc_priv *rsc;
682 int ret;
683
684 if (!caller_client) {
685 pr_err("invalid client for rsc state update\n");
686 return -EINVAL;
687 } else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
688 pr_err("invalid rsc index\n");
689 return -EINVAL;
690 }
691
692 rsc = rsc_prv_list[caller_client->rsc_index];
693 if (!rsc)
694 return 0;
695
696 ret = atomic_add_unless(&rsc->rsc_vsync_wait, -1, 0);
697 wake_up_all(&rsc->rsc_vsync_waitq);
698 SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait));
699
700 return ret;
701}
702
703/**
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400704 * sde_rsc_client_is_state_update_complete() - check if state update is complete
705 * RSC state transition is not complete until HW receives VBLANK signal. This
706 * function checks RSC HW to determine whether that signal has been received.
707 * @client: Client pointer provided by sde_rsc_client_create().
708 *
709 * Return: true if the state update has completed.
710 */
711bool sde_rsc_client_is_state_update_complete(
712 struct sde_rsc_client *caller_client)
713{
714 struct sde_rsc_priv *rsc;
715 u32 vsync_timestamp0 = 0;
716
717 if (!caller_client) {
718 pr_err("invalid client for rsc state update\n");
719 return false;
720 } else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
721 pr_err("invalid rsc index\n");
722 return false;
723 }
724
725 rsc = rsc_prv_list[caller_client->rsc_index];
726 if (!rsc)
727 return false;
728
729 /**
730 * state updates clear VSYNC timestamp, check if a new one arrived.
731 * use VSYNC mode 0 (CMD TE) always for this, per HW recommendation.
732 */
733 if (rsc->hw_ops.hw_vsync)
734 vsync_timestamp0 = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ_VSYNC0,
735 NULL, 0, 0);
736
737 return vsync_timestamp0 != 0;
738}
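/*
 * Example usage (illustrative sketch only): after a state update that
 * requested a vblank wait, a caller could poll for completion. A real
 * caller would bound the loop with a timeout.
 *
 *	while (!sde_rsc_client_is_state_update_complete(rsc_client))
 *		usleep_range(1000, 1500);
 */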
739
740/**
Dhaval Patel020f7e122016-11-15 14:39:18 -0800741 * sde_rsc_client_state_update() - rsc client state update
 * Video mode, cmd mode and clk state are supported modes. A client needs to
 * set this property during panel config time. A switching client can set the
 * property to change the state.
 *
 * @caller_client: Client pointer provided by sde_rsc_client_create().
 * @state:	Client state - idle/video/cmd/clk
748 * @config: fps, vtotal, porches, etc configuration for command mode
749 * panel
750 * @crtc_id: current client's crtc id
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400751 * @wait_vblank_crtc_id: Output parameter. If set to non-zero, rsc hw
752 * state update requires a wait for one vblank on
753 * the primary crtc. In that case, this output
754 * param will be set to the crtc on which to wait.
755 * If SDE_RSC_INVALID_CRTC_ID, no wait necessary
Dhaval Patel020f7e122016-11-15 14:39:18 -0800756 *
757 * Return: error code.
758 */
759int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
760 enum sde_rsc_state state,
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400761 struct sde_rsc_cmd_config *config, int crtc_id,
762 int *wait_vblank_crtc_id)
Dhaval Patel020f7e122016-11-15 14:39:18 -0800763{
764 int rc = 0;
765 struct sde_rsc_priv *rsc;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800766
767 if (!caller_client) {
768 pr_err("invalid client for rsc state update\n");
769 return -EINVAL;
770 } else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
771 pr_err("invalid rsc index\n");
772 return -EINVAL;
773 }
774
775 rsc = rsc_prv_list[caller_client->rsc_index];
776 if (!rsc)
777 return -EINVAL;
778
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400779 if (wait_vblank_crtc_id)
780 *wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
781
Dhaval Patel020f7e122016-11-15 14:39:18 -0800782 mutex_lock(&rsc->client_lock);
Dhaval Patela5f75952017-07-25 11:17:41 -0700783 SDE_EVT32_VERBOSE(caller_client->id, caller_client->current_state,
Dhaval Patel824e9682017-05-01 23:31:22 -0700784 state, rsc->current_state, SDE_EVTLOG_FUNC_ENTRY);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800785 caller_client->crtc_id = crtc_id;
786 caller_client->current_state = state;
787
788 if (rsc->master_drm == NULL) {
789 pr_err("invalid master component binding\n");
790 rc = -EINVAL;
791 goto end;
Dhaval Patela65b0f12017-03-16 00:36:55 -0700792 } else if ((rsc->current_state == state) && !config) {
Dhaval Patel020f7e122016-11-15 14:39:18 -0800793 pr_debug("no state change: %d\n", state);
794 goto end;
795 }
796
797 pr_debug("%pS: rsc state:%d request client:%s state:%d\n",
798 __builtin_return_address(0), rsc->current_state,
799 caller_client->name, state);
800
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700801 if (rsc->current_state == SDE_RSC_IDLE_STATE)
Dhaval Patelfcd9e912017-03-16 00:54:09 -0700802 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800803
804 switch (state) {
805 case SDE_RSC_IDLE_STATE:
806 rc = sde_rsc_switch_to_idle(rsc);
Dhaval Patela65b0f12017-03-16 00:36:55 -0700807
Dhaval Patel020f7e122016-11-15 14:39:18 -0800808 /* video state client might be exiting; try cmd state switch */
Dhaval Patela65b0f12017-03-16 00:36:55 -0700809 if (rc == TRY_CMD_MODE_SWITCH) {
Dhaval Patel020f7e122016-11-15 14:39:18 -0800810 rc = sde_rsc_switch_to_cmd(rsc, NULL,
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400811 rsc->primary_client,
812 wait_vblank_crtc_id);
Dhaval Patela65b0f12017-03-16 00:36:55 -0700813 if (!rc)
814 state = SDE_RSC_CMD_STATE;
815
816 /* cmd state client might be exiting; try clk state switch */
817 } else if (rc == TRY_CLK_MODE_SWITCH) {
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400818 rc = sde_rsc_switch_to_clk(rsc, wait_vblank_crtc_id);
Dhaval Patela65b0f12017-03-16 00:36:55 -0700819 if (!rc)
820 state = SDE_RSC_CLK_STATE;
821 }
Dhaval Patel020f7e122016-11-15 14:39:18 -0800822 break;
823
824 case SDE_RSC_CMD_STATE:
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400825 rc = sde_rsc_switch_to_cmd(rsc, config, caller_client,
826 wait_vblank_crtc_id);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800827 break;
828
829 case SDE_RSC_VID_STATE:
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400830 rc = sde_rsc_switch_to_vid(rsc, config, caller_client,
831 wait_vblank_crtc_id);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800832 break;
833
Dhaval Patela65b0f12017-03-16 00:36:55 -0700834 case SDE_RSC_CLK_STATE:
Lloyd Atkinsonf68a2132017-07-17 10:16:30 -0400835 rc = sde_rsc_switch_to_clk(rsc, wait_vblank_crtc_id);
Dhaval Patela65b0f12017-03-16 00:36:55 -0700836 break;
837
Dhaval Patel020f7e122016-11-15 14:39:18 -0800838 default:
839 pr_err("invalid state handling %d\n", state);
840 break;
841 }
842
Dhaval Patela65b0f12017-03-16 00:36:55 -0700843 if (rc == STATE_UPDATE_NOT_ALLOWED) {
844 rc = 0;
Dhaval Patel824e9682017-05-01 23:31:22 -0700845 SDE_EVT32(caller_client->id, caller_client->current_state,
846 state, rsc->current_state, rc, SDE_EVTLOG_FUNC_CASE1);
Dhaval Patela65b0f12017-03-16 00:36:55 -0700847 goto clk_disable;
848 } else if (rc) {
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700849 pr_debug("state:%d update failed rc:%d\n", state, rc);
Dhaval Patel824e9682017-05-01 23:31:22 -0700850 SDE_EVT32(caller_client->id, caller_client->current_state,
851 state, rsc->current_state, rc, SDE_EVTLOG_FUNC_CASE2);
Dhaval Patela65b0f12017-03-16 00:36:55 -0700852 goto clk_disable;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800853 }
854
855 pr_debug("state switch successfully complete: %d\n", state);
856 rsc->current_state = state;
Dhaval Patel824e9682017-05-01 23:31:22 -0700857 SDE_EVT32(caller_client->id, caller_client->current_state,
858 state, rsc->current_state, SDE_EVTLOG_FUNC_EXIT);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800859
Dhaval Patela65b0f12017-03-16 00:36:55 -0700860clk_disable:
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700861 if (rsc->current_state == SDE_RSC_IDLE_STATE)
Dhaval Patelfcd9e912017-03-16 00:54:09 -0700862 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800863end:
864 mutex_unlock(&rsc->client_lock);
865 return rc;
866}
Dhaval Patel49ef6d72017-03-26 09:35:53 -0700867EXPORT_SYMBOL(sde_rsc_client_state_update);
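/*
 * Example usage (illustrative sketch only): a primary command mode panel
 * driver could request the cmd state after filling in its timing
 * configuration. All variable names and values below are hypothetical.
 *
 *	struct sde_rsc_cmd_config cfg;
 *	int wait_crtc_id = SDE_RSC_INVALID_CRTC_ID;
 *
 *	cfg.fps = 60;
 *	cfg.vtotal = 2200;
 *	cfg.prefill_lines = 25;
 *	cfg.jitter_numer = 2;
 *	cfg.jitter_denom = 1;
 *
 *	rc = sde_rsc_client_state_update(rsc_client, SDE_RSC_CMD_STATE,
 *			&cfg, crtc_id, &wait_crtc_id);
 *
 * If rc is zero and wait_crtc_id is not SDE_RSC_INVALID_CRTC_ID, the caller
 * then waits for one vblank on wait_crtc_id (or polls
 * sde_rsc_client_is_state_update_complete()) before proceeding.
 */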
Dhaval Patel020f7e122016-11-15 14:39:18 -0800868
869/**
870 * sde_rsc_client_vote() - ab/ib vote from rsc client
871 *
872 * @client: Client pointer provided by sde_rsc_client_create().
 * @bus_id: identifier of the data bus for which the bandwidth is voted
Dhaval Patel020f7e122016-11-15 14:39:18 -0800874 * @ab: aggregated bandwidth vote from client.
875 * @ib: instant bandwidth vote from client.
876 *
877 * Return: error code.
878 */
879int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
Alan Kwong0230a102017-05-16 11:36:44 -0700880 u32 bus_id, u64 ab_vote, u64 ib_vote)
Dhaval Patel020f7e122016-11-15 14:39:18 -0800881{
882 int rc = 0;
883 struct sde_rsc_priv *rsc;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800884
885 if (!caller_client) {
886 pr_err("invalid client for ab/ib vote\n");
887 return -EINVAL;
888 } else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
889 pr_err("invalid rsc index\n");
890 return -EINVAL;
891 }
892
893 rsc = rsc_prv_list[caller_client->rsc_index];
894 if (!rsc)
895 return -EINVAL;
896
Dhaval Patel020f7e122016-11-15 14:39:18 -0800897 pr_debug("client:%s ab:%llu ib:%llu\n",
898 caller_client->name, ab_vote, ib_vote);
899
900 mutex_lock(&rsc->client_lock);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700901 rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
902 if (rc)
903 goto clk_enable_fail;
Dhaval Patel020f7e122016-11-15 14:39:18 -0800904
Dhaval Patel020f7e122016-11-15 14:39:18 -0800905 if (rsc->hw_ops.tcs_wait) {
906 rc = rsc->hw_ops.tcs_wait(rsc);
907 if (rc) {
908 pr_err("tcs is still busy; can't send command\n");
909 if (rsc->hw_ops.tcs_use_ok)
910 rsc->hw_ops.tcs_use_ok(rsc);
911 goto end;
912 }
913 }
914
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700915 rpmh_invalidate(rsc->disp_rsc);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800916 sde_power_data_bus_set_quota(&rsc->phandle, rsc->pclient,
Alan Kwong0230a102017-05-16 11:36:44 -0700917 SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
918 bus_id, ab_vote, ib_vote);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700919 rpmh_flush(rsc->disp_rsc);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800920
921 if (rsc->hw_ops.tcs_use_ok)
922 rsc->hw_ops.tcs_use_ok(rsc);
923
924end:
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700925 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
926clk_enable_fail:
Dhaval Patel020f7e122016-11-15 14:39:18 -0800927 mutex_unlock(&rsc->client_lock);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700928
Dhaval Patel020f7e122016-11-15 14:39:18 -0800929 return rc;
930}
Dhaval Patel49ef6d72017-03-26 09:35:53 -0700931EXPORT_SYMBOL(sde_rsc_client_vote);
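/*
 * Example usage (illustrative sketch only): voting bandwidth for a client.
 * The bus_id, ab_quota and ib_quota values are hypothetical placeholders;
 * actual values depend on the target and use case.
 *
 *	rc = sde_rsc_client_vote(rsc_client, bus_id, ab_quota, ib_quota);
 *	if (rc)
 *		pr_err("rsc bandwidth vote failed rc:%d\n", rc);
 */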
Dhaval Patel020f7e122016-11-15 14:39:18 -0800932
Dhaval Patelc5a2e5d2017-09-18 12:39:41 -0700933#if defined(CONFIG_DEBUG_FS)
934void sde_rsc_debug_dump(u32 mux_sel)
935{
936 struct sde_rsc_priv *rsc;
937
938 rsc = rsc_prv_list[SDE_RSC_INDEX];
939 if (!rsc)
940 return;
941
942 /* this must be called with rsc clocks enabled */
943 if (rsc->hw_ops.debug_dump)
944 rsc->hw_ops.debug_dump(rsc, mux_sel);
945}
946#endif /* defined(CONFIG_DEBUG_FS) */
947
Dhaval Patel020f7e122016-11-15 14:39:18 -0800948static int _sde_debugfs_status_show(struct seq_file *s, void *data)
949{
950 struct sde_rsc_priv *rsc;
951 struct sde_rsc_client *client;
952 int ret;
953
954 if (!s || !s->private)
955 return -EINVAL;
956
957 rsc = s->private;
958
959 mutex_lock(&rsc->client_lock);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700960 ret = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
961 if (ret)
962 goto end;
963
Dhaval Patel020f7e122016-11-15 14:39:18 -0800964 seq_printf(s, "rsc current state:%d\n", rsc->current_state);
	seq_printf(s, "wrapper backoff time(ns):%d\n",
966 rsc->timer_config.static_wakeup_time_ns);
967 seq_printf(s, "rsc backoff time(ns):%d\n",
968 rsc->timer_config.rsc_backoff_time_ns);
969 seq_printf(s, "pdc backoff time(ns):%d\n",
970 rsc->timer_config.pdc_backoff_time_ns);
971 seq_printf(s, "rsc mode threshold time(ns):%d\n",
972 rsc->timer_config.rsc_mode_threshold_time_ns);
973 seq_printf(s, "rsc time slot 0(ns):%d\n",
974 rsc->timer_config.rsc_time_slot_0_ns);
975 seq_printf(s, "rsc time slot 1(ns):%d\n",
976 rsc->timer_config.rsc_time_slot_1_ns);
Dhaval Patelf5cc5a32017-07-10 17:33:23 -0700977 seq_printf(s, "frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
978 rsc->cmd_config.fps, rsc->cmd_config.jitter_numer,
979 rsc->cmd_config.jitter_denom,
Dhaval Patel020f7e122016-11-15 14:39:18 -0800980 rsc->cmd_config.vtotal, rsc->cmd_config.prefill_lines);
981
982 seq_puts(s, "\n");
983
984 list_for_each_entry(client, &rsc->client_list, list)
985 seq_printf(s, "\t client:%s state:%d\n",
986 client->name, client->current_state);
987
Dhaval Patel020f7e122016-11-15 14:39:18 -0800988 if (rsc->hw_ops.debug_show) {
989 ret = rsc->hw_ops.debug_show(s, rsc);
990 if (ret)
991 pr_err("sde rsc: hw debug failed ret:%d\n", ret);
992 }
Dhaval Patelfcd9e912017-03-16 00:54:09 -0700993 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800994
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -0700995end:
996 mutex_unlock(&rsc->client_lock);
Dhaval Patel020f7e122016-11-15 14:39:18 -0800997 return 0;
998}
999
1000static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
1001{
1002 return single_open(file, _sde_debugfs_status_show, inode->i_private);
1003}
1004
1005static int _sde_debugfs_mode_ctrl_open(struct inode *inode, struct file *file)
1006{
1007 /* non-seekable */
1008 file->private_data = inode->i_private;
1009 return nonseekable_open(inode, file);
1010}
1011
1012static ssize_t _sde_debugfs_mode_ctrl_read(struct file *file, char __user *buf,
1013 size_t count, loff_t *ppos)
1014{
1015 struct sde_rsc_priv *rsc = file->private_data;
1016 char buffer[MAX_BUFFER_SIZE];
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001017 int blen = 0, rc;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001018
1019 if (*ppos || !rsc || !rsc->hw_ops.mode_ctrl)
1020 return 0;
1021
1022 mutex_lock(&rsc->client_lock);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001023 rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
1024 if (rc)
1025 goto end;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001026
1027 blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
1028 MAX_BUFFER_SIZE, 0);
1029
Dhaval Patelfcd9e912017-03-16 00:54:09 -07001030 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001031
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001032end:
1033 mutex_unlock(&rsc->client_lock);
Dhaval Patelf9c5c602017-08-01 12:32:04 -07001034 if (blen <= 0)
Dhaval Patel020f7e122016-11-15 14:39:18 -08001035 return 0;
1036
1037 if (copy_to_user(buf, buffer, blen))
1038 return -EFAULT;
1039
1040 *ppos += blen;
1041 return blen;
1042}
1043
1044static ssize_t _sde_debugfs_mode_ctrl_write(struct file *file,
1045 const char __user *p, size_t count, loff_t *ppos)
1046{
1047 struct sde_rsc_priv *rsc = file->private_data;
Dhaval Patel3d56f892017-05-05 12:21:08 -07001048 char *input;
1049 u32 mode_state = 0;
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001050 int rc;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001051
Dhaval Patel3d56f892017-05-05 12:21:08 -07001052 if (!rsc || !rsc->hw_ops.mode_ctrl || !count ||
1053 count > MAX_COUNT_SIZE_SUPPORTED)
Dhaval Patel020f7e122016-11-15 14:39:18 -08001054 return 0;
1055
Dhaval Patel3d56f892017-05-05 12:21:08 -07001056 input = kmalloc(count + 1, GFP_KERNEL);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001057 if (!input)
1058 return -ENOMEM;
1059
1060 if (copy_from_user(input, p, count)) {
1061 kfree(input);
1062 return -EFAULT;
1063 }
Dhaval Patel3d56f892017-05-05 12:21:08 -07001064 input[count] = '\0';
1065
1066 rc = kstrtoint(input, 0, &mode_state);
1067 if (rc) {
1068 pr_err("mode_state: int conversion failed rc:%d\n", rc);
1069 goto end;
1070 }
1071
1072 pr_debug("mode_state: %d\n", mode_state);
1073 mode_state &= 0x7;
1074 if (mode_state != ALL_MODES_DISABLED &&
1075 mode_state != ALL_MODES_ENABLED &&
1076 mode_state != ONLY_MODE_0_ENABLED &&
1077 mode_state != ONLY_MODE_0_1_ENABLED) {
1078 pr_err("invalid mode:%d combination\n", mode_state);
1079 goto end;
1080 }
Dhaval Patel020f7e122016-11-15 14:39:18 -08001081
1082 mutex_lock(&rsc->client_lock);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001083 rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
1084 if (rc)
1085 goto clk_enable_fail;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001086
Dhaval Patel3d56f892017-05-05 12:21:08 -07001087 rsc->hw_ops.mode_ctrl(rsc, MODE_UPDATE, NULL, 0, mode_state);
Dhaval Patelfcd9e912017-03-16 00:54:09 -07001088 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
Dhaval Patel3d56f892017-05-05 12:21:08 -07001089
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001090clk_enable_fail:
Dhaval Patel020f7e122016-11-15 14:39:18 -08001091 mutex_unlock(&rsc->client_lock);
Dhaval Patel3d56f892017-05-05 12:21:08 -07001092end:
Dhaval Patel020f7e122016-11-15 14:39:18 -08001093 kfree(input);
1094 return count;
1095}
1096
1097static int _sde_debugfs_vsync_mode_open(struct inode *inode, struct file *file)
1098{
1099 /* non-seekable */
1100 file->private_data = inode->i_private;
1101 return nonseekable_open(inode, file);
1102}
1103
1104static ssize_t _sde_debugfs_vsync_mode_read(struct file *file, char __user *buf,
1105 size_t count, loff_t *ppos)
1106{
1107 struct sde_rsc_priv *rsc = file->private_data;
1108 char buffer[MAX_BUFFER_SIZE];
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001109 int blen = 0, rc;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001110
1111 if (*ppos || !rsc || !rsc->hw_ops.hw_vsync)
1112 return 0;
1113
1114 mutex_lock(&rsc->client_lock);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001115 rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
1116 if (rc)
1117 goto end;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001118
1119 blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
1120 MAX_BUFFER_SIZE, 0);
1121
Dhaval Patelfcd9e912017-03-16 00:54:09 -07001122 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001123
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001124end:
1125 mutex_unlock(&rsc->client_lock);
Dhaval Patelf9c5c602017-08-01 12:32:04 -07001126 if (blen <= 0)
Dhaval Patel020f7e122016-11-15 14:39:18 -08001127 return 0;
1128
1129 if (copy_to_user(buf, buffer, blen))
1130 return -EFAULT;
1131
1132 *ppos += blen;
1133 return blen;
1134}
1135
1136static ssize_t _sde_debugfs_vsync_mode_write(struct file *file,
1137 const char __user *p, size_t count, loff_t *ppos)
1138{
1139 struct sde_rsc_priv *rsc = file->private_data;
Dhaval Patel3d56f892017-05-05 12:21:08 -07001140 char *input;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001141 u32 vsync_state = 0;
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001142 int rc;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001143
Dhaval Patel3d56f892017-05-05 12:21:08 -07001144 if (!rsc || !rsc->hw_ops.hw_vsync || !count ||
1145 count > MAX_COUNT_SIZE_SUPPORTED)
Dhaval Patel020f7e122016-11-15 14:39:18 -08001146 return 0;
1147
Dhaval Patel3d56f892017-05-05 12:21:08 -07001148 input = kmalloc(count + 1, GFP_KERNEL);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001149 if (!input)
1150 return -ENOMEM;
1151
1152 if (copy_from_user(input, p, count)) {
1153 kfree(input);
1154 return -EFAULT;
1155 }
Dhaval Patel3d56f892017-05-05 12:21:08 -07001156 input[count] = '\0';
Dhaval Patel020f7e122016-11-15 14:39:18 -08001157
Dhaval Patel3d56f892017-05-05 12:21:08 -07001158 rc = kstrtoint(input, 0, &vsync_state);
1159 if (rc) {
1160 pr_err("vsync_state: int conversion failed rc:%d\n", rc);
1161 goto end;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001162 }
1163
Dhaval Patel3d56f892017-05-05 12:21:08 -07001164 pr_debug("vsync_state: %d\n", vsync_state);
1165 vsync_state &= 0x7;
1166
Dhaval Patel020f7e122016-11-15 14:39:18 -08001167 mutex_lock(&rsc->client_lock);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001168 rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
1169 if (rc)
Dhaval Patel3d56f892017-05-05 12:21:08 -07001170 goto clk_en_fail;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001171
1172 if (vsync_state)
1173 rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
1174 0, vsync_state - 1);
1175 else
1176 rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
1177
Dhaval Patelfcd9e912017-03-16 00:54:09 -07001178 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001179
Dhaval Patel3d56f892017-05-05 12:21:08 -07001180clk_en_fail:
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001181 mutex_unlock(&rsc->client_lock);
Dhaval Patel3d56f892017-05-05 12:21:08 -07001182end:
Dhaval Patel020f7e122016-11-15 14:39:18 -08001183 kfree(input);
1184 return count;
1185}
1186
1187static const struct file_operations debugfs_status_fops = {
1188 .open = _sde_debugfs_status_open,
1189 .read = seq_read,
1190 .llseek = seq_lseek,
1191 .release = single_release,
1192};
1193
1194static const struct file_operations mode_control_fops = {
1195 .open = _sde_debugfs_mode_ctrl_open,
1196 .read = _sde_debugfs_mode_ctrl_read,
1197 .write = _sde_debugfs_mode_ctrl_write,
1198};
1199
1200static const struct file_operations vsync_status_fops = {
1201 .open = _sde_debugfs_vsync_mode_open,
1202 .read = _sde_debugfs_vsync_mode_read,
1203 .write = _sde_debugfs_vsync_mode_write,
1204};
1205
1206static void _sde_rsc_init_debugfs(struct sde_rsc_priv *rsc, char *name)
1207{
1208 rsc->debugfs_root = debugfs_create_dir(name, NULL);
1209 if (!rsc->debugfs_root)
1210 return;
1211
1212 /* don't error check these */
Lloyd Atkinson8de415a2017-05-23 11:31:16 -04001213 debugfs_create_file("status", 0400, rsc->debugfs_root, rsc,
Dhaval Patel020f7e122016-11-15 14:39:18 -08001214 &debugfs_status_fops);
Lloyd Atkinson8de415a2017-05-23 11:31:16 -04001215 debugfs_create_file("mode_control", 0600, rsc->debugfs_root, rsc,
Dhaval Patel020f7e122016-11-15 14:39:18 -08001216 &mode_control_fops);
Lloyd Atkinson8de415a2017-05-23 11:31:16 -04001217 debugfs_create_file("vsync_mode", 0600, rsc->debugfs_root, rsc,
Dhaval Patel020f7e122016-11-15 14:39:18 -08001218 &vsync_status_fops);
Lloyd Atkinson8de415a2017-05-23 11:31:16 -04001219 debugfs_create_x32("debug_mode", 0600, rsc->debugfs_root,
Dhaval Patel020f7e122016-11-15 14:39:18 -08001220 &rsc->debug_mode);
1221}
1222
1223static void sde_rsc_deinit(struct platform_device *pdev,
1224 struct sde_rsc_priv *rsc)
1225{
1226 if (!rsc)
1227 return;
1228
1229 if (rsc->pclient)
Dhaval Patelfcd9e912017-03-16 00:54:09 -07001230 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001231 if (rsc->fs)
1232 devm_regulator_put(rsc->fs);
1233 if (rsc->wrapper_io.base)
1234 msm_dss_iounmap(&rsc->wrapper_io);
1235 if (rsc->drv_io.base)
1236 msm_dss_iounmap(&rsc->drv_io);
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001237 if (rsc->disp_rsc)
1238 rpmh_release(rsc->disp_rsc);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001239 if (rsc->pclient)
1240 sde_power_client_destroy(&rsc->phandle, rsc->pclient);
1241
1242 sde_power_resource_deinit(pdev, &rsc->phandle);
1243 debugfs_remove_recursive(rsc->debugfs_root);
1244 kfree(rsc);
1245}
1246
1247/**
1248 * sde_rsc_bind - bind rsc device with controlling device
1249 * @dev: Pointer to base of platform device
1250 * @master: Pointer to container of drm device
1251 * @data: Pointer to private data
1252 * Returns: Zero on success
1253 */
1254static int sde_rsc_bind(struct device *dev,
1255 struct device *master,
1256 void *data)
1257{
1258 struct sde_rsc_priv *rsc;
1259 struct drm_device *drm;
1260 struct platform_device *pdev = to_platform_device(dev);
1261
1262 if (!dev || !pdev || !master) {
1263 pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
1264 dev, pdev, master);
1265 return -EINVAL;
1266 }
1267
1268 drm = dev_get_drvdata(master);
1269 rsc = platform_get_drvdata(pdev);
1270 if (!drm || !rsc) {
1271 pr_err("invalid param(s), drm %pK, rsc %pK\n",
1272 drm, rsc);
1273 return -EINVAL;
1274 }
1275
1276 mutex_lock(&rsc->client_lock);
1277 rsc->master_drm = drm;
1278 mutex_unlock(&rsc->client_lock);
1279
Dhaval Patela2430842017-06-15 14:32:36 -07001280 sde_dbg_reg_register_base(SDE_RSC_DRV_DBG_NAME, rsc->drv_io.base,
1281 rsc->drv_io.len);
1282 sde_dbg_reg_register_base(SDE_RSC_WRAPPER_DBG_NAME,
1283 rsc->wrapper_io.base, rsc->wrapper_io.len);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001284 return 0;
1285}
1286
1287/**
1288 * sde_rsc_unbind - unbind rsc from controlling device
1289 * @dev: Pointer to base of platform device
1290 * @master: Pointer to container of drm device
1291 * @data: Pointer to private data
1292 */
1293static void sde_rsc_unbind(struct device *dev,
1294 struct device *master, void *data)
1295{
1296 struct sde_rsc_priv *rsc;
1297 struct platform_device *pdev = to_platform_device(dev);
1298
1299 if (!dev || !pdev) {
1300 pr_err("invalid param(s)\n");
1301 return;
1302 }
1303
1304 rsc = platform_get_drvdata(pdev);
1305 if (!rsc) {
1306 pr_err("invalid display rsc\n");
1307 return;
1308 }
1309
1310 mutex_lock(&rsc->client_lock);
1311 rsc->master_drm = NULL;
1312 mutex_unlock(&rsc->client_lock);
1313}
1314
1315static const struct component_ops sde_rsc_comp_ops = {
1316 .bind = sde_rsc_bind,
1317 .unbind = sde_rsc_unbind,
1318};
1319
1320static int sde_rsc_probe(struct platform_device *pdev)
1321{
1322 int ret;
1323 struct sde_rsc_priv *rsc;
1324 static int counter;
1325 char name[MAX_RSC_CLIENT_NAME_LEN];
1326
1327 rsc = kzalloc(sizeof(*rsc), GFP_KERNEL);
1328 if (!rsc) {
1329 ret = -ENOMEM;
1330 goto rsc_alloc_fail;
1331 }
1332
1333 platform_set_drvdata(pdev, rsc);
1334 of_property_read_u32(pdev->dev.of_node, "qcom,sde-rsc-version",
1335 &rsc->version);
1336
1337 ret = sde_power_resource_init(pdev, &rsc->phandle);
1338 if (ret) {
1339 pr_err("sde rsc:power resource init failed ret:%d\n", ret);
1340 goto sde_rsc_fail;
1341 }
1342
1343 rsc->pclient = sde_power_client_create(&rsc->phandle, "rsc");
1344 if (IS_ERR_OR_NULL(rsc->pclient)) {
1345 ret = PTR_ERR(rsc->pclient);
1346 rsc->pclient = NULL;
1347 pr_err("sde rsc:power client create failed ret:%d\n", ret);
1348 goto sde_rsc_fail;
1349 }
1350
Dhaval Patelc4f1c7c2017-08-16 12:02:21 -07001351 /**
1352 * sde rsc should always vote through enable path, sleep vote is
1353 * set to "0" by default.
1354 */
1355 sde_power_data_bus_state_update(&rsc->phandle, true);
1356
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001357 rsc->disp_rsc = rpmh_get_byname(pdev, "disp_rsc");
1358 if (IS_ERR_OR_NULL(rsc->disp_rsc)) {
1359 ret = PTR_ERR(rsc->disp_rsc);
1360 rsc->disp_rsc = NULL;
1361 pr_err("sde rsc:get display rsc failed ret:%d\n", ret);
1362 goto sde_rsc_fail;
1363 }
Dhaval Pateld2dd1ad2017-03-29 16:13:17 -07001364
Dhaval Patel020f7e122016-11-15 14:39:18 -08001365 ret = msm_dss_ioremap_byname(pdev, &rsc->wrapper_io, "wrapper");
1366 if (ret) {
1367 pr_err("sde rsc: wrapper io data mapping failed ret=%d\n", ret);
1368 goto sde_rsc_fail;
1369 }
1370
1371 ret = msm_dss_ioremap_byname(pdev, &rsc->drv_io, "drv");
1372 if (ret) {
1373 pr_err("sde rsc: drv io data mapping failed ret:%d\n", ret);
1374 goto sde_rsc_fail;
1375 }
1376
1377 rsc->fs = devm_regulator_get(&pdev->dev, "vdd");
1378 if (IS_ERR_OR_NULL(rsc->fs)) {
1379 rsc->fs = NULL;
1380 pr_err("unable to get regulator\n");
1381 goto sde_rsc_fail;
1382 }
1383
1384 ret = sde_rsc_hw_register(rsc);
1385 if (ret) {
1386 pr_err("sde rsc: hw register failed ret:%d\n", ret);
1387 goto sde_rsc_fail;
1388 }
1389
Dhaval Patelfcd9e912017-03-16 00:54:09 -07001390 if (sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true)) {
Dhaval Patel020f7e122016-11-15 14:39:18 -08001391 pr_err("failed to enable sde rsc power resources\n");
1392 goto sde_rsc_fail;
1393 }
1394
Dhaval Patel82c8dbc2017-02-18 23:15:10 -08001395 if (sde_rsc_timer_calculate(rsc, NULL))
1396 goto sde_rsc_fail;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001397
Dhaval Patelfcd9e912017-03-16 00:54:09 -07001398 sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
1399
Dhaval Patel020f7e122016-11-15 14:39:18 -08001400 INIT_LIST_HEAD(&rsc->client_list);
Dhaval Patelfbb11f02017-04-06 13:43:28 -07001401 INIT_LIST_HEAD(&rsc->event_list);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001402 mutex_init(&rsc->client_lock);
Ingrid Gallardoe52302c2017-11-28 19:30:47 -08001403 init_waitqueue_head(&rsc->rsc_vsync_waitq);
Dhaval Patel020f7e122016-11-15 14:39:18 -08001404
1405 pr_info("sde rsc index:%d probed successfully\n",
1406 SDE_RSC_INDEX + counter);
1407
1408 rsc_prv_list[SDE_RSC_INDEX + counter] = rsc;
1409 snprintf(name, MAX_RSC_CLIENT_NAME_LEN, "%s%d", "sde_rsc", counter);
1410 _sde_rsc_init_debugfs(rsc, name);
1411 counter++;
Dhaval Patel020f7e122016-11-15 14:39:18 -08001412
1413 ret = component_add(&pdev->dev, &sde_rsc_comp_ops);
1414 if (ret)
1415 pr_debug("component add failed, ret=%d\n", ret);
1416 ret = 0;
1417
1418 return ret;
1419
1420sde_rsc_fail:
1421 sde_rsc_deinit(pdev, rsc);
1422rsc_alloc_fail:
1423 return ret;
1424}
1425
1426static int sde_rsc_remove(struct platform_device *pdev)
1427{
1428 struct sde_rsc_priv *rsc = platform_get_drvdata(pdev);
1429
1430 sde_rsc_deinit(pdev, rsc);
1431 return 0;
1432}
1433
1434static const struct of_device_id dt_match[] = {
1435 { .compatible = "qcom,sde-rsc"},
1436 {}
1437};
1438
1439MODULE_DEVICE_TABLE(of, dt_match);
1440
1441static struct platform_driver sde_rsc_platform_driver = {
1442 .probe = sde_rsc_probe,
1443 .remove = sde_rsc_remove,
1444 .driver = {
1445 .name = "sde_rsc",
1446 .of_match_table = dt_match,
1447 },
1448};
1449
1450static int __init sde_rsc_register(void)
1451{
1452 return platform_driver_register(&sde_rsc_platform_driver);
1453}
1454
1455static void __exit sde_rsc_unregister(void)
1456{
1457 platform_driver_unregister(&sde_rsc_platform_driver);
1458}
1459
1460module_init(sde_rsc_register);
1461module_exit(sde_rsc_unregister);