blob: b17ac828d1100b9bb2d29fcb58bc256557a4ce5b [file] [log] [blame]
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -04001/*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
16#include "sde_kms.h"
17#include "sde_hw_lm.h"
18#include "sde_hw_ctl.h"
19#include "sde_hw_cdm.h"
20#include "sde_hw_dspp.h"
21#include "sde_hw_pingpong.h"
22#include "sde_hw_intf.h"
23#include "sde_hw_wb.h"
24
/* True when block h is already tagged with a reservation owned by a different
 * encoder than the one building reservation r.
 */
#define RESERVED_BY_OTHER(h, r) \
	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))

/* Test user-requested SDE_RM_TOPCTL_* control bits in a sde_rm_requirements */
#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
#define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
#define RM_RQ_PPSPLIT(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_PPSPLIT))
#define RM_RQ_FORCE_TILING(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_FORCE_TILING))
33
/**
 * struct sde_rm_requirements - Reservation requirements parameter bundle
 * @top_name: DRM<->HW topology use case user is trying to enable
 * @top_ctrl: Bitmask of user-requested SDE_RM_TOPCTL_* control flags
 * @num_lm: Number of layer mixers needed in the use case
 * @num_ctl: Number of control paths needed in the use case
 * @needs_split_display: Whether the reserved CTLs must support split display
 * @hw_res: Hardware resources required as reported by the encoders
 */
struct sde_rm_requirements {
	enum sde_rm_topology_name top_name;
	uint64_t top_ctrl;
	int num_lm;
	int num_ctl;
	bool needs_split_display;
	struct sde_encoder_hw_resources hw_res;
};
49
/**
 * struct sde_rm_rsvp - Use Case Reservation tagging structure
 * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
 * By using as a tag, rather than lists of pointers to HW blocks used
 * we can avoid some list management since we don't know how many blocks
 * of each type a given use case may require.
 * @list: List head for list of all reservations
 * @seq: Global RSVP sequence number for debugging, especially for
 *       differentiating different allocations for same encoder.
 * @enc_id: Reservations are tracked by Encoder DRM object ID.
 *          CRTCs may be connected to multiple Encoders.
 *          An encoder or connector id identifies the display path.
 * @topology: DRM<->HW topology use case
 */
struct sde_rm_rsvp {
	struct list_head list;
	uint32_t seq;
	uint32_t enc_id;
	enum sde_rm_topology_name topology;
};
70
/**
 * struct sde_rm_hw_blk - hardware block tracking list member
 * @list: List head for list of all hardware blocks tracking items
 * @rsvp: Pointer to use case reservation if reserved by a client
 * @rsvp_nxt: Temporary pointer used during reservation to the incoming
 *            request. Will be swapped into rsvp if proposal is accepted
 * @type: Type of hardware block this structure tracks
 * @type_name: Short human-readable name for the type, used in logging
 * @id: Hardware ID number, within it's own space, ie. LM_X
 * @catalog: Pointer to the hardware catalog entry for this block
 * @hw: Pointer to the hardware register access object for this block
 */
struct sde_rm_hw_blk {
	struct list_head list;
	struct sde_rm_rsvp *rsvp;
	struct sde_rm_rsvp *rsvp_nxt;
	enum sde_hw_blk_type type;
	const char *type_name;
	uint32_t id;
	void *catalog;
	void *hw;
};
92
93static void _sde_rm_print_rsvps(struct sde_rm *rm, const char *msg)
94{
95 struct sde_rm_rsvp *rsvp;
96 struct sde_rm_hw_blk *blk;
Lloyd Atkinsond1709812016-08-31 10:04:02 -040097 enum sde_hw_blk_type type;
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -040098
99 SDE_DEBUG("%s\n", msg);
100
101 list_for_each_entry(rsvp, &rm->rsvps, list)
102 SDE_DEBUG("%s rsvp[s%ue%u] topology %d\n", msg, rsvp->seq,
103 rsvp->enc_id, rsvp->topology);
104
Lloyd Atkinsond1709812016-08-31 10:04:02 -0400105 for (type = 0; type < SDE_HW_BLK_MAX; type++) {
106 list_for_each_entry(blk, &rm->hw_blks[type], list) {
107 if (!blk->rsvp && !blk->rsvp_nxt)
108 continue;
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400109
Lloyd Atkinsond1709812016-08-31 10:04:02 -0400110 SDE_DEBUG("%s rsvp[s%ue%u->s%ue%u] %s %d\n", msg,
111 (blk->rsvp) ? blk->rsvp->seq : 0,
112 (blk->rsvp) ? blk->rsvp->enc_id : 0,
113 (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
114 (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
115 blk->type_name, blk->id);
116 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400117 }
118}
119
120struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm)
121{
122 return rm->hw_mdp;
123}
124
Lloyd Atkinson11f34442016-08-11 11:19:52 -0400125void sde_rm_init_hw_iter(
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400126 struct sde_rm_hw_iter *iter,
127 uint32_t enc_id,
128 enum sde_hw_blk_type type)
129{
130 memset(iter, 0, sizeof(*iter));
131 iter->enc_id = enc_id;
132 iter->type = type;
133}
134
/**
 * sde_rm_get_hw - advance the iterator to the next matching hardware block
 * @rm: resource manager handle
 * @i: iterator primed with sde_rm_init_hw_iter(); holds resume position
 *
 * Walks rm->hw_blks[i->type] starting after i->blk. A block matches when
 * i->enc_id is 0 (match any block of the type) or when the block's current
 * reservation belongs to encoder i->enc_id. On a match, i->blk and i->hw
 * are updated and true is returned. Returns false on invalid arguments,
 * when iteration has already passed the end, or when no further block
 * matches.
 */
bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
	struct list_head *blk_list;

	if (!rm || !i || i->type >= SDE_HW_BLK_MAX) {
		SDE_ERROR("invalid rm\n");
		return false;
	}

	i->hw = NULL;
	blk_list = &rm->hw_blks[i->type];

	/* i->blk aliasing the list head means a prior call exhausted the list */
	if (i->blk && (&i->blk->list == blk_list)) {
		SDE_ERROR("attempt resume iteration past last\n");
		return false;
	}

	/* NULL blk -> start from head; otherwise continue after last result */
	i->blk = list_prepare_entry(i->blk, blk_list, list);

	list_for_each_entry_continue(i->blk, blk_list, list) {
		struct sde_rm_rsvp *rsvp = i->blk->rsvp;

		/* defensive: each per-type list should be homogeneous */
		if (i->blk->type != i->type) {
			SDE_ERROR("found incorrect block type %d on %d list\n",
					i->blk->type, i->type);
			return false;
		}

		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
			i->hw = i->blk->hw;
			SDE_DEBUG("found type %d %s id %d for enc %d\n",
					i->type, i->blk->type_name, i->blk->id,
					i->enc_id);
			return true;
		}
	}

	SDE_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);

	return false;
}
176
177static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
178{
179 switch (type) {
180 case SDE_HW_BLK_LM:
181 sde_hw_lm_destroy(hw);
182 break;
183 case SDE_HW_BLK_DSPP:
184 sde_hw_dspp_destroy(hw);
185 break;
186 case SDE_HW_BLK_CTL:
187 sde_hw_ctl_destroy(hw);
188 break;
189 case SDE_HW_BLK_CDM:
190 sde_hw_cdm_destroy(hw);
191 break;
192 case SDE_HW_BLK_PINGPONG:
193 sde_hw_pingpong_destroy(hw);
194 break;
195 case SDE_HW_BLK_INTF:
196 sde_hw_intf_destroy(hw);
197 break;
198 case SDE_HW_BLK_WB:
199 sde_hw_wb_destroy(hw);
200 break;
201 case SDE_HW_BLK_SSPP:
202 /* SSPPs are not managed by the resource manager */
203 case SDE_HW_BLK_TOP:
204 /* Top is a singleton, not managed in hw_blks list */
205 case SDE_HW_BLK_MAX:
206 default:
207 SDE_ERROR("unsupported block type %d\n", type);
208 break;
209 }
210}
211
212int sde_rm_destroy(struct sde_rm *rm)
213{
214
215 struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
216 struct sde_rm_hw_blk *hw_cur, *hw_nxt;
Lloyd Atkinsond1709812016-08-31 10:04:02 -0400217 enum sde_hw_blk_type type;
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400218
219 if (!rm) {
220 SDE_ERROR("invalid rm\n");
221 return -EINVAL;
222 }
223
224 list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
225 list_del(&rsvp_cur->list);
226 kfree(rsvp_cur);
227 }
228
Lloyd Atkinsond1709812016-08-31 10:04:02 -0400229
230 for (type = 0; type < SDE_HW_BLK_MAX; type++) {
231 list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
232 list) {
233 list_del(&hw_cur->list);
234 _sde_rm_hw_destroy(hw_cur->type, hw_cur->hw);
235 kfree(hw_cur);
236 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400237 }
238
239 sde_hw_mdp_destroy(rm->hw_mdp);
240 rm->hw_mdp = NULL;
241
242 return 0;
243}
244
245static int _sde_rm_hw_blk_create(
246 struct sde_rm *rm,
247 struct sde_mdss_cfg *cat,
248 void *mmio,
249 enum sde_hw_blk_type type,
250 uint32_t id,
251 void *hw_catalog_info)
252{
253 struct sde_rm_hw_blk *blk;
254 struct sde_hw_mdp *hw_mdp;
255 const char *name;
256 void *hw;
257
258 hw_mdp = rm->hw_mdp;
259
260 switch (type) {
261 case SDE_HW_BLK_LM:
262 hw = sde_hw_lm_init(id, mmio, cat);
263 name = "lm";
264 break;
265 case SDE_HW_BLK_DSPP:
266 hw = sde_hw_dspp_init(id, mmio, cat);
267 name = "dspp";
268 break;
269 case SDE_HW_BLK_CTL:
270 hw = sde_hw_ctl_init(id, mmio, cat);
271 name = "ctl";
272 break;
273 case SDE_HW_BLK_CDM:
274 hw = sde_hw_cdm_init(id, mmio, cat, hw_mdp);
275 name = "cdm";
276 break;
277 case SDE_HW_BLK_PINGPONG:
278 hw = sde_hw_pingpong_init(id, mmio, cat);
279 name = "pp";
280 break;
281 case SDE_HW_BLK_INTF:
282 hw = sde_hw_intf_init(id, mmio, cat);
283 name = "intf";
284 break;
285 case SDE_HW_BLK_WB:
286 hw = sde_hw_wb_init(id, mmio, cat, hw_mdp);
287 name = "wb";
288 break;
289 case SDE_HW_BLK_SSPP:
290 /* SSPPs are not managed by the resource manager */
291 case SDE_HW_BLK_TOP:
292 /* Top is a singleton, not managed in hw_blks list */
293 case SDE_HW_BLK_MAX:
294 default:
295 SDE_ERROR("unsupported block type %d\n", type);
296 return -EINVAL;
297 }
298
299 if (IS_ERR_OR_NULL(hw)) {
300 SDE_ERROR("failed hw object creation: type %d, err %ld\n",
301 type, PTR_ERR(hw));
302 return -EFAULT;
303 }
304
305 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
306 if (!blk) {
307 _sde_rm_hw_destroy(type, hw);
308 return -ENOMEM;
309 }
310
311 blk->type_name = name;
312 blk->type = type;
313 blk->id = id;
314 blk->catalog = hw_catalog_info;
315 blk->hw = hw;
Lloyd Atkinsond1709812016-08-31 10:04:02 -0400316 list_add_tail(&blk->list, &rm->hw_blks[type]);
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400317
318 return 0;
319}
320
321int sde_rm_init(struct sde_rm *rm,
322 struct sde_mdss_cfg *cat,
323 void *mmio,
324 struct drm_device *dev)
325{
326 int rc, i;
Lloyd Atkinsond1709812016-08-31 10:04:02 -0400327 enum sde_hw_blk_type type;
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400328
329 if (!rm || !cat || !mmio || !dev) {
330 SDE_ERROR("invalid kms\n");
331 return -EINVAL;
332 }
333
334 /* Clear, setup lists */
335 memset(rm, 0, sizeof(*rm));
336 INIT_LIST_HEAD(&rm->rsvps);
Lloyd Atkinsond1709812016-08-31 10:04:02 -0400337 for (type = 0; type < SDE_HW_BLK_MAX; type++)
338 INIT_LIST_HEAD(&rm->hw_blks[type]);
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400339
340 /* Some of the sub-blocks require an mdptop to be created */
341 rm->hw_mdp = sde_hw_mdptop_init(MDP_TOP, mmio, cat);
342 if (IS_ERR_OR_NULL(rm->hw_mdp)) {
343 rc = PTR_ERR(rm->hw_mdp);
344 rm->hw_mdp = NULL;
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700345 SDE_ERROR("failed: mdp hw not available\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400346 goto fail;
347 }
348
349 /* Interrogate HW catalog and create tracking items for hw blocks */
350 for (i = 0; i < cat->mixer_count; i++) {
351 struct sde_lm_cfg *lm = &cat->mixer[i];
352
353 if (lm->pingpong == PINGPONG_MAX) {
354 SDE_DEBUG("skip mixer %d without pingpong\n", lm->id);
355 continue;
356 }
357
358 rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_LM,
359 cat->mixer[i].id, &cat->mixer[i]);
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700360 if (rc) {
361 SDE_ERROR("failed: lm hw not available\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400362 goto fail;
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700363 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400364
365 if (!rm->lm_max_width) {
366 rm->lm_max_width = lm->sblk->maxwidth;
367 } else if (rm->lm_max_width != lm->sblk->maxwidth) {
368 /*
369 * Don't expect to have hw where lm max widths differ.
370 * If found, take the min.
371 */
372 SDE_ERROR("unsupported: lm maxwidth differs\n");
373 if (rm->lm_max_width > lm->sblk->maxwidth)
374 rm->lm_max_width = lm->sblk->maxwidth;
375 }
376 }
377
378 for (i = 0; i < cat->dspp_count; i++) {
379 rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSPP,
380 cat->dspp[i].id, &cat->dspp[i]);
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700381 if (rc) {
382 SDE_ERROR("failed: dspp hw not available\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400383 goto fail;
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700384 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400385 }
386
387 for (i = 0; i < cat->pingpong_count; i++) {
388 rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_PINGPONG,
389 cat->pingpong[i].id, &cat->pingpong[i]);
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700390 if (rc) {
391 SDE_ERROR("failed: pp hw not available\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400392 goto fail;
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700393 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400394 }
395
396 for (i = 0; i < cat->intf_count; i++) {
397 if (cat->intf[i].type == INTF_NONE) {
398 SDE_DEBUG("skip intf %d with type none\n", i);
399 continue;
400 }
401
402 rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_INTF,
403 cat->intf[i].id, &cat->intf[i]);
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700404 if (rc) {
405 SDE_ERROR("failed: intf hw not available\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400406 goto fail;
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700407 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400408 }
409
410 for (i = 0; i < cat->wb_count; i++) {
411 rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_WB,
412 cat->wb[i].id, &cat->wb[i]);
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700413 if (rc) {
414 SDE_ERROR("failed: wb hw not available\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400415 goto fail;
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700416 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400417 }
418
419 for (i = 0; i < cat->ctl_count; i++) {
420 rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CTL,
421 cat->ctl[i].id, &cat->ctl[i]);
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700422 if (rc) {
423 SDE_ERROR("failed: ctl hw not available\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400424 goto fail;
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700425 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400426 }
427
428 for (i = 0; i < cat->cdm_count; i++) {
429 rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CDM,
430 cat->cdm[i].id, &cat->cdm[i]);
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700431 if (rc) {
432 SDE_ERROR("failed: cdm hw not available\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400433 goto fail;
Dhaval Patel3e8b8c92016-10-05 18:01:06 -0700434 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400435 }
436
437 return 0;
438
439fail:
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400440 sde_rm_destroy(rm);
441
442 return rc;
443}
444
445/**
446 * _sde_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
447 * proposed use case requirements, incl. hardwired dependent blocks like
448 * pingpong, and dspp.
449 * @rm: sde resource manager handle
450 * @rsvp: reservation currently being created
451 * @reqs: proposed use case requirements
452 * @lm: proposed layer mixer, function checks if lm, and all other hardwired
453 * blocks connected to the lm (pp, dspp) are available and appropriate
454 * @dspp: output parameter, dspp block attached to the layer mixer.
455 * NULL if dspp was not available, or not matching requirements.
456 * @pp: output parameter, pingpong block attached to the layer mixer.
457 * NULL if dspp was not available, or not matching requirements.
458 * @primary_lm: if non-null, this function check if lm is compatible primary_lm
459 * as well as satisfying all other requirements
460 * @Return: true if lm matches all requirements, false otherwise
461 */
462static bool _sde_rm_check_lm_and_get_connected_blks(
463 struct sde_rm *rm,
464 struct sde_rm_rsvp *rsvp,
465 struct sde_rm_requirements *reqs,
466 struct sde_rm_hw_blk *lm,
467 struct sde_rm_hw_blk **dspp,
468 struct sde_rm_hw_blk **pp,
469 struct sde_rm_hw_blk *primary_lm)
470{
471 struct sde_lm_cfg *lm_cfg = (struct sde_lm_cfg *)lm->catalog;
472 struct sde_pingpong_cfg *pp_cfg;
473 struct sde_rm_hw_iter iter;
474
475 *dspp = NULL;
476 *pp = NULL;
477
478 SDE_DEBUG("check lm %d: dspp %d pp %d\n", lm_cfg->id, lm_cfg->dspp,
479 lm_cfg->pingpong);
480
481 /* Check if this layer mixer is a peer of the proposed primary LM */
482 if (primary_lm) {
483 struct sde_lm_cfg *prim_lm_cfg =
484 (struct sde_lm_cfg *)primary_lm->catalog;
485
486 if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
487 SDE_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
488 prim_lm_cfg->id);
489 return false;
490 }
491 }
492
493 /* Matches user requirements? */
494 if ((RM_RQ_DSPP(reqs) && lm_cfg->dspp == DSPP_MAX) ||
495 (!RM_RQ_DSPP(reqs) && lm_cfg->dspp != DSPP_MAX)) {
496 SDE_DEBUG("dspp req mismatch lm %d reqdspp %d, lm->dspp %d\n",
497 lm_cfg->id, (bool)(RM_RQ_DSPP(reqs)),
498 lm_cfg->dspp);
499 return false;
500 }
501
502 /* Already reserved? */
503 if (RESERVED_BY_OTHER(lm, rsvp)) {
504 SDE_DEBUG("lm %d already reserved\n", lm_cfg->id);
505 return false;
506 }
507
508 if (lm_cfg->dspp != DSPP_MAX) {
509 sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
510 while (sde_rm_get_hw(rm, &iter)) {
511 if (iter.blk->id == lm_cfg->dspp) {
512 *dspp = iter.blk;
513 break;
514 }
515 }
516
517 if (!*dspp) {
518 SDE_DEBUG("lm %d failed to retrieve dspp %d\n", lm->id,
519 lm_cfg->dspp);
520 return false;
521 }
522
523 if (RESERVED_BY_OTHER(*dspp, rsvp)) {
524 SDE_DEBUG("lm %d dspp %d already reserved\n",
525 lm->id, (*dspp)->id);
526 return false;
527 }
528 }
529
530 sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
531 while (sde_rm_get_hw(rm, &iter)) {
532 if (iter.blk->id == lm_cfg->pingpong) {
533 *pp = iter.blk;
534 break;
535 }
536 }
537
538 if (!*pp) {
539 SDE_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
540 return false;
541 }
542
543 if (RESERVED_BY_OTHER(*pp, rsvp)) {
544 SDE_DEBUG("lm %d pp %d already reserved\n", lm->id,
545 (*pp)->id);
546 *dspp = NULL;
547 return false;
548 }
549
550 pp_cfg = (struct sde_pingpong_cfg *)((*pp)->catalog);
551 if ((reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
552 !(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
553 SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
554 *dspp = NULL;
555 return false;
556 }
557
558 return true;
559}
560
/**
 * _sde_rm_reserve_lms - find and tag reqs->num_lm compatible layer mixers
 * @rm: resource manager handle
 * @rsvp: reservation being built; written into each chosen block's rsvp_nxt
 * @reqs: use case requirements (mixer count, dspp need, topology)
 *
 * Tries each free mixer as the "primary", then searches its hardware peers
 * (lm_pair_mask) for the remaining mixers. On success, tags the chosen LMs,
 * their attached pingpongs, and (when required) dspps via rsvp_nxt.
 *
 * Returns 0 on success, -ENAVAIL when no suitable combination exists,
 * -EINVAL when reqs->num_lm is zero.
 */
static int _sde_rm_reserve_lms(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs)

{
	struct sde_rm_hw_blk *lm[MAX_BLOCKS];
	struct sde_rm_hw_blk *dspp[MAX_BLOCKS];
	struct sde_rm_hw_blk *pp[MAX_BLOCKS];
	struct sde_rm_hw_iter iter_i, iter_j;
	int lm_count = 0;
	int i;

	if (!reqs->num_lm) {
		SDE_ERROR("invalid number of lm: %d\n", reqs->num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
	while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
		/* discard any partial candidate set from a failed primary */
		memset(&lm, 0, sizeof(lm));
		memset(&dspp, 0, sizeof(dspp));
		memset(&pp, 0, sizeof(pp));

		lm_count = 0;
		lm[lm_count] = iter_i.blk;

		if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp, reqs,
				lm[lm_count], &dspp[lm_count], &pp[lm_count],
				NULL))
			continue;

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);

		while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
			if (iter_i.blk == iter_j.blk)
				continue;

			if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp,
					reqs, iter_j.blk, &dspp[lm_count],
					&pp[lm_count], iter_i.blk))
				continue;

			lm[lm_count] = iter_j.blk;
			++lm_count;
		}
	}

	if (lm_count != reqs->num_lm) {
		SDE_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	/* Full set found: propose the reservation by tagging rsvp_nxt */
	for (i = 0; i < ARRAY_SIZE(lm); i++) {
		if (!lm[i])
			break;

		lm[i]->rsvp_nxt = rsvp;
		pp[i]->rsvp_nxt = rsvp;
		MSM_EVTMSG(rm->dev, lm[i]->type_name, rsvp->enc_id, lm[i]->id);
		MSM_EVTMSG(rm->dev, pp[i]->type_name, rsvp->enc_id, pp[i]->id);
		if (dspp[i]) {
			dspp[i]->rsvp_nxt = rsvp;
			MSM_EVTMSG(rm->dev, dspp[i]->type_name, rsvp->enc_id,
					dspp[i]->id);
		}
	}

	return 0;
}
635
636static int _sde_rm_reserve_ctls(
637 struct sde_rm *rm,
638 struct sde_rm_rsvp *rsvp,
639 struct sde_rm_requirements *reqs)
640{
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400641 struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400642 struct sde_rm_hw_iter iter;
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400643 int i = 0;
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400644
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400645 memset(&ctls, 0, sizeof(ctls));
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400646
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400647 sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400648 while (sde_rm_get_hw(rm, &iter)) {
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400649 unsigned long caps;
650 bool has_split_display, has_ppsplit;
651
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400652 if (RESERVED_BY_OTHER(iter.blk, rsvp))
653 continue;
654
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400655 caps = ((struct sde_ctl_cfg *)iter.blk->catalog)->features;
656 has_split_display = BIT(SDE_CTL_SPLIT_DISPLAY) & caps;
657 has_ppsplit = BIT(SDE_CTL_PINGPONG_SPLIT) & caps;
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400658
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400659 SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
660
661 if (reqs->needs_split_display != has_split_display)
662 continue;
663
664 if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
665 continue;
666
667 ctls[i] = iter.blk;
668 SDE_DEBUG("ctl %d match\n", iter.blk->id);
669
670 if (++i == reqs->num_ctl)
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400671 break;
672 }
673
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400674 if (i != reqs->num_ctl)
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400675 return -ENAVAIL;
676
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400677 for (i = 0; i < ARRAY_SIZE(ctls) && i < reqs->num_ctl; i++) {
678 ctls[i]->rsvp_nxt = rsvp;
679 MSM_EVTMSG(rm->dev, ctls[i]->type_name, rsvp->enc_id,
680 ctls[i]->id);
681 }
682
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400683 return 0;
684}
685
686static int _sde_rm_reserve_cdm(
687 struct sde_rm *rm,
688 struct sde_rm_rsvp *rsvp,
689 uint32_t id,
690 enum sde_hw_blk_type type)
691{
692 struct sde_rm_hw_iter iter;
693 struct sde_cdm_cfg *cdm;
694
695 sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
696 while (sde_rm_get_hw(rm, &iter)) {
697 bool match = false;
698
699 if (RESERVED_BY_OTHER(iter.blk, rsvp))
700 continue;
701
702 cdm = (struct sde_cdm_cfg *)(iter.blk->catalog);
703
704 if (type == SDE_HW_BLK_INTF && id != INTF_MAX)
705 match = test_bit(id, &cdm->intf_connect);
706 else if (type == SDE_HW_BLK_WB && id != WB_MAX)
707 match = test_bit(id, &cdm->wb_connect);
708
709 SDE_DEBUG("type %d id %d, cdm intfs %lu wbs %lu match %d\n",
710 type, id, cdm->intf_connect, cdm->wb_connect,
711 match);
712
713 if (!match)
714 continue;
715
716 iter.blk->rsvp_nxt = rsvp;
717 MSM_EVTMSG(rm->dev, iter.blk->type_name, rsvp->enc_id,
718 iter.blk->id);
719 break;
720 }
721
722 if (!iter.hw) {
723 SDE_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
724 return -ENAVAIL;
725 }
726
727 return 0;
728}
729
730static int _sde_rm_reserve_intf_or_wb(
731 struct sde_rm *rm,
732 struct sde_rm_rsvp *rsvp,
733 uint32_t id,
734 enum sde_hw_blk_type type,
735 bool needs_cdm)
736{
737 struct sde_rm_hw_iter iter;
Lloyd Atkinson04d301c2016-08-31 11:39:45 -0400738 int ret = 0;
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400739
740 /* Find the block entry in the rm, and note the reservation */
741 sde_rm_init_hw_iter(&iter, 0, type);
742 while (sde_rm_get_hw(rm, &iter)) {
743 if (iter.blk->id != id)
744 continue;
745
746 if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
747 SDE_ERROR("type %d id %d already reserved\n", type, id);
748 return -ENAVAIL;
749 }
750
751 iter.blk->rsvp_nxt = rsvp;
752 MSM_EVTMSG(rm->dev, iter.blk->type_name,
753 rsvp->enc_id, iter.blk->id);
754 break;
755 }
756
757 /* Shouldn't happen since wbs / intfs are fixed at probe */
758 if (!iter.hw) {
759 SDE_ERROR("couldn't find type %d id %d\n", type, id);
760 return -EINVAL;
761 }
762
763 /* Expected only one intf or wb will request cdm */
764 if (needs_cdm)
765 ret = _sde_rm_reserve_cdm(rm, rsvp, id, type);
766
767 return ret;
768}
769
770static int _sde_rm_reserve_intf_related_hw(
771 struct sde_rm *rm,
772 struct sde_rm_rsvp *rsvp,
773 struct sde_encoder_hw_resources *hw_res)
774{
775 int i, ret = 0;
776 u32 id;
777
778 for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
779 if (hw_res->intfs[i] == INTF_MODE_NONE)
780 continue;
781 id = i + INTF_0;
782 ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
783 SDE_HW_BLK_INTF, hw_res->needs_cdm);
784 if (ret)
785 return ret;
786 }
787
788 for (i = 0; i < ARRAY_SIZE(hw_res->wbs); i++) {
789 if (hw_res->wbs[i] == INTF_MODE_NONE)
790 continue;
791 id = i + WB_0;
792 ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
793 SDE_HW_BLK_WB, hw_res->needs_cdm);
794 if (ret)
795 return ret;
796 }
797
798 return ret;
799}
800
801static int _sde_rm_make_next_rsvp(
802 struct sde_rm *rm,
803 struct drm_encoder *enc,
804 struct drm_crtc_state *crtc_state,
805 struct drm_connector_state *conn_state,
806 struct sde_rm_rsvp *rsvp,
807 struct sde_rm_requirements *reqs)
808{
809 int ret;
810
811 /* Create reservation info, tag reserved blocks with it as we go */
812 rsvp->seq = ++rm->rsvp_next_seq;
813 rsvp->enc_id = enc->base.id;
814 rsvp->topology = reqs->top_name;
815 list_add_tail(&rsvp->list, &rm->rsvps);
816
817 /*
818 * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
819 * Do assignment preferring to give away low-resource mixers first:
820 * - Check mixers without DSPPs
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400821 * - Only then allow to grab from mixers with DSPP capability
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400822 */
823 ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
824 if (ret && !RM_RQ_DSPP(reqs)) {
825 reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
826 ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
827 }
828
829 if (ret) {
830 SDE_ERROR("unable to find appropriate mixers\n");
831 return ret;
832 }
833
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400834 /*
835 * Do assignment preferring to give away low-resource CTLs first:
836 * - Check mixers without Split Display
837 * - Only then allow to grab from CTLs with split display capability
838 */
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400839 _sde_rm_reserve_ctls(rm, rsvp, reqs);
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400840 if (ret && !reqs->needs_split_display) {
841 reqs->needs_split_display = true;
842 _sde_rm_reserve_ctls(rm, rsvp, reqs);
843 }
844 if (ret) {
845 SDE_ERROR("unable to find appropriate CTL\n");
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400846 return ret;
Lloyd Atkinsoncf8996b2016-08-23 09:34:13 -0400847 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -0400848
849 /* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
850 ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
851 if (ret)
852 return ret;
853
854 return ret;
855}
856
/**
 * _sde_rm_populate_requirements - translate DRM state into rm requirements
 * @rm: resource manager handle
 * @enc: DRM encoder being reserved for
 * @crtc_state: proposed crtc state; supplies the display mode
 * @conn_state: proposed connector state; supplies topology-control property
 * @reqs: output requirement bundle, fully overwritten
 *
 * Derives the topology name, mixer/ctl counts, and split-display need from
 * the number of horizontal tiles, the mode width vs. mixer max width, and
 * the user's topology-control flags. Returns 0 or -EINVAL for unsupported
 * mixer counts.
 */
static int _sde_rm_populate_requirements(
		struct sde_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state,
		struct sde_rm_requirements *reqs)
{
	const struct drm_display_mode *mode = &crtc_state->mode;

	/**
	 * DRM<->HW Topologies
	 *
	 * Name: SINGLEPIPE
	 * Description: 1 LM, 1 PP, 1 INTF
	 * Condition: 1 DRM Encoder w/ 1 Display Tiles (Default)
	 *
	 * Name: DUALPIPE
	 * Description: 2 LM, 2 PP, 2 INTF
	 * Condition: 1 DRM Encoder w/ 2 Display Tiles
	 *
	 * Name: PPSPLIT
	 * Description: 1 LM, 1 PP + 1 Slave PP, 2 INTF
	 * Condition:
	 *	1 DRM Encoder w/ 2 Display Tiles
	 *	topology_control & SDE_TOPREQ_PPSPLIT
	 *
	 * Name: DUALPIPEMERGE
	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
	 * Condition:
	 *	1 DRM Encoder w/ 1 Display Tiles
	 *	display_info.max_width >= layer_mixer.max_width
	 *
	 * Name: DUALPIPEMERGE
	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
	 * Condition:
	 *	1 DRM Encoder w/ 1 Display Tiles
	 *	display_info.max_width <= layer_mixer.max_width
	 *	topology_control & SDE_TOPREQ_FORCE_TILING
	 */

	memset(reqs, 0, sizeof(*reqs));

	reqs->top_ctrl = sde_connector_get_property(conn_state,
			CONNECTOR_PROP_TOPOLOGY_CONTROL);
	sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);

	/* Base assumption is LMs = h_tiles, conditions below may override */
	reqs->num_lm = reqs->hw_res.display_num_of_h_tiles;

	if (reqs->num_lm == 2) {
		if (RM_RQ_PPSPLIT(reqs)) {
			/* user requests serving dual display with 1 lm */
			reqs->top_name = SDE_RM_TOPOLOGY_PPSPLIT;
			reqs->num_lm = 1;
			reqs->num_ctl = 1;
			reqs->needs_split_display = true;
		} else {
			/* dual display, serve with 2 lms */
			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPE;
			reqs->num_ctl = 2;
			reqs->needs_split_display = true;
		}

	} else if (reqs->num_lm == 1) {
		if (mode->hdisplay > rm->lm_max_width) {
			/* wide display, must split across 2 lm and merge */
			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
			reqs->num_lm = 2;
			reqs->num_ctl = 1;
			reqs->needs_split_display = false;
		} else if (RM_RQ_FORCE_TILING(reqs)) {
			/* thin display, but user requests 2 lm and merge */
			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
			reqs->num_lm = 2;
			reqs->num_ctl = 1;
			reqs->needs_split_display = false;
		} else {
			/* thin display, serve with only 1 lm */
			reqs->top_name = SDE_RM_TOPOLOGY_SINGLEPIPE;
			reqs->num_ctl = 1;
			reqs->needs_split_display = false;
		}

	} else {
		/* Currently no configurations with # LM > 2 */
		SDE_ERROR("unsupported # of mixers %d\n", reqs->num_lm);
		return -EINVAL;
	}

	SDE_DEBUG("top_ctrl 0x%llX num_h_tiles %d\n", reqs->top_ctrl,
			reqs->hw_res.display_num_of_h_tiles);
	SDE_DEBUG("display_max_width %d rm->lm_max_width %d\n",
			mode->hdisplay, rm->lm_max_width);
	SDE_DEBUG("num_lm %d num_ctl %d topology_name %d\n", reqs->num_lm,
			reqs->num_ctl, reqs->top_name);
	MSM_EVT(rm->dev, mode->hdisplay, rm->lm_max_width);
	MSM_EVT(rm->dev, reqs->num_lm, reqs->top_ctrl);
	MSM_EVT(rm->dev, reqs->top_name, 0);

	return 0;
}
958
959static struct sde_rm_rsvp *_sde_rm_get_rsvp(
960 struct sde_rm *rm,
961 struct drm_encoder *enc)
962{
963 struct sde_rm_rsvp *i;
964
965 if (!rm || !enc) {
966 SDE_ERROR("invalid params\n");
967 return NULL;
968 }
969
970 if (list_empty(&rm->rsvps))
971 return NULL;
972
973 list_for_each_entry(i, &rm->rsvps, list)
974 if (i->enc_id == enc->base.id)
975 return i;
976
977 return NULL;
978}
979
980static struct drm_connector *_sde_rm_get_connector(
981 struct drm_encoder *enc)
982{
983 struct drm_connector *conn = NULL;
984 struct list_head *connector_list =
985 &enc->dev->mode_config.connector_list;
986
987 list_for_each_entry(conn, connector_list, head)
988 if (conn->encoder == enc)
989 return conn;
990
991 return NULL;
992}
993
994/**
995 * _sde_rm_release_rsvp - release resources and release a reservation
996 * @rm: KMS handle
997 * @rsvp: RSVP pointer to release and release resources for
998 */
999void _sde_rm_release_rsvp(
1000 struct sde_rm *rm,
Lloyd Atkinson9cb3f9e2016-09-22 12:55:39 -04001001 struct sde_rm_rsvp *rsvp,
1002 struct drm_connector *conn)
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -04001003{
1004 struct sde_rm_rsvp *rsvp_c, *rsvp_n;
1005 struct sde_rm_hw_blk *blk;
Lloyd Atkinsond1709812016-08-31 10:04:02 -04001006 enum sde_hw_blk_type type;
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -04001007
1008 if (!rsvp)
1009 return;
1010
1011 SDE_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
1012
1013 list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
1014 if (rsvp == rsvp_c) {
1015 list_del(&rsvp_c->list);
1016 break;
1017 }
1018 }
1019
Lloyd Atkinsond1709812016-08-31 10:04:02 -04001020 for (type = 0; type < SDE_HW_BLK_MAX; type++) {
1021 list_for_each_entry(blk, &rm->hw_blks[type], list) {
1022 if (blk->rsvp == rsvp) {
1023 blk->rsvp = NULL;
1024 SDE_DEBUG("rel rsvp %d enc %d %s %d\n",
1025 rsvp->seq, rsvp->enc_id,
1026 blk->type_name, blk->id);
1027 }
1028 if (blk->rsvp_nxt == rsvp) {
1029 blk->rsvp_nxt = NULL;
1030 SDE_DEBUG("rel rsvp_nxt %d enc %d %s %d\n",
1031 rsvp->seq, rsvp->enc_id,
1032 blk->type_name, blk->id);
1033 }
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -04001034 }
1035 }
1036
1037 kfree(rsvp);
Lloyd Atkinson9cb3f9e2016-09-22 12:55:39 -04001038
1039 (void) msm_property_set_property(
1040 sde_connector_get_propinfo(conn),
1041 sde_connector_get_property_values(conn->state),
1042 CONNECTOR_PROP_TOPOLOGY_NAME,
1043 SDE_RM_TOPOLOGY_UNKNOWN);
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -04001044}
1045
1046void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
1047{
1048 struct sde_rm_rsvp *rsvp;
1049 struct drm_connector *conn;
1050 uint64_t top_ctrl;
1051
1052 if (!rm || !enc) {
1053 SDE_ERROR("invalid params\n");
1054 return;
1055 }
1056
1057 rsvp = _sde_rm_get_rsvp(rm, enc);
1058 if (!rsvp) {
1059 SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
1060 return;
1061 }
1062
1063 conn = _sde_rm_get_connector(enc);
1064 if (!conn) {
1065 SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
1066 return;
1067 }
1068
1069 top_ctrl = sde_connector_get_property(conn->state,
1070 CONNECTOR_PROP_TOPOLOGY_CONTROL);
1071
1072 if (top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK)) {
1073 SDE_DEBUG("rsvp[s%de%d] not releasing locked resources\n",
1074 rsvp->seq, rsvp->enc_id);
1075 } else {
1076 SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
1077 rsvp->enc_id);
Lloyd Atkinson9cb3f9e2016-09-22 12:55:39 -04001078 _sde_rm_release_rsvp(rm, rsvp, conn);
Lloyd Atkinsonccc11c52016-08-10 18:54:30 -04001079 }
1080}
1081
/**
 * _sde_rm_commit_rsvp - promote a proposed reservation to be the active one
 * @rm: KMS handle
 * @rsvp: reservation previously staged into the blocks' rsvp_nxt pointers
 * @conn_state: connector state whose TOPOLOGY_NAME property is updated
 *
 * Return: 0 on success; error code from the property update otherwise, in
 * which case the proposed reservation has been released.
 */
static int _sde_rm_commit_rsvp(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct drm_connector_state *conn_state)
{
	struct sde_rm_hw_blk *blk;
	enum sde_hw_blk_type type;
	int ret = 0;

	/* advertise the committed topology to userspace via the connector */
	ret = msm_property_set_property(
			sde_connector_get_propinfo(conn_state->connector),
			sde_connector_get_property_values(conn_state),
			CONNECTOR_PROP_TOPOLOGY_NAME,
			rsvp->topology);
	if (ret)
		/*
		 * Drop the proposal; this also clears the rsvp_nxt pointers
		 * referencing it, so the promotion loop below won't pick up
		 * the freed reservation.
		 */
		_sde_rm_release_rsvp(rm, rsvp, conn_state->connector);

	/* Swap next rsvp to be the active */
	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->rsvp_nxt) {
				blk->rsvp = blk->rsvp_nxt;
				blk->rsvp_nxt = NULL;
			}
		}
	}

	if (!ret) {
		SDE_DEBUG("rsrv enc %d topology %d\n", rsvp->enc_id,
				rsvp->topology);
		MSM_EVT(rm->dev, rsvp->enc_id, rsvp->topology);
	}

	return ret;
}
1117
1118int sde_rm_check_property_topctl(uint64_t val)
1119{
1120 if ((BIT(SDE_RM_TOPCTL_FORCE_TILING) & val) &&
1121 (BIT(SDE_RM_TOPCTL_PPSPLIT) & val)) {
1122 SDE_ERROR("ppsplit & force_tiling are incompatible\n");
1123 return -EINVAL;
1124 }
1125
1126 return 0;
1127}
1128
/**
 * sde_rm_reserve - reserve hardware blocks for an encoder/crtc/connector set
 * @rm: KMS handle
 * @enc: encoder the reservation is made on behalf of
 * @crtc_state: proposed crtc state; used to detect modeset vs page flip
 * @conn_state: proposed connector state; source of topology properties
 * @test_only: true during atomic_check; test reservations are normally
 *             discarded unless the user requested RESERVE_LOCK
 *
 * Return: 0 on success (including the page-flip no-op case), -EINVAL on bad
 * arguments or unreservable requirements, -ENOMEM on allocation failure.
 */
int sde_rm_reserve(
		struct sde_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state,
		bool test_only)
{
	struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
	struct sde_rm_requirements reqs;
	int ret;

	if (!rm || !enc || !crtc_state || !conn_state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
			conn_state->connector->base.id, enc->base.id,
			crtc_state->crtc->base.id, test_only);
	MSM_EVT(rm->dev, enc->base.id, conn_state->connector->base.id);

	_sde_rm_print_rsvps(rm, "begin_reserve");

	/* derive topology/lm/ctl needs from mode and connector properties */
	ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
			conn_state, &reqs);
	if (ret) {
		SDE_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	/*
	 * We only support one active reservation per-hw-block. But to implement
	 * transactional semantics for test-only, and for allowing failure while
	 * modifying your existing reservation, over the course of this
	 * function we can have two reservations:
	 * Current: Existing reservation
	 * Next: Proposed reservation. The proposed reservation may fail, or may
	 * be discarded if in test-only mode.
	 * If reservation is successful, and we're not in test-only, then we
	 * replace the current with the next.
	 */
	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
	if (!rsvp_nxt)
		return -ENOMEM;

	rsvp_cur = _sde_rm_get_rsvp(rm, enc);

	/*
	 * User can request that we clear out any reservation during the
	 * atomic_check phase by using this CLEAR bit
	 */
	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
		SDE_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
				rsvp_cur->seq, rsvp_cur->enc_id);
		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
		rsvp_cur = NULL;
		_sde_rm_print_rsvps(rm, "post_clear");
	}

	/* Check the proposed reservation, store it in hw's "next" field */
	ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
			rsvp_nxt, &reqs);

	_sde_rm_print_rsvps(rm, "new_rsvp_next");

	if (ret) {
		SDE_ERROR("failed to reserve hw resources: %d\n", ret);
		_sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
		/*
		 * Normally, if test_only, test the reservation and then undo
		 * However, if the user requests LOCK, then keep the reservation
		 * made during the atomic_check phase.
		 */
		SDE_DEBUG("test_only: discard test rsvp[s%de%d]\n",
				rsvp_nxt->seq, rsvp_nxt->enc_id);
		_sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
	} else {
		if (test_only && RM_RQ_LOCK(&reqs))
			SDE_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
					rsvp_nxt->seq, rsvp_nxt->enc_id);

		/* drop the old reservation before promoting the new one */
		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);

		ret = _sde_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
	}

	_sde_rm_print_rsvps(rm, "final");

	return ret;
}