/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */
6
7#include <linux/clk.h>
8#include <linux/component.h>
9#include <linux/firmware.h>
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/reset.h>
13
14#include <drm/drmP.h>
15
16#include "sti_drm_plane.h"
17#include "sti_hqvdp.h"
18#include "sti_hqvdp_lut.h"
19#include "sti_layer.h"
20#include "sti_vtg.h"
21
22/* Firmware name */
23#define HQVDP_FMW_NAME "hqvdp-stih407.bin"
24
25/* Regs address */
26#define HQVDP_DMEM 0x00000000 /* 0x00000000 */
27#define HQVDP_PMEM 0x00040000 /* 0x00040000 */
28#define HQVDP_RD_PLUG 0x000E0000 /* 0x000E0000 */
29#define HQVDP_RD_PLUG_CONTROL (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
30#define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
31#define HQVDP_RD_PLUG_MIN_OPC (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
32#define HQVDP_RD_PLUG_MAX_OPC (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
33#define HQVDP_RD_PLUG_MAX_CHK (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
34#define HQVDP_RD_PLUG_MAX_MSG (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
35#define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
36#define HQVDP_WR_PLUG 0x000E2000 /* 0x000E2000 */
37#define HQVDP_WR_PLUG_CONTROL (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
38#define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
39#define HQVDP_WR_PLUG_MIN_OPC (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
40#define HQVDP_WR_PLUG_MAX_OPC (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
41#define HQVDP_WR_PLUG_MAX_CHK (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
42#define HQVDP_WR_PLUG_MAX_MSG (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
43#define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
44#define HQVDP_MBX 0x000E4000 /* 0x000E4000 */
45#define HQVDP_MBX_IRQ_TO_XP70 (HQVDP_MBX + 0x0000) /* 0x000E4000 */
46#define HQVDP_MBX_INFO_HOST (HQVDP_MBX + 0x0004) /* 0x000E4004 */
47#define HQVDP_MBX_IRQ_TO_HOST (HQVDP_MBX + 0x0008) /* 0x000E4008 */
48#define HQVDP_MBX_INFO_XP70 (HQVDP_MBX + 0x000C) /* 0x000E400C */
49#define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010) /* 0x000E4010 */
50#define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014) /* 0x000E4014 */
51#define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018) /* 0x000E4018 */
52#define HQVDP_MBX_GP_STATUS (HQVDP_MBX + 0x001C) /* 0x000E401C */
53#define HQVDP_MBX_NEXT_CMD (HQVDP_MBX + 0x0020) /* 0x000E4020 */
54#define HQVDP_MBX_CURRENT_CMD (HQVDP_MBX + 0x0024) /* 0x000E4024 */
55#define HQVDP_MBX_SOFT_VSYNC (HQVDP_MBX + 0x0028) /* 0x000E4028 */
56
57/* Plugs config */
58#define PLUG_CONTROL_ENABLE 0x00000001
59#define PLUG_PAGE_SIZE_256 0x00000002
60#define PLUG_MIN_OPC_8 0x00000003
61#define PLUG_MAX_OPC_64 0x00000006
62#define PLUG_MAX_CHK_2X 0x00000001
63#define PLUG_MAX_MSG_1X 0x00000000
64#define PLUG_MIN_SPACE_1 0x00000000
65
66/* SW reset CTRL */
67#define SW_RESET_CTRL_FULL BIT(0)
68#define SW_RESET_CTRL_CORE BIT(1)
69
70/* Startup ctrl 1 */
71#define STARTUP_CTRL1_RST_DONE BIT(0)
72#define STARTUP_CTRL1_AUTH_IDLE BIT(2)
73
74/* Startup ctrl 2 */
75#define STARTUP_CTRL2_FETCH_EN BIT(1)
76
77/* Info xP70 */
78#define INFO_XP70_FW_READY BIT(15)
79#define INFO_XP70_FW_PROCESSING BIT(14)
80#define INFO_XP70_FW_INITQUEUES BIT(13)
81
82/* SOFT_VSYNC */
83#define SOFT_VSYNC_HW 0x00000000
84#define SOFT_VSYNC_SW_CMD 0x00000001
85#define SOFT_VSYNC_SW_CTRL_IRQ 0x00000003
86
87/* Reset & boot poll config */
88#define POLL_MAX_ATTEMPT 50
89#define POLL_DELAY_MS 20
90
91#define SCALE_FACTOR 8192
92#define SCALE_MAX_FOR_LEG_LUT_F 4096
93#define SCALE_MAX_FOR_LEG_LUT_E 4915
94#define SCALE_MAX_FOR_LEG_LUT_D 6654
95#define SCALE_MAX_FOR_LEG_LUT_C 8192
96
/* Scaling direction handled by the HVSRC (horizontal/vertical scaler) */
enum sti_hvsrc_orient {
	HVSRC_HORI,	/* horizontal scaling */
	HVSRC_VERT	/* vertical scaling */
};
101
102/* Command structures */
/*
 * TOP section of a firmware command: input/output buffer addresses, pitches
 * and viewport geometry. struct sti_hqvdp_cmd (which embeds this) is posted
 * to the xP70 firmware through the mailbox, so the field order and widths
 * form the firmware ABI — do not reorder or repack.
 */
struct sti_hqvdp_top {
	u32 config;			/* TOP_CONFIG_xxx: interlaced/progressive */
	u32 mem_format;			/* memory format, see TOP_MEM_FORMAT_DFLT */
	/* Physical addresses of the input planes; the *_enh / *_right variants
	 * (enhanced / 3D right view) are left at 0 by this driver */
	u32 current_luma;
	u32 current_enh_luma;
	u32 current_right_luma;
	u32 current_enh_right_luma;
	u32 current_chroma;
	u32 current_enh_chroma;
	u32 current_right_chroma;
	u32 current_enh_right_chroma;
	u32 output_luma;
	u32 output_chroma;
	/* Source and processed pitches (doubled for interlaced input) */
	u32 luma_src_pitch;
	u32 luma_enh_src_pitch;
	u32 luma_right_src_pitch;
	u32 luma_enh_right_src_pitch;
	u32 chroma_src_pitch;
	u32 chroma_enh_src_pitch;
	u32 chroma_right_src_pitch;
	u32 chroma_enh_right_src_pitch;
	u32 luma_processed_pitch;
	u32 chroma_processed_pitch;
	u32 input_frame_size;		/* height << 16 | width */
	u32 input_viewport_ori;		/* y << 16 | x */
	u32 input_viewport_ori_right;
	u32 input_viewport_size;	/* height << 16 | width */
	u32 left_view_border_width;
	u32 right_view_border_width;
	u32 left_view_3d_offset_width;
	u32 right_view_3d_offset_width;
	u32 side_stripe_color;
	u32 crc_reset_ctrl;
};
137
138/* Configs for interlaced : no IT, no pass thru, 3 fields */
139#define TOP_CONFIG_INTER_BTM 0x00000000
140#define TOP_CONFIG_INTER_TOP 0x00000002
141
142/* Config for progressive : no IT, no pass thru, 3 fields */
143#define TOP_CONFIG_PROGRESSIVE 0x00000001
144
145/* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
146#define TOP_MEM_FORMAT_DFLT 0x00018060
147
148/* Min/Max size */
149#define MAX_WIDTH 0x1FFF
150#define MAX_HEIGHT 0x0FFF
151#define MIN_WIDTH 0x0030
152#define MIN_HEIGHT 0x0010
153
/*
 * VC1 range-extension section of a firmware command.
 * Left zeroed (bypassed) by this driver; layout fixed by the firmware ABI.
 */
struct sti_hqvdp_vc1re {
	u32 ctrl_prv_csdi;
	u32 ctrl_cur_csdi;
	u32 ctrl_nxt_csdi;
	u32 ctrl_cur_fmd;
	u32 ctrl_nxt_fmd;
};
161
/*
 * FMD (film mode detection) section of a firmware command.
 * Left zeroed (bypassed) by this driver; layout fixed by the firmware ABI.
 */
struct sti_hqvdp_fmd {
	u32 config;
	u32 viewport_ori;
	u32 viewport_size;
	u32 next_next_luma;
	u32 next_next_right_luma;
	u32 next_next_next_luma;
	u32 next_next_next_right_luma;
	u32 threshold_scd;
	u32 threshold_rfd;
	u32 threshold_move;
	u32 threshold_cfd;
};
175
/*
 * CSDI (deinterlacer) section of a firmware command. Only the three config
 * words are programmed by this driver; the per-field buffer addresses are
 * left at 0. Layout fixed by the firmware ABI.
 */
struct sti_hqvdp_csdi {
	u32 config;		/* CSDI_CONFIG_PROG or CSDI_CONFIG_INTER_DIR */
	u32 config2;		/* CSDI_CONFIG2_DFLT when deinterlacing */
	u32 dcdi_config;	/* CSDI_DCDI_CONFIG_DFLT when deinterlacing */
	u32 prev_luma;
	u32 prev_enh_luma;
	u32 prev_right_luma;
	u32 prev_enh_right_luma;
	u32 next_luma;
	u32 next_enh_luma;
	u32 next_right_luma;
	u32 next_enh_right_luma;
	u32 prev_chroma;
	u32 prev_enh_chroma;
	u32 prev_right_chroma;
	u32 prev_enh_right_chroma;
	u32 next_chroma;
	u32 next_enh_chroma;
	u32 next_right_chroma;
	u32 next_enh_right_chroma;
	u32 prev_motion;
	u32 prev_right_motion;
	u32 cur_motion;
	u32 cur_right_motion;
	u32 next_motion;
	u32 next_right_motion;
};
203
204/* Config for progressive: by pass */
205#define CSDI_CONFIG_PROG 0x00000000
206/* Config for directional deinterlacing without motion */
207#define CSDI_CONFIG_INTER_DIR 0x00000016
208/* Additional configs for fader, blender, motion,... deinterlace algorithms */
209#define CSDI_CONFIG2_DFLT 0x000001B3
210#define CSDI_DCDI_CONFIG_DFLT 0x00203803
211
/*
 * HVSRC (horizontal/vertical scaler) section of a firmware command.
 * The coef arrays and shift words are filled by sti_hqvdp_update_hvsrc().
 * Layout fixed by the firmware ABI.
 */
struct sti_hqvdp_hvsrc {
	u32 hor_panoramic_ctrl;
	u32 output_picture_size;	/* height << 16 | width */
	u32 init_horizontal;
	u32 init_vertical;
	u32 param_ctrl;			/* HVSRC_PARAM_CTRL_DFLT */
	u32 yh_coef[NB_COEF];		/* luma horizontal LUT */
	u32 ch_coef[NB_COEF];		/* chroma horizontal LUT */
	u32 yv_coef[NB_COEF];		/* luma vertical LUT */
	u32 cv_coef[NB_COEF];		/* chroma vertical LUT */
	u32 hori_shift;			/* (chroma shift << 16) | luma shift */
	u32 vert_shift;			/* (chroma shift << 16) | luma shift */
};
225
226/* Default ParamCtrl: all controls enabled */
227#define HVSRC_PARAM_CTRL_DFLT 0xFFFFFFFF
228
/*
 * IQI (image quality improvement) section of a firmware command.
 * Programmed with bypass defaults by this driver; layout fixed by the
 * firmware ABI.
 */
struct sti_hqvdp_iqi {
	u32 config;		/* IQI_CONFIG_DFLT: bypass */
	u32 demo_wind_size;
	u32 pk_config;
	u32 coeff0_coeff1;
	u32 coeff2_coeff3;
	u32 coeff4;
	u32 pk_lut;
	u32 pk_gain;
	u32 pk_coring_level;
	u32 cti_config;
	u32 le_config;
	u32 le_lut[64];
	u32 con_bri;		/* IQI_CON_BRI_DFLT: gain = 256 */
	u32 sat_gain;		/* IQI_SAT_GAIN_DFLT: gain = 256 */
	u32 pxf_conf;		/* IQI_PXF_CONF_DFLT: P2I bypass */
	u32 default_color;
};
247
248/* Default Config : IQI bypassed */
249#define IQI_CONFIG_DFLT 0x00000001
250/* Default Contrast & Brightness gain = 256 */
251#define IQI_CON_BRI_DFLT 0x00000100
252/* Default Saturation gain = 256 */
253#define IQI_SAT_GAIN_DFLT 0x00000100
254/* Default PxfConf : P2I bypassed */
255#define IQI_PXF_CONF_DFLT 0x00000001
256
/* TOP status written back by the firmware (read-only for the host) */
struct sti_hqvdp_top_status {
	u32 processing_time;
	u32 input_y_crc;
	u32 input_uv_crc;
};
262
/* FMD status written back by the firmware (read-only for the host) */
struct sti_hqvdp_fmd_status {
	u32 fmd_repeat_move_status;
	u32 fmd_scene_count_status;
	u32 cfd_sum;
	u32 field_sum;
	u32 next_y_fmd_crc;
	u32 next_next_y_fmd_crc;
	u32 next_next_next_y_fmd_crc;
};
272
/* CSDI status written back by the firmware (read-only for the host) */
struct sti_hqvdp_csdi_status {
	u32 prev_y_csdi_crc;
	u32 cur_y_csdi_crc;
	u32 next_y_csdi_crc;
	u32 prev_uv_csdi_crc;
	u32 cur_uv_csdi_crc;
	u32 next_uv_csdi_crc;
	u32 y_csdi_crc;
	u32 uv_csdi_crc;
	u32 uv_cup_crc;
	u32 mot_csdi_crc;
	u32 mot_cur_csdi_crc;
	u32 mot_prev_csdi_crc;
};
287
/* HVSRC status written back by the firmware (read-only for the host) */
struct sti_hqvdp_hvsrc_status {
	u32 y_hvsrc_crc;
	u32 u_hvsrc_crc;
	u32 v_hvsrc_crc;
};
293
/* IQI status written back by the firmware (read-only for the host) */
struct sti_hqvdp_iqi_status {
	u32 pxf_it_status;
	u32 y_iqi_crc;
	u32 u_iqi_crc;
	u32 v_iqi_crc;
};
300
301/* Main commands. We use 2 commands one being processed by the firmware, one
302 * ready to be fetched upon next Vsync*/
303#define NB_VDP_CMD 2
304
/*
 * One complete firmware command: all parameter sections followed by all
 * status sections. NB_VDP_CMD of these are allocated contiguously in
 * DMA-able memory; their physical addresses are posted through
 * HQVDP_MBX_NEXT_CMD, so the aggregate layout is the firmware ABI.
 */
struct sti_hqvdp_cmd {
	struct sti_hqvdp_top top;
	struct sti_hqvdp_vc1re vc1re;
	struct sti_hqvdp_fmd fmd;
	struct sti_hqvdp_csdi csdi;
	struct sti_hqvdp_hvsrc hvsrc;
	struct sti_hqvdp_iqi iqi;
	struct sti_hqvdp_top_status top_status;
	struct sti_hqvdp_fmd_status fmd_status;
	struct sti_hqvdp_csdi_status csdi_status;
	struct sti_hqvdp_hvsrc_status hvsrc_status;
	struct sti_hqvdp_iqi_status iqi_status;
};
318
/*
 * STI HQVDP structure
 *
 * @dev: driver device
 * @drm_dev: the drm device
 * @regs: registers
 * @layer: layer structure for hqvdp itself
 * @vid_plane: VID plug used as link with compositor IP
 * @clk: IP clock
 * @clk_pix_main: pix main clock
 * @reset: reset control
 * @vtg_nb: notifier to handle VTG Vsync
 * @btm_field_pending: is there any bottom field (interlaced frame) to display
 * @curr_field_count: number of field updates
 * @last_field_count: number of field updates since last fps measure
 * @hqvdp_cmd: buffer of commands (kernel virtual address)
 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd, as posted to the mailbox
 * @vtg: vtg for main data path
 */
struct sti_hqvdp {
	struct device *dev;
	struct drm_device *drm_dev;
	void __iomem *regs;
	struct sti_layer layer;
	struct drm_plane *vid_plane;
	struct clk *clk;
	struct clk *clk_pix_main;
	struct reset_control *reset;
	struct notifier_block vtg_nb;
	bool btm_field_pending;
	unsigned int curr_field_count;
	unsigned int last_field_count;
	void *hqvdp_cmd;
	dma_addr_t hqvdp_cmd_paddr;
	struct sti_vtg *vtg;
};
355
356#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
357
/* The HQVDP input path only accepts semi-planar NV12 buffers */
static const uint32_t hqvdp_supported_formats[] = {
	DRM_FORMAT_NV12,
};
361
/* Return the pixel formats supported by the HQVDP layer (@layer unused) */
static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
{
	return hqvdp_supported_formats;
}
366
367static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
368{
369 return ARRAY_SIZE(hqvdp_supported_formats);
370}
371
372/**
373 * sti_hqvdp_get_free_cmd
374 * @hqvdp: hqvdp structure
375 *
376 * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
377 *
378 * RETURNS:
379 * the offset of the command to be used.
380 * -1 in error cases
381 */
382static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
383{
384 int curr_cmd, next_cmd;
385 dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
386 int i;
387
388 curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
389 next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
390
391 for (i = 0; i < NB_VDP_CMD; i++) {
392 if ((cmd != curr_cmd) && (cmd != next_cmd))
393 return i * sizeof(struct sti_hqvdp_cmd);
394 cmd += sizeof(struct sti_hqvdp_cmd);
395 }
396
397 return -1;
398}
399
400/**
401 * sti_hqvdp_get_curr_cmd
402 * @hqvdp: hqvdp structure
403 *
404 * Look for the hqvdp_cmd that is being used by the FW.
405 *
406 * RETURNS:
407 * the offset of the command to be used.
408 * -1 in error cases
409 */
410static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
411{
412 int curr_cmd;
413 dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
414 unsigned int i;
415
416 curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
417
418 for (i = 0; i < NB_VDP_CMD; i++) {
419 if (cmd == curr_cmd)
420 return i * sizeof(struct sti_hqvdp_cmd);
421
422 cmd += sizeof(struct sti_hqvdp_cmd);
423 }
424
425 return -1;
426}
427
/**
 * sti_hqvdp_update_hvsrc
 * @orient: horizontal or vertical
 * @scale: scaling/zoom factor, in units of SCALE_FACTOR (8192 = 1:1)
 * @hvsrc: the structure containing the LUT coef
 *
 * Update the Y and C Lut coef, as well as the shift param.
 * The LUT is selected from the scale ratio: stronger downscaling (smaller
 * @scale) picks a LUT with more filtering (F < E < D < C < B < A).
 *
 * RETURNS:
 * None.
 */
static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
		struct sti_hqvdp_hvsrc *hvsrc)
{
	const int *coef_c, *coef_y;
	int shift_c, shift_y;

	/* Get the appropriate coef tables.
	 * Note the boundaries: LUT B is used only for the exact 1:1 case
	 * (scale == SCALE_MAX_FOR_LEG_LUT_C), LUT A for any upscaling. */
	if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
		coef_y = coef_lut_f_y_legacy;
		coef_c = coef_lut_f_c_legacy;
		shift_y = SHIFT_LUT_F_Y_LEGACY;
		shift_c = SHIFT_LUT_F_C_LEGACY;
	} else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
		coef_y = coef_lut_e_y_legacy;
		coef_c = coef_lut_e_c_legacy;
		shift_y = SHIFT_LUT_E_Y_LEGACY;
		shift_c = SHIFT_LUT_E_C_LEGACY;
	} else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
		coef_y = coef_lut_d_y_legacy;
		coef_c = coef_lut_d_c_legacy;
		shift_y = SHIFT_LUT_D_Y_LEGACY;
		shift_c = SHIFT_LUT_D_C_LEGACY;
	} else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
		coef_y = coef_lut_c_y_legacy;
		coef_c = coef_lut_c_c_legacy;
		shift_y = SHIFT_LUT_C_Y_LEGACY;
		shift_c = SHIFT_LUT_C_C_LEGACY;
	} else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
		coef_y = coef_c = coef_lut_b;
		shift_y = shift_c = SHIFT_LUT_B;
	} else {
		coef_y = coef_c = coef_lut_a_legacy;
		shift_y = shift_c = SHIFT_LUT_A_LEGACY;
	}

	/* Program the selected LUTs and shifts into the command section;
	 * shift word packs chroma in the high half, luma in the low half */
	if (orient == HVSRC_HORI) {
		hvsrc->hori_shift = (shift_c << 16) | shift_y;
		memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
		memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
	} else {
		hvsrc->vert_shift = (shift_c << 16) | shift_y;
		memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
		memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
	}
}
484
485/**
486 * sti_hqvdp_check_hw_scaling
487 * @layer: hqvdp layer
488 *
489 * Check if the HW is able to perform the scaling request
490 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
491 * Zy = OutputHeight / InputHeight
492 * LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
493 * Tx : Total video mode horizontal resolution
494 * IPClock : HQVDP IP clock (Mhz)
495 * MaxNbCycles: max(InputWidth, OutputWidth)
496 * Cp: Video mode pixel clock (Mhz)
497 *
498 * RETURNS:
499 * True if the HW can scale.
500 */
501static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
502{
503 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
504 unsigned long lfw;
505 unsigned int inv_zy;
506
507 lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
508 lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
509
510 inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
511
512 return (inv_zy <= lfw) ? true : false;
513}
514
515/**
516 * sti_hqvdp_prepare_layer
517 * @layer: hqvdp layer
518 * @first_prepare: true if it is the first time this function is called
519 *
520 * Prepares a command for the firmware
521 *
522 * RETURNS:
523 * 0 on success.
524 */
525static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
526{
527 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
528 struct sti_hqvdp_cmd *cmd;
529 int scale_h, scale_v;
530 int cmd_offset;
531
532 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
533
534 /* prepare and commit VID plane */
535 hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
536 layer->crtc, layer->fb,
537 layer->dst_x, layer->dst_y,
538 layer->dst_w, layer->dst_h,
539 layer->src_x, layer->src_y,
540 layer->src_w, layer->src_h);
541
542 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
543 if (cmd_offset == -1) {
544 DRM_ERROR("No available hqvdp_cmd now\n");
545 return -EBUSY;
546 }
547 cmd = hqvdp->hqvdp_cmd + cmd_offset;
548
549 if (!sti_hqvdp_check_hw_scaling(layer)) {
550 DRM_ERROR("Scaling beyond HW capabilities\n");
551 return -EINVAL;
552 }
553
554 /* Static parameters, defaulting to progressive mode */
555 cmd->top.config = TOP_CONFIG_PROGRESSIVE;
556 cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
557 cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
558 cmd->csdi.config = CSDI_CONFIG_PROG;
559
560 /* VC1RE, FMD bypassed : keep everything set to 0
561 * IQI/P2I bypassed */
562 cmd->iqi.config = IQI_CONFIG_DFLT;
563 cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
564 cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
565 cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
566
567 /* Buffer planes address */
568 cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
569 cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
570
571 /* Pitches */
572 cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
573 layer->pitches[0];
574 cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
575 layer->pitches[1];
576
577 /* Input / output size
578 * Align to upper even value */
579 layer->dst_w = ALIGN(layer->dst_w, 2);
580 layer->dst_h = ALIGN(layer->dst_h, 2);
581
582 if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
583 (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
584 (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
585 (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
586 DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
587 layer->src_w, layer->src_h,
588 layer->dst_w, layer->dst_h);
589 return -EINVAL;
590 }
591 cmd->top.input_viewport_size = cmd->top.input_frame_size =
592 layer->src_h << 16 | layer->src_w;
593 cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
594 cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
595
596 /* Handle interlaced */
597 if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
598 /* Top field to display */
599 cmd->top.config = TOP_CONFIG_INTER_TOP;
600
601 /* Update pitches and vert size */
602 cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
603 layer->src_w;
604 cmd->top.luma_processed_pitch *= 2;
605 cmd->top.luma_src_pitch *= 2;
606 cmd->top.chroma_processed_pitch *= 2;
607 cmd->top.chroma_src_pitch *= 2;
608
609 /* Enable directional deinterlacing processing */
610 cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
611 cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
612 cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
613 }
614
615 /* Update hvsrc lut coef */
616 scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
617 sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
618
619 scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
620 sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
621
622 if (first_prepare) {
623 /* Prevent VTG shutdown */
624 if (clk_prepare_enable(hqvdp->clk_pix_main)) {
625 DRM_ERROR("Failed to prepare/enable pix main clk\n");
626 return -ENXIO;
627 }
628
629 /* Register VTG Vsync callback to handle bottom fields */
630 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
631 sti_vtg_register_client(hqvdp->vtg,
632 &hqvdp->vtg_nb, layer->mixer_id)) {
633 DRM_ERROR("Cannot register VTG notifier\n");
634 return -ENXIO;
635 }
636 }
637
638 return 0;
639}
640
641static int sti_hqvdp_commit_layer(struct sti_layer *layer)
642{
643 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
644 int cmd_offset;
645
646 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
647
648 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
649 if (cmd_offset == -1) {
650 DRM_ERROR("No available hqvdp_cmd now\n");
651 return -EBUSY;
652 }
653
654 writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
655 hqvdp->regs + HQVDP_MBX_NEXT_CMD);
656
657 hqvdp->curr_field_count++;
658
659 /* Interlaced : get ready to display the bottom field at next Vsync */
660 if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
661 hqvdp->btm_field_pending = true;
662
663 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
664 __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
665
666 return 0;
667}
668
/*
 * Disable the HQVDP layer: unregister the VTG notifier, clear the queued
 * command, wait for the firmware to return to idle, then release the pix
 * main clock and disable the linked VID plane.
 * Note: the clock is released before checking the poll result so that it
 * is not leaked even when the firmware fails to go idle.
 */
static int sti_hqvdp_disable_layer(struct sti_layer *layer)
{
	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
	int i;

	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));

	/* Unregister VTG Vsync callback */
	if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
			sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

	/* Set next cmd to NULL */
	writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);

	/* Poll until the firmware reports it is idle again */
	for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
		if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
				& INFO_XP70_FW_READY)
			break;
		msleep(POLL_DELAY_MS);
	}

	/* VTG can stop now */
	clk_disable_unprepare(hqvdp->clk_pix_main);

	if (i == POLL_MAX_ATTEMPT) {
		DRM_ERROR("XP70 could not revert to idle\n");
		return -ENXIO;
	}

	/* disable VID plane */
	hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);

	return 0;
}
704
705/**
706 * sti_vdp_vtg_cb
707 * @nb: notifier block
708 * @evt: event message
709 * @data: private data
710 *
711 * Handle VTG Vsync event, display pending bottom field
712 *
713 * RETURNS:
714 * 0 on success.
715 */
716int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
717{
718 struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
719 int btm_cmd_offset, top_cmd_offest;
720 struct sti_hqvdp_cmd *btm_cmd, *top_cmd;
721
722 if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
723 DRM_DEBUG_DRIVER("Unknown event\n");
724 return 0;
725 }
726
727 if (hqvdp->btm_field_pending) {
728 /* Create the btm field command from the current one */
729 btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
730 top_cmd_offest = sti_hqvdp_get_curr_cmd(hqvdp);
731 if ((btm_cmd_offset == -1) || (top_cmd_offest == -1)) {
732 DRM_ERROR("Cannot get cmds, skip btm field\n");
733 return -EBUSY;
734 }
735
736 btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
737 top_cmd = hqvdp->hqvdp_cmd + top_cmd_offest;
738
739 memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));
740
741 btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
742 btm_cmd->top.current_luma +=
743 btm_cmd->top.luma_src_pitch / 2;
744 btm_cmd->top.current_chroma +=
745 btm_cmd->top.chroma_src_pitch / 2;
746
747 /* Post the command to mailbox */
748 writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
749 hqvdp->regs + HQVDP_MBX_NEXT_CMD);
750
751 hqvdp->curr_field_count++;
752 hqvdp->btm_field_pending = false;
753
754 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
755 __func__, hqvdp->hqvdp_cmd_paddr);
756 }
757
758 return 0;
759}
760
761static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
762{
763 struct drm_plane *plane;
764
765 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
766 struct sti_layer *layer = to_sti_layer(plane);
767
768 if (layer->desc == id)
769 return plane;
770 }
771
772 return NULL;
773}
774
/*
 * Layer init hook: resolve the VID plane the HQVDP output is plugged into,
 * install the VTG notifier callback and allocate the DMA-able command
 * buffers shared with the firmware.
 * Note: failures are only logged — the layer is left partially initialized
 * (hqvdp_cmd stays NULL) since this hook returns void.
 */
static void sti_hqvd_init(struct sti_layer *layer)
{
	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
	int size;

	/* find the plane matching with vid 0 */
	hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
	if (!hqvdp->vid_plane) {
		DRM_ERROR("Cannot find Main video layer\n");
		return;
	}

	hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;

	/* Allocate memory for the VDP commands */
	size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
	hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
					&hqvdp->hqvdp_cmd_paddr,
					GFP_KERNEL | GFP_DMA);
	if (!hqvdp->hqvdp_cmd) {
		DRM_ERROR("Failed to allocate memory for VDP cmd\n");
		return;
	}

	memset(hqvdp->hqvdp_cmd, 0, size);
}
801
/* sti_layer callbacks implemented by the HQVDP plane */
static const struct sti_layer_funcs hqvdp_ops = {
	.get_formats = sti_hqvdp_get_formats,
	.get_nb_formats = sti_hqvdp_get_nb_formats,
	.init = sti_hqvd_init,
	.prepare = sti_hqvdp_prepare_layer,
	.commit = sti_hqvdp_commit_layer,
	.disable = sti_hqvdp_disable_layer,
};
810
811struct sti_layer *sti_hqvdp_create(struct device *dev)
812{
813 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
814
815 hqvdp->layer.ops = &hqvdp_ops;
816
817 return &hqvdp->layer;
818}
benjamin.gaignard@linaro.org4e0cd682014-12-13 07:59:31 +0100819EXPORT_SYMBOL(sti_hqvdp_create);
Benjamin Gaignard4fdbc6782014-12-11 11:38:59 +0100820
/*
 * Program the memory-interface read and write plugs with identical
 * settings. The enable bit is written last, once the plug parameters are
 * in place — keep this register write ordering.
 */
static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
{
	/* Configure Plugs (same for RD & WR) */
	writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
	writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
	writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
	writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
	writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
	writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
	writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);

	writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
	writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
	writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
	writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
	writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
	writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
	writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
}
840
841/**
842 * sti_hqvdp_start_xp70
843 * @firmware: firmware found
844 * @ctxt: hqvdp structure
845 *
846 * Run the xP70 initialization sequence
847 */
848static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
849{
850 struct sti_hqvdp *hqvdp = ctxt;
851 u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
852 u8 *data;
853 int i;
854 struct fw_header {
855 int rd_size;
856 int wr_size;
857 int pmem_size;
858 int dmem_size;
859 } *header;
860
861 DRM_DEBUG_DRIVER("\n");
862 /* Check firmware parts */
863 if (!firmware) {
864 DRM_ERROR("Firmware not available\n");
865 return;
866 }
867
868 header = (struct fw_header *) firmware->data;
869 if (firmware->size < sizeof(*header)) {
870 DRM_ERROR("Invalid firmware size (%d)\n", firmware->size);
871 goto out;
872 }
873 if ((sizeof(*header) + header->rd_size + header->wr_size +
874 header->pmem_size + header->dmem_size) != firmware->size) {
875 DRM_ERROR("Invalid fmw structure (%d+%d+%d+%d+%d != %d)\n",
876 sizeof(*header), header->rd_size, header->wr_size,
877 header->pmem_size, header->dmem_size,
878 firmware->size);
879 goto out;
880 }
881
882 data = (u8 *) firmware->data;
883 data += sizeof(*header);
884 fw_rd_plug = (void *) data;
885 data += header->rd_size;
886 fw_wr_plug = (void *) data;
887 data += header->wr_size;
888 fw_pmem = (void *) data;
889 data += header->pmem_size;
890 fw_dmem = (void *) data;
891
892 /* Enable clock */
893 if (clk_prepare_enable(hqvdp->clk))
894 DRM_ERROR("Failed to prepare/enable HQVDP clk\n");
895
896 /* Reset */
897 writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);
898
899 for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
900 if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
901 & STARTUP_CTRL1_RST_DONE)
902 break;
903 msleep(POLL_DELAY_MS);
904 }
905 if (i == POLL_MAX_ATTEMPT) {
906 DRM_ERROR("Could not reset\n");
907 goto out;
908 }
909
910 /* Init Read & Write plugs */
911 for (i = 0; i < header->rd_size / 4; i++)
912 writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
913 for (i = 0; i < header->wr_size / 4; i++)
914 writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);
915
916 sti_hqvdp_init_plugs(hqvdp);
917
918 /* Authorize Idle Mode */
919 writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);
920
921 /* Prevent VTG interruption during the boot */
922 writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
923 writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
924
925 /* Download PMEM & DMEM */
926 for (i = 0; i < header->pmem_size / 4; i++)
927 writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
928 for (i = 0; i < header->dmem_size / 4; i++)
929 writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);
930
931 /* Enable fetch */
932 writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);
933
934 /* Wait end of boot */
935 for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
936 if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
937 & INFO_XP70_FW_READY)
938 break;
939 msleep(POLL_DELAY_MS);
940 }
941 if (i == POLL_MAX_ATTEMPT) {
942 DRM_ERROR("Could not boot\n");
943 goto out;
944 }
945
946 /* Launch Vsync */
947 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
948
949 DRM_INFO("HQVDP XP70 started\n");
950out:
951 release_firmware(firmware);
952}
953
/*
 * Component bind: kick off the asynchronous firmware load (which boots the
 * xP70 in sti_hqvdp_start_xp70) and register the HQVDP as a DRM overlay
 * plane.
 */
int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
{
	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct sti_layer *layer;
	int err;

	DRM_DEBUG_DRIVER("\n");

	hqvdp->drm_dev = drm_dev;

	/* Request for firmware */
	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				HQVDP_FMW_NAME, hqvdp->dev,
				GFP_KERNEL, hqvdp, sti_hqvdp_start_xp70);
	if (err) {
		DRM_ERROR("Can't get HQVDP firmware\n");
		return err;
	}

	layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
	if (!layer) {
		/* NOTE(review): the firmware request above is not cancelled
		 * on this path — the callback may still run later */
		DRM_ERROR("Can't create HQVDP plane\n");
		return -ENOMEM;
	}

	sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);

	return 0;
}
984
/* Component unbind: nothing to undo here (plane teardown is handled by DRM) */
static void sti_hqvdp_unbind(struct device *dev,
		struct device *master, void *data)
{
	/* do nothing */
}
990
/* Component framework hooks */
static const struct component_ops sti_hqvdp_ops = {
	.bind = sti_hqvdp_bind,
	.unbind = sti_hqvdp_unbind,
};
995
996static int sti_hqvdp_probe(struct platform_device *pdev)
997{
998 struct device *dev = &pdev->dev;
999 struct device_node *vtg_np;
1000 struct sti_hqvdp *hqvdp;
1001 struct resource *res;
1002
1003 DRM_DEBUG_DRIVER("\n");
1004
1005 hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
1006 if (!hqvdp) {
1007 DRM_ERROR("Failed to allocate HQVDP context\n");
1008 return -ENOMEM;
1009 }
1010
1011 hqvdp->dev = dev;
1012
1013 /* Get Memory resources */
1014 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1015 if (res == NULL) {
1016 DRM_ERROR("Get memory resource failed\n");
1017 return -ENXIO;
1018 }
1019 hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
1020 if (hqvdp->regs == NULL) {
1021 DRM_ERROR("Register mapping failed\n");
1022 return -ENXIO;
1023 }
1024
1025 /* Get clock resources */
1026 hqvdp->clk = devm_clk_get(dev, "hqvdp");
1027 hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
1028 if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk)) {
1029 DRM_ERROR("Cannot get clocks\n");
1030 return -ENXIO;
1031 }
1032
1033 /* Get reset resources */
1034 hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
1035 if (!IS_ERR(hqvdp->reset))
1036 reset_control_deassert(hqvdp->reset);
1037
1038 vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
1039 if (vtg_np)
1040 hqvdp->vtg = of_vtg_find(vtg_np);
1041
1042 platform_set_drvdata(pdev, hqvdp);
1043
1044 return component_add(&pdev->dev, &sti_hqvdp_ops);
1045}
1046
/* Platform remove: drop the component registration added in probe */
static int sti_hqvdp_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sti_hqvdp_ops);
	return 0;
}
1052
1053static struct of_device_id hqvdp_of_match[] = {
1054 { .compatible = "st,stih407-hqvdp", },
1055 { /* end node */ }
1056};
1057MODULE_DEVICE_TABLE(of, hqvdp_of_match);
1058
/* Platform driver glue; module init/exit generated below */
struct platform_driver sti_hqvdp_driver = {
	.driver = {
		.name = "sti-hqvdp",
		.owner = THIS_MODULE,
		.of_match_table = hqvdp_of_match,
	},
	.probe = sti_hqvdp_probe,
	.remove = sti_hqvdp_remove,
};

module_platform_driver(sti_hqvdp_driver);
1070
1071MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
1072MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
1073MODULE_LICENSE("GPL");