Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c           | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c      |  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c   | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c |  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c  | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c    | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c   |  7
7 files changed, 114 insertions(+), 68 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index dc463d99ef50..83ce55edb3aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1246,6 +1246,19 @@ void dc_trigger_sync(struct dc *dc, struct dc_state *context)
}
}
+static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ unsigned int stream_mask = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ stream_mask |= 1 << i;
+ }
+
+ return stream_mask;
+}
+
/*
* Applies given context to HW and copy it into current context.
* It's up to the user to release the src context afterwards.
@@ -1273,7 +1286,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->optimize_seamless_boot_streams++;
}
- if (dc->optimize_seamless_boot_streams == 0)
+ if (context->stream_count > dc->optimize_seamless_boot_streams)
dc->hwss.prepare_bandwidth(dc, context);
disable_dangling_plane(dc, context);
@@ -1355,13 +1368,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (dc->optimize_seamless_boot_streams == 0) {
+ if (context->stream_count > dc->optimize_seamless_boot_streams) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
}
+ context->stream_mask = get_stream_mask(dc, context);
+
+ if (context->stream_mask != dc->current_state->stream_mask)
+ dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
+
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
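The two dc.c hunks above add a per-pipe stream bitmask and only notify the DMUB firmware when it differs from the current state's mask. A minimal standalone sketch of the same build-and-compare pattern, with a plain bool array and a printf standing in for the pipe contexts and dc_dmub_srv_notify_stream_mask():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for dc_dmub_srv_notify_stream_mask(). */
static void notify(uint8_t mask)
{
	printf("notify DMUB: stream mask 0x%02x\n", mask);
}

int main(void)
{
	bool pipe_has_stream[6] = { true, false, true, false, false, false };
	uint8_t current_mask = 0x01;	/* mask from the previous state */
	uint8_t new_mask = 0;
	int i;

	/* Bit i is set iff pipe i currently drives a stream. */
	for (i = 0; i < 6; i++)
		if (pipe_has_stream[i])
			new_mask |= 1u << i;

	/* Only bother the firmware when the mask actually changed. */
	if (new_mask != current_mask)
		notify(new_mask);
	return 0;
}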
@@ -1481,13 +1499,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}
-struct dc_state *dc_create_state(struct dc *dc)
+static void init_state(struct dc *dc, struct dc_state *context)
{
- struct dc_state *context = kvzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
-
- if (!context)
- return NULL;
/* Each context must have their own instance of VBA and in order to
* initialize and obtain IP and SOC the base DML instance from DC is
* initially copied into every context
@@ -1495,6 +1508,17 @@ struct dc_state *dc_create_state(struct dc *dc)
#ifdef CONFIG_DRM_AMD_DC_DCN
memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
+}
+
+struct dc_state *dc_create_state(struct dc *dc)
+{
+ struct dc_state *context = kzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!context)
+ return NULL;
+
+ init_state(dc, context);
kref_init(&context->refcount);
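Factoring init_state() out of dc_create_state() separates allocation from initialization so the DML copy can be shared with other constructors, and the allocation itself moves from kvzalloc() to kzalloc(). A hedged sketch of the same allocate/initialize/refcount split, with calloc() and an int refcount standing in for kzalloc() and kref_init():

#include <stdlib.h>
#include <string.h>

struct state {
	int refcount;
	char dml[64];	/* stands in for the copied display_mode_lib */
};

static void init_state(const char *base_dml, struct state *s)
{
	/* Every context gets its own copy of the base DML instance. */
	memcpy(s->dml, base_dml, sizeof(s->dml));
}

static struct state *create_state(const char *base_dml)
{
	struct state *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	init_state(base_dml, s);
	s->refcount = 1;	/* kref_init() equivalent */
	return s;
}

int main(void)
{
	static const char base[64] = "base DML";
	struct state *s = create_state(base);

	free(s);
	return 0;
}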
@@ -2415,8 +2439,7 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
- !plane_state->flip_immediate &&
- !dc->debug.disable_tri_buf) {
+ !plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
plane_state->triplebuffer_flips = true;
}
@@ -2443,8 +2466,7 @@ static void commit_planes_for_stream(struct dc *dc,
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
- if (dc->hwss.program_triplebuffer != NULL &&
- !dc->debug.disable_tri_buf) {
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
@@ -2509,8 +2531,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->plane_state != plane_state)
continue;
/*program triple buffer after lock based on flip type*/
- if (dc->hwss.program_triplebuffer != NULL &&
- !dc->debug.disable_tri_buf) {
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*only enable triplebuffer for fast_update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, plane_state->triplebuffer_flips);
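The three hunks above flip the triple-buffering debug knob from opt-out (disable_tri_buf) to opt-in (enable_tri_buf), so the feature now stays off unless explicitly requested. A small sketch of the resulting gate, with made-up struct and hook names:

#include <stdbool.h>
#include <stddef.h>

struct debug_opts {
	bool enable_tri_buf;	/* opt-in: defaults to false */
};

typedef void (*program_tri_buf_fn)(bool enable);

static void maybe_program_triplebuffer(const struct debug_opts *dbg,
				       program_tri_buf_fn hook, bool flips)
{
	/* Same shape as the new condition: hook present AND opt-in set. */
	if (hook != NULL && dbg->enable_tri_buf)
		hook(flips);
}

static void hook(bool enable) { (void)enable; }

int main(void)
{
	struct debug_opts dbg = { .enable_tri_buf = false };

	maybe_program_triplebuffer(&dbg, hook, true);	/* no-op: opt-in unset */
	dbg.enable_tri_buf = true;
	maybe_program_triplebuffer(&dbg, hook, true);	/* now programs */
	return 0;
}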
@@ -2965,7 +2986,7 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
if (enable && !link->psr_settings.psr_allow_active)
return dc_link_set_psr_allow_active(link, true, false);
else if (!enable && link->psr_settings.psr_allow_active)
- return dc_link_set_psr_allow_active(link, false, false);
+ return dc_link_set_psr_allow_active(link, false, true);
}
}
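The PSR hunk changes only the third argument on the disable path: the call now waits for the PSR transition before returning. A sketch of an enable/disable toggle where disable is synchronous (the meaning of the wait parameter is inferred from the call site, not shown in the hunk):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for dc_link_set_psr_allow_active(link, allow, wait). */
static bool set_psr_allow_active(bool allow, bool wait)
{
	printf("psr allow_active=%d wait=%d\n", allow, wait);
	return true;
}

static bool set_psr(bool enable, bool currently_active)
{
	if (enable && !currently_active)
		return set_psr_allow_active(true, false);
	else if (!enable && currently_active)
		return set_psr_allow_active(false, true); /* wait on disable */
	return true;
}

int main(void) { return set_psr(false, true) ? 0 : 1; }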
@@ -3018,4 +3039,10 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}
+
+bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc,
+ struct dc_plane_state *plane)
+{
+ return false;
+}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 437d1a7a16fe..1871ff6119ae 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2946,7 +2946,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
- stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
@@ -2974,7 +2974,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
*/
/* slot X.Y */
- stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
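Both call sites compute the average time slots per MTP as a fixed-point division of the stream's PBN by the PBN per time slot, which the renamed set_throttled_vcp_size() then programs. A toy version of that division, using a 26.6 fixed-point format in place of DC's dc_fixpt and purely illustrative PBN numbers:

#include <stdint.h>
#include <stdio.h>

/* Toy 26.6 fixed point standing in for DC's dc_fixpt type. */
#define FIX_SHIFT 6

static uint32_t fix_div(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a << FIX_SHIFT) / b);
}

int main(void)
{
	/* Illustrative numbers only: stream PBN and PBN per time slot. */
	uint32_t pbn = 720u << FIX_SHIFT;
	uint32_t pbn_per_slot = 64u << FIX_SHIFT;
	uint32_t slots = fix_div(pbn, pbn_per_slot);

	printf("avg time slots per MTP = %u + %u/64\n",
	       slots >> FIX_SHIFT, slots & ((1u << FIX_SHIFT) - 1));
	return 0;
}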
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index d1d95d3e248a..b9b66db8332b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -49,14 +49,14 @@ static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
-static uint32_t get_training_aux_rd_interval(
+static uint32_t get_eq_training_aux_rd_interval(
struct dc_link *link,
- uint32_t default_wait_in_micro_secs)
+ const struct dc_link_settings *link_settings)
{
union training_aux_rd_interval training_rd_interval;
+ uint32_t wait_in_micro_secs = 400;
memset(&training_rd_interval, 0, sizeof(training_rd_interval));
-
/* overwrite the delay if rev > 1.1*/
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
/* DP 1.2 or later - retrieve delay through
@@ -68,10 +68,10 @@ static uint32_t get_training_aux_rd_interval(
sizeof(training_rd_interval));
if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- default_wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
}
- return default_wait_in_micro_secs;
+ return wait_in_micro_secs;
}
static void wait_for_training_aux_rd_interval(
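The rewritten helper hard-codes the 400 us EQ default instead of taking it as a parameter, and still scales a nonzero DPCD TRAINING_AUX_RD_INTERVAL field by 4 ms per unit. A sketch of that decode (the 0x7f field width is an assumption from the DPCD register layout, not shown in the hunk):

#include <stdint.h>
#include <stdio.h>

/* Decode the TRAINING_AUX_RD_INTERVAL field into an EQ wait, in us:
 * 0 keeps the 400 us default; N > 0 means N * 4 ms, mirroring the
 * "* 4000" math in the hunk above. */
static uint32_t eq_aux_rd_interval_us(uint8_t dpcd_val)
{
	uint8_t interval = dpcd_val & 0x7f;

	return interval ? interval * 4000u : 400u;
}

int main(void)
{
	printf("%u us\n", eq_aux_rd_interval_us(0));	/* 400 */
	printf("%u us\n", eq_aux_rd_interval_us(4));	/* 16000 */
	return 0;
}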
@@ -101,7 +101,16 @@ static void dpcd_set_training_pattern(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
-static enum dc_dp_training_pattern get_supported_tp(struct dc_link *link)
+static enum dc_dp_training_pattern decide_cr_training_pattern(
+ const struct dc_link_settings *link_settings)
+{
+ enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
+
+ return pattern;
+}
+
+static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct dc_link_settings *link_settings)
{
enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
struct encoder_feature_support *features = &link->link_enc->features;
@@ -132,7 +141,6 @@ static void dpcd_set_link_settings(
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
- enum dc_dp_training_pattern dp_tr_pattern;
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -143,9 +151,8 @@ static void dpcd_set_link_settings(
lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
- dp_tr_pattern = get_supported_tp(link);
- if (dp_tr_pattern != DP_TRAINING_PATTERN_SEQUENCE_4) {
+ if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
}
@@ -373,34 +380,30 @@ static void dpcd_set_lt_pattern_and_lane_settings(
static bool is_cr_done(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status)
{
- bool done = true;
uint32_t lane;
/*LANEx_CR_DONE bits All 1's?*/
for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
if (!dpcd_lane_status[lane].bits.CR_DONE_0)
- done = false;
+ return false;
}
- return done;
-
+ return true;
}
static bool is_ch_eq_done(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status,
union lane_align_status_updated *lane_status_updated)
{
- bool done = true;
uint32_t lane;
if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
- done = false;
+ return false;
else {
for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
- done = false;
+ return false;
}
}
- return done;
-
+ return true;
}
static void update_drive_settings(
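Both lane-status checks above are rewritten to return as soon as one lane fails rather than accumulating into a done flag. The same early-return shape, reduced to a bool array per lane:

#include <stdbool.h>
#include <stdint.h>

/* Early-return form of the per-lane CR check, with a plain bool array
 * standing in for the dpcd_lane_status union. */
static bool all_lanes_cr_done(const bool *cr_done, uint32_t lane_count)
{
	for (uint32_t lane = 0; lane < lane_count; lane++)
		if (!cr_done[lane])
			return false;	/* no need to scan the rest */
	return true;
}

int main(void)
{
	bool lanes[4] = { true, true, false, true };

	return all_lanes_cr_done(lanes, 4) ? 1 : 0;
}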
@@ -979,7 +982,7 @@ static void start_clock_recovery_pattern_early(struct dc_link *link,
{
DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
__func__);
- dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
dp_set_hw_lane_settings(link, lt_settings, offset);
udelay(400);
}
@@ -994,7 +997,6 @@ static enum link_training_result perform_clock_recovery_sequence(
uint32_t wait_time_microsec;
struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
@@ -1002,7 +1004,7 @@ static enum link_training_result perform_clock_recovery_sequence(
retry_count = 0;
if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
- dp_set_hw_training_pattern(link, tr_pattern, offset);
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -1029,7 +1031,7 @@ static enum link_training_result perform_clock_recovery_sequence(
dpcd_set_lt_pattern_and_lane_settings(
link,
lt_settings,
- tr_pattern,
+ lt_settings->pattern_for_cr,
offset);
else
dpcd_set_lane_settings(
@@ -1113,7 +1115,7 @@ static inline enum link_training_result perform_link_training_int(
* TPS4 must be used instead of POST_LT_ADJ_REQ.
*/
if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
- get_supported_tp(link) == DP_TRAINING_PATTERN_SEQUENCE_4)
+ lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4)
return status;
if (status == LINK_TRAINING_SUCCESS &&
@@ -1245,17 +1247,21 @@ static void initialize_training_settings(
if (overrides->cr_pattern_time != NULL)
lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
else
- lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100);
+ lt_settings->cr_pattern_time = 100;
if (overrides->eq_pattern_time != NULL)
lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
else
- lt_settings->eq_pattern_time = get_training_aux_rd_interval(link, 400);
+ lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
+ if (overrides->pattern_for_cr != NULL)
+ lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
+ else
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
if (overrides->pattern_for_eq != NULL)
lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
else
- lt_settings->pattern_for_eq = get_supported_tp(link);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
if (overrides->enhanced_framing != NULL)
lt_settings->enhanced_framing = *overrides->enhanced_framing;
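pattern_for_cr now follows the same override-or-default scheme as the neighboring fields, with decide_cr_training_pattern() currently always picking TPS1. A compressed sketch of that scheme (the enum values are stand-ins):

#include <stddef.h>

enum tp { TPS1 = 1, TPS2, TPS3, TPS4 };

/* Override-or-default: a NULL override means "use the decided value",
 * mirroring how initialize_training_settings() fills pattern_for_cr. */
static enum tp pick_cr_pattern(const enum tp *override)
{
	return override ? *override : TPS1; /* decide_cr_training_pattern() */
}

int main(void)
{
	enum tp forced = TPS2;

	return (pick_cr_pattern(NULL) == TPS1 &&
		pick_cr_pattern(&forced) == TPS2) ? 0 : 1;
}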
@@ -1457,7 +1463,6 @@ bool dc_link_dp_perform_link_training_skip_aux(
const struct dc_link_settings *link_setting)
{
struct link_training_settings lt_settings;
- enum dc_dp_training_pattern pattern_for_cr = DP_TRAINING_PATTERN_SEQUENCE_1;
initialize_training_settings(
link,
@@ -1468,7 +1473,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 1. Perform_clock_recovery_sequence. */
/* transmit training pattern for clock recovery */
- dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);
+ dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX);
/* call HWSS to set lane settings*/
dp_set_hw_lane_settings(link, &lt_settings, DPRX);
@@ -1610,6 +1615,9 @@ bool perform_link_training_with_retries(
for (j = 0; j < attempts; ++j) {
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
+ __func__, (unsigned int)j + 1, attempts);
+
dp_enable_link_phy(
link,
signal,
@@ -1638,6 +1646,9 @@ bool perform_link_training_with_retries(
if (j == (attempts - 1))
break;
+ DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
+ __func__, (unsigned int)j + 1, attempts);
+
dp_disable_link_phy(link, signal);
msleep(delay_between_attempts);
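The added DC_LOG_* calls bracket each training attempt: one line before the attempt starts, and one when it fails, skipped after the final try. A self-contained sketch of that retry-with-logging shape, with printf standing in for the DC log macros:

#include <stdbool.h>
#include <stdio.h>

static bool try_train(int attempt)
{
	return attempt == 2;	/* pretend the third attempt succeeds */
}

static bool train_with_retries(int attempts)
{
	for (int j = 0; j < attempts; j++) {
		printf("attempt %d of %d\n", j + 1, attempts);
		if (try_train(j))
			return true;
		if (j == attempts - 1)
			break;		/* no teardown after the last try */
		printf("attempt %d of %d failed\n", j + 1, attempts);
		/* PHY disable / sleep between attempts would go here */
	}
	return false;
}

int main(void) { return train_with_retries(4) ? 0 : 1; }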
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index dd88eb348dfa..81c026319ccd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -104,6 +104,12 @@ void dp_enable_link_phy(
struct clock_source *dp_cs =
link->dc->res_pool->dp_clock_source;
unsigned int i;
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
/* If the current pixel clock source is not DTO(happens after
* switching from HDMI passive dongle to DP on the same connector),
* switch the pixel clock source to DTO.
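For eDP the hunk powers the panel and waits for HPD before the PHY is touched, since AUX transactions and link training need a powered panel. A sketch of that ordering; the hook names mirror, but are not, the real hwss entry points:

#include <stdio.h>

enum signal_type { SIGNAL_DP, SIGNAL_EDP };

/* Hedged sketch of the ordering: panel power and HPD-ready must precede
 * PHY enable for eDP. */
static void enable_link_phy(enum signal_type sig)
{
	if (sig == SIGNAL_EDP) {
		printf("edp_power_control(on)\n");
		printf("edp_wait_for_hpd_ready()\n");
	}
	printf("enable PHY / start link training\n");
}

int main(void)
{
	enable_link_phy(SIGNAL_EDP);
	return 0;
}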
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c6b737dd8425..4cea9344d8aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -782,7 +782,13 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- data->recout.x = stream->dst.x;
+ /*
+ * Only the leftmost ODM pipe should be offset by a nonzero distance
+ */
+ if (!pipe_ctx->prev_odm_pipe)
+ data->recout.x = stream->dst.x;
+ else
+ data->recout.x = 0;
if (stream->src.x < surf_clip.x)
data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
/ stream->src.width;
@@ -957,7 +963,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
- struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
+ struct pipe_ctx *odm_pipe = pipe_ctx;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
@@ -988,21 +994,24 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
swap(src.width, src.height);
}
+ /*modified recout_skip_h calculation due to odm having no recout offset*/
+ while (odm_pipe->prev_odm_pipe) {
+ odm_idx++;
+ odm_pipe = odm_pipe->prev_odm_pipe;
+ }
+ /*odm_pipe is the leftmost pipe in the ODM group*/
+ recout_skip_h = odm_idx * data->recout.width;
+
/* Recout matching initial vp offset = recout_offset - (stream dst offset +
* ((surf dst offset - stream src offset) * 1/ stream scaling ratio)
* - (surf surf_src offset * 1/ full scl ratio))
*/
- recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ recout_skip_h += odm_pipe->plane_res.scl_data.recout.x
+ - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
* stream->dst.width / stream->src.width -
src.x * plane_state->dst_rect.width / src.width
* stream->dst.width / stream->src.width);
- /*modified recout_skip_h calculation due to odm having no recout offset*/
- while (odm_pipe) {
- odm_idx++;
- odm_pipe = odm_pipe->prev_odm_pipe;
- }
- if (odm_idx)
- recout_skip_h += odm_idx * data->recout.width;
+
recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
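With ODM combine, only the leftmost pipe keeps the stream's horizontal offset; every pipe to its right starts its recout at 0 and instead skips odm_idx * recout.width pixels plus the leftmost pipe's recout.x, which is what the rewritten recout_skip_h math does. A sketch of that walk over prev_odm_pipe:

#include <stdio.h>

struct pipe {
	struct pipe *prev_odm_pipe;	/* toward the leftmost ODM pipe */
	int recout_x, recout_width;
};

/* Walk left to find this pipe's ODM index and accumulate the horizontal
 * skip (scaling terms from the real formula omitted). */
static int recout_skip_h(struct pipe *p)
{
	int odm_idx = 0;
	struct pipe *left = p;

	while (left->prev_odm_pipe) {
		odm_idx++;
		left = left->prev_odm_pipe;
	}
	/* leftmost pipe carries the real recout.x; the rest start at 0 */
	return odm_idx * p->recout_width + left->recout_x;
}

int main(void)
{
	struct pipe p0 = { NULL, 100, 960 };
	struct pipe p1 = { &p0, 0, 960 };

	printf("pipe1 skip = %d\n", recout_skip_h(&p1)); /* 1060 */
	return 0;
}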
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f42a17d765e3..d48fd87d3b95 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -123,7 +123,6 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
return false;
}
stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->out_transfer_func->ctx = stream->ctx;
stream->stream_id = stream->ctx->dc_stream_id_count;
stream->ctx->dc_stream_id_count++;
@@ -298,7 +297,7 @@ bool dc_stream_set_cursor_attributes(
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* disable idle optimizations while updating cursor */
if (dc->idle_optimizations_allowed) {
- dc->hwss.apply_idle_power_optimizations(dc, false);
+ dc_allow_idle_optimizations(dc, false);
reset_idle_optimizations = true;
}
@@ -326,7 +325,7 @@ bool dc_stream_set_cursor_attributes(
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* re-enable idle optimizations if necessary */
if (reset_idle_optimizations)
- dc->hwss.apply_idle_power_optimizations(dc, true);
+ dc_allow_idle_optimizations(dc, true);
#endif
return true;
@@ -359,9 +358,8 @@ bool dc_stream_set_cursor_position(
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* disable idle optimizations if enabling cursor */
- if (dc->idle_optimizations_allowed &&
- !stream->cursor_position.enable && position->enable) {
- dc->hwss.apply_idle_power_optimizations(dc, false);
+ if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) {
+ dc_allow_idle_optimizations(dc, false);
reset_idle_optimizations = true;
}
@@ -392,7 +390,7 @@ bool dc_stream_set_cursor_position(
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* re-enable idle optimizations if necessary */
if (reset_idle_optimizations)
- dc->hwss.apply_idle_power_optimizations(dc, true);
+ dc_allow_idle_optimizations(dc, true);
#endif
return true;
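The cursor paths now go through the public dc_allow_idle_optimizations() wrapper instead of calling the hwss hook directly, keeping the allowed/denied state and the hardware call in one place. A hedged sketch of that wrapper pattern (field and hook names invented; the real function has more checks):

#include <stdbool.h>

struct dc_like {
	bool idle_optimizations_allowed;
	void (*apply_hook)(struct dc_like *dc, bool enable);
};

static void allow_idle_optimizations(struct dc_like *dc, bool allow)
{
	if (dc->idle_optimizations_allowed == allow)
		return;			/* nothing to do */
	if (dc->apply_hook)
		dc->apply_hook(dc, allow);
	dc->idle_optimizations_allowed = allow;
}

static void hw_hook(struct dc_like *dc, bool enable)
{
	(void)dc; (void)enable;		/* hardware programming stub */
}

int main(void)
{
	struct dc_like dc = { .idle_optimizations_allowed = true,
			      .apply_hook = hw_hook };

	allow_idle_optimizations(&dc, false);
	return dc.idle_optimizations_allowed ? 1 : 0;
}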
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ea1229a3e2b2..3d7d27435f15 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -48,22 +48,17 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
plane_state->in_transfer_func = dc_create_transfer_func();
if (plane_state->in_transfer_func != NULL) {
plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- plane_state->in_transfer_func->ctx = ctx;
}
plane_state->in_shaper_func = dc_create_transfer_func();
if (plane_state->in_shaper_func != NULL) {
plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
- plane_state->in_shaper_func->ctx = ctx;
}
plane_state->lut3d_func = dc_create_3dlut_func();
- if (plane_state->lut3d_func != NULL) {
- plane_state->lut3d_func->ctx = ctx;
- }
+
plane_state->blend_tf = dc_create_transfer_func();
if (plane_state->blend_tf != NULL) {
plane_state->blend_tf->type = TF_TYPE_BYPASS;
- plane_state->blend_tf->ctx = ctx;
}
}