/*	$NetBSD: amdgpu_dc.c,v 1.4 2021/12/19 11:59:30 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dc.c,v 1.4 2021/12/19 11:59:30 riastradh Exp $");

#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";
/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
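
/*
 * Example (illustrative sketch, not part of this file): the minimal
 * bring-up sequence a dm layer might drive against the structs above.
 * The population of init_data and all real-world error handling are
 * assumed to happen in the caller.
 *
 *	struct dc *example_dc_bring_up(const struct dc_init_data *init_data)
 *	{
 *		struct dc *dc = dc_create(init_data);
 *
 *		if (!dc)
 *			return NULL;
 *		dc_hardware_init(dc);	// programs HW unless DCE_ENV_VIRTUAL_HW
 *		return dc;
 *	}
 */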

/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			bool should_destroy_link = false;

			if (link->connector_signal == SIGNAL_TYPE_EDP) {
				if (dc->config.edp_not_connected)
					should_destroy_link = true;
				else if (dc->debug.remove_disconnect_edp) {
					enum dc_connection_type type;
					dc_link_detect_sink(link, &type);
					if (type == dc_connection_none)
						should_destroy_link = true;
				}
			}

			if (dc->config.force_enum_edp || !should_destroy_link) {
				dc->links[dc->link_count] = link;
				link->dc = dc;
				++dc->link_count;
			} else {
				link_destroy(&link);
			}
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax() - Adjust the DRR vertical total range.
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR (Dynamic Refresh
 * Rate), a power-saving feature that aims to reduce the panel refresh rate
 * while the screen is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max,
					adjust->v_total_mid,
					adjust->v_total_mid_frame_num);

			ret = true;
		}
	}
	return ret;
}
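
/*
 * Example (illustrative sketch): requesting a DRR range for a stream.
 * 'panel_max_v_total' is a hypothetical value the caller is assumed to
 * have derived from the panel's DRR capabilities.
 *
 *	struct dc_crtc_timing_adjust adjust = {
 *		.v_total_min = stream->timing.v_total,
 *		.v_total_max = panel_max_v_total,
 *	};
 *
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		; // no active pipe drives this stream
 */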

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
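
/*
 * Example (illustrative sketch): enable continuous CRC capture on a
 * stream and read one set of values back.  A real caller is assumed to
 * wait at least one frame between configuring and reading so the values
 * are valid.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, true, true) &&
 *	    dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DEBUG("CRC: 0x%08x 0x%08x 0x%08x\n", r_cr, g_y, b_cb);
 */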

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the first of the three channels is stored here.
 * @g_y: CRC value for the second of the three channels is stored here.
 * @b_cb: CRC value for the third of the three channels is stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return: false if the stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i = 0;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	dc->config = init_params->flags;

	/* Allocate memory for the vm_helper */
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;

	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	return false;
}

static bool disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;

	return true;
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, dangling_context);
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	int count = 0;
	struct pipe_ctx *pipe;
	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		goto alloc_fail;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params)) {
			dc_destruct(dc);
			goto construct_fail;
		}
	} else {
		if (!dc_construct(dc, init_params)) {
			dc_destruct(dc);
			goto construct_fail;
		}

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->optimize_seamless_boot_streams = 0;
		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_hardware_init(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, then search the remaining tgs for ones
		 * with the same timing and add them to the group.
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first pipe with plane as master */
		for (j = 0; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}
		/* remove any other pipes with plane as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	// Seamless boot only supports a single DP or eDP link so far
	if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
		sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	return true;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot_streams++;
	}

	if (dc->optimize_seamless_boot_streams == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* re-program planes for existing streams, in case we need to
	 * free up plane resources for later use
	 */
	if (dc->hwss.apply_ctx_for_surface)
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;

			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
		}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx)
		dc->hwss.program_front_end_for_ctx(dc, context);
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (dc->optimize_seamless_boot_streams == 0) {
		/* Must wait for no flips to be pending before optimizing bandwidth */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
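
/*
 * Example (illustrative sketch): committing a modified topology.  Note
 * that dc_commit_state_no_check() retains the context it installs as
 * dc->current_state, so the caller still releases the context it built:
 *
 *	struct dc_state *context = dc_create_state(dc);
 *
 *	if (context) {
 *		dc_resource_state_copy_construct(dc->current_state, context);
 *		// ...add/remove streams and planes on 'context'...
 *		dc_commit_state(dc, context);
 *		dc_release_state(context);
 *	}
 */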

bool dc_is_hw_initialized(struct dc *dc)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	return dcb->funcs->is_accelerated_mode(dcb);
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
		return true;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	dc->hwss.optimize_bandwidth(dc, context);
	return true;
}

struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;
	/* Each context must have its own instance of VBA, and in order to
	 * initialize and obtain IP and SOC, the base DML instance from DC is
	 * initially copied into every context.
	 */
#ifdef CONFIG_DRM_AMD_DC_DCN
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif

	kref_init(&context->refcount);

	return context;
}

struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
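
/*
 * Example (illustrative sketch): the dc_state refcount contract.
 * dc_create_state() returns a context holding one reference; every
 * dc_retain_state() must be balanced by a dc_release_state(), and the
 * backing memory is freed via dc_state_free() when the last reference
 * drops:
 *
 *	struct dc_state *context = dc_create_state(dc);	// refcount == 1
 *
 *	dc_retain_state(context);	// refcount == 2
 *	dc_release_state(context);	// refcount == 1
 *	dc_release_state(context);	// refcount == 0, context freed
 */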

bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below is HW dependent, we should add a hook to
		 * DCE/N resource and validate it there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be set up properly,
			 * thus we need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling
			) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a clock change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
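
/*
 * Example (illustrative classification, based on the checks above):
 * moving a plane without resizing it only sets position_change and
 * yields UPDATE_TYPE_MED, while growing src_rect also sets
 * scaling_change and clock_change and forces UPDATE_TYPE_FULL:
 *
 *	// scaling_info identical to the surface except src_rect.x/.y
 *	//	-> position_change -> UPDATE_TYPE_MED
 *	// scaling_info with a larger src_rect.width
 *	//	-> scaling_change + clock_change -> UPDATE_TYPE_FULL
 */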
   1636 
   1637 static enum surface_update_type det_surface_update(const struct dc *dc,
   1638 		const struct dc_surface_update *u)
   1639 {
   1640 	const struct dc_state *context = dc->current_state;
   1641 	enum surface_update_type type;
   1642 	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
   1643 	union surface_update_flags *update_flags = &u->surface->update_flags;
   1644 
   1645 	if (u->flip_addr)
   1646 		update_flags->bits.addr_update = 1;
   1647 
   1648 	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
   1649 		update_flags->raw = 0xFFFFFFFF;
   1650 		return UPDATE_TYPE_FULL;
   1651 	}
   1652 
   1653 	update_flags->raw = 0; // Reset all flags
   1654 
   1655 	type = get_plane_info_update_type(u);
   1656 	elevate_update_type(&overall_type, type);
   1657 
   1658 	type = get_scaling_info_update_type(u);
   1659 	elevate_update_type(&overall_type, type);
   1660 
	/* Set again: update_flags->raw was cleared above */
    1661 	if (u->flip_addr)
    1662 		update_flags->bits.addr_update = 1;
   1663 
   1664 	if (u->in_transfer_func)
   1665 		update_flags->bits.in_transfer_func_change = 1;
   1666 
   1667 	if (u->input_csc_color_matrix)
   1668 		update_flags->bits.input_csc_change = 1;
   1669 
   1670 	if (u->coeff_reduction_factor)
   1671 		update_flags->bits.coeff_reduction_change = 1;
   1672 
   1673 	if (u->gamma) {
   1674 		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
   1675 
   1676 		if (u->plane_info)
   1677 			format = u->plane_info->format;
   1678 		else if (u->surface)
   1679 			format = u->surface->format;
   1680 
   1681 		if (dce_use_lut(format))
   1682 			update_flags->bits.gamma_change = 1;
   1683 	}
   1684 
    1685 	if (u->hdr_mult.value &&
    1686 			u->hdr_mult.value != u->surface->hdr_mult.value) {
    1687 		update_flags->bits.hdr_mult = 1;
    1688 		elevate_update_type(&overall_type, UPDATE_TYPE_MED);
    1689 	}
   1690 
   1691 	if (update_flags->bits.in_transfer_func_change) {
   1692 		type = UPDATE_TYPE_MED;
   1693 		elevate_update_type(&overall_type, type);
   1694 	}
   1695 
   1696 	if (update_flags->bits.input_csc_change
   1697 			|| update_flags->bits.coeff_reduction_change
   1698 			|| update_flags->bits.gamma_change) {
   1699 		type = UPDATE_TYPE_FULL;
   1700 		elevate_update_type(&overall_type, type);
   1701 	}
   1702 
   1703 	return overall_type;
   1704 }
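
/*
 * Sketch (illustrative): a surface that is not in the current context,
 * or one with force_full_update set, short-circuits with every flag
 * raised:
 *
 *	update_flags->raw == 0xFFFFFFFF;	// all bits set
 *	=> UPDATE_TYPE_FULL
 *
 * Otherwise the flags are rebuilt from scratch and the per-field checks
 * above elevate the overall type as needed.
 */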
   1705 
   1706 static enum surface_update_type check_update_surfaces_for_stream(
   1707 		struct dc *dc,
   1708 		struct dc_surface_update *updates,
   1709 		int surface_count,
   1710 		struct dc_stream_update *stream_update,
   1711 		const struct dc_stream_status *stream_status)
   1712 {
   1713 	int i;
   1714 	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
   1715 
   1716 	if (stream_status == NULL || stream_status->plane_count != surface_count)
   1717 		overall_type = UPDATE_TYPE_FULL;
   1718 
   1719 	/* some stream updates require passive update */
   1720 	if (stream_update) {
   1721 		union stream_update_flags *su_flags = &stream_update->stream->update_flags;
   1722 
   1723 		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
   1724 			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
   1725 			stream_update->integer_scaling_update)
   1726 			su_flags->bits.scaling = 1;
   1727 
   1728 		if (stream_update->out_transfer_func)
   1729 			su_flags->bits.out_tf = 1;
   1730 
   1731 		if (stream_update->abm_level)
   1732 			su_flags->bits.abm_level = 1;
   1733 
   1734 		if (stream_update->dpms_off)
   1735 			su_flags->bits.dpms_off = 1;
   1736 
   1737 		if (stream_update->gamut_remap)
   1738 			su_flags->bits.gamut_remap = 1;
   1739 
   1740 		if (stream_update->wb_update)
   1741 			su_flags->bits.wb_update = 1;

    1742 		if (su_flags->raw != 0)
   1743 			overall_type = UPDATE_TYPE_FULL;
   1744 
   1745 		if (stream_update->output_csc_transform || stream_update->output_color_space)
   1746 			su_flags->bits.out_csc = 1;
   1747 
   1748 		if (stream_update->dsc_config)
   1749 			overall_type = UPDATE_TYPE_FULL;
   1750 	}
   1751 
   1752 	for (i = 0 ; i < surface_count; i++) {
   1753 		enum surface_update_type type =
   1754 				det_surface_update(dc, &updates[i]);
   1755 
   1756 		elevate_update_type(&overall_type, type);
   1757 	}
   1758 
   1759 	return overall_type;
   1760 }
   1761 
   1762 /**
   1763  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
   1764  *
   1765  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
   1766  */
   1767 enum surface_update_type dc_check_update_surfaces_for_stream(
   1768 		struct dc *dc,
   1769 		struct dc_surface_update *updates,
   1770 		int surface_count,
   1771 		struct dc_stream_update *stream_update,
   1772 		const struct dc_stream_status *stream_status)
   1773 {
   1774 	int i;
   1775 	enum surface_update_type type;
   1776 
   1777 	if (stream_update)
   1778 		stream_update->stream->update_flags.raw = 0;
   1779 	for (i = 0; i < surface_count; i++)
   1780 		updates[i].surface->update_flags.raw = 0;
   1781 
   1782 	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
   1783 	if (type == UPDATE_TYPE_FULL) {
   1784 		if (stream_update)
   1785 			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
   1786 		for (i = 0; i < surface_count; i++)
   1787 			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
   1788 	}
   1789 
   1790 	if (type == UPDATE_TYPE_FAST) {
   1791 		// If there's an available clock comparator, we use that.
   1792 		if (dc->clk_mgr->funcs->are_clock_states_equal) {
   1793 			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
   1794 				dc->optimized_required = true;
    1795 		// Else we fall back to a mem compare.
   1796 		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
   1797 			dc->optimized_required = true;
   1798 		}
   1799 	}
   1800 
   1801 	return type;
   1802 }
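
/*
 * Hypothetical caller sketch (names are placeholders, not part of this
 * file): a DM layer would typically classify before committing:
 *
 *	enum surface_update_type t = dc_check_update_surfaces_for_stream(
 *			dc, updates, count, stream_update,
 *			dc_stream_get_status(stream));
 *	if (t == UPDATE_TYPE_FULL)
 *		prepare_for_full_update();	// e.g. revalidate bandwidth
 */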
   1803 
   1804 static struct dc_stream_status *stream_get_status(
   1805 	struct dc_state *ctx,
   1806 	struct dc_stream_state *stream)
   1807 {
   1808 	uint8_t i;
   1809 
   1810 	for (i = 0; i < ctx->stream_count; i++) {
   1811 		if (stream == ctx->streams[i]) {
   1812 			return &ctx->stream_status[i];
   1813 		}
   1814 	}
   1815 
   1816 	return NULL;
   1817 }
   1818 
   1819 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
   1820 
   1821 static void copy_surface_update_to_plane(
   1822 		struct dc_plane_state *surface,
   1823 		struct dc_surface_update *srf_update)
   1824 {
   1825 	if (srf_update->flip_addr) {
   1826 		surface->address = srf_update->flip_addr->address;
   1827 		surface->flip_immediate =
   1828 			srf_update->flip_addr->flip_immediate;
   1829 		surface->time.time_elapsed_in_us[surface->time.index] =
   1830 			srf_update->flip_addr->flip_timestamp_in_us -
   1831 				surface->time.prev_update_time_in_us;
   1832 		surface->time.prev_update_time_in_us =
   1833 			srf_update->flip_addr->flip_timestamp_in_us;
   1834 		surface->time.index++;
   1835 		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
   1836 			surface->time.index = 0;
   1837 	}
   1838 
   1839 	if (srf_update->scaling_info) {
   1840 		surface->scaling_quality =
   1841 				srf_update->scaling_info->scaling_quality;
   1842 		surface->dst_rect =
   1843 				srf_update->scaling_info->dst_rect;
   1844 		surface->src_rect =
   1845 				srf_update->scaling_info->src_rect;
   1846 		surface->clip_rect =
   1847 				srf_update->scaling_info->clip_rect;
   1848 	}
   1849 
   1850 	if (srf_update->plane_info) {
   1851 		surface->color_space =
   1852 				srf_update->plane_info->color_space;
   1853 		surface->format =
   1854 				srf_update->plane_info->format;
   1855 		surface->plane_size =
   1856 				srf_update->plane_info->plane_size;
   1857 		surface->rotation =
   1858 				srf_update->plane_info->rotation;
   1859 		surface->horizontal_mirror =
   1860 				srf_update->plane_info->horizontal_mirror;
   1861 		surface->stereo_format =
   1862 				srf_update->plane_info->stereo_format;
   1863 		surface->tiling_info =
   1864 				srf_update->plane_info->tiling_info;
   1865 		surface->visible =
   1866 				srf_update->plane_info->visible;
   1867 		surface->per_pixel_alpha =
   1868 				srf_update->plane_info->per_pixel_alpha;
   1869 		surface->global_alpha =
   1870 				srf_update->plane_info->global_alpha;
   1871 		surface->global_alpha_value =
   1872 				srf_update->plane_info->global_alpha_value;
   1873 		surface->dcc =
   1874 				srf_update->plane_info->dcc;
   1875 		surface->layer_index =
   1876 				srf_update->plane_info->layer_index;
   1877 	}
   1878 
   1879 	if (srf_update->gamma &&
   1880 			(surface->gamma_correction !=
   1881 					srf_update->gamma)) {
   1882 		memcpy(&surface->gamma_correction->entries,
   1883 			&srf_update->gamma->entries,
   1884 			sizeof(struct dc_gamma_entries));
   1885 		surface->gamma_correction->is_identity =
   1886 			srf_update->gamma->is_identity;
   1887 		surface->gamma_correction->num_entries =
   1888 			srf_update->gamma->num_entries;
   1889 		surface->gamma_correction->type =
   1890 			srf_update->gamma->type;
   1891 	}
   1892 
   1893 	if (srf_update->in_transfer_func &&
   1894 			(surface->in_transfer_func !=
   1895 				srf_update->in_transfer_func)) {
   1896 		surface->in_transfer_func->sdr_ref_white_level =
   1897 			srf_update->in_transfer_func->sdr_ref_white_level;
   1898 		surface->in_transfer_func->tf =
   1899 			srf_update->in_transfer_func->tf;
   1900 		surface->in_transfer_func->type =
   1901 			srf_update->in_transfer_func->type;
   1902 		memcpy(&surface->in_transfer_func->tf_pts,
   1903 			&srf_update->in_transfer_func->tf_pts,
   1904 			sizeof(struct dc_transfer_func_distributed_points));
   1905 	}
   1906 
   1907 	if (srf_update->func_shaper &&
   1908 			(surface->in_shaper_func !=
   1909 			srf_update->func_shaper))
   1910 		memcpy(surface->in_shaper_func, srf_update->func_shaper,
   1911 		sizeof(*surface->in_shaper_func));
   1912 
   1913 	if (srf_update->lut3d_func &&
   1914 			(surface->lut3d_func !=
   1915 			srf_update->lut3d_func))
   1916 		memcpy(surface->lut3d_func, srf_update->lut3d_func,
   1917 		sizeof(*surface->lut3d_func));
   1918 
   1919 	if (srf_update->hdr_mult.value)
   1920 		surface->hdr_mult =
   1921 				srf_update->hdr_mult;
   1922 
   1923 	if (srf_update->blend_tf &&
   1924 			(surface->blend_tf !=
   1925 			srf_update->blend_tf))
   1926 		memcpy(surface->blend_tf, srf_update->blend_tf,
   1927 		sizeof(*surface->blend_tf));
   1928 
   1929 	if (srf_update->input_csc_color_matrix)
   1930 		surface->input_csc_color_matrix =
   1931 			*srf_update->input_csc_color_matrix;
   1932 
   1933 	if (srf_update->coeff_reduction_factor)
   1934 		surface->coeff_reduction_factor =
   1935 			*srf_update->coeff_reduction_factor;
   1936 }
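
/*
 * Note: dc_surface_update is sparse - copy_surface_update_to_plane()
 * only copies the members that are non-NULL (or, for hdr_mult, nonzero),
 * so an address-only flip leaves the rest of the plane state untouched.
 */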
   1937 
   1938 static void copy_stream_update_to_stream(struct dc *dc,
   1939 					 struct dc_state *context,
   1940 					 struct dc_stream_state *stream,
   1941 					 struct dc_stream_update *update)
   1942 {
   1943 	struct dc_context *dc_ctx = dc->ctx;
   1944 
   1945 	if (update == NULL || stream == NULL)
   1946 		return;
   1947 
   1948 	if (update->src.height && update->src.width)
   1949 		stream->src = update->src;
   1950 
   1951 	if (update->dst.height && update->dst.width)
   1952 		stream->dst = update->dst;
   1953 
   1954 	if (update->out_transfer_func &&
   1955 	    stream->out_transfer_func != update->out_transfer_func) {
   1956 		stream->out_transfer_func->sdr_ref_white_level =
   1957 			update->out_transfer_func->sdr_ref_white_level;
   1958 		stream->out_transfer_func->tf = update->out_transfer_func->tf;
   1959 		stream->out_transfer_func->type =
   1960 			update->out_transfer_func->type;
   1961 		memcpy(&stream->out_transfer_func->tf_pts,
   1962 		       &update->out_transfer_func->tf_pts,
   1963 		       sizeof(struct dc_transfer_func_distributed_points));
   1964 	}
   1965 
   1966 	if (update->hdr_static_metadata)
   1967 		stream->hdr_static_metadata = *update->hdr_static_metadata;
   1968 
   1969 	if (update->abm_level)
   1970 		stream->abm_level = *update->abm_level;
   1971 
   1972 	if (update->periodic_interrupt0)
   1973 		stream->periodic_interrupt0 = *update->periodic_interrupt0;
   1974 
   1975 	if (update->periodic_interrupt1)
   1976 		stream->periodic_interrupt1 = *update->periodic_interrupt1;
   1977 
   1978 	if (update->gamut_remap)
   1979 		stream->gamut_remap_matrix = *update->gamut_remap;
   1980 
    1981 	/* Note: updating this after mode set is currently not a use case;
    1982 	 * however, if it arises, the OCSC would need to be reprogrammed at
    1983 	 * a minimum.
    1984 	 */
   1985 	if (update->output_color_space)
   1986 		stream->output_color_space = *update->output_color_space;
   1987 
   1988 	if (update->output_csc_transform)
   1989 		stream->csc_color_matrix = *update->output_csc_transform;
   1990 
   1991 	if (update->vrr_infopacket)
   1992 		stream->vrr_infopacket = *update->vrr_infopacket;
   1993 
   1994 	if (update->dpms_off)
   1995 		stream->dpms_off = *update->dpms_off;
   1996 
   1997 	if (update->vsc_infopacket)
   1998 		stream->vsc_infopacket = *update->vsc_infopacket;
   1999 
   2000 	if (update->vsp_infopacket)
   2001 		stream->vsp_infopacket = *update->vsp_infopacket;
   2002 
   2003 	if (update->dither_option)
   2004 		stream->dither_option = *update->dither_option;
   2005 	/* update current stream with writeback info */
   2006 	if (update->wb_update) {
   2007 		int i;
   2008 
   2009 		stream->num_wb_info = update->wb_update->num_wb_info;
   2010 		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
   2011 		for (i = 0; i < stream->num_wb_info; i++)
   2012 			stream->writeback_info[i] =
   2013 				update->wb_update->writeback_info[i];
   2014 	}
   2015 	if (update->dsc_config) {
   2016 		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
   2017 		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
   2018 		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
   2019 				       update->dsc_config->num_slices_v != 0);
   2020 
    2021 		/* Use a temporary context for validating the new DSC config */
   2022 		struct dc_state *dsc_validate_context = dc_create_state(dc);
   2023 
   2024 		if (dsc_validate_context) {
   2025 			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
   2026 
   2027 			stream->timing.dsc_cfg = *update->dsc_config;
   2028 			stream->timing.flags.DSC = enable_dsc;
   2029 			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
   2030 				stream->timing.dsc_cfg = old_dsc_cfg;
   2031 				stream->timing.flags.DSC = old_dsc_enabled;
   2032 				update->dsc_config = NULL;
   2033 			}
   2034 
   2035 			dc_release_state(dsc_validate_context);
   2036 		} else {
   2037 			DC_ERROR("Failed to allocate new validate context for DSC change\n");
   2038 			update->dsc_config = NULL;
   2039 		}
   2040 	}
   2041 }
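
/*
 * Note on the DSC path above: the new config is validated against a
 * throwaway copy of the current state; on failure the stream timing is
 * rolled back and update->dsc_config is cleared so later commit stages
 * see no DSC change. In outline:
 *
 *	trial = dc_create_state(dc);
 *	dc_resource_state_copy_construct(dc->current_state, trial);
 *	apply dsc_cfg; validate_bandwidth(dc, trial, true) or revert;
 *	dc_release_state(trial);
 */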
   2042 
   2043 static void commit_planes_do_stream_update(struct dc *dc,
   2044 		struct dc_stream_state *stream,
   2045 		struct dc_stream_update *stream_update,
   2046 		enum surface_update_type update_type,
   2047 		struct dc_state *context)
   2048 {
   2049 	int j;
   2050 	bool should_program_abm;
   2051 
   2052 	// Stream updates
   2053 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
   2054 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
   2055 
    2056 		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
   2057 
   2058 			if (stream_update->periodic_interrupt0 &&
   2059 					dc->hwss.setup_periodic_interrupt)
   2060 				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
   2061 
   2062 			if (stream_update->periodic_interrupt1 &&
   2063 					dc->hwss.setup_periodic_interrupt)
   2064 				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
   2065 
   2066 			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
   2067 					stream_update->vrr_infopacket ||
   2068 					stream_update->vsc_infopacket ||
   2069 					stream_update->vsp_infopacket) {
   2070 				resource_build_info_frame(pipe_ctx);
   2071 				dc->hwss.update_info_frame(pipe_ctx);
   2072 			}
   2073 
   2074 			if (stream_update->hdr_static_metadata &&
   2075 					stream->use_dynamic_meta &&
   2076 					dc->hwss.set_dmdata_attributes &&
   2077 					pipe_ctx->stream->dmdata_address.quad_part != 0)
   2078 				dc->hwss.set_dmdata_attributes(pipe_ctx);
   2079 
   2080 			if (stream_update->gamut_remap)
   2081 				dc_stream_set_gamut_remap(dc, stream);
   2082 
   2083 			if (stream_update->output_csc_transform)
   2084 				dc_stream_program_csc_matrix(dc, stream);
   2085 
   2086 			if (stream_update->dither_option) {
   2087 				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
   2088 				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
   2089 									&pipe_ctx->stream->bit_depth_params);
   2090 				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
   2091 						&stream->bit_depth_params,
   2092 						&stream->clamping);
   2093 				while (odm_pipe) {
   2094 					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
   2095 							&stream->bit_depth_params,
   2096 							&stream->clamping);
   2097 					odm_pipe = odm_pipe->next_odm_pipe;
   2098 				}
   2099 			}
   2100 
   2101 			if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
   2102 				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
   2103 				dp_update_dsc_config(pipe_ctx);
   2104 				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
   2105 			}
    2106 			/* Full frontend update only beyond this point */
   2107 			if (update_type == UPDATE_TYPE_FAST)
   2108 				continue;
   2109 
   2110 			if (stream_update->dpms_off) {
   2111 				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
   2112 
   2113 				if (*stream_update->dpms_off) {
   2114 					core_link_disable_stream(pipe_ctx);
   2115 					/* for dpms, keep acquired resources*/
   2116 					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
   2117 						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
   2118 
   2119 					dc->hwss.optimize_bandwidth(dc, dc->current_state);
   2120 				} else {
   2121 					if (dc->optimize_seamless_boot_streams == 0)
   2122 						dc->hwss.prepare_bandwidth(dc, dc->current_state);
   2123 
   2124 					core_link_enable_stream(dc->current_state, pipe_ctx);
   2125 				}
   2126 
   2127 				dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
   2128 			}
   2129 
   2130 			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
   2131 				should_program_abm = true;
   2132 
    2133 				// If OTG funcs are defined, check whether blanked before programming
   2134 				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
   2135 					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
   2136 						should_program_abm = false;
   2137 
   2138 				if (should_program_abm) {
   2139 					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
   2140 						pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
   2141 					} else {
   2142 						pipe_ctx->stream_res.abm->funcs->set_abm_level(
   2143 							pipe_ctx->stream_res.abm, stream->abm_level);
   2144 					}
   2145 				}
   2146 			}
   2147 		}
   2148 	}
   2149 }
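
/*
 * Reading aid: everything above the UPDATE_TYPE_FAST early-continue in
 * commit_planes_do_stream_update() is safe on a live pipe (info frames,
 * gamut remap, output CSC, dither, DSC reconfig under the global pipe
 * lock); the dpms and ABM programming below it only runs for MED/FULL
 * updates.
 */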
   2150 
   2151 static void commit_planes_for_stream(struct dc *dc,
   2152 		struct dc_surface_update *srf_updates,
   2153 		int surface_count,
   2154 		struct dc_stream_state *stream,
   2155 		struct dc_stream_update *stream_update,
   2156 		enum surface_update_type update_type,
   2157 		struct dc_state *context)
   2158 {
   2159 	int i, j;
   2160 	struct pipe_ctx *top_pipe_to_program = NULL;
   2161 
   2162 	if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
    2163 		/* The seamless boot optimization keeps clocks and watermarks high
    2164 		 * until the first flip; after that, optimization is required to
    2165 		 * lower bandwidth. Note that UEFI is expected to light up only a
    2166 		 * single display on POST, so we only expect one stream with the
    2167 		 * seamless boot flag set.
    2168 		 */
   2169 		if (stream->apply_seamless_boot_optimization) {
   2170 			stream->apply_seamless_boot_optimization = false;
   2171 			dc->optimize_seamless_boot_streams--;
   2172 
   2173 			if (dc->optimize_seamless_boot_streams == 0)
   2174 				dc->optimized_required = true;
   2175 		}
   2176 	}
   2177 
   2178 	if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
   2179 		dc->hwss.prepare_bandwidth(dc, context);
   2180 		context_clock_trace(dc, context);
   2181 	}
   2182 
   2183 	// Stream updates
   2184 	if (stream_update)
   2185 		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
   2186 
   2187 	if (surface_count == 0) {
    2188 		/*
    2189 		 * When turning off the screen there is no need to program the
    2190 		 * front end a second time; just return after programming blank.
    2191 		 */
   2192 		if (dc->hwss.apply_ctx_for_surface)
   2193 			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
   2194 		if (dc->hwss.program_front_end_for_ctx)
   2195 			dc->hwss.program_front_end_for_ctx(dc, context);
   2196 
   2197 		return;
   2198 	}
   2199 
   2200 	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
   2201 		for (i = 0; i < surface_count; i++) {
   2202 			struct dc_plane_state *plane_state = srf_updates[i].surface;
    2203 			/* Set logical flag for lock/unlock use */
   2204 			for (j = 0; j < dc->res_pool->pipe_count; j++) {
   2205 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
   2206 				if (!pipe_ctx->plane_state)
   2207 					continue;
   2208 				if (pipe_ctx->plane_state != plane_state)
   2209 					continue;
   2210 				plane_state->triplebuffer_flips = false;
    2211 				if (update_type == UPDATE_TYPE_FAST &&
    2212 					dc->hwss.program_triplebuffer != NULL &&
    2213 					!plane_state->flip_immediate &&
    2214 					!dc->debug.disable_tri_buf) {
    2215 					/* Triple buffer for VUpdate only */
    2216 					plane_state->triplebuffer_flips = true;
    2217 				}
   2218 			}
   2219 		}
   2220 	}
   2221 
   2222 	// Update Type FULL, Surface updates
   2223 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
   2224 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
   2225 
   2226 		if (!pipe_ctx->top_pipe &&
   2227 			!pipe_ctx->prev_odm_pipe &&
   2228 			pipe_ctx->stream &&
   2229 			pipe_ctx->stream == stream) {
   2230 			struct dc_stream_status *stream_status = NULL;
   2231 
   2232 			top_pipe_to_program = pipe_ctx;
   2233 
   2234 			if (!pipe_ctx->plane_state)
   2235 				continue;
   2236 
    2237 			/* Full frontend update only beyond this point */
   2238 			if (update_type == UPDATE_TYPE_FAST)
   2239 				continue;
   2240 
   2241 			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
   2242 
   2243 			if (dc->hwss.program_triplebuffer != NULL &&
   2244 				!dc->debug.disable_tri_buf) {
    2245 				/* Turn off triple buffering for a full update */
   2246 				dc->hwss.program_triplebuffer(
   2247 					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
   2248 			}
   2249 			stream_status =
   2250 				stream_get_status(context, pipe_ctx->stream);
   2251 
   2252 			if (dc->hwss.apply_ctx_for_surface)
   2253 				dc->hwss.apply_ctx_for_surface(
   2254 					dc, pipe_ctx->stream, stream_status->plane_count, context);
   2255 		}
   2256 	}
   2257 	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
   2258 		dc->hwss.program_front_end_for_ctx(dc, context);
   2259 #ifdef CONFIG_DRM_AMD_DC_DCN
   2260 		if (dc->debug.validate_dml_output) {
   2261 			for (i = 0; i < dc->res_pool->pipe_count; i++) {
   2262 				struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
   2263 				if (cur_pipe.stream == NULL)
   2264 					continue;
   2265 
   2266 				cur_pipe.plane_res.hubp->funcs->validate_dml_output(
   2267 						cur_pipe.plane_res.hubp, dc->ctx,
   2268 						&context->res_ctx.pipe_ctx[i].rq_regs,
   2269 						&context->res_ctx.pipe_ctx[i].dlg_regs,
   2270 						&context->res_ctx.pipe_ctx[i].ttu_regs);
   2271 			}
   2272 		}
   2273 #endif
   2274 	}
   2275 
   2276 	// Update Type FAST, Surface updates
   2277 	if (update_type == UPDATE_TYPE_FAST) {
    2278 		/* Lock the top pipe while updating plane addrs, since freesync
    2279 		 * requires plane addr update event triggers to be synchronized.
    2280 		 * top_pipe_to_program is expected to never be NULL.
    2281 		 */
   2282 		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
   2283 
   2284 		if (dc->hwss.set_flip_control_gsl)
   2285 			for (i = 0; i < surface_count; i++) {
   2286 				struct dc_plane_state *plane_state = srf_updates[i].surface;
   2287 
   2288 				for (j = 0; j < dc->res_pool->pipe_count; j++) {
   2289 					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
   2290 
   2291 					if (pipe_ctx->stream != stream)
   2292 						continue;
   2293 
   2294 					if (pipe_ctx->plane_state != plane_state)
   2295 						continue;
   2296 
   2297 					// GSL has to be used for flip immediate
   2298 					dc->hwss.set_flip_control_gsl(pipe_ctx,
   2299 							plane_state->flip_immediate);
   2300 				}
   2301 			}
   2302 		/* Perform requested Updates */
   2303 		for (i = 0; i < surface_count; i++) {
   2304 			struct dc_plane_state *plane_state = srf_updates[i].surface;
   2305 
   2306 			for (j = 0; j < dc->res_pool->pipe_count; j++) {
   2307 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
   2308 
   2309 				if (pipe_ctx->stream != stream)
   2310 					continue;
   2311 
   2312 				if (pipe_ctx->plane_state != plane_state)
   2313 					continue;
    2314 				/* Program triple buffer after lock, based on flip type */
    2315 				if (dc->hwss.program_triplebuffer != NULL &&
    2316 					!dc->debug.disable_tri_buf) {
    2317 					/* Only enable triple buffering for a fast update */
   2318 					dc->hwss.program_triplebuffer(
   2319 						dc, pipe_ctx, plane_state->triplebuffer_flips);
   2320 				}
   2321 				if (srf_updates[i].flip_addr)
   2322 					dc->hwss.update_plane_addr(dc, pipe_ctx);
   2323 			}
   2324 		}
   2325 
   2326 		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
   2327 	}
   2328 
   2329 	// Fire manual trigger only when bottom plane is flipped
   2330 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
   2331 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
   2332 
   2333 		if (pipe_ctx->bottom_pipe ||
   2334 				!pipe_ctx->stream ||
   2335 				pipe_ctx->stream != stream ||
   2336 				!pipe_ctx->plane_state->update_flags.bits.addr_update)
   2337 			continue;
   2338 
   2339 		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
   2340 			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
   2341 	}
   2342 }
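
/*
 * Locking sketch for the FAST path above (illustrative):
 *
 *	dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
 *	for each surface:
 *		program_triplebuffer();		// if enabled
 *		update_plane_addr();		// if flip_addr present
 *	dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
 *
 * which keeps the flips atomic so that freesync-triggered address
 * updates land in the same vupdate window.
 */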
   2343 
   2344 void dc_commit_updates_for_stream(struct dc *dc,
   2345 		struct dc_surface_update *srf_updates,
   2346 		int surface_count,
   2347 		struct dc_stream_state *stream,
   2348 		struct dc_stream_update *stream_update,
   2349 		struct dc_state *state)
   2350 {
   2351 	const struct dc_stream_status *stream_status;
   2352 	enum surface_update_type update_type;
   2353 	struct dc_state *context;
   2354 	struct dc_context *dc_ctx = dc->ctx;
   2355 	int i;
   2356 
   2357 	stream_status = dc_stream_get_status(stream);
   2358 	context = dc->current_state;
   2359 
   2360 	update_type = dc_check_update_surfaces_for_stream(
   2361 				dc, srf_updates, surface_count, stream_update, stream_status);
   2362 
   2363 	if (update_type >= update_surface_trace_level)
   2364 		update_surface_trace(dc, srf_updates, surface_count);
    2365 
   2367 	if (update_type >= UPDATE_TYPE_FULL) {
   2368 
   2369 		/* initialize scratch memory for building context */
   2370 		context = dc_create_state(dc);
   2371 		if (context == NULL) {
   2372 			DC_ERROR("Failed to allocate new validate context!\n");
   2373 			return;
   2374 		}
   2375 
   2376 		dc_resource_state_copy_construct(state, context);
   2377 
   2378 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
   2379 			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
   2380 			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
   2381 
   2382 			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
   2383 				new_pipe->plane_state->force_full_update = true;
   2384 		}
   2385 	}
    2386 
   2388 	for (i = 0; i < surface_count; i++) {
   2389 		struct dc_plane_state *surface = srf_updates[i].surface;
   2390 
   2391 		copy_surface_update_to_plane(surface, &srf_updates[i]);
    2393 	}
   2394 
   2395 	copy_stream_update_to_stream(dc, context, stream, stream_update);
   2396 
   2397 	commit_planes_for_stream(
   2398 				dc,
   2399 				srf_updates,
   2400 				surface_count,
   2401 				stream,
   2402 				stream_update,
   2403 				update_type,
   2404 				context);
    2405 	/* Update current state */
   2406 	if (dc->current_state != context) {
   2407 
   2408 		struct dc_state *old = dc->current_state;
   2409 
   2410 		dc->current_state = context;
   2411 		dc_release_state(old);
   2412 
   2413 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
   2414 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
   2415 
   2416 			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
   2417 				pipe_ctx->plane_state->force_full_update = false;
   2418 		}
   2419 	}
    2420 	/* Use current_state to update watermarks, etc. */
   2421 	if (update_type >= UPDATE_TYPE_FULL)
   2422 		dc_post_update_surfaces_to_stream(dc);
   2426 }
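
/*
 * Hypothetical end-to-end usage (sketch; 'plane' and 'flip' are
 * placeholders): an address-only flip stays on the FAST path:
 *
 *	struct dc_surface_update upd = {
 *		.surface = plane,
 *		.flip_addr = &flip,
 *	};
 *	dc_commit_updates_for_stream(dc, &upd, 1, stream, NULL,
 *			dc->current_state);
 */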
   2427 
   2428 uint8_t dc_get_current_stream_count(struct dc *dc)
   2429 {
   2430 	return dc->current_state->stream_count;
   2431 }
   2432 
   2433 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
   2434 {
   2435 	if (i < dc->current_state->stream_count)
   2436 		return dc->current_state->streams[i];
   2437 	return NULL;
   2438 }
   2439 
   2440 enum dc_irq_source dc_interrupt_to_irq_source(
   2441 		struct dc *dc,
   2442 		uint32_t src_id,
   2443 		uint32_t ext_id)
   2444 {
   2445 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
   2446 }
   2447 
   2448 /**
   2449  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
   2450  */
   2451 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
   2452 {
   2454 	if (dc == NULL)
   2455 		return false;
   2456 
   2457 	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
   2458 }
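
/*
 * Example (illustrative): enabling an interrupt source after translating
 * the raw src_id/ext_id delivered to the IRQ handler:
 *
 *	enum dc_irq_source src =
 *			dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *	dc_interrupt_set(dc, src, true);
 */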
   2459 
   2460 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
   2461 {
   2462 	dal_irq_service_ack(dc->res_pool->irqs, src);
   2463 }
   2464 
   2465 void dc_set_power_state(
   2466 	struct dc *dc,
   2467 	enum dc_acpi_cm_power_state power_state)
   2468 {
   2469 	struct kref refcount;
   2470 	struct display_mode_lib *dml;
   2471 
   2472 	switch (power_state) {
   2473 	case DC_ACPI_CM_POWER_STATE_D0:
   2474 		dc_resource_state_construct(dc, dc->current_state);
   2475 
   2476 		if (dc->ctx->dmub_srv)
   2477 			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
   2478 
   2479 		dc->hwss.init_hw(dc);
   2480 
   2481 		if (dc->hwss.init_sys_ctx != NULL &&
   2482 			dc->vm_pa_config.valid) {
   2483 			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
   2484 		}
   2485 
   2486 		break;
   2487 	default:
   2488 		ASSERT(dc->current_state->stream_count == 0);
   2489 		/* Zero out the current context so that on resume we start with
   2490 		 * clean state, and dc hw programming optimizations will not
   2491 		 * cause any trouble.
   2492 		 */
   2493 		dml = kzalloc(sizeof(struct display_mode_lib),
   2494 				GFP_KERNEL);
   2495 
   2496 		ASSERT(dml);
   2497 		if (!dml)
   2498 			return;
   2499 
   2500 		/* Preserve refcount */
   2501 		refcount = dc->current_state->refcount;
   2502 		/* Preserve display mode lib */
   2503 		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
   2504 
   2505 		dc_resource_state_destruct(dc->current_state);
   2506 		memset(dc->current_state, 0,
   2507 				sizeof(*dc->current_state));
   2508 
   2509 		dc->current_state->refcount = refcount;
   2510 		dc->current_state->bw_ctx.dml = *dml;
   2511 
   2512 		kfree(dml);
   2513 
   2514 		break;
   2515 	}
   2516 }
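
/*
 * Summary of the switch above: D0 reconstructs the resource state and
 * re-initializes hw; any other power state zeroes dc->current_state
 * while preserving only its kref and DML instance, so that resume
 * starts from a clean context.
 */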
   2517 
   2518 void dc_resume(struct dc *dc)
   2519 {
   2521 	uint32_t i;
   2522 
   2523 	for (i = 0; i < dc->link_count; i++)
   2524 		core_link_resume(dc->links[i]);
   2525 }
   2526 
   2527 unsigned int dc_get_current_backlight_pwm(struct dc *dc)
   2528 {
   2529 	struct abm *abm = dc->res_pool->abm;
   2530 
   2531 	if (abm)
   2532 		return abm->funcs->get_current_backlight(abm);
   2533 
   2534 	return 0;
   2535 }
   2536 
   2537 unsigned int dc_get_target_backlight_pwm(struct dc *dc)
   2538 {
   2539 	struct abm *abm = dc->res_pool->abm;
   2540 
   2541 	if (abm)
   2542 		return abm->funcs->get_target_backlight(abm);
   2543 
   2544 	return 0;
   2545 }
   2546 
   2547 bool dc_is_dmcu_initialized(struct dc *dc)
   2548 {
   2549 	struct dmcu *dmcu = dc->res_pool->dmcu;
   2550 
   2551 	if (dmcu)
   2552 		return dmcu->funcs->is_dmcu_initialized(dmcu);
   2553 	return false;
   2554 }
   2555 
   2556 bool dc_submit_i2c(
   2557 		struct dc *dc,
   2558 		uint32_t link_index,
   2559 		struct i2c_command *cmd)
   2560 {
    2562 	struct dc_link *link = dc->links[link_index];
    2563 	struct ddc_service *ddc = link->ddc;

    2564 	return dce_i2c_submit_command(
   2565 		dc->res_pool,
   2566 		ddc->ddc_pin,
   2567 		cmd);
   2568 }
   2569 
   2570 bool dc_submit_i2c_oem(
   2571 		struct dc *dc,
   2572 		struct i2c_command *cmd)
   2573 {
    2574 	struct ddc_service *ddc = dc->res_pool->oem_device;

    2575 	return dce_i2c_submit_command(
   2576 		dc->res_pool,
   2577 		ddc->ddc_pin,
   2578 		cmd);
   2579 }
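
/*
 * Caller sketch (assumes the i2c_command/i2c_payload layout from
 * dc_ddc_types.h; values are illustrative):
 *
 *	uint8_t offset = 0x00;
 *	struct i2c_payload payload = {
 *		.write = true, .address = 0x50,
 *		.length = 1, .data = &offset,
 *	};
 *	struct i2c_command cmd = {
 *		.payloads = &payload, .number_of_payloads = 1,
 *		.engine = I2C_COMMAND_ENGINE_DEFAULT, .speed = 100,
 *	};
 *	dc_submit_i2c(dc, link_index, &cmd);
 */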
   2580 
   2581 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
   2582 {
   2583 	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
   2584 		BREAK_TO_DEBUGGER();
   2585 		return false;
   2586 	}
   2587 
   2588 	dc_sink_retain(sink);
   2589 
   2590 	dc_link->remote_sinks[dc_link->sink_count] = sink;
   2591 	dc_link->sink_count++;
   2592 
   2593 	return true;
   2594 }
   2595 
   2596 /**
   2597  * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
   2598  *
   2599  * EDID length is in bytes
   2600  */
   2601 struct dc_sink *dc_link_add_remote_sink(
   2602 		struct dc_link *link,
   2603 		const uint8_t *edid,
   2604 		int len,
   2605 		struct dc_sink_init_data *init_data)
   2606 {
   2607 	struct dc_sink *dc_sink;
   2608 	enum dc_edid_status edid_status;
   2609 
   2610 	if (len > DC_MAX_EDID_BUFFER_SIZE) {
   2611 		dm_error("Max EDID buffer size breached!\n");
   2612 		return NULL;
   2613 	}
   2614 
   2615 	if (!init_data) {
   2616 		BREAK_TO_DEBUGGER();
   2617 		return NULL;
   2618 	}
   2619 
   2620 	if (!init_data->link) {
   2621 		BREAK_TO_DEBUGGER();
   2622 		return NULL;
   2623 	}
   2624 
   2625 	dc_sink = dc_sink_create(init_data);
   2626 
   2627 	if (!dc_sink)
   2628 		return NULL;
   2629 
   2630 	memmove(dc_sink->dc_edid.raw_edid, edid, len);
   2631 	dc_sink->dc_edid.length = len;
   2632 
   2633 	if (!link_add_remote_sink_helper(
   2634 			link,
   2635 			dc_sink))
   2636 		goto fail_add_sink;
   2637 
   2638 	edid_status = dm_helpers_parse_edid_caps(
   2639 			link->ctx,
   2640 			&dc_sink->dc_edid,
   2641 			&dc_sink->edid_caps);
   2642 
    2643 	/*
    2644 	 * Treat the device as having no EDID if
    2645 	 * EDID parsing fails.
    2646 	 */
   2647 	if (edid_status != EDID_OK) {
   2648 		dc_sink->dc_edid.length = 0;
    2649 		dm_error("Bad EDID, status %d!\n", edid_status);
   2650 	}
   2651 
   2652 	return dc_sink;
   2653 
   2654 fail_add_sink:
   2655 	dc_sink_release(dc_sink);
   2656 	return NULL;
   2657 }
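
/*
 * Illustrative call (e.g. for an MST sink whose EDID was read by the DM
 * layer; buffer names are placeholders, and the dc_sink_init_data fields
 * shown are assumptions from dc_types.h):
 *
 *	struct dc_sink_init_data init = {
 *		.link = link,
 *		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *	};
 *	struct dc_sink *sink = dc_link_add_remote_sink(link, edid_buf,
 *			edid_len, &init);
 */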
   2658 
   2659 /**
   2660  * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
   2661  *
   2662  * Note that this just removes the struct dc_sink - it doesn't
   2663  * program hardware or alter other members of dc_link
   2664  */
   2665 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
   2666 {
   2667 	int i;
   2668 
   2669 	if (!link->sink_count) {
   2670 		BREAK_TO_DEBUGGER();
   2671 		return;
   2672 	}
   2673 
   2674 	for (i = 0; i < link->sink_count; i++) {
   2675 		if (link->remote_sinks[i] == sink) {
   2676 			dc_sink_release(sink);
   2677 			link->remote_sinks[i] = NULL;
   2678 
    2679 			/* Shrink the array to remove the empty slot */
   2680 			while (i < link->sink_count - 1) {
   2681 				link->remote_sinks[i] = link->remote_sinks[i+1];
   2682 				i++;
   2683 			}
   2684 			link->remote_sinks[i] = NULL;
   2685 			link->sink_count--;
   2686 			return;
   2687 		}
   2688 	}
   2689 }
   2690 
   2691 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
   2692 {
   2693 	info->displayClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
   2694 	info->engineClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
   2695 	info->memoryClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
   2696 	info->maxSupportedDppClock		= (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
   2697 	info->dppClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
   2698 	info->socClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
   2699 	info->dcfClockDeepSleep			= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
   2700 	info->fClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
   2701 	info->phyClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
    2702 }

   2703 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
   2704 {
   2705 	if (dc->hwss.set_clock)
   2706 		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
   2707 	return DC_ERROR_UNEXPECTED;
   2708 }
   2709 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
   2710 {
   2711 	if (dc->hwss.get_clock)
   2712 		dc->hwss.get_clock(dc, clock_type, clock_cfg);
   2713 }
   2714