/* Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <string.h>
#include "vpelib.h"
#include "vpe_priv.h"
#include "common.h"
#include "color_bg.h"
#include "color_gamma.h"
#include "cmd_builder.h"
#include "resource.h"
#include "color.h"
#include "vpec.h"
#include "vpe_desc_writer.h"
#include "dpp.h"
#include "mpc.h"
#include "opp.h"
#include "geometric_scaling.h"
#include <stdlib.h>
#include <time.h>

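/* Apply the caller-supplied debug overrides on top of the default debug options.
 * Only the fields whose corresponding override flag is set by the user are copied.
 */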
static void override_debug_option(
    struct vpe_debug_options *debug, const struct vpe_debug_options *user_debug)
{
    if ((debug == NULL) || (user_debug == NULL)) {
        return;
    }

    if (user_debug->flags.bg_bit_depth)
        debug->bg_bit_depth = user_debug->bg_bit_depth;

    if (user_debug->flags.cm_in_bypass)
        debug->cm_in_bypass = user_debug->cm_in_bypass;

    if (user_debug->flags.vpcnvc_bypass)
        debug->vpcnvc_bypass = user_debug->vpcnvc_bypass;

    if (user_debug->flags.mpc_bypass)
        debug->mpc_bypass = user_debug->mpc_bypass;

    if (user_debug->flags.disable_reuse_bit)
        debug->disable_reuse_bit = user_debug->disable_reuse_bit;

    if (user_debug->flags.identity_3dlut)
        debug->identity_3dlut = user_debug->identity_3dlut;

    if (user_debug->flags.sce_3dlut)
        debug->sce_3dlut = user_debug->sce_3dlut;

    if (user_debug->enable_mem_low_power.flags.cm)
        debug->enable_mem_low_power.bits.cm = user_debug->enable_mem_low_power.bits.cm;

    if (user_debug->enable_mem_low_power.flags.dscl)
        debug->enable_mem_low_power.bits.dscl = user_debug->enable_mem_low_power.bits.dscl;

    if (user_debug->enable_mem_low_power.flags.mpc)
        debug->enable_mem_low_power.bits.mpc = user_debug->enable_mem_low_power.bits.mpc;

    if (user_debug->flags.bg_color_fill_only)
        debug->bg_color_fill_only = user_debug->bg_color_fill_only;

    if (user_debug->flags.assert_when_not_support)
        debug->assert_when_not_support = user_debug->assert_when_not_support;

    if (user_debug->flags.bypass_ogam)
        debug->bypass_ogam = user_debug->bypass_ogam;

    if (user_debug->flags.bypass_gamcor)
        debug->bypass_gamcor = user_debug->bypass_gamcor;

    if (user_debug->flags.bypass_dpp_gamut_remap)
        debug->bypass_dpp_gamut_remap = user_debug->bypass_dpp_gamut_remap;

    if (user_debug->flags.bypass_post_csc)
        debug->bypass_post_csc = user_debug->bypass_post_csc;

    if (user_debug->flags.clamping_setting) {
        debug->clamping_setting = user_debug->clamping_setting;
        debug->clamping_params = user_debug->clamping_params;
    }

    if (user_debug->flags.expansion_mode)
        debug->expansion_mode = user_debug->expansion_mode;

    if (user_debug->flags.bypass_per_pixel_alpha)
        debug->bypass_per_pixel_alpha = user_debug->bypass_per_pixel_alpha;

    if (user_debug->flags.opp_pipe_crc_ctrl)
        debug->opp_pipe_crc_ctrl = user_debug->opp_pipe_crc_ctrl;

    if (user_debug->flags.dpp_crc_ctrl)
        debug->dpp_crc_ctrl = user_debug->dpp_crc_ctrl;

    if (user_debug->flags.mpc_crc_ctrl)
        debug->mpc_crc_ctrl = user_debug->mpc_crc_ctrl;

    if (user_debug->flags.visual_confirm)
        debug->visual_confirm_params = user_debug->visual_confirm_params;

    if (user_debug->flags.skip_optimal_tap_check)
        debug->skip_optimal_tap_check = user_debug->skip_optimal_tap_check;

    if (user_debug->flags.bypass_blndgam)
        debug->bypass_blndgam = user_debug->bypass_blndgam;

    if (user_debug->flags.disable_3dlut_cache)
        debug->disable_3dlut_cache = user_debug->disable_3dlut_cache;
}

#ifdef VPE_BUILD_1_1
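/* Reconcile the requested collaboration mode with the IP level: on VPE 1.1 a random
 * collaborate sync index is generated on first use, while on VPE 1.0 collaboration
 * mode is forced off.
 */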
static void verify_collaboration_mode(struct vpe_priv *vpe_priv)
{
    if (vpe_priv->pub.level == VPE_IP_LEVEL_1_1) {
        if (vpe_priv->collaboration_mode == true && vpe_priv->collaborate_sync_index == 0) {
            srand((unsigned int)time(NULL)); // Initialization, should only be called once.
            uint32_t randnum = (uint32_t)rand();
            randnum = randnum & 0x0000f000;
            vpe_priv->collaborate_sync_index = (int32_t)randnum;
        }
    } else if (vpe_priv->pub.level == VPE_IP_LEVEL_1_0) {
        vpe_priv->collaboration_mode = false;
    }
}
#endif

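/* Allocate and initialize a VPE instance from the caller-provided init data (memory
 * callbacks, logger and HW version), construct the per-IP resource and apply any debug
 * overrides. Returns the public vpe handle, or NULL on failure.
 */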
struct vpe *vpe_create(const struct vpe_init_data *params)
{
    struct vpe_priv *vpe_priv;
    enum vpe_status status;

    if (!params || (params->funcs.zalloc == NULL) || (params->funcs.free == NULL) ||
        (params->funcs.log == NULL))
        return NULL;

    vpe_priv =
        (struct vpe_priv *)params->funcs.zalloc(params->funcs.mem_ctx, sizeof(struct vpe_priv));
    if (!vpe_priv)
        return NULL;

    vpe_priv->init = *params;

    vpe_priv->pub.level =
        vpe_resource_parse_ip_version(params->ver_major, params->ver_minor, params->ver_rev);

    vpe_priv->pub.version = (VPELIB_API_VERSION_MAJOR << VPELIB_API_VERSION_MAJOR_SHIFT) |
                            (VPELIB_API_VERSION_MINOR << VPELIB_API_VERSION_MINOR_SHIFT);

    status = vpe_construct_resource(vpe_priv, vpe_priv->pub.level, &vpe_priv->resource);
    if (status != VPE_STATUS_OK) {
        vpe_free(vpe_priv);
        return NULL;
    }

    override_debug_option(&vpe_priv->init.debug, &params->debug);

    vpe_color_setup_x_points_distribution();
    vpe_color_setup_x_points_distribution_degamma();

    vpe_priv->ops_support = false;
    vpe_priv->scale_yuv_matrix = true;

#ifdef VPE_BUILD_1_1
    vpe_priv->collaborate_sync_index = 0;
#endif

    return &vpe_priv->pub;
}

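/* Tear down a VPE instance created by vpe_create(): release the constructed resource,
 * the output and stream contexts and any dummy stream data, free the instance and
 * clear the caller's handle.
 */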
void vpe_destroy(struct vpe **vpe)
{
    struct vpe_priv *vpe_priv;

    if (!vpe || ((*vpe) == NULL))
        return;

    vpe_priv = container_of(*vpe, struct vpe_priv, pub);

    vpe_destroy_resource(vpe_priv, &vpe_priv->resource);

    vpe_free_output_ctx(vpe_priv);

    vpe_free_stream_ctx(vpe_priv);

    if (vpe_priv->dummy_input_param)
        vpe_free(vpe_priv->dummy_input_param);

    if (vpe_priv->dummy_stream)
        vpe_free(vpe_priv->dummy_stream);

    vpe_free(vpe_priv);

    *vpe = NULL;
}

/*****************************************************************************************
 * populate_bg_stream
 *   populate a virtual stream for background-only output
 * struct vpe_priv* vpe_priv
 *      [input] vpe private context
 * const struct vpe_build_param* param
 *      [input] original parameter from caller
 * struct stream_ctx* stream_ctx
 *      [input/output] caller provided stream_ctx struct to populate
 *****************************************************************************************/
static enum vpe_status populate_bg_stream(
    struct vpe_priv *vpe_priv, const struct vpe_build_param *param, struct stream_ctx *stream_ctx)
{
    struct vpe_surface_info *surface_info;
    struct vpe_scaling_info *scaling_info;
    struct vpe_scaling_filter_coeffs *polyphaseCoeffs;
    struct vpe_stream *stream;

    if (!param || !stream_ctx)
        return VPE_STATUS_ERROR;

    stream = &stream_ctx->stream;
    stream_ctx->stream_type = VPE_STREAM_TYPE_BG_GEN;

    // If the output surface is too small, don't use it as the dummy input.
    // Request 2x2 instead of 1x1 for bpc safety: treating the output as an RGB 1x1 input
    // needs at least 4 bytes, but if the output is YUV the bpc is smaller and a larger
    // dimension is needed.

    if (param->dst_surface.plane_size.surface_size.width < VPE_MIN_VIEWPORT_SIZE ||
        param->dst_surface.plane_size.surface_size.height < VPE_MIN_VIEWPORT_SIZE ||
        param->dst_surface.plane_size.surface_pitch < 256 / 4 || // 256 bytes, 4 bytes per pixel
        param->target_rect.width < VPE_MIN_VIEWPORT_SIZE ||
        param->target_rect.height < VPE_MIN_VIEWPORT_SIZE) {
        return VPE_STATUS_ERROR;
    }

    // set output surface as our dummy input
    surface_info = &stream->surface_info;
    scaling_info = &stream->scaling_info;
    polyphaseCoeffs = &stream->polyphase_scaling_coeffs;
    surface_info->address.type = VPE_PLN_ADDR_TYPE_GRAPHICS;
    surface_info->address.tmz_surface = param->dst_surface.address.tmz_surface;
    surface_info->address.grph.addr.quad_part =
        param->dst_surface.address.grph.addr.quad_part;

    surface_info->swizzle = VPE_SW_LINEAR; // treat it as linear for simplicity
    surface_info->plane_size.surface_size.x = 0;
    surface_info->plane_size.surface_size.y = 0;
    surface_info->plane_size.surface_size.width = VPE_MIN_VIEWPORT_SIZE; // min width in pixels
    surface_info->plane_size.surface_size.height =
        VPE_MIN_VIEWPORT_SIZE; // min height in pixels
    surface_info->plane_size.surface_pitch = 256 / 4; // pitch in pixels
    surface_info->plane_size.surface_aligned_height = VPE_MIN_VIEWPORT_SIZE;
    surface_info->dcc.enable = false;
    surface_info->format = VPE_SURFACE_PIXEL_FORMAT_GRPH_RGBA8888;
    surface_info->cs.encoding = VPE_PIXEL_ENCODING_RGB;
    surface_info->cs.range = VPE_COLOR_RANGE_FULL;
    surface_info->cs.tf = VPE_TF_G22;
    surface_info->cs.cositing = VPE_CHROMA_COSITING_NONE;
    surface_info->cs.primaries = VPE_PRIMARIES_BT709;
    scaling_info->src_rect.x = 0;
    scaling_info->src_rect.y = 0;
    scaling_info->src_rect.width = VPE_MIN_VIEWPORT_SIZE;
    scaling_info->src_rect.height = VPE_MIN_VIEWPORT_SIZE;
    scaling_info->dst_rect.x = param->target_rect.x;
    scaling_info->dst_rect.y = param->target_rect.y;
    scaling_info->dst_rect.width = VPE_MIN_VIEWPORT_SIZE;
    scaling_info->dst_rect.height = VPE_MIN_VIEWPORT_SIZE;
    scaling_info->taps.v_taps = 4;
    scaling_info->taps.h_taps = 4;
    scaling_info->taps.v_taps_c = 2;
    scaling_info->taps.h_taps_c = 2;

    polyphaseCoeffs->taps = scaling_info->taps;
    polyphaseCoeffs->nb_phases = 64;

    stream->blend_info.blending = true;
    stream->blend_info.pre_multiplied_alpha = false;
    stream->blend_info.global_alpha = true; // hardcoded upon DAL request
    stream->blend_info.global_alpha_value = 0; // fully transparent, as this is a dummy input

    stream->color_adj.brightness = 0.0f;
    stream->color_adj.contrast = 1.0f;
    stream->color_adj.hue = 0.0f;
    stream->color_adj.saturation = 1.0f;
    stream->rotation = VPE_ROTATION_ANGLE_0;
    stream->horizontal_mirror = false;
    stream->vertical_mirror = false;
    stream->enable_luma_key = false;
    stream->lower_luma_bound = 0;
    stream->upper_luma_bound = 0;
    stream->flags.hdr_metadata = 0;
    stream->flags.geometric_scaling = 0;
    stream->use_external_scaling_coeffs = false;

    return VPE_STATUS_OK;
}

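/* Return the number of virtual (library-generated) streams needed for this build:
 * a single background stream when there is no input stream or when bg_color_fill_only
 * is set, otherwise none.
 */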
static uint32_t get_required_virtual_stream_count(
    struct vpe_priv *vpe_priv, const struct vpe_build_param *param)
{
    uint32_t result = 0;

    // Check for the zero-input background stream case.
    // Normally we would increment result instead of returning, but bg_color_fill_only
    // removes all other streams (and therefore other features), so one virtual stream
    // is all that is needed.
    if (param->num_streams == 0 || vpe_priv->init.debug.bg_color_fill_only)
        return 1;

    return result;
}

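/* Copy each caller-supplied stream into its stream context, derive the per-pixel alpha
 * and horizontal-flip settings, and reject blending on the 2nd stream onward when the
 * MPC does not support top-bottom blending.
 */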
static enum vpe_status populate_input_streams(struct vpe_priv *vpe_priv,
    const struct vpe_build_param *param, struct stream_ctx *stream_ctx_base)
{
    enum vpe_status result = VPE_STATUS_OK;
    uint32_t i;
    struct stream_ctx *stream_ctx;
    bool input_h_mirror, output_h_mirror;

    vpe_priv->resource.check_h_mirror_support(&input_h_mirror, &output_h_mirror);

    for (i = 0; i < vpe_priv->num_input_streams; i++) {
        stream_ctx = &stream_ctx_base[i];
        stream_ctx->stream_type = VPE_STREAM_TYPE_INPUT;
        stream_ctx->stream_idx = (int32_t)i;
        stream_ctx->per_pixel_alpha =
            vpe_has_per_pixel_alpha(param->streams[i].surface_info.format);
        if (vpe_priv->init.debug.bypass_per_pixel_alpha) {
            stream_ctx->per_pixel_alpha = false;
        }
        if (param->streams[i].horizontal_mirror && !input_h_mirror && output_h_mirror)
            stream_ctx->flip_horizonal_output = true;
        else
            stream_ctx->flip_horizonal_output = false;

        memcpy(&stream_ctx->stream, &param->streams[i], sizeof(struct vpe_stream));

        /* If top-bottom blending is not supported,
         * the 1st stream can still blend with the background,
         * but the 2nd stream and onward cannot enable blending.
         */
        if (i && param->streams[i].blend_info.blending &&
            !vpe_priv->pub.caps->color_caps.mpc.top_bottom_blending) {
            result = VPE_STATUS_ALPHA_BLENDING_NOT_SUPPORTED;
            break;
        }
    }

    return result;
}

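/* Populate the library-generated (virtual) streams that follow the input streams,
 * currently only the background stream, and derive their per-pixel alpha and
 * horizontal-flip settings.
 */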
static enum vpe_status populate_virtual_streams(struct vpe_priv *vpe_priv,
    const struct vpe_build_param *param, struct stream_ctx *stream_ctx_base,
    uint32_t num_virtual_streams)
{
    enum vpe_status result = VPE_STATUS_OK;
    uint32_t virtual_stream_idx = 0;
    struct stream_ctx *stream_ctx;
    bool input_h_mirror, output_h_mirror;

    vpe_priv->resource.check_h_mirror_support(&input_h_mirror, &output_h_mirror);

    if (param->num_streams == 0 || vpe_priv->init.debug.bg_color_fill_only) {
        if (num_virtual_streams != 1)
            result = VPE_STATUS_ERROR;
        else
            result =
                populate_bg_stream(vpe_priv, param, &vpe_priv->stream_ctx[virtual_stream_idx++]);
    }

    if (result != VPE_STATUS_OK)
        return result;

    for (virtual_stream_idx = 0; virtual_stream_idx < num_virtual_streams; virtual_stream_idx++) {
        stream_ctx = &stream_ctx_base[virtual_stream_idx];
        stream_ctx->stream_idx = virtual_stream_idx + vpe_priv->num_input_streams;
        stream_ctx->per_pixel_alpha =
            vpe_has_per_pixel_alpha(stream_ctx->stream.surface_info.format);
        if (vpe_priv->init.debug.bypass_per_pixel_alpha) {
            stream_ctx->per_pixel_alpha = false;
        }
        if (stream_ctx->stream.horizontal_mirror && !input_h_mirror && output_h_mirror)
            stream_ctx->flip_horizonal_output = true;
        else
            stream_ctx->flip_horizonal_output = false;
    }

    return result;
}

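/* Validate a build request against the ASIC capabilities: checks output, input and
 * tone-map support, (re)allocates the stream contexts, populates the input and virtual
 * streams, calculates the segments and reports the worst-case command/embedded buffer
 * sizes in *req. Must pass before vpe_build_commands() is called.
 */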
enum vpe_status vpe_check_support(
    struct vpe *vpe, const struct vpe_build_param *param, struct vpe_bufs_req *req)
{
    struct vpe_priv *vpe_priv;
    struct vpec *vpec;
    struct dpp *dpp;
    enum vpe_status status;
    struct output_ctx *output_ctx = NULL;
    uint32_t i, required_virtual_streams;

    vpe_priv = container_of(vpe, struct vpe_priv, pub);
    vpec = &vpe_priv->resource.vpec;
    dpp = vpe_priv->resource.dpp[0];
    status = VPE_STATUS_OK;

#ifdef VPE_BUILD_1_1
    vpe_priv->collaboration_mode = param->collaboration_mode;
    vpe_priv->vpe_num_instance = param->num_instances;
    verify_collaboration_mode(vpe_priv);
#endif

    required_virtual_streams = get_required_virtual_stream_count(vpe_priv, param);

    if (!vpe_priv->stream_ctx ||
        vpe_priv->num_streams != (param->num_streams + vpe_priv->num_virtual_streams) ||
        vpe_priv->num_virtual_streams != required_virtual_streams) {
        if (vpe_priv->stream_ctx)
            vpe_free_stream_ctx(vpe_priv);

        vpe_priv->stream_ctx =
            vpe_alloc_stream_ctx(vpe_priv, param->num_streams + required_virtual_streams);
    }

    if (!vpe_priv->stream_ctx)
        status = VPE_STATUS_NO_MEMORY;
    else {
        vpe_priv->num_streams = param->num_streams + required_virtual_streams;
        vpe_priv->num_virtual_streams = required_virtual_streams;
        vpe_priv->num_input_streams = param->num_streams;
    }

    if (param->num_streams == 0 || vpe_priv->init.debug.bg_color_fill_only) {
        vpe_free_stream_ctx(vpe_priv);
        vpe_priv->stream_ctx = vpe_alloc_stream_ctx(vpe_priv, 1);
        vpe_priv->num_streams = required_virtual_streams;
        vpe_priv->num_virtual_streams = required_virtual_streams;
        vpe_priv->num_input_streams = 0;

        if (!vpe_priv->stream_ctx)
            status = VPE_STATUS_NO_MEMORY;
    }

    if (status == VPE_STATUS_OK) {
        // output checking - check per asic support
        status = vpe_check_output_support(vpe, param);
        if (status != VPE_STATUS_OK) {
            vpe_log("fail output support check. status %d\n", (int)status);
        }
    }

    if (status == VPE_STATUS_OK) {
        // input checking - check per asic support
        for (i = 0; i < param->num_streams; i++) {
            status = vpe_check_input_support(vpe, &param->streams[i]);
            if (status != VPE_STATUS_OK) {
                vpe_log("fail input support check. status %d\n", (int)status);
                break;
            }
        }
    }

    if (status == VPE_STATUS_OK) {
        // input checking - check tone map support
        for (i = 0; i < param->num_streams; i++) {
            status = vpe_check_tone_map_support(vpe, &param->streams[i], param);
            if (status != VPE_STATUS_OK) {
                vpe_log("fail tone map support check. status %d\n", (int)status);
                break;
            }
        }
    }

    if (status == VPE_STATUS_OK) {
        // output resource preparation for further checking (cache the result)
        output_ctx = &vpe_priv->output_ctx;
        output_ctx->surface = param->dst_surface;
        output_ctx->bg_color = param->bg_color;
        output_ctx->target_rect = param->target_rect;
        output_ctx->alpha_mode = param->alpha_mode;
        output_ctx->flags.hdr_metadata = param->flags.hdr_metadata;
        output_ctx->hdr_metadata = param->hdr_metadata;

        vpe_priv->num_vpe_cmds = 0;
        output_ctx->clamping_params = vpe_priv->init.debug.clamping_params;
    }

    if (status == VPE_STATUS_OK) {
        // blending support check
        status = populate_input_streams(vpe_priv, param, vpe_priv->stream_ctx);
        if (status != VPE_STATUS_OK)
            vpe_log("fail input stream population. status %d\n", (int)status);
    }

    if (status == VPE_STATUS_OK) {
        status = populate_virtual_streams(vpe_priv, param,
            vpe_priv->stream_ctx + vpe_priv->num_input_streams, vpe_priv->num_virtual_streams);
        if (status != VPE_STATUS_OK)
            vpe_log("fail virtual stream population. status %d\n", (int)status);
    }

    if (status == VPE_STATUS_OK) {
        status = vpe_priv->resource.calculate_segments(vpe_priv, param);
        if (status != VPE_STATUS_OK)
            vpe_log("failed in calculate segments %d\n", (int)status);
    }

    if (status == VPE_STATUS_OK) {
        // if HW bg_color checking is not supported, verify that the bg_color
        // falls within the output gamut
        if (!vpe_priv->pub.caps->bg_color_check_support) {
            status = vpe_priv->resource.check_bg_color_support(vpe_priv, &output_ctx->bg_color);
            if (status != VPE_STATUS_OK) {
                vpe_log(
                    "failed in checking the background color versus the output color space %d\n",
                    (int)status);
            }
        }
    }

    if (status == VPE_STATUS_OK) {
        // calculate the worst-case buffer sizes needed
        vpe_priv->resource.get_bufs_req(vpe_priv, &vpe_priv->bufs_required);
        *req = vpe_priv->bufs_required;
        vpe_priv->ops_support = true;
    }

    if (status == VPE_STATUS_OK) {
        status = vpe_validate_geometric_scaling_support(param);
    }

    if (vpe_priv->init.debug.assert_when_not_support)
        VPE_ASSERT(status == VPE_STATUS_OK);

    return status;
}

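/* Fill the caller-provided command buffer space with num_dword no-op entries using the
 * command builder of the constructed resource.
 */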
enum vpe_status vpe_build_noops(struct vpe *vpe, uint32_t num_dword, uint32_t **ppcmd_space)
{
    struct vpe_priv *vpe_priv;
    struct cmd_builder *builder;
    enum vpe_status status;

    if (!vpe || !ppcmd_space || ((*ppcmd_space) == NULL))
        return VPE_STATUS_ERROR;

    vpe_priv = container_of(vpe, struct vpe_priv, pub);

    builder = &vpe_priv->resource.cmd_builder;

    status = builder->build_noops(vpe_priv, ppcmd_space, num_dword);

    return status;
}

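/* Check that the parameters passed to vpe_build_commands() match what was cached during
 * the last vpe_check_support() call (streams, collaboration settings, alpha mode,
 * background color, target rect and destination surface). Returns false on any mismatch.
 */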
static bool validate_cached_param(struct vpe_priv *vpe_priv, const struct vpe_build_param *param)
{
    uint32_t i;
    struct output_ctx *output_ctx;

    if (vpe_priv->num_input_streams != param->num_streams &&
        !(vpe_priv->init.debug.bg_color_fill_only == true && vpe_priv->num_streams == 1))
        return false;

#ifdef VPE_BUILD_1_1
    if (vpe_priv->collaboration_mode != param->collaboration_mode)
        return false;

    if (param->num_instances > 0 && vpe_priv->vpe_num_instance != param->num_instances)
        return false;
#endif

    for (i = 0; i < vpe_priv->num_input_streams; i++) {
        struct vpe_stream stream = param->streams[i];

        vpe_clip_stream(
            &stream.scaling_info.src_rect, &stream.scaling_info.dst_rect, &param->target_rect);

        if (memcmp(&vpe_priv->stream_ctx[i].stream, &stream, sizeof(struct vpe_stream)))
            return false;
    }

    output_ctx = &vpe_priv->output_ctx;
    if (output_ctx->alpha_mode != param->alpha_mode)
        return false;

    if (memcmp(&output_ctx->bg_color, &param->bg_color, sizeof(struct vpe_color)))
        return false;

    if (memcmp(&output_ctx->target_rect, &param->target_rect, sizeof(struct vpe_rect)))
        return false;

    if (memcmp(&output_ctx->surface, &param->dst_surface, sizeof(struct vpe_surface_info)))
        return false;

    return true;
}

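/* Build the command and embedded buffers for a previously validated request: re-checks
 * the cached parameters, reports the required sizes when zero-sized buffers are passed in,
 * updates the color state, then emits the per-segment VPE commands (plus collaborate sync
 * commands when collaboration mode is enabled) and returns the used buffer sizes.
 */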
enum vpe_status vpe_build_commands(
    struct vpe *vpe, const struct vpe_build_param *param, struct vpe_build_bufs *bufs)
{
    struct vpe_priv *vpe_priv;
    struct cmd_builder *builder;
    enum vpe_status status = VPE_STATUS_OK;
    uint32_t cmd_idx, i, j;
    struct vpe_build_bufs curr_bufs;
    int64_t cmd_buf_size;
    int64_t emb_buf_size;
    uint64_t cmd_buf_gpu_a, cmd_buf_cpu_a;
    uint64_t emb_buf_gpu_a, emb_buf_cpu_a;

    if (!vpe || !param || !bufs)
        return VPE_STATUS_ERROR;

    vpe_priv = container_of(vpe, struct vpe_priv, pub);

    if (!vpe_priv->ops_support) {
        VPE_ASSERT(vpe_priv->ops_support);
        status = VPE_STATUS_NOT_SUPPORTED;
    }

    if (status == VPE_STATUS_OK) {
        if (!validate_cached_param(vpe_priv, param)) {
            status = VPE_STATUS_PARAM_CHECK_ERROR;
        }
    }

    if (status == VPE_STATUS_OK) {
        if (param->streams->flags.geometric_scaling) {
            vpe_geometric_scaling_feature_skip(vpe_priv, param);
        }

        if (bufs->cmd_buf.size == 0 || bufs->emb_buf.size == 0) {
            /* Here we return the required sizes directly without setting ops_support
             * to false, because the support check has already passed and the caller
             * can come back again with correctly sized buffers.
             */
            bufs->cmd_buf.size = vpe_priv->bufs_required.cmd_buf_size;
            bufs->emb_buf.size = vpe_priv->bufs_required.emb_buf_size;

            return VPE_STATUS_OK;
        } else if ((bufs->cmd_buf.size < vpe_priv->bufs_required.cmd_buf_size) ||
                   (bufs->emb_buf.size < vpe_priv->bufs_required.emb_buf_size)) {
            status = VPE_STATUS_INVALID_BUFFER_SIZE;
        }
    }

    builder = &vpe_priv->resource.cmd_builder;

    // store the buffers' original values
    cmd_buf_cpu_a = bufs->cmd_buf.cpu_va;
    cmd_buf_gpu_a = bufs->cmd_buf.gpu_va;
    cmd_buf_size = bufs->cmd_buf.size;

    emb_buf_cpu_a = bufs->emb_buf.cpu_va;
    emb_buf_gpu_a = bufs->emb_buf.gpu_va;
    emb_buf_size = bufs->emb_buf.size;

    // curr_bufs is used for tracking the built size and next pointers
    curr_bufs = *bufs;

    // copy the param, reset saved configs
    for (i = 0; i < param->num_streams; i++) {
        vpe_priv->stream_ctx[i].num_configs = 0;
        for (j = 0; j < VPE_CMD_TYPE_COUNT; j++)
            vpe_priv->stream_ctx[i].num_stream_op_configs[j] = 0;
    }
    vpe_priv->output_ctx.num_configs = 0;

    // Reset pipes
    vpe_pipe_reset(vpe_priv);

    if (status == VPE_STATUS_OK) {
        status = vpe_color_update_color_space_and_tf(vpe_priv, param);
        if (status != VPE_STATUS_OK) {
            vpe_log("failed in updating color space and tf %d\n", (int)status);
        }
    }

    if (status == VPE_STATUS_OK) {
        status = vpe_color_update_movable_cm(vpe_priv, param);
        if (status != VPE_STATUS_OK) {
            vpe_log("failed in updating movable 3d lut unit %d\n", (int)status);
        }
    }

    if (status == VPE_STATUS_OK) {
        status = vpe_color_update_whitepoint(vpe_priv, param);
        if (status != VPE_STATUS_OK) {
            vpe_log("failed updating whitepoint gain %d\n", (int)status);
        }
    }

    if (status == VPE_STATUS_OK) {
        /* Since the background is generated by the first stream,
         * the 3dlut enablement for the background color conversion
         * is decided based on the information of the first stream.
         */
        vpe_bg_color_convert(vpe_priv->output_ctx.cs, vpe_priv->output_ctx.output_tf,
            &vpe_priv->output_ctx.bg_color, vpe_priv->stream_ctx[0].enable_3dlut);

#ifdef VPE_BUILD_1_1
        if (vpe_priv->collaboration_mode == true) {
            status = builder->build_collaborate_sync_cmd(vpe_priv, &curr_bufs);
            if (status != VPE_STATUS_OK) {
                vpe_log("failed in building collaborate sync cmd %d\n", (int)status);
            }
        }
#endif
        for (cmd_idx = 0; cmd_idx < vpe_priv->num_vpe_cmds; cmd_idx++) {
            status = builder->build_vpe_cmd(vpe_priv, &curr_bufs, cmd_idx);
            if (status != VPE_STATUS_OK) {
                vpe_log("failed in building vpe cmd %d\n", (int)status);
            }

#ifdef VPE_BUILD_1_1
            if ((vpe_priv->collaboration_mode == true) &&
                (vpe_priv->vpe_cmd_info[cmd_idx].insert_end_csync == true)) {
                status = builder->build_collaborate_sync_cmd(vpe_priv, &curr_bufs);
                if (status != VPE_STATUS_OK) {
                    vpe_log("failed in building collaborate sync cmd %d\n", (int)status);
                }

                // Add the next collaborate sync start command when this vpe_cmd isn't the final one.
                if (cmd_idx < (uint32_t)(vpe_priv->num_vpe_cmds - 1)) {
                    status = builder->build_collaborate_sync_cmd(vpe_priv, &curr_bufs);
                    if (status != VPE_STATUS_OK) {
                        vpe_log("failed in building collaborate sync cmd %d\n", (int)status);
                    }
                }
            }
#endif
        }
#ifdef VPE_BUILD_1_1
        if (vpe_priv->collaboration_mode == true) {
            status = builder->build_collaborate_sync_cmd(vpe_priv, &curr_bufs);
            if (status != VPE_STATUS_OK) {
                vpe_log("failed in building collaborate sync cmd %d\n", (int)status);
            }
        }
#endif
    }

    if (status == VPE_STATUS_OK) {
        bufs->cmd_buf.size = cmd_buf_size - curr_bufs.cmd_buf.size; // used cmd buffer size
        bufs->cmd_buf.gpu_va = cmd_buf_gpu_a;
        bufs->cmd_buf.cpu_va = cmd_buf_cpu_a;

        bufs->emb_buf.size = emb_buf_size - curr_bufs.emb_buf.size; // used emb buffer size
        bufs->emb_buf.gpu_va = emb_buf_gpu_a;
        bufs->emb_buf.cpu_va = emb_buf_cpu_a;
    }

    vpe_priv->ops_support = false;

    if (vpe_priv->init.debug.assert_when_not_support)
        VPE_ASSERT(status == VPE_STATUS_OK);

    return status;
}

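/* Query the DPP for the optimal number of scaling taps for the given source and
 * destination rectangles, filling in scaling_info->taps.
 */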
void vpe_get_optimal_num_of_taps(struct vpe *vpe, struct vpe_scaling_info *scaling_info)
{
    struct vpe_priv *vpe_priv;
    struct dpp *dpp;

    vpe_priv = container_of(vpe, struct vpe_priv, pub);
    dpp = vpe_priv->resource.dpp[0];

    dpp->funcs->get_optimal_number_of_taps(
        &scaling_info->src_rect, &scaling_info->dst_rect, &scaling_info->taps);
}