author     Santosh Madhava <smadhava@google.com>    2011-01-27 10:48:25 -0800
committer  Santosh Madhava <smadhava@google.com>    2011-01-27 19:08:09 -0800
commit     0078736220b9372f2c82eb258ceca3dbd6c358ef (patch)
tree       ce6cd21a5537bf3a06e1e51086f9b16b2eec8bb9 /libvideoeditor/vss
parent     342f932f6312be84a48512216b561ead42b53199 (diff)
Fix for issue 3370007: Overlay transparency is missing
Change-Id: I1b513b607f669d141bcd28bd918b8c1319bef7fa
Diffstat (limited to 'libvideoeditor/vss')
-rwxr-xr-x  libvideoeditor/vss/src/M4xVSS_internal.c                              96
-rwxr-xr-x  libvideoeditor/vss/video_filters/src/Android.mk                        2
-rw-r--r--  libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c          202
-rw-r--r--  libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c    256
4 files changed, 540 insertions, 16 deletions
diff --git a/libvideoeditor/vss/src/M4xVSS_internal.c b/libvideoeditor/vss/src/M4xVSS_internal.c
index 62107aa..fbff915 100755
--- a/libvideoeditor/vss/src/M4xVSS_internal.c
+++ b/libvideoeditor/vss/src/M4xVSS_internal.c
@@ -1970,6 +1970,11 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
M4VIFI_ImagePlane rgbPlane;
M4OSA_UInt32 frameSize_argb=(framingCtx->width * framingCtx->height * 4);
M4OSA_UInt32 frameSize = (framingCtx->width * framingCtx->height * 3); //Size of RGB888 data
+ M4OSA_UInt32 tempAlphaPercent = 0;
+ M4VIFI_UInt8* TempPacData = M4OSA_NULL;
+ M4OSA_UInt16 *ptr = M4OSA_NULL;
+ M4OSA_UInt32 z = 0;
+
M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_malloc(frameSize_argb, M4VS, (M4OSA_Char*)\
"Image argb data");
M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: Entering ");
@@ -2049,24 +2054,76 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
Remove the alpha channel ");
- /** Remove the alpha channel */
+#if 0
+ /** Remove the alpha channel*/
for (i=0, j = 0; i < frameSize_argb; i++) {
if ((i % 4) == 0) continue;
rgbPlane.pac_data[j] = pTmpData[i];
j++;
}
+#endif
+
+ /* Convert ARGB8888 to RGB888; for fully transparent pixels (alpha == 0), force GREEN to 255 */
+ for (i=0, j = 0; i < frameSize_argb; i += 4) {
+ /* this is alpha value */
+ if ((i % 4) == 0)
+ {
+ tempAlphaPercent = pTmpData[i];
+ }
+
+ /* R */
+ rgbPlane.pac_data[j] = pTmpData[i+1];
+ j++;
+
+ /* G */
+ if (tempAlphaPercent > 0) {
+ rgbPlane.pac_data[j] = pTmpData[i+2];
+ j++;
+ } else { /* In case of alpha value 0, set GREEN to 255 */
+ rgbPlane.pac_data[j] = 255; //pTmpData[i+2];
+ j++;
+ }
+
+ /* B */
+ rgbPlane.pac_data[j] = pTmpData[i+3];
+ j++;
+ }
+
+ /* convert RGB888 to RGB565 */
+
+ /* allocate temp RGB 565 buffer */
+ TempPacData = (M4VIFI_UInt8*)M4OSA_malloc(((frameSize)+ (2 * framingCtx->width)),
+ M4VS, (M4OSA_Char*)"Image clip RGB565 data");
+
+ ptr = (M4OSA_UInt16 *)TempPacData;
+ z = 0;
+
+ for (i = 0; i < j ; i += 3)
+ {
+ ptr[z++] = PACK_RGB565(0, rgbPlane.pac_data[i],
+ rgbPlane.pac_data[i+1],
+ rgbPlane.pac_data[i+2]);
+ }
+
+ /* update the stride for RGB565 (2 bytes per pixel) */
+ rgbPlane.u_stride = rgbPlane.u_width*2;
+
+ /* free the RGB888 buffer and assign the RGB565 buffer */
+ M4OSA_free((M4OSA_MemAddr32)rgbPlane.pac_data);
+ rgbPlane.pac_data = TempPacData;
+
M4OSA_free((M4OSA_MemAddr32)pTmpData);
/**
* Check if output sizes are odd */
if(rgbPlane.u_height % 2 != 0)
{
-
M4VIFI_UInt8* output_pac_data = rgbPlane.pac_data;
M4OSA_UInt32 i;
M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
output height is odd ");
- output_pac_data +=rgbPlane.u_width * rgbPlane.u_height*3;
+ output_pac_data +=rgbPlane.u_width * rgbPlane.u_height*2;
+
for(i=0;i<rgbPlane.u_width;i++)
{
*output_pac_data++ = transparent1;
@@ -2079,7 +2136,6 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
}
if(rgbPlane.u_width % 2 != 0)
{
-
/**
* We add a new column of white (=transparent), but we need to parse all RGB lines ... */
M4OSA_UInt32 i;
@@ -2092,8 +2148,9 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
/**
* We need to allocate a new RGB output buffer in which all decoded data
+ white line will be copied */
- newRGBpac_data = (M4VIFI_UInt8*)M4OSA_malloc(rgbPlane.u_height*rgbPlane.u_width*3\
+ newRGBpac_data = (M4VIFI_UInt8*)M4OSA_malloc(rgbPlane.u_height*rgbPlane.u_width*2\
*sizeof(M4VIFI_UInt8), M4VS, (M4OSA_Char *)"New Framing GIF Output pac_data RGB");
+
if(newRGBpac_data == M4OSA_NULL)
{
M4OSA_TRACE1_0("Allocation error in \
@@ -2110,12 +2167,14 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
for(i=0;i<rgbPlane.u_height;i++)
{
M4OSA_memcpy((M4OSA_MemAddr8)output_pac_data, (M4OSA_MemAddr8)input_pac_data,
- (rgbPlane.u_width-1)*3);
- output_pac_data += ((rgbPlane.u_width-1)*3);
+ (rgbPlane.u_width-1)*2);
+
+ output_pac_data += ((rgbPlane.u_width-1)*2);
/* Put the pixel to transparency color */
*output_pac_data++ = transparent1;
*output_pac_data++ = transparent2;
- input_pac_data += ((rgbPlane.u_width-1)*3);
+
+ input_pac_data += ((rgbPlane.u_width-1)*2);
}
rgbPlane.pac_data = newRGBpac_data;
@@ -2201,8 +2260,7 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
break;
}
-
- /**
+ /**
* Allocate output planes structures */
framingCtx->FramingRgb = (M4VIFI_ImagePlane*)M4OSA_malloc(sizeof(M4VIFI_ImagePlane), M4VS,
(M4OSA_Char *)"Framing Output plane RGB");
@@ -2226,13 +2284,14 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
framingCtx->FramingRgb->u_height = height_out;
framingCtx->FramingRgb->u_width = width_out;
- framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*3;
+ framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*2;
framingCtx->FramingRgb->u_topleft = 0;
framingCtx->FramingRgb->pac_data =
(M4VIFI_UInt8*)M4OSA_malloc(framingCtx->FramingRgb->u_height*framingCtx->\
- FramingRgb->u_width*3*sizeof(M4VIFI_UInt8), M4VS,
+ FramingRgb->u_width*2*sizeof(M4VIFI_UInt8), M4VS,
(M4OSA_Char *)"Framing Output pac_data RGB");
+
if(framingCtx->FramingRgb->pac_data == M4OSA_NULL)
{
M4OSA_TRACE1_0("Allocation error in \
@@ -2245,7 +2304,10 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: Resizing Needed ");
M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
rgbPlane.u_height & rgbPlane.u_width %d %d",rgbPlane.u_height,rgbPlane.u_width);
- err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
+
+ //err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
+ err = M4VIFI_ResizeBilinearRGB565toRGB565(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
+
if(err != M4NO_ERROR)
{
M4OSA_TRACE1_1("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect :\
@@ -2270,7 +2332,7 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
height = framingCtx->height;
framingCtx->FramingRgb->u_height = height;
framingCtx->FramingRgb->u_width = width;
- framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*3;
+ framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*2;
framingCtx->FramingRgb->u_topleft = 0;
framingCtx->FramingRgb->pac_data = rgbPlane.pac_data;
}
@@ -2300,7 +2362,7 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
framingCtx->FramingYuv[0].u_height = ((height+1)>>1)<<1;
framingCtx->FramingYuv[0].u_topleft = 0;
framingCtx->FramingYuv[0].u_stride = ((width+1)>>1)<<1;
- framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_malloc
+ framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_malloc
((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height*3)>>1, M4VS,
(M4OSA_Char *)"Alloc for the output YUV");;
if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
@@ -2331,7 +2393,9 @@ M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pCo
M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
convert RGB to YUV ");
- err = M4VIFI_RGB888toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+ //err = M4VIFI_RGB888toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+ err = M4VIFI_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+
if(err != M4NO_ERROR)
{
M4OSA_TRACE1_1("SPS png: error when converting from RGB to YUV: 0x%x\n", err);
diff --git a/libvideoeditor/vss/video_filters/src/Android.mk b/libvideoeditor/vss/video_filters/src/Android.mk
index ebf62d2..480548f 100755
--- a/libvideoeditor/vss/video_filters/src/Android.mk
+++ b/libvideoeditor/vss/video_filters/src/Android.mk
@@ -28,9 +28,11 @@ LOCAL_MODULE:= libvideoeditor_videofilters
LOCAL_SRC_FILES:= \
M4VIFI_BGR565toYUV420.c \
M4VIFI_ResizeRGB888toRGB888.c \
+ M4VIFI_ResizeRGB565toRGB565.c \
M4VIFI_Clip.c \
M4VIFI_ResizeYUVtoBGR565.c \
M4VIFI_RGB888toYUV420.c \
+ M4VIFI_RGB565toYUV420.c \
M4VFL_transition.c
LOCAL_MODULE_TAGS := optional
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c
new file mode 100644
index 0000000..6192e4e
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @brief Contains a video library function
+ * @note Color Conversion Filter
+ * Contains the format conversion filters from RGB565 to YUV420
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+
+/**
+ ******************************************************************************
+ * M4VIFI_UInt8 M4VIFI_RGB565toYUV420 (void *pUserData,
+ * M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @brief Transforms an RGB565 image into a YUV420 image.
+ * @note Convert RGB565 to YUV420,
+ * Loop on each row ( 2 rows by 2 rows )
+ * Loop on each column ( 2 col by 2 col )
+ * Get 4 RGB samples from input data and build 4 output Y samples
+ * and each single U & V data
+ * end loop on col
+ * end loop on row
+ * @param pUserData: (IN) User Specific Data
+ * @param pPlaneIn: (IN) Pointer to RGB565 Plane
+ * @param pPlaneOut: (OUT) Pointer to YUV420 buffer Plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: YUV Plane width is ODD
+ ******************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut)
+{
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
+ M4VIFI_UInt32 u32_stride_rgb, u32_stride_2rgb;
+ M4VIFI_UInt32 u32_col, u32_row;
+
+ M4VIFI_Int32 i32_r00, i32_r01, i32_r10, i32_r11;
+ M4VIFI_Int32 i32_g00, i32_g01, i32_g10, i32_g11;
+ M4VIFI_Int32 i32_b00, i32_b01, i32_b10, i32_b11;
+ M4VIFI_Int32 i32_y00, i32_y01, i32_y10, i32_y11;
+ M4VIFI_Int32 i32_u00, i32_u01, i32_u10, i32_u11;
+ M4VIFI_Int32 i32_v00, i32_v01, i32_v10, i32_v11;
+ M4VIFI_UInt8 *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+ M4VIFI_UInt8 *pu8_y_data, *pu8_u_data, *pu8_v_data;
+ M4VIFI_UInt8 *pu8_rgbn_data, *pu8_rgbn;
+ M4VIFI_UInt16 u16_pix1, u16_pix2, u16_pix3, u16_pix4;
+
+ /* Check that the plane heights are appropriate */
+ if ((pPlaneIn->u_height != pPlaneOut[0].u_height) ||
+ (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1)) ||
+ (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
+ {
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+ }
+
+ /* Check that the plane widths are appropriate */
+ if ((pPlaneIn->u_width != pPlaneOut[0].u_width) ||
+ (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
+ (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
+ {
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+ }
+
+ /* Set the pointer to the beginning of the output data buffers */
+ pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+ pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+ pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+
+ /* Set the pointer to the beginning of the input data buffers */
+ pu8_rgbn_data = pPlaneIn->pac_data + pPlaneIn->u_topleft;
+
+ /* Get the size of the output image */
+ u32_width = pPlaneOut[0].u_width;
+ u32_height = pPlaneOut[0].u_height;
+
+ /* Set the size of the memory jumps corresponding to row jump in each output plane */
+ u32_stride_Y = pPlaneOut[0].u_stride;
+ u32_stride2_Y = u32_stride_Y << 1;
+ u32_stride_U = pPlaneOut[1].u_stride;
+ u32_stride_V = pPlaneOut[2].u_stride;
+
+ /* Set the size of the memory jumps corresponding to row jump in input plane */
+ u32_stride_rgb = pPlaneIn->u_stride;
+ u32_stride_2rgb = u32_stride_rgb << 1;
+
+
+ /* Loop on each row of the output image, input coordinates are estimated from output ones */
+ /* Two YUV rows are computed at each pass */
+ for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+ {
+ /* Current Y plane row pointers */
+ pu8_yn = pu8_y_data;
+ /* Next Y plane row pointers */
+ pu8_ys = pu8_yn + u32_stride_Y;
+ /* Current U plane row pointer */
+ pu8_u = pu8_u_data;
+ /* Current V plane row pointer */
+ pu8_v = pu8_v_data;
+
+ pu8_rgbn = pu8_rgbn_data;
+
+ /* Loop on each column of the output image */
+ for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+ {
+ /* Get four RGB 565 samples from input data */
+ u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
+ u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
+ u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
+ u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
+
+ /* Unpack RGB565 to 8bit R, G, B */
+ /* (x,y) */
+ GET_RGB565(i32_r00,i32_g00,i32_b00,u16_pix1);
+ /* (x+1,y) */
+ GET_RGB565(i32_r10,i32_g10,i32_b10,u16_pix2);
+ /* (x,y+1) */
+ GET_RGB565(i32_r01,i32_g01,i32_b01,u16_pix3);
+ /* (x+1,y+1) */
+ GET_RGB565(i32_r11,i32_g11,i32_b11,u16_pix4);
+
+ /* Convert RGB value to YUV */
+ i32_u00 = U16(i32_r00, i32_g00, i32_b00);
+ i32_v00 = V16(i32_r00, i32_g00, i32_b00);
+ /* luminance value */
+ i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
+
+ i32_u10 = U16(i32_r10, i32_g10, i32_b10);
+ i32_v10 = V16(i32_r10, i32_g10, i32_b10);
+ /* luminance value */
+ i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
+
+ i32_u01 = U16(i32_r01, i32_g01, i32_b01);
+ i32_v01 = V16(i32_r01, i32_g01, i32_b01);
+ /* luminance value */
+ i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
+
+ i32_u11 = U16(i32_r11, i32_g11, i32_b11);
+ i32_v11 = V16(i32_r11, i32_g11, i32_b11);
+ /* luminance value */
+ i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
+
+ /* Store luminance data */
+ pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
+ pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
+ pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
+ pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+
+ /* Store chroma data */
+ *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+ *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+
+ /* Prepare for next column */
+ pu8_rgbn += (CST_RGB_16_SIZE<<1);
+ /* Update current Y plane line pointer*/
+ pu8_yn += 2;
+ /* Update next Y plane line pointer*/
+ pu8_ys += 2;
+ /* Update U plane line pointer*/
+ pu8_u ++;
+ /* Update V plane line pointer*/
+ pu8_v ++;
+ } /* End of horizontal scanning */
+
+ /* Prepare pointers for the next row */
+ pu8_y_data += u32_stride2_Y;
+ pu8_u_data += u32_stride_U;
+ pu8_v_data += u32_stride_V;
+ pu8_rgbn_data += u32_stride_2rgb;
+
+
+ } /* End of vertical scanning */
+
+ return M4VIFI_OK;
+}
+
+
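
Note on the new converter above: it walks the RGB565 plane in 2x2 blocks, emitting four Y samples per block plus one U and one V obtained by averaging the four per-pixel chroma values, which is the YUV420 (4:2:0) layout the overlay blending expects. A rough model of the per-block arithmetic, assuming common integer BT.601 full-range coefficients; the library's Y16/U16/V16 macros in M4VIFI_Defines.h may scale and offset differently:

    #include <stdint.h>

    /* Stand-in for the library's clip table: clamp to 0..255. */
    static uint8_t clamp_u8(int32_t v)
    {
        return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* One 2x2 block: four Y samples, one averaged U and V (4:2:0 subsampling). */
    static void block2x2_to_yuv420(const uint8_t r[4], const uint8_t g[4],
                                   const uint8_t b[4],
                                   uint8_t y_out[4], uint8_t *u_out, uint8_t *v_out)
    {
        int32_t u_sum = 0, v_sum = 0;

        for (int i = 0; i < 4; i++) {
            int32_t y = ( 77 * r[i] + 150 * g[i] +  29 * b[i]) >> 8;
            int32_t u = (-43 * r[i] -  85 * g[i] + 128 * b[i]) >> 8;
            int32_t v = (128 * r[i] - 107 * g[i] -  21 * b[i]) >> 8;

            y_out[i] = clamp_u8(y);
            u_sum += u;
            v_sum += v;
        }

        /* Rounded average of the four chroma values, shifted to unsigned range. */
        *u_out = clamp_u8(((u_sum + 2) >> 2) + 128);
        *v_out = clamp_u8(((v_sum + 2) >> 2) + 128);
    }
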
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c
new file mode 100644
index 0000000..256687c
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4VIFI_ResizeRGB565toRGB565.c
+ * @brief Contains a video library function
+ * @note This file has a Resize filter function
+ * Generic resizing of RGB565 (Planar) image
+ ******************************************************************************
+*/
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+/**
+ ***********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @brief Resizes RGB565 Planar plane.
+ * @param pUserData: (IN) User Data
+ * @param pPlaneIn: (IN) Pointer to RGB565 (Planar) plane buffer
+ * @param pPlaneOut: (OUT) Pointer to RGB565 (Planar) plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: Error in width
+ ***********************************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut)
+{
+ M4VIFI_UInt16 *pu16_data_in;
+ M4VIFI_UInt16 *pu16_data_out;
+ M4VIFI_UInt32 u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+ M4VIFI_UInt32 u32_stride_in, u32_stride_out;
+ M4VIFI_UInt32 u32_x_inc, u32_y_inc;
+ M4VIFI_UInt32 u32_x_accum, u32_y_accum, u32_x_accum_start;
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_y_frac;
+ M4VIFI_UInt32 u32_x_frac;
+ M4VIFI_UInt32 u32_Rtemp_value,u32_Gtemp_value,u32_Btemp_value;
+ M4VIFI_UInt16 *pu16_src_top;
+ M4VIFI_UInt16 *pu16_src_bottom;
+ M4VIFI_UInt32 i32_b00, i32_g00, i32_r00;
+ M4VIFI_UInt32 i32_b01, i32_g01, i32_r01;
+ M4VIFI_UInt32 i32_b02, i32_g02, i32_r02;
+ M4VIFI_UInt32 i32_b03, i32_g03, i32_r03;
+ M4VIFI_UInt8 count_trans=0;
+
+ /* Check that the RGB width and height are even */
+ if ((IS_EVEN(pPlaneIn->u_height) == FALSE) ||
+ (IS_EVEN(pPlaneOut->u_height) == FALSE)) {
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+ }
+
+ if ((IS_EVEN(pPlaneIn->u_width) == FALSE) ||
+ (IS_EVEN(pPlaneOut->u_width) == FALSE)) {
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+ }
+
+ /* Set the working pointers at the beginning of the input/output data field */
+ pu16_data_in = (M4VIFI_UInt16*)(pPlaneIn->pac_data + pPlaneIn->u_topleft);
+ pu16_data_out = (M4VIFI_UInt16*)(pPlaneOut->pac_data + pPlaneOut->u_topleft);
+
+ /* Get the memory jump corresponding to a row jump */
+ u32_stride_in = pPlaneIn->u_stride;
+ u32_stride_out = pPlaneOut->u_stride;
+
+ /* Set the bounds of the active image */
+ u32_width_in = pPlaneIn->u_width;
+ u32_height_in = pPlaneIn->u_height;
+
+ u32_width_out = pPlaneOut->u_width;
+ u32_height_out = pPlaneOut->u_height;
+
+ /* Compute horizontal ratio between src and destination width.*/
+ if (u32_width_out >= u32_width_in) {
+ u32_x_inc = ((u32_width_in-1) * MAX_SHORT) / (u32_width_out-1);
+ } else {
+ u32_x_inc = (u32_width_in * MAX_SHORT) / (u32_width_out);
+ }
+
+ /* Compute vertical ratio between src and destination height.*/
+ if (u32_height_out >= u32_height_in) {
+ u32_y_inc = ((u32_height_in - 1) * MAX_SHORT) / (u32_height_out-1);
+ } else {
+ u32_y_inc = (u32_height_in * MAX_SHORT) / (u32_height_out);
+ }
+
+ /*
+ Calculate the initial vertical accumulator value: u32_y_accum.
+ u32_y_accum is coded on 15 bits, and represents a value between 0 and 0.5
+ */
+ if (u32_y_inc >= MAX_SHORT) {
+ /*
+ Keep the fractional part, integer part is coded
+ on the 16 high bits and the fractional part on the 15 low bits
+ */
+ u32_y_accum = u32_y_inc & 0xffff;
+
+ if (!u32_y_accum)
+ {
+ u32_y_accum = MAX_SHORT;
+ }
+
+ u32_y_accum >>= 1;
+ } else {
+ u32_y_accum = 0;
+ }
+
+ /*
+ Calculate initial accumulator value : u32_x_accum_start.
+ u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+ */
+ if (u32_x_inc >= MAX_SHORT) {
+ u32_x_accum_start = u32_x_inc & 0xffff;
+
+ if (!u32_x_accum_start) {
+ u32_x_accum_start = MAX_SHORT;
+ }
+
+ u32_x_accum_start >>= 1;
+ } else {
+ u32_x_accum_start = 0;
+ }
+
+ u32_height = u32_height_out;
+
+ /*
+ Bilinear interpolation linearly interpolates along each row, and then uses that
+ result in a linear interpolation down each column. Each estimated pixel in the
+ output image is a weighted combination of its four neighbours according to the formula:
+ F(p',q') = f(p,q)R(-a)R(b) + f(p,q-1)R(-a)R(b-1) + f(p+1,q)R(1-a)R(b) + f(p+1,q+1)R(1-a)R(b-1)
+ with R(x) = x+1 for -1 <= x <= 0 and R(x) = 1-x for 0 <= x <= 1, where a (resp. b) is the
+ weighting coefficient given by the distance from the nearest neighbour in the p (resp. q) direction
+ */
+
+ do { /* Scan all the rows */
+
+ /* Vertical weight factor */
+ u32_y_frac = (u32_y_accum>>12)&15;
+
+ /* Reinit accumulator */
+ u32_x_accum = u32_x_accum_start;
+
+ u32_width = u32_width_out;
+
+ do { /* Scan along each row */
+ pu16_src_top = pu16_data_in + (u32_x_accum >> 16);
+ pu16_src_bottom = pu16_src_top + (u32_stride_in>>1);
+ u32_x_frac = (u32_x_accum >> 12)&15; /* Horizontal weight factor */
+
+ /* Weighted combination */
+ if ((u32_height == 1) && (u32_height_in == u32_height_out)) {
+ GET_RGB565(i32_b00,i32_g00,i32_r00,(M4VIFI_UInt16)pu16_src_top[0]);
+ GET_RGB565(i32_b01,i32_g01,i32_r01,(M4VIFI_UInt16)pu16_src_top[1]);
+ GET_RGB565(i32_b02,i32_g02,i32_r02,(M4VIFI_UInt16)pu16_src_top[0]);
+ GET_RGB565(i32_b03,i32_g03,i32_r03,(M4VIFI_UInt16)pu16_src_top[1]);
+ } else {
+ GET_RGB565(i32_b00,i32_g00,i32_r00,(M4VIFI_UInt16)pu16_src_top[0]);
+ GET_RGB565(i32_b01,i32_g01,i32_r01,(M4VIFI_UInt16)pu16_src_top[1]);
+ GET_RGB565(i32_b02,i32_g02,i32_r02,(M4VIFI_UInt16)pu16_src_bottom[0]);
+ GET_RGB565(i32_b03,i32_g03,i32_r03,(M4VIFI_UInt16)pu16_src_bottom[1]);
+
+ }
+
+ /* Solution to avoid green effects due to transparency */
+ count_trans = 0;
+
+ /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
+ if (i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
+ {
+ i32_b00 = 31;
+ i32_r00 = 31;
+ count_trans++;
+ }
+ if (i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
+ {
+ i32_b01 = 31;
+ i32_r01 = 31;
+ count_trans++;
+ }
+ if (i32_b02 == 0 && i32_g02 == 63 && i32_r02 == 0)
+ {
+ i32_b02 = 31;
+ i32_r02 = 31;
+ count_trans++;
+ }
+ if (i32_b03 == 0 && i32_g03 == 63 && i32_r03 == 0)
+ {
+ i32_b03 = 31;
+ i32_r03 = 31;
+ count_trans++;
+ }
+
+ if (count_trans > 2) {
+ /* pixel is transparent */
+ u32_Rtemp_value = 0;
+ u32_Gtemp_value = 63;
+ u32_Btemp_value = 0;
+ } else {
+ u32_Rtemp_value = (M4VIFI_UInt8)(((i32_r00*(16-u32_x_frac) +
+ i32_r01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_r02*(16-u32_x_frac) +
+ i32_r03*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_Gtemp_value = (M4VIFI_UInt8)(((i32_g00*(16-u32_x_frac) +
+ i32_g01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_g02*(16-u32_x_frac) +
+ i32_g03*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_Btemp_value = (M4VIFI_UInt8)(((i32_b00*(16-u32_x_frac) +
+ i32_b01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_b02*(16-u32_x_frac) +
+ i32_b03*u32_x_frac)*u32_y_frac )>>8);
+ }
+
+ *pu16_data_out++ = (M4VIFI_UInt16)( (((u32_Gtemp_value & 0x38) >> 3) | (u32_Btemp_value << 3)) |\
+ ( (((u32_Gtemp_value & 0x7) << 5 ) | u32_Rtemp_value)<<8 ));
+
+ /* Update horizontal accumulator */
+ u32_x_accum += u32_x_inc;
+
+ } while(--u32_width);
+
+
+ /* Update vertical accumulator */
+ u32_y_accum += u32_y_inc;
+ if (u32_y_accum>>16) {
+ pu16_data_in = pu16_data_in + (u32_y_accum >> 16) * (u32_stride_in>>1);
+ u32_y_accum &= 0xffff;
+ }
+
+ } while(--u32_height);
+
+ return M4VIFI_OK;
+}
+
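
Note on the resize filter above: it uses 16.16 fixed-point accumulators, takes 4-bit horizontal and vertical weights from bits 15..12 of each accumulator, and blends the four neighbouring RGB565 pixels per channel; when three or more of those neighbours are the green transparency key (R,G,B) = (0,63,0), the output pixel is forced back to the key instead of being blended, which is what prevents green fringes around transparent regions. A minimal sketch of the per-channel blend, using the same 4-bit weights and 8-bit descale as the code above:

    #include <stdint.h>

    /* Bilinear blend of one colour channel. x_frac and y_frac are 4-bit
     * weights (0..15); the final >> 8 removes the 16 x 16 weight scale.
     * c00/c01 are the two top neighbours, c10/c11 the two bottom ones. */
    static uint32_t blend_channel(uint32_t c00, uint32_t c01,
                                  uint32_t c10, uint32_t c11,
                                  uint32_t x_frac, uint32_t y_frac)
    {
        uint32_t top    = c00 * (16 - x_frac) + c01 * x_frac;
        uint32_t bottom = c10 * (16 - x_frac) + c11 * x_frac;
        return (top * (16 - y_frac) + bottom * y_frac) >> 8;
    }

With x_frac = y_frac = 8 the result reduces to the plain average of the four neighbours, and with both weights 0 it returns c00, i.e. the nearest source pixel.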