Commit a0ad521c authored by Ronald S. Bultje

Minor refactoring in encodeintra.c.

Merge code blocks for different transform sizes; use MACROBLOCKD as a
temp variable where that leads to smaller overall source code; remove
duplicate code under #if CONFIG_HYBRIDTRANSFORM/#else blocks. Some style
changes to make it follow the style guide a little better.

Change-Id: I1870a06dae298243db46e14c6729c96c66196525
Showing with 58 additions and 96 deletions
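For readers skimming the diff below, the shape of the refactor is: hoist a local MACROBLOCKD pointer once per function, and collapse the separate per-stage if/else chains (transform, quantize, optimize, inverse transform) into a single chain with one branch per transform size. A minimal standalone sketch of that pattern follows; the types and helpers here are simplified stand-ins for illustration only, not the real libvpx definitions.

/* Sketch only: simplified stand-in types, not the actual libvpx structures. */
#include <stdio.h>

typedef enum { TX_4X4, TX_8X8, TX_16X16 } TX_SIZE;
typedef struct { TX_SIZE txfm_size; } MB_MODE_INFO;
typedef struct { MB_MODE_INFO mbmi; } MODE_INFO;
typedef struct { MODE_INFO *mode_info_context; } MACROBLOCKD;
typedef struct { MACROBLOCKD e_mbd; } MACROBLOCK;

static void transform_16x16(MACROBLOCK *x) { (void)x; puts("16x16 path"); }
static void transform_8x8(MACROBLOCK *x)   { (void)x; puts("8x8 path"); }
static void transform_4x4(MACROBLOCK *x)   { (void)x; puts("4x4 path"); }

/* Before: each stage had its own per-size if/else chain, every branch
 * spelling out x->e_mbd....  After: take the pointer once and keep a
 * single branch per transform size that runs the whole pipeline. */
static void encode_mby_sketch(MACROBLOCK *x) {
  MACROBLOCKD *xd = &x->e_mbd;                       /* temp pointer */
  TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;

  if (tx_size == TX_16X16) {
    transform_16x16(x);   /* ...then quantize/optimize/inverse for 16x16 */
  } else if (tx_size == TX_8X8) {
    transform_8x8(x);     /* ...then quantize/optimize/inverse for 8x8 */
  } else {
    transform_4x4(x);     /* ...then quantize/optimize/inverse for 4x4 */
  }
}

int main(void) {
  MODE_INFO mi = { { TX_8X8 } };
  MACROBLOCK mb = { { &mi } };
  encode_mby_sketch(&mb);
  return 0;
}

In the actual functions, each branch runs the full transform, quantize, optimize and inverse-transform sequence for its size, as the new side of the diff below shows.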
@@ -8,7 +8,6 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 #include "vpx_ports/config.h"
 #include "vp8/common/idct.h"
 #include "quantize.h"
@@ -21,7 +20,6 @@
 #include "vp8/common/g_common.h"
 #include "encodeintra.h"
 #if CONFIG_RUNTIME_CPU_DETECT
 #define IF_RTCD(x) (x)
 #else
@@ -60,12 +58,6 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
   BLOCKD *b = &x->e_mbd.block[ib];
   BLOCK *be = &x->block[ib];
 
-#if CONFIG_HYBRIDTRANSFORM
-  int QIndex = x->q_index;
-  int active_ht = (QIndex < ACTIVE_HT);
-#endif
-
 #if CONFIG_COMP_INTRA_PRED
   if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
 #endif
@@ -81,22 +73,19 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
   ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
 
 #if CONFIG_HYBRIDTRANSFORM
-  if (active_ht) {
+  if (x->q_index < ACTIVE_HT) {
     b->bmi.as_mode.test = b->bmi.as_mode.first;
     txfm_map(b, b->bmi.as_mode.first);
     vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
     vp8_ht_quantize_b_4x4(be, b);
     vp8_ihtllm_c(b->dqcoeff, b->diff, 32, b->bmi.as_mode.tx_type, 4);
-  } else {
+  } else
+#endif
+  {
     x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
     x->quantize_b_4x4(be, b) ;
     vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
   }
-#else
-  x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-  x->quantize_b_4x4(be, b);
-  vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
-#endif
 
   RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
 }
@@ -116,120 +105,96 @@ void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
 }
 
 void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+  MACROBLOCKD *xd = &x->e_mbd;
   BLOCK *b = &x->block[0];
-  int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
+  TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
 #if CONFIG_HYBRIDTRANSFORM16X16
-  TX_TYPE txfm_type = x->e_mbd.mode_info_context->bmi[0].as_mode.tx_type;
+  TX_TYPE txfm_type = xd->mode_info_context->bmi[0].as_mode.tx_type;
 #endif
 
 #if CONFIG_COMP_INTRA_PRED
-  if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
+  if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
 #endif
-    RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
+    RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(xd);
 #if CONFIG_COMP_INTRA_PRED
   else
-    RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(&x->e_mbd);
+    RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(xd);
 #endif
 
-  ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+  ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
+                                           xd->predictor, b->src_stride);
 
-  if (tx_type == TX_16X16)
+  if (tx_size == TX_16X16) {
 #if CONFIG_HYBRIDTRANSFORM16X16
-  {
-    if ((x->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
+    if ((xd->mode_info_context->mbmi.mode < I8X8_PRED) &&
         (x->q_index < ACTIVE_HT16)) {
-      BLOCKD *bd = &x->e_mbd.block[0];
-      txfm_map(bd, pred_mode_conv(x->e_mbd.mode_info_context->mbmi.mode));
+      BLOCKD *bd = &xd->block[0];
+      txfm_map(bd, pred_mode_conv(xd->mode_info_context->mbmi.mode));
       txfm_type = bd->bmi.as_mode.tx_type;
       vp8_fht_c(b->src_diff, b->coeff, 32, txfm_type, 16);
+      vp8_quantize_mby_16x16(x);
+      if (x->optimize)
+        vp8_optimize_mby_16x16(x, rtcd);
+      vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
     } else
-      vp8_transform_mby_16x16(x);
-  }
-#else
-    vp8_transform_mby_16x16(x);
 #endif
-  else if (tx_type == TX_8X8)
-    vp8_transform_mby_8x8(x);
-  else
-    vp8_transform_mby_4x4(x);
-
-  if (tx_type == TX_16X16)
-    vp8_quantize_mby_16x16(x);
-  else if (tx_type == TX_8X8)
-    vp8_quantize_mby_8x8(x);
-  else
-    vp8_quantize_mby_4x4(x);
-
-  if (x->optimize) {
-    if (tx_type == TX_16X16)
-      vp8_optimize_mby_16x16(x, rtcd);
-    else if (tx_type == TX_8X8)
-      vp8_optimize_mby_8x8(x, rtcd);
-    else
-      vp8_optimize_mby_4x4(x, rtcd);
-  }
-
-  if (tx_type == TX_16X16)
-#if CONFIG_HYBRIDTRANSFORM16X16
-  {
-    if ((x->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
-        (x->q_index < ACTIVE_HT16)) {
-      BLOCKD *bd = &x->e_mbd.block[0];
-      vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
-    } else
-      vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-  }
-#else
-  vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-#endif
-  else if (tx_type == TX_8X8)
-    vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-  else
-    vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    {
+      vp8_transform_mby_16x16(x);
+      vp8_quantize_mby_16x16(x);
+      if (x->optimize)
+        vp8_optimize_mby_16x16(x, rtcd);
+      vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), xd);
+    }
+  } else if (tx_size == TX_8X8) {
+    vp8_transform_mby_8x8(x);
+    vp8_quantize_mby_8x8(x);
+    if (x->optimize)
+      vp8_optimize_mby_8x8(x, rtcd);
+    vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
+  } else {
+    vp8_transform_mby_4x4(x);
+    vp8_quantize_mby_4x4(x);
+    if (x->optimize)
+      vp8_optimize_mby_4x4(x, rtcd);
+    vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
+  }
 
-  RECON_INVOKE(&rtcd->common->recon, recon_mby)
-      (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+  RECON_INVOKE(&rtcd->common->recon, recon_mby)(IF_RTCD(&rtcd->common->recon),
+                                                xd);
 }
 
 void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
-  int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-  if (tx_type == TX_16X16) tx_type = TX_8X8; // 16x16 for U and V should default to 8x8 behavior.
+  MACROBLOCKD *xd = &x->e_mbd;
+  TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
 
 #if CONFIG_COMP_INTRA_PRED
-  if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
+  if (xd->mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
 #endif
-    RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(&x->e_mbd);
+    RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(xd);
 #if CONFIG_COMP_INTRA_PRED
   } else {
-    RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(&x->e_mbd);
+    RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(xd);
   }
 #endif
 
-  ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+  ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff,
+                                            x->src.u_buffer, x->src.v_buffer,
+                                            xd->predictor, x->src.uv_stride);
 
-  if (tx_type == TX_8X8)
-    vp8_transform_mbuv_8x8(x);
-  else
+  if (tx_size == TX_4X4) {
     vp8_transform_mbuv_4x4(x);
-
-  if (tx_type == TX_8X8)
-    vp8_quantize_mbuv_8x8(x);
-  else
     vp8_quantize_mbuv_4x4(x);
-
-  if (x->optimize) {
-    if (tx_type == TX_8X8)
-      vp8_optimize_mbuv_8x8(x, rtcd);
-    else
+    if (x->optimize)
       vp8_optimize_mbuv_4x4(x, rtcd);
+    vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
+  } else /* 16x16 or 8x8 */ {
+    vp8_transform_mbuv_8x8(x);
+    vp8_quantize_mbuv_8x8(x);
+    if (x->optimize)
+      vp8_optimize_mbuv_8x8(x, rtcd);
+    vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), xd);
   }
 
-  if (tx_type == TX_8X8)
-    vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-  else
-    vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
-  vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+  vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), xd);
 }
@@ -252,7 +217,7 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
   }
 #endif
 
-  if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_8X8) {
+  if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
     int idx = (ib & 0x02) ? (ib + 2) : ib;
 
     // generate residual blocks
@@ -319,20 +284,17 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
   ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 8);
 
   x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
   x->quantize_b_4x4(be, b);
   vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
 
   RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
                                                b->diff, *(b->base_dst) + b->dst, b->dst_stride);
 }
 
 void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   int i, ib, mode, second;
   BLOCKD *b;
 
   for (i = 0; i < 4; i++) {
     ib = vp8_i8x8_block[i];
     b = &x->e_mbd.block[ib];
...