From a0ad521cafcdc68d9fa29d6c719f5af55f30bf9c Mon Sep 17 00:00:00 2001
From: "Ronald S. Bultje" <rbultje@google.com>
Date: Sat, 13 Oct 2012 09:27:54 -0700
Subject: [PATCH] Minor refactoring in encodeintra.c.

Merge the code blocks for the different transform sizes, so that each
size runs its transform/quantize/optimize/inverse steps in one place;
cache a MACROBLOCKD pointer in a local variable where that shortens the
code; remove code duplicated across #if CONFIG_HYBRIDTRANSFORM/#else
blocks. Also a few style changes to follow the style guide more
closely.
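
For illustration, the two main refactors look like this in miniature (a
sketch with hypothetical helper names, not code taken from this file):

    /* Hypothetical helpers standing in for the real vp8_* functions. */

    /* Before: one if/else chain per pipeline stage. */
    if (sz == TX_8X8) transform_8x8(x); else transform_4x4(x);
    if (sz == TX_8X8) quantize_8x8(x);  else quantize_4x4(x);

    /* After: one branch per transform size, covering every stage. */
    if (sz == TX_8X8) {
      transform_8x8(x);
      quantize_8x8(x);
    } else {
      transform_4x4(x);
      quantize_4x4(x);
    }

    /* Before: the fallback body is written out twice. */
    #if CONFIG_FOO
      if (use_foo)
        foo_transform(x);
      else
        base_transform(x);
    #else
      base_transform(x);
    #endif

    /* After: only the "else" keyword is conditional, so the fallback
     * body appears once and serves both configurations. */
    #if CONFIG_FOO
      if (use_foo)
        foo_transform(x);
      else
    #endif
      {
        base_transform(x);
      }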

Change-Id: I1870a06dae298243db46e14c6729c96c66196525
---
 vp8/encoder/encodeintra.c | 154 ++++++++++++++------------------------
 1 file changed, 58 insertions(+), 96 deletions(-)

diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 00bcdb0ee9..eacae81d67 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #include "vpx_ports/config.h"
 #include "vp8/common/idct.h"
 #include "quantize.h"
@@ -21,7 +20,6 @@
 #include "vp8/common/g_common.h"
 #include "encodeintra.h"
 
-
 #if CONFIG_RUNTIME_CPU_DETECT
 #define IF_RTCD(x) (x)
 #else
@@ -60,12 +58,6 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
   BLOCKD *b = &x->e_mbd.block[ib];
   BLOCK *be = &x->block[ib];
 
-#if CONFIG_HYBRIDTRANSFORM
-    int QIndex = x->q_index;
-    int active_ht = (QIndex < ACTIVE_HT);
-#endif
-
-
 #if CONFIG_COMP_INTRA_PRED
   if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
 #endif
@@ -81,22 +73,19 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
   ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
 
 #if CONFIG_HYBRIDTRANSFORM
-  if (active_ht) {
+  if (x->q_index < ACTIVE_HT) {
     b->bmi.as_mode.test = b->bmi.as_mode.first;
     txfm_map(b, b->bmi.as_mode.first);
     vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
     vp8_ht_quantize_b_4x4(be, b);
     vp8_ihtllm_c(b->dqcoeff, b->diff, 32, b->bmi.as_mode.tx_type, 4);
-  } else {
+  } else
+#endif
+  {
     x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
     x->quantize_b_4x4(be, b) ;
     vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
   }
-#else
-  x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-  x->quantize_b_4x4(be, b);
-  vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
-#endif
 
   RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
 }
@@ -116,120 +105,96 @@ void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
 }
 
 void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+  MACROBLOCKD *xd = &x->e_mbd;
   BLOCK *b = &x->block[0];
-
-  int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
+  TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
 #if CONFIG_HYBRIDTRANSFORM16X16
-  TX_TYPE txfm_type = x->e_mbd.mode_info_context->bmi[0].as_mode.tx_type;
+  TX_TYPE txfm_type = xd->mode_info_context->bmi[0].as_mode.tx_type;
 #endif
 
 #if CONFIG_COMP_INTRA_PRED
-  if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
+  if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
 #endif
-    RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
+    RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(xd);
 #if CONFIG_COMP_INTRA_PRED
   else
-    RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(&x->e_mbd);
+    RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(xd);
 #endif
 
-  ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+  ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
+                                           xd->predictor, b->src_stride);
 
-  if (tx_type == TX_16X16)
+  if (tx_size == TX_16X16) {
 #if CONFIG_HYBRIDTRANSFORM16X16
-  {
-    if ((x->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
+    if ((xd->mode_info_context->mbmi.mode < I8X8_PRED) &&
         (x->q_index < ACTIVE_HT16)) {
-      BLOCKD  *bd = &x->e_mbd.block[0];
-      txfm_map(bd, pred_mode_conv(x->e_mbd.mode_info_context->mbmi.mode));
+      BLOCKD  *bd = &xd->block[0];
+      txfm_map(bd, pred_mode_conv(xd->mode_info_context->mbmi.mode));
       txfm_type = bd->bmi.as_mode.tx_type;
       vp8_fht_c(b->src_diff, b->coeff, 32, txfm_type, 16);
+      vp8_quantize_mby_16x16(x);
+      if (x->optimize)
+        vp8_optimize_mby_16x16(x, rtcd);
+      vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
     } else
-      vp8_transform_mby_16x16(x);
-  }
-#else
-    vp8_transform_mby_16x16(x);
 #endif
-  else if (tx_type == TX_8X8)
+    {
+      vp8_transform_mby_16x16(x);
+      vp8_quantize_mby_16x16(x);
+      if (x->optimize)
+        vp8_optimize_mby_16x16(x, rtcd);
+      vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), xd);
+    }
+  } else if (tx_size == TX_8X8) {
     vp8_transform_mby_8x8(x);
-  else
-    vp8_transform_mby_4x4(x);
-
-  if (tx_type == TX_16X16)
-    vp8_quantize_mby_16x16(x);
-  else if (tx_type == TX_8X8)
     vp8_quantize_mby_8x8(x);
-  else
-    vp8_quantize_mby_4x4(x);
-
-  if (x->optimize) {
-    if (tx_type == TX_16X16)
-      vp8_optimize_mby_16x16(x, rtcd);
-    else if (tx_type == TX_8X8)
+    if (x->optimize)
       vp8_optimize_mby_8x8(x, rtcd);
-    else
+    vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
+  } else {
+    vp8_transform_mby_4x4(x);
+    vp8_quantize_mby_4x4(x);
+    if (x->optimize)
       vp8_optimize_mby_4x4(x, rtcd);
+    vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
   }
 
-  if (tx_type == TX_16X16)
-#if CONFIG_HYBRIDTRANSFORM16X16
-  {
-    if ((x->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
-        (x->q_index < ACTIVE_HT16)) {
-      BLOCKD *bd = &x->e_mbd.block[0];
-      vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
-    } else
-      vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-  }
-#else
-    vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-#endif
-  else if (tx_type == TX_8X8)
-    vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-  else
-    vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
-  RECON_INVOKE(&rtcd->common->recon, recon_mby)
-      (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
-
+  RECON_INVOKE(&rtcd->common->recon, recon_mby)(IF_RTCD(&rtcd->common->recon),
+                                                xd);
 }
 
 void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
-  int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-  if (tx_type == TX_16X16) tx_type = TX_8X8; // 16x16 for U and V should default to 8x8 behavior.
+  MACROBLOCKD *xd = &x->e_mbd;
+  TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
+
 #if CONFIG_COMP_INTRA_PRED
-  if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
+  if (xd->mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
 #endif
-    RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(&x->e_mbd);
+    RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(xd);
 #if CONFIG_COMP_INTRA_PRED
   } else {
-    RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(&x->e_mbd);
+    RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(xd);
   }
 #endif
 
-  ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
-  if (tx_type == TX_8X8)
-    vp8_transform_mbuv_8x8(x);
-  else
+  ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff,
+                                            x->src.u_buffer, x->src.v_buffer,
+                                            xd->predictor, x->src.uv_stride);
+  if (tx_size == TX_4X4) {
     vp8_transform_mbuv_4x4(x);
-
-  if (tx_type == TX_8X8)
-    vp8_quantize_mbuv_8x8(x);
-  else
     vp8_quantize_mbuv_4x4(x);
-
-  if (x->optimize) {
-    if (tx_type == TX_8X8)
-      vp8_optimize_mbuv_8x8(x, rtcd);
-    else
+    if (x->optimize)
       vp8_optimize_mbuv_4x4(x, rtcd);
+    vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
+  } else /* 16x16 or 8x8 */ {
+    vp8_transform_mbuv_8x8(x);
+    vp8_quantize_mbuv_8x8(x);
+    if (x->optimize)
+      vp8_optimize_mbuv_8x8(x, rtcd);
+    vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), xd);
   }
 
-  if (tx_type == TX_8X8)
-    vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-  else
-    vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
-  vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+  vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), xd);
 }
 
 void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
@@ -252,7 +217,7 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
   }
 #endif
 
-  if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_8X8) {
+  if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
     int idx = (ib & 0x02) ? (ib + 2) : ib;
 
     // generate residual blocks
@@ -319,20 +284,17 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
   ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 8);
 
   x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
-
   x->quantize_b_4x4(be, b);
-
   vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
 
   RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
                                                b->diff, *(b->base_dst) + b->dst, b->dst_stride);
 }
 
-
-
 void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   int i, ib, mode, second;
   BLOCKD *b;
+
   for (i = 0; i < 4; i++) {
     ib = vp8_i8x8_block[i];
     b = &x->e_mbd.block[ib];
-- 
GitLab