From f10465634bbbdc2cd251d8e12d965b91b6cecb83 Mon Sep 17 00:00:00 2001
From: Deb Mukherjee <debargha@google.com>
Date: Mon, 22 Oct 2012 11:55:29 -0700
Subject: [PATCH] Merging the hybrid transform experiments

Change-Id: I99f1982b30a630a9a070a8326d83b34a33cba14c
---
 configure                       |  3 --
 vp8/common/blockd.h             | 27 -------------
 vp8/common/default_coef_probs.h | 24 +++++-------
 vp8/common/entropy.c            | 15 -------
 vp8/common/entropy.h            |  2 -
 vp8/common/idct.h               |  3 --
 vp8/common/idctllm.c            |  8 ----
 vp8/common/onyxc_int.h          | 18 ---------
 vp8/decoder/decodframe.c        | 44 ---------------------
 vp8/decoder/dequantize.c        |  6 ---
 vp8/decoder/dequantize.h        |  6 ---
 vp8/decoder/detokenize.c        | 69 +--------------------------------
 vp8/encoder/bitstream.c         | 25 ------------
 vp8/encoder/block.h             |  2 -
 vp8/encoder/dct.c               |  7 ----
 vp8/encoder/dct.h               |  2 -
 vp8/encoder/encodeframe.c       |  6 ---
 vp8/encoder/encodeintra.c       | 20 +---------
 vp8/encoder/encodemb.c          |  2 -
 vp8/encoder/onyx_if.c           |  6 ---
 vp8/encoder/onyx_int.h          | 12 ------
 vp8/encoder/quantize.c          |  2 -
 vp8/encoder/quantize.h          |  2 -
 vp8/encoder/ratectrl.c          | 12 ------
 vp8/encoder/rdopt.c             | 45 +--------------------
 vp8/encoder/tokenize.c          | 48 -----------------------
 26 files changed, 14 insertions(+), 402 deletions(-)

diff --git a/configure b/configure
index ceeec271cd..1612f36bc9 100755
--- a/configure
+++ b/configure
@@ -221,12 +221,9 @@ EXPERIMENT_LIST="
     superblocks
     pred_filter
     lossless
-    hybridtransform
-    hybridtransform8x8
     switchable_interp
     newbestrefmv
     new_mvref
-    hybridtransform16x16
     newmventropy
     tx_select
 "
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 3ab4cc3a96..feb6150662 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -135,14 +135,12 @@ typedef enum {
   TX_SIZE_MAX    // Number of different transforms available
 } TX_SIZE;
 
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
 typedef enum {
   DCT_DCT   = 0,    // DCT in both horizontal and vertical
   ADST_DCT  = 1,    // ADST in horizontal, DCT in vertical
   DCT_ADST  = 2,    // DCT in horizontal, ADST in vertical
   ADST_ADST = 3     // ADST in both directions
 } TX_TYPE;
-#endif
 
 #define VP8_YMODES  (B_PRED + 1)
 #define VP8_UV_MODES (TM_PRED + 1)
@@ -184,9 +182,7 @@ typedef enum {
 union b_mode_info {
   struct {
     B_PREDICTION_MODE first;
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
     TX_TYPE tx_type;
-#endif
 
 #if CONFIG_COMP_INTRA_PRED
     B_PREDICTION_MODE second;
@@ -388,17 +384,11 @@ typedef struct MacroBlockD {
 
 } MACROBLOCKD;
 
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
 #define ACTIVE_HT 110  // quantization stepsize threshold
-#endif
 
-#if CONFIG_HYBRIDTRANSFORM8X8
 #define ACTIVE_HT8 300
-#endif
 
-#if CONFIG_HYBRIDTRANSFORM16X16
 #define ACTIVE_HT16 300
-#endif
 
 // convert MB_PREDICTION_MODE to B_PREDICTION_MODE
 static B_PREDICTION_MODE pred_mode_conv(MB_PREDICTION_MODE mode) {
@@ -442,7 +432,6 @@ static B_PREDICTION_MODE pred_mode_conv(MB_PREDICTION_MODE mode) {
   return b_mode;
 }
 
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
 // transform mapping
 static TX_TYPE txfm_map(B_PREDICTION_MODE bmode) {
   // map transform type
@@ -470,9 +459,7 @@ static TX_TYPE txfm_map(B_PREDICTION_MODE bmode) {
   }
   return tx_type;
 }
-#endif
 
-#if CONFIG_HYBRIDTRANSFORM
 static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, const BLOCKD *b) {
   TX_TYPE tx_type = DCT_DCT;
   if (xd->mode_info_context->mbmi.mode == B_PRED &&
@@ -481,9 +468,7 @@ static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, const BLOCKD *b) {
   }
   return tx_type;
 }
-#endif
-#if CONFIG_HYBRIDTRANSFORM8X8 static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, const BLOCKD *b) { TX_TYPE tx_type = DCT_DCT; if (xd->mode_info_context->mbmi.mode == I8X8_PRED && @@ -492,9 +477,7 @@ static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, const BLOCKD *b) { } return tx_type; } -#endif -#if CONFIG_HYBRIDTRANSFORM16X16 static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, const BLOCKD *b) { TX_TYPE tx_type = DCT_DCT; if (xd->mode_info_context->mbmi.mode < I8X8_PRED && @@ -503,34 +486,24 @@ static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, const BLOCKD *b) { } return tx_type; } -#endif -#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || \ - CONFIG_HYBRIDTRANSFORM16X16 static TX_TYPE get_tx_type(const MACROBLOCKD *xd, const BLOCKD *b) { TX_TYPE tx_type = DCT_DCT; int ib = (b - xd->block); if (ib >= 16) return tx_type; -#if CONFIG_HYBRIDTRANSFORM16X16 if (xd->mode_info_context->mbmi.txfm_size == TX_16X16) { tx_type = get_tx_type_16x16(xd, b); } -#endif -#if CONFIG_HYBRIDTRANSFORM8X8 if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) { ib = (ib & 8) + ((ib & 4) >> 1); tx_type = get_tx_type_8x8(xd, &xd->block[ib]); } -#endif -#if CONFIG_HYBRIDTRANSFORM if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) { tx_type = get_tx_type_4x4(xd, b); } -#endif return tx_type; } -#endif extern void vp8_build_block_doffsets(MACROBLOCKD *xd); extern void vp8_setup_block_dptrs(MACROBLOCKD *xd); diff --git a/vp8/common/default_coef_probs.h b/vp8/common/default_coef_probs.h index 5e21195ee2..bd1f795d0b 100644 --- a/vp8/common/default_coef_probs.h +++ b/vp8/common/default_coef_probs.h @@ -13,9 +13,9 @@ static const vp8_prob default_coef_probs [BLOCK_TYPES] -[COEF_BANDS] -[PREV_COEF_CONTEXTS] -[ENTROPY_NODES] = { + [COEF_BANDS] + [PREV_COEF_CONTEXTS] + [ENTROPY_NODES] = { { /* Block Type ( 0 ) */ { @@ -254,11 +254,10 @@ static const vp8_prob default_coef_probs [BLOCK_TYPES] } }; -#if CONFIG_HYBRIDTRANSFORM static const vp8_prob default_hybrid_coef_probs [BLOCK_TYPES] -[COEF_BANDS] -[PREV_COEF_CONTEXTS] -[ENTROPY_NODES] = { + [COEF_BANDS] + [PREV_COEF_CONTEXTS] + [ENTROPY_NODES] = { { /* Block Type ( 0 ) */ { @@ -496,7 +495,6 @@ static const vp8_prob default_hybrid_coef_probs [BLOCK_TYPES] } } }; -#endif static const vp8_prob default_coef_probs_8x8[BLOCK_TYPES_8X8] @@ -731,12 +729,11 @@ default_coef_probs_8x8[BLOCK_TYPES_8X8] } }; -#if CONFIG_HYBRIDTRANSFORM8X8 static const vp8_prob default_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8] - [COEF_BANDS] - [PREV_COEF_CONTEXTS] - [ENTROPY_NODES] = { + [COEF_BANDS] + [PREV_COEF_CONTEXTS] + [ENTROPY_NODES] = { { /* block Type 0 */ { @@ -964,7 +961,6 @@ default_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8] } } }; -#endif static const vp8_prob default_coef_probs_16x16[BLOCK_TYPES_16X16] @@ -1173,7 +1169,6 @@ static const vp8_prob } }; -#if CONFIG_HYBRIDTRANSFORM16X16 static const vp8_prob default_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] @@ -1380,4 +1375,3 @@ static const vp8_prob } } }; -#endif diff --git a/vp8/common/entropy.c b/vp8/common/entropy.c index 90f7a52c2f..a3f731a3ca 100644 --- a/vp8/common/entropy.c +++ b/vp8/common/entropy.c @@ -64,8 +64,6 @@ DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) = { 7, 11, 14, 15, }; - -#if CONFIG_HYBRIDTRANSFORM DECLARE_ALIGNED(16, const int, vp8_col_scan[16]) = { 0, 4, 8, 12, 1, 5, 9, 13, @@ -78,7 +76,6 @@ DECLARE_ALIGNED(16, const int, vp8_row_scan[16]) = { 8, 9, 10, 11, 12, 13, 14, 15 }; -#endif DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5, @@ 
-208,25 +205,19 @@ vp8_extra_bit_struct vp8_extra_bits[12] = { void vp8_default_coef_probs(VP8_COMMON *pc) { vpx_memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(pc->fc.coef_probs)); -#if CONFIG_HYBRIDTRANSFORM vpx_memcpy(pc->fc.hybrid_coef_probs, default_hybrid_coef_probs, sizeof(pc->fc.hybrid_coef_probs)); -#endif vpx_memcpy(pc->fc.coef_probs_8x8, default_coef_probs_8x8, sizeof(pc->fc.coef_probs_8x8)); -#if CONFIG_HYBRIDTRANSFORM8X8 vpx_memcpy(pc->fc.hybrid_coef_probs_8x8, default_hybrid_coef_probs_8x8, sizeof(pc->fc.hybrid_coef_probs_8x8)); -#endif vpx_memcpy(pc->fc.coef_probs_16x16, default_coef_probs_16x16, sizeof(pc->fc.coef_probs_16x16)); -#if CONFIG_HYBRIDTRANSFORM16X16 vpx_memcpy(pc->fc.hybrid_coef_probs_16x16, default_hybrid_coef_probs_16x16, sizeof(pc->fc.hybrid_coef_probs_16x16)); -#endif } void vp8_coef_tree_initialize() { @@ -344,7 +335,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) { } } -#if CONFIG_HYBRIDTRANSFORM for (i = 0; i < BLOCK_TYPES; ++i) for (j = 0; j < COEF_BANDS; ++j) for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { @@ -366,7 +356,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) { else cm->fc.hybrid_coef_probs[i][j][k][t] = prob; } } -#endif for (i = 0; i < BLOCK_TYPES_8X8; ++i) for (j = 0; j < COEF_BANDS; ++j) @@ -390,7 +379,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) { } } -#if CONFIG_HYBRIDTRANSFORM8X8 for (i = 0; i < BLOCK_TYPES_8X8; ++i) for (j = 0; j < COEF_BANDS; ++j) for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { @@ -413,7 +401,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) { else cm->fc.hybrid_coef_probs_8x8[i][j][k][t] = prob; } } -#endif for (i = 0; i < BLOCK_TYPES_16X16; ++i) for (j = 0; j < COEF_BANDS; ++j) @@ -437,7 +424,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) { } } -#if CONFIG_HYBRIDTRANSFORM16X16 for (i = 0; i < BLOCK_TYPES_16X16; ++i) for (j = 0; j < COEF_BANDS; ++j) for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { @@ -458,5 +444,4 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) { else cm->fc.hybrid_coef_probs_16x16[i][j][k][t] = prob; } } -#endif } diff --git a/vp8/common/entropy.h b/vp8/common/entropy.h index b9dfb344f9..48a100ac61 100644 --- a/vp8/common/entropy.h +++ b/vp8/common/entropy.h @@ -104,10 +104,8 @@ struct VP8Common; void vp8_default_coef_probs(struct VP8Common *); extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]); -#if CONFIG_HYBRIDTRANSFORM extern DECLARE_ALIGNED(16, const int, vp8_col_scan[16]); extern DECLARE_ALIGNED(16, const int, vp8_row_scan[16]); -#endif extern short vp8_default_zig_zag_mask[16]; extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]); diff --git a/vp8/common/idct.h b/vp8/common/idct.h index d096e81829..ae33df6680 100644 --- a/vp8/common/idct.h +++ b/vp8/common/idct.h @@ -109,12 +109,9 @@ extern prototype_second_order(vp8_short_inv_walsh4x4_lossless_c); extern prototype_second_order(vp8_short_inv_walsh4x4_1_lossless_c); #endif -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 #include "vp8/common/blockd.h" void vp8_ihtllm_c(short *input, short *output, int pitch, TX_TYPE tx_type, int tx_dim); -#endif - typedef prototype_idct((*vp8_idct_fn_t)); typedef prototype_idct_scalar_add((*vp8_idct_scalar_add_fn_t)); diff --git a/vp8/common/idctllm.c b/vp8/common/idctllm.c index d705fec329..c7369b2e2e 100644 --- a/vp8/common/idctllm.c +++ b/vp8/common/idctllm.c @@ -26,9 +26,7 @@ #include "vp8/common/idct.h" #include "vp8/common/systemdependent.h" -#if CONFIG_HYBRIDTRANSFORM #include "vp8/common/blockd.h" -#endif #include <math.h> @@ -38,7 +36,6 @@ static 
const int rounding = 0; // TODO: these transforms can be further converted into integer forms // for complexity optimization -#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16 float idct_4[16] = { 0.500000000000000, 0.653281482438188, 0.500000000000000, 0.270598050073099, 0.500000000000000, 0.270598050073099, -0.500000000000000, -0.653281482438188, @@ -90,9 +87,7 @@ float iadst_8[64] = { 0.483002021635509, -0.466553967085785, 0.434217976756762, -0.387095214016348, 0.326790388032145, -0.255357107325375, 0.175227946595736, -0.089131608307532 }; -#endif -#if CONFIG_HYBRIDTRANSFORM16X16 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 float idct_16[256] = { 0.250000, 0.351851, 0.346760, 0.338330, 0.326641, 0.311806, 0.293969, 0.273300, 0.250000, 0.224292, 0.196424, 0.166664, 0.135299, 0.102631, 0.068975, 0.034654, @@ -162,9 +157,7 @@ float iadst_16[256] = { 0.347761, -0.344612, 0.338341, -0.329007, 0.316693, -0.301511, 0.283599, -0.263118, 0.240255, -0.215215, 0.188227, -0.159534, 0.129396, -0.098087, 0.065889, -0.033094 }; -#endif -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 void vp8_ihtllm_c(short *input, short *output, int pitch, TX_TYPE tx_type, int tx_dim) { @@ -289,7 +282,6 @@ void vp8_ihtllm_c(short *input, short *output, int pitch, } vp8_clear_system_state(); // Make it simd safe : __asm emms; } -#endif void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) { int i; diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h index 0396a70870..a212bc8982 100644 --- a/vp8/common/onyxc_int.h +++ b/vp8/common/onyxc_int.h @@ -51,17 +51,11 @@ typedef struct frame_contexts { vp8_prob sub_mv_ref_prob [SUBMVREF_COUNT][VP8_SUBMVREFS - 1]; vp8_prob mbsplit_prob [VP8_NUMMBSPLITS - 1]; vp8_prob coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM vp8_prob hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#endif vp8_prob coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_prob hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#endif vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_prob hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#endif #if CONFIG_NEWMVENTROPY nmv_context nmvc; @@ -87,45 +81,33 @@ typedef struct frame_contexts { vp8_prob pre_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM vp8_prob pre_hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#endif vp8_prob pre_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_prob pre_hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#endif vp8_prob pre_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_prob pre_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#endif unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#if CONFIG_HYBRIDTRANSFORM unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#endif unsigned int 
coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#if CONFIG_HYBRIDTRANSFORM8X8 unsigned int hybrid_coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#endif unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#if CONFIG_HYBRIDTRANSFORM16X16 unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#endif #if CONFIG_NEWMVENTROPY nmv_context_counts NMVcount; diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c index 01739c0dbd..cc19985fd2 100644 --- a/vp8/decoder/decodframe.c +++ b/vp8/decoder/decodframe.c @@ -208,10 +208,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_PREDICTION_MODE mode; int i; int tx_size; -#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || \ - CONFIG_HYBRIDTRANSFORM16X16 TX_TYPE tx_type; -#endif #if CONFIG_SUPERBLOCKS VP8_COMMON *pc = &pbi->common; int orig_skip_flag = xd->mode_info_context->mbmi.mb_skip_coeff; @@ -330,7 +327,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, vp8_intra8x8_predict(b, i8x8mode, b->predictor); if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) { -#if CONFIG_HYBRIDTRANSFORM8X8 tx_type = get_tx_type(xd, &xd->block[idx]); if (tx_type != DCT_DCT) { vp8_ht_dequant_idct_add_8x8_c(tx_type, @@ -338,9 +334,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, } else { vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride); } -#else - vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride); -#endif q += 64; } else { for (j = 0; j < 4; j++) { @@ -380,7 +373,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, } #endif -#if CONFIG_HYBRIDTRANSFORM tx_type = get_tx_type(xd, b); if (tx_type != DCT_DCT) { vp8_ht_dequant_idct_add_c(tx_type, b->qcoeff, @@ -390,18 +382,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, vp8_dequant_idct_add_c(b->qcoeff, b->dequant, b->predictor, *(b->base_dst) + b->dst, 16, b->dst_stride); } -#else - if (xd->eobs[i] > 1) { - DEQUANT_INVOKE(&pbi->dequant, idct_add) - (b->qcoeff, b->dequant, b->predictor, - *(b->base_dst) + b->dst, 16, b->dst_stride); - } else { - IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add) - (b->qcoeff[0] * b->dequant[0], b->predictor, - *(b->base_dst) + b->dst, 16, b->dst_stride); - ((int *)b->qcoeff)[0] = 0; - } -#endif } } else if (mode == SPLITMV) { DEQUANT_INVOKE(&pbi->dequant, idct_add_y_block) @@ -412,7 +392,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, BLOCKD *b = &xd->block[24]; if (tx_size == TX_16X16) { -#if CONFIG_HYBRIDTRANSFORM16X16 BLOCKD *bd = &xd->block[0]; tx_type = get_tx_type(xd, bd); if (tx_type != DCT_DCT) { @@ -424,11 +403,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, xd->predictor, xd->dst.y_buffer, 16, xd->dst.y_stride); } -#else - vp8_dequant_idct_add_16x16_c(xd->qcoeff, xd->block[0].dequant, - xd->predictor, xd->dst.y_buffer, - 16, xd->dst.y_stride); -#endif } else if (tx_size == TX_8X8) { #if CONFIG_SUPERBLOCKS void *orig = xd->mode_info_context; @@ -900,7 +874,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) { } } } -#if CONFIG_HYBRIDTRANSFORM { if (vp8_read_bit(bc)) { /* read coef probability tree */ @@ -920,7 +893,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) { } } } -#endif if (pbi->common.txfm_mode != ONLY_4X4 && vp8_read_bit(bc)) { // read coef probability tree @@ -940,7 +912,6 @@ 
static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) { } } } -#if CONFIG_HYBRIDTRANSFORM8X8 if (pbi->common.txfm_mode != ONLY_4X4 && vp8_read_bit(bc)) { // read coef probability tree for (i = 0; i < BLOCK_TYPES_8X8; i++) @@ -959,7 +930,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) { } } } -#endif // 16x16 if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) { @@ -980,7 +950,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) { } } } -#if CONFIG_HYBRIDTRANSFORM16X16 if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) { // read coef probability tree for (i = 0; i < BLOCK_TYPES_16X16; ++i) @@ -999,7 +968,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) { } } } -#endif } int vp8_decode_frame(VP8D_COMP *pbi) { @@ -1362,22 +1330,16 @@ int vp8_decode_frame(VP8D_COMP *pbi) { vp8_copy(pbi->common.fc.pre_coef_probs, pbi->common.fc.coef_probs); -#if CONFIG_HYBRIDTRANSFORM vp8_copy(pbi->common.fc.pre_hybrid_coef_probs, pbi->common.fc.hybrid_coef_probs); -#endif vp8_copy(pbi->common.fc.pre_coef_probs_8x8, pbi->common.fc.coef_probs_8x8); -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_8x8, pbi->common.fc.hybrid_coef_probs_8x8); -#endif vp8_copy(pbi->common.fc.pre_coef_probs_16x16, pbi->common.fc.coef_probs_16x16); -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16, pbi->common.fc.hybrid_coef_probs_16x16); -#endif vp8_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob); vp8_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob); vp8_copy(pbi->common.fc.pre_bmode_prob, pbi->common.fc.bmode_prob); @@ -1391,17 +1353,11 @@ int vp8_decode_frame(VP8D_COMP *pbi) { vp8_copy(pbi->common.fc.pre_mvc_hp, pbi->common.fc.mvc_hp); #endif vp8_zero(pbi->common.fc.coef_counts); -#if CONFIG_HYBRIDTRANSFORM vp8_zero(pbi->common.fc.hybrid_coef_counts); -#endif vp8_zero(pbi->common.fc.coef_counts_8x8); -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_zero(pbi->common.fc.hybrid_coef_counts_8x8); -#endif vp8_zero(pbi->common.fc.coef_counts_16x16); -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_zero(pbi->common.fc.hybrid_coef_counts_16x16); -#endif vp8_zero(pbi->common.fc.ymode_counts); vp8_zero(pbi->common.fc.uv_mode_counts); vp8_zero(pbi->common.fc.bmode_counts); diff --git a/vp8/decoder/dequantize.c b/vp8/decoder/dequantize.c index e55da754bb..db9c3b0b82 100644 --- a/vp8/decoder/dequantize.c +++ b/vp8/decoder/dequantize.c @@ -42,7 +42,6 @@ void vp8_dequantize_b_c(BLOCKD *d) { } -#if CONFIG_HYBRIDTRANSFORM void vp8_ht_dequant_idct_add_c(TX_TYPE tx_type, short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride) { @@ -77,9 +76,7 @@ void vp8_ht_dequant_idct_add_c(TX_TYPE tx_type, short *input, short *dq, pred += pitch; } } -#endif -#if CONFIG_HYBRIDTRANSFORM8X8 void vp8_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride) { @@ -123,7 +120,6 @@ void vp8_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq, pred = origpred + (b + 1) / 2 * 4 * pitch + ((b + 1) % 2) * 4; } } -#endif void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride) { @@ -468,7 +464,6 @@ void vp8_dequant_dc_idct_add_8x8_c(short *input, short *dq, unsigned char *pred, #endif } -#if CONFIG_HYBRIDTRANSFORM16X16 void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq, unsigned char *pred, unsigned 
char *dest, int pitch, int stride) { @@ -507,7 +502,6 @@ void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq, pred += pitch; } } -#endif void vp8_dequant_idct_add_16x16_c(short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride) { diff --git a/vp8/decoder/dequantize.h b/vp8/decoder/dequantize.h index 5b7b215989..4ac710431b 100644 --- a/vp8/decoder/dequantize.h +++ b/vp8/decoder/dequantize.h @@ -76,7 +76,6 @@ extern prototype_dequant_block(vp8_dequant_block); #endif extern prototype_dequant_idct_add(vp8_dequant_idct_add); -#if CONFIG_HYBRIDTRANSFORM // declare dequantization and inverse transform module of hybrid transform decoder #ifndef vp8_ht_dequant_idct_add #define vp8_ht_dequant_idct_add vp8_ht_dequant_idct_add_c @@ -85,7 +84,6 @@ extern void vp8_ht_dequant_idct_add(TX_TYPE tx_type, short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride); -#endif #ifndef vp8_dequant_dc_idct_add #define vp8_dequant_dc_idct_add vp8_dequant_dc_idct_add_c @@ -191,17 +189,13 @@ typedef struct { #define DEQUANT_INVOKE(ctx,fn) vp8_dequant_##fn #endif -#if CONFIG_HYBRIDTRANSFORM8X8 void vp8_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride); -#endif -#if CONFIG_HYBRIDTRANSFORM16X16 void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride); -#endif #if CONFIG_SUPERBLOCKS void vp8_dequant_dc_idct_add_y_block_8x8_inplace_c(short *q, short *dq, diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c index 85f213470a..0e5e544181 100644 --- a/vp8/decoder/detokenize.c +++ b/vp8/decoder/detokenize.c @@ -135,7 +135,6 @@ int get_token(int v) { else return DCT_VAL_CATEGORY6; } -#if CONFIG_HYBRIDTRANSFORM void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr, int block, PLANE_TYPE type, TX_TYPE tx_type, @@ -180,7 +179,6 @@ void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr, fc->coef_counts[type][band][pt][DCT_EOB_TOKEN]++; } } -#endif void static count_tokens(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, @@ -202,9 +200,7 @@ void static count_tokens(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, } void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, -#if CONFIG_HYBRIDTRANSFORM8X8 TX_TYPE tx_type, -#endif ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, int eob, int seg_eob, FRAME_CONTEXT *fc) { int c, pt, token, band; @@ -214,29 +210,23 @@ void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, int v = qcoeff_ptr[rc]; band = (type == 1 ? vp8_coef_bands[c] : vp8_coef_bands_8x8[c]); token = get_token(v); -#if CONFIG_HYBRIDTRANSFORM8X8 if (tx_type != DCT_DCT) fc->hybrid_coef_counts_8x8[type][band][pt][token]++; else -#endif fc->coef_counts_8x8[type][band][pt][token]++; pt = vp8_prev_token_class[token]; } if (eob < seg_eob) { band = (type == 1 ? 
vp8_coef_bands[c] : vp8_coef_bands_8x8[c]); -#if CONFIG_HYBRIDTRANSFORM8X8 if (tx_type != DCT_DCT) fc->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN]++; else -#endif fc->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN]++; } } void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, -#if CONFIG_HYBRIDTRANSFORM16X16 TX_TYPE tx_type, -#endif ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, int eob, int seg_eob, FRAME_CONTEXT *fc) { int c, pt, token; @@ -246,21 +236,17 @@ void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, int v = qcoeff_ptr[rc]; int band = vp8_coef_bands_16x16[c]; token = get_token(v); -#if CONFIG_HYBRIDTRANSFORM16X16 if (tx_type != DCT_DCT) fc->hybrid_coef_counts_16x16[type][band][pt][token]++; else -#endif fc->coef_counts_16x16[type][band][pt][token]++; pt = vp8_prev_token_class[token]; } if (eob < seg_eob) { int band = vp8_coef_bands_16x16[c]; -#if CONFIG_HYBRIDTRANSFORM16X16 if (tx_type != DCT_DCT) fc->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN]++; else -#endif fc->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN]++; } } @@ -306,9 +292,7 @@ static int decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd, BOOL_DECODER* const br, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, PLANE_TYPE type, -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 TX_TYPE tx_type, -#endif int seg_eob, INT16 *qcoeff_ptr, int i, const int *const scan, int block_type, const int *coef_bands) { @@ -320,23 +304,17 @@ static int decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd, default: case TX_4X4: coef_probs = -#if CONFIG_HYBRIDTRANSFORM tx_type != DCT_DCT ? fc->hybrid_coef_probs[type][0][0] : -#endif fc->coef_probs[type][0][0]; break; case TX_8X8: coef_probs = -#if CONFIG_HYBRIDTRANSFORM8X8 tx_type != DCT_DCT ? fc->hybrid_coef_probs_8x8[type][0][0] : -#endif fc->coef_probs_8x8[type][0][0]; break; case TX_16X16: coef_probs = -#if CONFIG_HYBRIDTRANSFORM16X16 tx_type != DCT_DCT ? 
fc->hybrid_coef_probs_16x16[type][0][0] : -#endif fc->coef_probs_16x16[type][0][0]; break; } @@ -422,26 +400,17 @@ SKIP_START: } if (block_type == TX_4X4) { -#if CONFIG_HYBRIDTRANSFORM count_tokens_adaptive_scan(xd, qcoeff_ptr, i, type, tx_type, a, l, c, seg_eob, fc); -#else - count_tokens(qcoeff_ptr, i, type, - a, l, c, seg_eob, fc); -#endif } else if (block_type == TX_8X8) count_tokens_8x8(qcoeff_ptr, i, type, -#if CONFIG_HYBRIDTRANSFORM8X8 tx_type, -#endif a, l, c, seg_eob, fc); else count_tokens_16x16(qcoeff_ptr, i, type, -#if CONFIG_HYBRIDTRANSFORM16X16 tx_type, -#endif a, l, c, seg_eob, fc); return c; } @@ -457,12 +426,7 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd, const int segment_id = xd->mode_info_context->mbmi.segment_id; const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB); INT16 *qcoeff_ptr = &xd->qcoeff[0]; -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 - TX_TYPE tx_type = DCT_DCT; -#endif -#if CONFIG_HYBRIDTRANSFORM16X16 - tx_type = get_tx_type(xd, &xd->block[0]); -#endif + TX_TYPE tx_type = get_tx_type(xd, &xd->block[0]); type = PLANE_TYPE_Y_WITH_DC; @@ -475,9 +439,7 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd, { const int* const scan = vp8_default_zig_zag1d_16x16; c = decode_coefs(pbi, xd, bc, A, L, type, -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 tx_type, -#endif seg_eob, qcoeff_ptr, 0, scan, TX_16X16, coef_bands_x_16x16); eobs[0] = c; @@ -492,9 +454,7 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd, // 8x8 chroma blocks qcoeff_ptr += 256; type = PLANE_TYPE_UV; -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 tx_type = DCT_DCT; -#endif if (seg_active) seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB); else @@ -505,9 +465,7 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd, const int* const scan = vp8_default_zig_zag1d_8x8; c = decode_coefs(pbi, xd, bc, a, l, type, -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 tx_type, -#endif seg_eob, qcoeff_ptr, i, scan, TX_8X8, coef_bands_x_8x8); a[0] = l[0] = ((eobs[i] = c) != !type); @@ -533,9 +491,7 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, const int segment_id = xd->mode_info_context->mbmi.segment_id; const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB); INT16 *qcoeff_ptr = &xd->qcoeff[0]; -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 TX_TYPE tx_type = DCT_DCT; -#endif int bufthred = (xd->mode_info_context->mbmi.mode == I8X8_PRED) ? 
16 : 24; if (xd->mode_info_context->mbmi.mode != B_PRED && @@ -551,9 +507,7 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, else seg_eob = 4; c = decode_coefs(pbi, xd, bc, a, l, type, -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 tx_type, -#endif seg_eob, qcoeff_ptr + 24 * 16, 24, scan, TX_8X8, coef_bands_x); a[0] = l[0] = ((eobs[24] = c) != !type); @@ -573,22 +527,16 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, ENTROPY_CONTEXT *const a = A + vp8_block2above_8x8[i]; ENTROPY_CONTEXT *const l = L + vp8_block2left_8x8[i]; const int *const scan = vp8_default_zig_zag1d_8x8; -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 tx_type = DCT_DCT; -#endif if (i == 16) type = PLANE_TYPE_UV; -#if CONFIG_HYBRIDTRANSFORM8X8 if (type == PLANE_TYPE_Y_WITH_DC) { tx_type = get_tx_type(xd, xd->block + i); } -#endif c = decode_coefs(pbi, xd, bc, a, l, type, -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 tx_type, -#endif seg_eob, qcoeff_ptr, i, scan, TX_8X8, coef_bands_x_8x8); a[0] = l[0] = ((eobs[i] = c) != !type); @@ -601,9 +549,7 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, if (bufthred == 16) { type = PLANE_TYPE_UV; -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 tx_type = DCT_DCT; -#endif seg_eob = 16; // use 4x4 transform for U, V components in I8X8 prediction mode @@ -613,9 +559,7 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, const int *scan = vp8_default_zig_zag1d; c = decode_coefs(pbi, xd, bc, a, l, type, -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 tx_type, -#endif seg_eob, qcoeff_ptr, i, scan, TX_4X4, coef_bands_x); a[0] = l[0] = ((eobs[i] = c) != !type); @@ -652,9 +596,7 @@ int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd, type = PLANE_TYPE_Y2; c = decode_coefs(dx, xd, bc, a, l, type, -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 DCT_DCT, -#endif seg_eob, qcoeff_ptr + 24 * 16, 24, scan, TX_4X4, coef_bands_x); a[0] = l[0] = ((eobs[24] = c) != !type); @@ -668,13 +610,10 @@ int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd, for (i = 0; i < 24; ++i) { ENTROPY_CONTEXT *const a = A + vp8_block2above[i]; ENTROPY_CONTEXT *const l = L + vp8_block2left[i]; -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 TX_TYPE tx_type = DCT_DCT; -#endif if (i == 16) type = PLANE_TYPE_UV; -#if CONFIG_HYBRIDTRANSFORM tx_type = get_tx_type(xd, &xd->block[i]); switch(tx_type) { case ADST_DCT : @@ -689,12 +628,8 @@ int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd, scan = vp8_default_zig_zag1d; break; } -#endif - c = decode_coefs(dx, xd, bc, a, l, type, -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 - tx_type, -#endif + c = decode_coefs(dx, xd, bc, a, l, type, tx_type, seg_eob, qcoeff_ptr, i, scan, TX_4X4, coef_bands_x); a[0] = l[0] = ((eobs[i] = c) != !type); diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c index 70cdb6aaf5..c2f4476198 100644 --- a/vp8/encoder/bitstream.c +++ b/vp8/encoder/bitstream.c @@ -45,32 +45,26 @@ unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#if CONFIG_HYBRIDTRANSFORM unsigned int hybrid_tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#endif unsigned int tree_update_hist_8x8 
[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2]; -#if CONFIG_HYBRIDTRANSFORM8X8 unsigned int hybrid_tree_update_hist_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2]; -#endif unsigned int tree_update_hist_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2]; -#if CONFIG_HYBRIDTRANSFORM16X16 unsigned int hybrid_tree_update_hist_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2]; -#endif extern unsigned int active_section; #endif @@ -1498,7 +1492,6 @@ void build_coeff_contexts(VP8_COMP *cpi) { } } } -#if CONFIG_HYBRIDTRANSFORM for (i = 0; i < BLOCK_TYPES; ++i) { for (j = 0; j < COEF_BANDS; ++j) { for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { @@ -1519,8 +1512,6 @@ void build_coeff_contexts(VP8_COMP *cpi) { } } } -#endif - if (cpi->common.txfm_mode != ONLY_4X4) { for (i = 0; i < BLOCK_TYPES_8X8; ++i) { @@ -1547,7 +1538,6 @@ void build_coeff_contexts(VP8_COMP *cpi) { } } } -#if CONFIG_HYBRIDTRANSFORM8X8 for (i = 0; i < BLOCK_TYPES_8X8; ++i) { for (j = 0; j < COEF_BANDS; ++j) { for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { @@ -1572,7 +1562,6 @@ void build_coeff_contexts(VP8_COMP *cpi) { } } } -#endif } if (cpi->common.txfm_mode > ALLOW_8X8) { @@ -1595,7 +1584,6 @@ void build_coeff_contexts(VP8_COMP *cpi) { } } } -#if CONFIG_HYBRIDTRANSFORM16X16 for (i = 0; i < BLOCK_TYPES_16X16; ++i) { for (j = 0; j < COEF_BANDS; ++j) { for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { @@ -1614,7 +1602,6 @@ void build_coeff_contexts(VP8_COMP *cpi) { } } } -#endif } #if 0 @@ -1887,7 +1874,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) { } } -#if CONFIG_HYBRIDTRANSFORM savings = 0; update[0] = update[1] = 0; for (i = 0; i < BLOCK_TYPES; ++i) { @@ -1976,7 +1962,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) { } } } -#endif /* do not do this if not even allowed */ if (cpi->common.txfm_mode != ONLY_4X4) { @@ -2054,7 +2039,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) { } } } -#if CONFIG_HYBRIDTRANSFORM8X8 update[0] = update[1] = 0; savings = 0; for (i = 0; i < BLOCK_TYPES_8X8; ++i) { @@ -2128,7 +2112,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) { } } } -#endif } if (cpi->common.txfm_mode > ALLOW_8X8) { @@ -2206,7 +2189,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) { } } } -#if CONFIG_HYBRIDTRANSFORM16X16 update[0] = update[1] = 0; savings = 0; for (i = 0; i < BLOCK_TYPES_16X16; ++i) { @@ -2280,7 +2262,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) { } } } -#endif } } @@ -2731,17 +2712,11 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size) vp8_clear_system_state(); // __asm emms; vp8_copy(cpi->common.fc.pre_coef_probs, cpi->common.fc.coef_probs); -#if CONFIG_HYBRIDTRANSFORM vp8_copy(cpi->common.fc.pre_hybrid_coef_probs, cpi->common.fc.hybrid_coef_probs); -#endif vp8_copy(cpi->common.fc.pre_coef_probs_8x8, cpi->common.fc.coef_probs_8x8); -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_8x8, cpi->common.fc.hybrid_coef_probs_8x8); -#endif vp8_copy(cpi->common.fc.pre_coef_probs_16x16, cpi->common.fc.coef_probs_16x16); -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16, cpi->common.fc.hybrid_coef_probs_16x16); -#endif vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob); vp8_copy(cpi->common.fc.pre_uv_mode_prob, 
cpi->common.fc.uv_mode_prob); vp8_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob); diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h index 80f9b75b8f..292011f81c 100644 --- a/vp8/encoder/block.h +++ b/vp8/encoder/block.h @@ -164,10 +164,8 @@ typedef struct { unsigned int token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS] [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; -#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16 unsigned int hybrid_token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS] [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; -#endif int optimize; diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c index cd13fec7c1..0983b1c0ac 100644 --- a/vp8/encoder/dct.c +++ b/vp8/encoder/dct.c @@ -14,8 +14,6 @@ #include "vp8/common/idct.h" #include "vp8/common/systemdependent.h" -#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16 - #include "vp8/common/blockd.h" // TODO: these transforms can be converted into integer forms to reduce @@ -71,9 +69,7 @@ float adst_8[64] = { 0.175227946595735, -0.326790388032145, 0.434217976756762, -0.483002021635509, 0.466553967085785, -0.387095214016348, 0.255357107325376, -0.089131608307532 }; -#endif -#if CONFIG_HYBRIDTRANSFORM16X16 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 float dct_16[256] = { 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, @@ -143,7 +139,6 @@ float adst_16[256] = { 0.065889, -0.129396, 0.188227, -0.240255, 0.283599, -0.316693, 0.338341, -0.347761, 0.344612, -0.329007, 0.301511, -0.263118, 0.215215, -0.159534, 0.098087, -0.033094 }; -#endif static const int xC1S7 = 16069; static const int xC2S6 = 15137; @@ -400,7 +395,6 @@ void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) { // pitch = 8 } -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 void vp8_fht_c(short *input, short *output, int pitch, TX_TYPE tx_type, int tx_dim) { @@ -518,7 +512,6 @@ void vp8_fht_c(short *input, short *output, int pitch, } vp8_clear_system_state(); // Make it simd safe : __asm emms; } -#endif void vp8_short_fdct4x4_c(short *input, short *output, int pitch) { int i; diff --git a/vp8/encoder/dct.h b/vp8/encoder/dct.h index 180192bbb8..4ad1fe85d3 100644 --- a/vp8/encoder/dct.h +++ b/vp8/encoder/dct.h @@ -26,10 +26,8 @@ #endif -#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16 void vp8_fht_c(short *input, short *output, int pitch, TX_TYPE tx_type, int tx_dim); -#endif #ifndef vp8_fdct_short16x16 #define vp8_fdct_short16x16 vp8_short_fdct16x16_c diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c index 619695c334..15422cd2ba 100644 --- a/vp8/encoder/encodeframe.c +++ b/vp8/encoder/encodeframe.c @@ -1362,17 +1362,11 @@ static void encode_frame_internal(VP8_COMP *cpi) { vp8_zero(cpi->MVcount_hp); #endif vp8_zero(cpi->coef_counts); -#if CONFIG_HYBRIDTRANSFORM vp8_zero(cpi->hybrid_coef_counts); -#endif vp8_zero(cpi->coef_counts_8x8); -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_zero(cpi->hybrid_coef_counts_8x8); -#endif vp8_zero(cpi->coef_counts_16x16); -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_zero(cpi->hybrid_coef_counts_16x16); -#endif vp8cx_frame_init_quantizer(cpi); diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c index 9076780d9d..c17cacdd0c 100644 --- a/vp8/encoder/encodeintra.c +++ b/vp8/encoder/encodeintra.c @@ -57,9 +57,7 @@ void vp8_encode_intra4x4block(const 
VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, int ib) { BLOCKD *b = &x->e_mbd.block[ib]; BLOCK *be = &x->block[ib]; -#if CONFIG_HYBRIDTRANSFORM TX_TYPE tx_type; -#endif #if CONFIG_COMP_INTRA_PRED if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) { @@ -74,15 +72,12 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16); -#if CONFIG_HYBRIDTRANSFORM tx_type = get_tx_type(&x->e_mbd, b); if (tx_type != DCT_DCT) { vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4); vp8_ht_quantize_b_4x4(be, b, tx_type); vp8_ihtllm_c(b->dqcoeff, b->diff, 32, tx_type, 4); - } else -#endif - { + } else { x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ; x->quantize_b_4x4(be, b) ; vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ; @@ -103,9 +98,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { MACROBLOCKD *xd = &x->e_mbd; BLOCK *b = &x->block[0]; TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size; -#if CONFIG_HYBRIDTRANSFORM16X16 TX_TYPE tx_type; -#endif #if CONFIG_COMP_INTRA_PRED if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) @@ -120,7 +113,6 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { xd->predictor, b->src_stride); if (tx_size == TX_16X16) { -#if CONFIG_HYBRIDTRANSFORM16X16 BLOCKD *bd = &xd->block[0]; tx_type = get_tx_type(xd, bd); if (tx_type != DCT_DCT) { @@ -129,9 +121,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { if (x->optimize) vp8_optimize_mby_16x16(x, rtcd); vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, tx_type, 16); - } else -#endif - { + } else { vp8_transform_mby_16x16(x); vp8_quantize_mby_16x16(x); if (x->optimize) @@ -196,9 +186,7 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd, BLOCK *be = &x->block[ib]; const int iblock[4] = {0, 1, 4, 5}; int i; -#if CONFIG_HYBRIDTRANSFORM8X8 TX_TYPE tx_type; -#endif #if CONFIG_COMP_INTRA_PRED if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) { @@ -217,7 +205,6 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd, // generate residual blocks vp8_subtract_4b_c(be, b, 16); -#if CONFIG_HYBRIDTRANSFORM8X8 tx_type = get_tx_type(xd, xd->block + idx); if (tx_type != DCT_DCT) { vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32, @@ -226,13 +213,10 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd, vp8_ihtllm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32, tx_type, 8); } else { -#endif x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32); x->quantize_b_8x8(x->block + idx, xd->block + idx); vp8_idct_idct8(xd->block[idx].dqcoeff, xd->block[ib].diff, 32); -#if CONFIG_HYBRIDTRANSFORM8X8 } -#endif } else { for (i = 0; i < 4; i++) { b = &xd->block[ib + iblock[i]]; diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c index dc54d05a2d..0272a5cb6f 100644 --- a/vp8/encoder/encodemb.c +++ b/vp8/encoder/encodemb.c @@ -304,7 +304,6 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type, scan = vp8_default_zig_zag1d; bands = vp8_coef_bands; default_eob = 16; -#if CONFIG_HYBRIDTRANSFORM // TODO: this isn't called (for intra4x4 modes), but will be left in // since it could be used later { @@ -327,7 +326,6 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type, scan = vp8_default_zig_zag1d; } } -#endif break; case TX_8X8: scan = vp8_default_zig_zag1d_8x8; diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c index 14e9e784a5..5e6bfaec6c 100644 --- a/vp8/encoder/onyx_if.c +++ b/vp8/encoder/onyx_if.c @@ 
-3758,18 +3758,12 @@ static void encode_frame_to_data_rate update_reference_frames(cm); vp8_copy(cpi->common.fc.coef_counts, cpi->coef_counts); -#if CONFIG_HYBRIDTRANSFORM vp8_copy(cpi->common.fc.hybrid_coef_counts, cpi->hybrid_coef_counts); -#endif vp8_copy(cpi->common.fc.coef_counts_8x8, cpi->coef_counts_8x8); -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_copy(cpi->common.fc.hybrid_coef_counts_8x8, cpi->hybrid_coef_counts_8x8); -#endif vp8_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16); -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_copy(cpi->common.fc.hybrid_coef_counts_16x16, cpi->hybrid_coef_counts_16x16); -#endif vp8_adapt_coef_probs(&cpi->common); if (cpi->common.frame_type != KEY_FRAME) { vp8_copy(cpi->common.fc.ymode_counts, cpi->ymode_count); diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h index 01151280c1..402218b36f 100644 --- a/vp8/encoder/onyx_int.h +++ b/vp8/encoder/onyx_int.h @@ -97,24 +97,18 @@ typedef struct { vp8_prob coef_probs[BLOCK_TYPES] [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM vp8_prob hybrid_coef_probs[BLOCK_TYPES] [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]; -#endif vp8_prob coef_probs_8x8[BLOCK_TYPES_8X8] [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_prob hybrid_coef_probs_8x8[BLOCK_TYPES_8X8] [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]; -#endif vp8_prob coef_probs_16x16[BLOCK_TYPES_16X16] [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]; -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_prob hybrid_coef_probs_16x16[BLOCK_TYPES_16X16] [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]; -#endif vp8_prob ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */ vp8_prob uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1]; @@ -573,29 +567,23 @@ typedef struct VP8_COMP { unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */ vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#if CONFIG_HYBRIDTRANSFORM unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */ vp8_prob frame_hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; unsigned int frame_hybrid_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#endif unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */ vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; unsigned int frame_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#if CONFIG_HYBRIDTRANSFORM8X8 unsigned int hybrid_coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */ vp8_prob frame_hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; unsigned int frame_hybrid_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#endif unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */ vp8_prob frame_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; unsigned int frame_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#if CONFIG_HYBRIDTRANSFORM16X16 
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */ vp8_prob frame_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; unsigned int frame_hybrid_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#endif int gfu_boost; int last_boost; diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c index b6a1f27f85..16b4e6e1de 100644 --- a/vp8/encoder/quantize.c +++ b/vp8/encoder/quantize.c @@ -21,7 +21,6 @@ extern int enc_debug; #endif -#if CONFIG_HYBRIDTRANSFORM void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) { int i, rc, eob; int zbin; @@ -85,7 +84,6 @@ void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) { d->eob = eob + 1; } -#endif void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) { int i, rc, eob; diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h index 1375ed0b0e..e39433fc3f 100644 --- a/vp8/encoder/quantize.h +++ b/vp8/encoder/quantize.h @@ -30,11 +30,9 @@ #include "arm/quantize_arm.h" #endif -#if CONFIG_HYBRIDTRANSFORM #define prototype_quantize_block_type(sym) \ void (sym)(BLOCK *b, BLOCKD *d, TX_TYPE type) extern prototype_quantize_block_type(vp8_ht_quantize_b_4x4); -#endif #ifndef vp8_quantize_quantb_4x4 #define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4 diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c index 570bedfe94..e0e2a4e5b8 100644 --- a/vp8/encoder/ratectrl.c +++ b/vp8/encoder/ratectrl.c @@ -178,17 +178,11 @@ void vp8_save_coding_context(VP8_COMP *cpi) { vp8_copy(cc->last_mode_lf_deltas, xd->last_mode_lf_deltas); vp8_copy(cc->coef_probs, cm->fc.coef_probs); -#if CONFIG_HYBRIDTRANSFORM vp8_copy(cc->hybrid_coef_probs, cm->fc.hybrid_coef_probs); -#endif vp8_copy(cc->coef_probs_8x8, cm->fc.coef_probs_8x8); -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8); -#endif vp8_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16); -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16); -#endif #if CONFIG_SWITCHABLE_INTERP vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob); #endif @@ -249,17 +243,11 @@ void vp8_restore_coding_context(VP8_COMP *cpi) { vp8_copy(xd->last_mode_lf_deltas, cc->last_mode_lf_deltas); vp8_copy(cm->fc.coef_probs, cc->coef_probs); -#if CONFIG_HYBRIDTRANSFORM vp8_copy(cm->fc.hybrid_coef_probs, cc->hybrid_coef_probs); -#endif vp8_copy(cm->fc.coef_probs_8x8, cc->coef_probs_8x8); -#if CONFIG_HYBRIDTRANSFORM8X8 vp8_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8); -#endif vp8_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16); -#if CONFIG_HYBRIDTRANSFORM16X16 vp8_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16); -#endif #if CONFIG_SWITCHABLE_INTERP vp8_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob); #endif diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c index b080026929..b6d71867e6 100644 --- a/vp8/encoder/rdopt.c +++ b/vp8/encoder/rdopt.c @@ -355,37 +355,31 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) { cpi->mb.token_costs[TX_4X4], (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs, BLOCK_TYPES); -#if CONFIG_HYBRIDTRANSFORM fill_token_costs( cpi->mb.hybrid_token_costs[TX_4X4], (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.hybrid_coef_probs, BLOCK_TYPES); -#endif fill_token_costs( cpi->mb.token_costs[TX_8X8], (const vp8_prob( 
*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_8x8,
     BLOCK_TYPES_8X8);
-#if CONFIG_HYBRIDTRANSFORM8X8
   fill_token_costs(
     cpi->mb.hybrid_token_costs[TX_8X8],
     (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.hybrid_coef_probs_8x8,
     BLOCK_TYPES_8X8);
-#endif
 
   fill_token_costs(
     cpi->mb.token_costs[TX_16X16],
     (const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_16x16,
     BLOCK_TYPES_16X16);
-#if CONFIG_HYBRIDTRANSFORM16X16
   fill_token_costs(
     cpi->mb.hybrid_token_costs[TX_16X16],
     (const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.hybrid_coef_probs_16x16,
     BLOCK_TYPES_16X16);
-#endif
 
   /*rough estimate for costing*/
   cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
@@ -607,9 +601,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
   short *qcoeff_ptr = b->qcoeff;
   MACROBLOCKD *xd = &mb->e_mbd;
   MB_MODE_INFO *mbmi = &mb->e_mbd.mode_info_context->mbmi;
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
   TX_TYPE tx_type = DCT_DCT;
-#endif
   int segment_id = mbmi->segment_id;
 
   switch (tx_size) {
@@ -617,7 +609,6 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
       scan = vp8_default_zig_zag1d;
       band = vp8_coef_bands;
       default_eob = 16;
-#if CONFIG_HYBRIDTRANSFORM
       if (type == PLANE_TYPE_Y_WITH_DC) {
         tx_type = get_tx_type_4x4(xd, b);
         if (tx_type != DCT_DCT) {
@@ -636,14 +627,12 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
           }
         }
       }
-#endif
       break;
 
     case TX_8X8:
       scan = vp8_default_zig_zag1d_8x8;
       band = vp8_coef_bands_8x8;
       default_eob = 64;
-#if CONFIG_HYBRIDTRANSFORM8X8
       if (type == PLANE_TYPE_Y_WITH_DC) {
         BLOCKD *bb;
         int ib = (b - xd->block);
@@ -653,17 +642,14 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
           tx_type = get_tx_type_8x8(xd, bb);
         }
       }
-#endif
       break;
     case TX_16X16:
       scan = vp8_default_zig_zag1d_16x16;
       band = vp8_coef_bands_16x16;
       default_eob = 256;
-#if CONFIG_HYBRIDTRANSFORM16X16
       if (type == PLANE_TYPE_Y_WITH_DC) {
         tx_type = get_tx_type_16x16(xd, b);
       }
-#endif
       break;
     default:
       break;
@@ -675,7 +661,6 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
 
   VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
 
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
   if (tx_type != DCT_DCT) {
     for (; c < eob; c++) {
       int v = qcoeff_ptr[scan[c]];
@@ -687,9 +672,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
     if (c < seg_eob)
       cost += mb->hybrid_token_costs[tx_size][type][band[c]]
               [pt][DCT_EOB_TOKEN];
-  } else
-#endif
-  {
+  } else {
     for (; c < eob; c++) {
       int v = qcoeff_ptr[scan[c]];
       int t = vp8_dct_value_tokens_ptr[v].Token;
@@ -870,9 +853,7 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
   MACROBLOCKD *xd = &mb->e_mbd;
   BLOCKD *b = &mb->e_mbd.block[0];
   BLOCK *be = &mb->block[0];
-#if CONFIG_HYBRIDTRANSFORM16X16
   TX_TYPE tx_type;
-#endif
 
   ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
     mb->src_diff,
@@ -880,24 +861,18 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
     mb->e_mbd.predictor,
     mb->block[0].src_stride);
 
-#if CONFIG_HYBRIDTRANSFORM16X16
   tx_type = get_tx_type_16x16(xd, b);
   if (tx_type != DCT_DCT) {
     vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 16);
   } else
     vp8_transform_mby_16x16(mb);
-#else
-  vp8_transform_mby_16x16(mb);
-#endif
 
   vp8_quantize_mby_16x16(mb);
 
-#if CONFIG_HYBRIDTRANSFORM16X16
   // TODO(jingning) is it possible to quickly determine whether to force
   // trailing coefficients to be zero, instead of running trellis
   // optimization in the rate-distortion optimization loop?
   if (mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED)
     vp8_optimize_mby_16x16(mb, rtcd);
-#endif
 
   d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
@@ -1155,10 +1130,8 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
   ENTROPY_CONTEXT ta = *a, tempa = *a;
   ENTROPY_CONTEXT tl = *l, templ = *l;
-#if CONFIG_HYBRIDTRANSFORM
   TX_TYPE tx_type = DCT_DCT;
   TX_TYPE best_tx_type = DCT_DCT;
-#endif
 
   /*
    * The predictor buffer is a 2d buffer with a stride of 16. Create
   * a temp buffer that meets the stride requirements, but we are only
@@ -1191,7 +1164,6 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
       ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
 
       b->bmi.as_mode.first = mode;
-#if CONFIG_HYBRIDTRANSFORM
       tx_type = get_tx_type_4x4(xd, b);
       if (tx_type != DCT_DCT) {
         vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4);
@@ -1200,10 +1172,6 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
         x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
         x->quantize_b_4x4(be, b);
       }
-#else
-      x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-      x->quantize_b_4x4(be, b);
-#endif
 
       tempa = ta;
       templ = tl;
@@ -1221,9 +1189,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
         *bestdistortion = distortion;
         best_rd = this_rd;
         *best_mode = mode;
-#if CONFIG_HYBRIDTRANSFORM
         best_tx_type = tx_type;
-#endif
 
 #if CONFIG_COMP_INTRA_PRED
         *best_second_mode = mode2;
@@ -1242,17 +1208,12 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
   b->bmi.as_mode.second = (B_PREDICTION_MODE)(*best_second_mode);
 #endif
 
-#if CONFIG_HYBRIDTRANSFORM
   // inverse transform
   if (best_tx_type != DCT_DCT)
     vp8_ihtllm_c(best_dqcoeff, b->diff, 32, best_tx_type, 4);
   else
     IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(
       best_dqcoeff, b->diff, 32);
-#else
-  IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(
-    best_dqcoeff, b->diff, 32);
-#endif
 
   vp8_recon_b(best_predictor, b->diff,
               *(b->base_dst) + b->dst, b->dst_stride);
@@ -1535,15 +1496,11 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
     vp8_subtract_4b_c(be, b, 16);
 
     if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
-#if CONFIG_HYBRIDTRANSFORM8X8
       TX_TYPE tx_type = get_tx_type_8x8(xd, b);
       if (tx_type != DCT_DCT)
        vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32, tx_type, 8);
      else
        x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
-#else
-      x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
-#endif
      x->quantize_b_8x8(x->block + idx, xd->block + idx);
 
       // compute quantization mse of 8x8 block
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index d46637a3ee..575c7acf06 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -26,38 +26,26 @@
 #ifdef ENTROPY_STATS
 INT64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM
 INT64 hybrid_context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
 INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM8X8
 INT64 hybrid_context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
 INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM16X16
 INT64 hybrid_context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
 extern unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS]
                     [PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
 extern unsigned int hybrid_tree_update_hist[BLOCK_TYPES][COEF_BANDS]
                     [PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-#endif
 extern unsigned int tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
                     [PREV_COEF_CONTEXTS][ENTROPY_NODES]
                     [2];
-#if CONFIG_HYBRIDTRANSFORM8X8
 extern unsigned int hybrid_tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
                     [PREV_COEF_CONTEXTS][ENTROPY_NODES]
                     [2];
-#endif
 extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
                     [PREV_COEF_CONTEXTS][ENTROPY_NODES]
                     [2];
-#if CONFIG_HYBRIDTRANSFORM16X16
 extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
                     [PREV_COEF_CONTEXTS][ENTROPY_NODES]
                     [2];
-#endif
 #endif  /* ENTROPY_STATS */
 
 void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
@@ -134,9 +122,7 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
   const int eob = b->eob;     /* one beyond last nonzero coeff */
   TOKENEXTRA *t = *tp;        /* store tokens starting here */
   const short *qcoeff_ptr = b->qcoeff;
-#if CONFIG_HYBRIDTRANSFORM16X16
   TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
   int seg_eob = 256;
   int segment_id = xd->mode_info_context->mbmi.segment_id;
 
@@ -162,22 +148,18 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
     }
 
     t->Token = x;
-#if CONFIG_HYBRIDTRANSFORM16X16
     if (tx_type != DCT_DCT)
       t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
     else
-#endif
       t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
 
     t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
                                    (band > 1 && type == PLANE_TYPE_Y_NO_DC));
     assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
     if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM16X16
       if (tx_type != DCT_DCT)
        ++cpi->hybrid_coef_counts_16x16[type][band][pt][x];
      else
-#endif
        ++cpi->coef_counts_16x16[type][band][pt][x];
     }
     pt = vp8_prev_token_class[x];
@@ -310,9 +292,7 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
   int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0; /* start at DC unless type 0 */
   TOKENEXTRA *t = *tp;        /* store tokens starting here */
   const short *qcoeff_ptr = b->qcoeff;
-#if CONFIG_HYBRIDTRANSFORM8X8
   TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
   const int eob = b->eob;
   int seg_eob = 64;
   int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -338,11 +318,9 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
       x = DCT_EOB_TOKEN;
 
     t->Token = x;
-#if CONFIG_HYBRIDTRANSFORM8X8
     if (tx_type != DCT_DCT)
       t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
     else
-#endif
       t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
 
     t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
@@ -350,11 +328,9 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
     assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
 
     if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
       if (tx_type != DCT_DCT)
        ++cpi->hybrid_coef_counts_8x8[type][band][pt][x];
      else
-#endif
        ++cpi->coef_counts_8x8[type][band][pt][x];
     }
     pt = vp8_prev_token_class[x];
@@ -451,7 +427,6 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
   const int16_t *qcoeff_ptr = b->qcoeff;
   int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
-#if CONFIG_HYBRIDTRANSFORM
   TX_TYPE tx_type = get_tx_type(xd, &xd->block[block]);
 
   switch (tx_type) {
     case ADST_DCT:
@@ -464,7 +439,6 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
       pt_scan = vp8_default_zig_zag1d;
       break;
   }
-#endif
   a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
   l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
   VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
@@ -485,22 +459,18 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
       token = DCT_EOB_TOKEN;
 
     t->Token = token;
-#if CONFIG_HYBRIDTRANSFORM
     if (tx_type != DCT_DCT)
       t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
     else
-#endif
       t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
 
     t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
                                    (band > 1 && type == PLANE_TYPE_Y_NO_DC));
     assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
     if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM
       if (tx_type != DCT_DCT)
        ++cpi->hybrid_coef_counts[type][band][pt][token];
      else
-#endif
        ++cpi->coef_counts[type][band][pt][token];
     }
     pt = vp8_prev_token_class[token];
@@ -995,30 +965,24 @@ static __inline void stuff1st_order_b_8x8(MACROBLOCKD *xd,
                                           int dry_run) {
   int pt; /* near block/prev token context index */
   TOKENEXTRA *t = *tp;        /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM8X8
   TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
   const int band = vp8_coef_bands_8x8[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
 
   VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
   (void) b;
   t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM8X8
   if (tx_type != DCT_DCT)
     t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
   else
-#endif
     t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
   // t->section = 8;
   t->skip_eob_node = 0;
   ++t;
   *tp = t;
   if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
     if (tx_type == DCT_DCT)
       ++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
     else
-#endif
       ++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
   }
   pt = 0; /* 0 <-> all coeff data is zero */
@@ -1100,29 +1064,23 @@ static __inline void stuff1st_order_b_16x16(MACROBLOCKD *xd,
                                             int dry_run) {
   int pt; /* near block/prev token context index */
   TOKENEXTRA *t = *tp;        /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM16X16
   TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
   const int band = vp8_coef_bands_16x16[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
 
   VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
   (void) b;
   t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM16X16
   if (tx_type != DCT_DCT)
     t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
   else
-#endif
     t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
   t->skip_eob_node = 0;
   ++t;
   *tp = t;
   if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM16X16
     if (tx_type != DCT_DCT)
       ++cpi->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
     else
-#endif
      ++cpi->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
   }
   pt = 0; /* 0 <-> all coeff data is zero */
@@ -1189,28 +1147,22 @@ static __inline void stuff1st_order_b_4x4(MACROBLOCKD *xd,
                                           int dry_run) {
   int pt; /* near block/prev token context index */
   TOKENEXTRA *t = *tp;        /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM
   TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
   const int band = vp8_coef_bands[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
 
   VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
   t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM
   if (tx_type != DCT_DCT)
     t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
   else
-#endif
     t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
   t->skip_eob_node = 0;
   ++t;
   *tp = t;
   if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM
     if (tx_type != DCT_DCT)
       ++cpi->hybrid_coef_counts[type][band][pt][DCT_EOB_TOKEN];
     else
-#endif
       ++cpi->coef_counts[type][band][pt][DCT_EOB_TOKEN];
   }
   pt = 0; /* 0 <-> all coeff data is zero */
-- 
GitLab
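
As an illustration of the pattern this merge makes unconditional in cost_coeffs() and the tokenize/stuff helpers: every coefficient cost or count is now read from one of two parallel tables, selected by whether the block's transform type is the plain DCT (DCT_DCT) or a hybrid ADST-based type. The sketch below is not part of the patch and not libvpx API; all toy_* names, table sizes, and values are invented for the example, and only the DCT_DCT/ADST_DCT identifiers are borrowed from the codec. It is a minimal, standalone C program, assuming a toy token mapping.

/* toy_cost.c -- illustrative sketch of "pick the cost table by tx_type".
 * Build with: cc toy_cost.c -o toy_cost
 */
#include <stdio.h>

/* Trimmed stand-in for the codec's transform-type enum. */
typedef enum { DCT_DCT = 0, ADST_DCT = 1 } TX_TYPE;

#define NUM_TOKENS 4

/* Two parallel cost tables, indexed by token: one nominally trained for
 * DCT blocks, one for hybrid-transform blocks (values are arbitrary). */
static const int toy_token_costs[NUM_TOKENS]        = { 1, 3, 5, 9 };
static const int toy_hybrid_token_costs[NUM_TOKENS] = { 1, 2, 6, 8 };

/* Map a coefficient value to a toy token index. */
static int toy_token(int v) {
  if (v < 0) v = -v;
  if (v == 0) return 0;
  if (v == 1) return 1;
  if (v < 4)  return 2;
  return 3;
}

/* Accumulate a rate estimate over the first eob coefficients, choosing the
 * table by tx_type, in the same spirit as cost_coeffs() after the merge. */
static int toy_cost_coeffs(const int *qcoeff, int eob, TX_TYPE tx_type) {
  const int *costs = (tx_type != DCT_DCT) ? toy_hybrid_token_costs
                                          : toy_token_costs;
  int c, cost = 0;
  for (c = 0; c < eob; ++c)
    cost += costs[toy_token(qcoeff[c])];
  return cost;
}

int main(void) {
  const int qcoeff[4] = { 5, -2, 1, 1 };
  printf("plain DCT rate estimate:   %d\n", toy_cost_coeffs(qcoeff, 4, DCT_DCT));
  printf("hybrid (ADST) estimate:    %d\n", toy_cost_coeffs(qcoeff, 4, ADST_DCT));
  return 0;
}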