diff --git a/vp8/common/blockd.c b/vp8/common/blockd.c index 2793995a029318f3d963b2c11545adbc3f17f1ec..140500ec4799a76cd0f057ad8b313186bbfca561 100644 --- a/vp8/common/blockd.c +++ b/vp8/common/blockd.c @@ -13,17 +13,17 @@ #include "vpx_mem/vpx_mem.h" -const unsigned char vp8_block2left[25] = { +const unsigned char vp9_block2left[25] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 }; -const unsigned char vp8_block2above[25] = { +const unsigned char vp9_block2above[25] = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8 }; -const unsigned char vp8_block2left_8x8[25] = { +const unsigned char vp9_block2left_8x8[25] = { 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8 }; -const unsigned char vp8_block2above_8x8[25] = { +const unsigned char vp9_block2above_8x8[25] = { 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8 }; diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h index d5d2e903e4e530c2051f19b972d32a645593626e..90b78502b401225059c5ffa8ba3b24d495c57828 100644 --- a/vp8/common/blockd.h +++ b/vp8/common/blockd.h @@ -67,10 +67,10 @@ typedef struct { ENTROPY_CONTEXT y2; } ENTROPY_CONTEXT_PLANES; -extern const unsigned char vp8_block2left[25]; -extern const unsigned char vp8_block2above[25]; -extern const unsigned char vp8_block2left_8x8[25]; -extern const unsigned char vp8_block2above_8x8[25]; +extern const unsigned char vp9_block2left[25]; +extern const unsigned char vp9_block2above[25]; +extern const unsigned char vp9_block2left_8x8[25]; +extern const unsigned char vp9_block2above_8x8[25]; #define VP8_COMBINEENTROPYCONTEXTS( Dest, A, B) \ Dest = ((A)!=0) + ((B)!=0); diff --git a/vp8/common/context.c b/vp8/common/context.c index 27ad6a42d9fb5af8df59a52499f90e87d7a9ec25..7006bc754122b7112d9e7188d09c1ea236132b0b 100644 --- a/vp8/common/context.c +++ b/vp8/common/context.c @@ -224,7 +224,7 @@ const int default_contexts[vp8_coef_counter_dimen] = { }; // Update probabilities for the nodes in the token entropy tree. 
-const vp8_prob tree_update_probs[vp8_coef_tree_dimen] = { +const vp8_prob tree_update_probs[vp9_coef_tree_dimen] = { { { {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, diff --git a/vp8/common/entropy.c b/vp8/common/entropy.c index 09760f9e5df5fecf4e7f5e4e9baef6dc80f7a985..34c3c2fdd9bb24f18618480a21e6c6f75606c912 100644 --- a/vp8/common/entropy.c +++ b/vp8/common/entropy.c @@ -28,9 +28,9 @@ typedef vp8_prob Prob; #include "coefupdateprobs.h" -const int vp8_i8x8_block[4] = {0, 2, 8, 10}; +const int vp9_i8x8_block[4] = {0, 2, 8, 10}; -DECLARE_ALIGNED(16, const unsigned char, vp8_norm[256]) = { +DECLARE_ALIGNED(16, const unsigned char, vp9_norm[256]) = { 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, @@ -49,28 +49,28 @@ DECLARE_ALIGNED(16, const unsigned char, vp8_norm[256]) = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; -DECLARE_ALIGNED(16, const int, vp8_coef_bands[16]) = { +DECLARE_ALIGNED(16, const int, vp9_coef_bands[16]) = { 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7 }; -DECLARE_ALIGNED(16, cuchar, vp8_prev_token_class[MAX_ENTROPY_TOKENS]) = { +DECLARE_ALIGNED(16, cuchar, vp9_prev_token_class[MAX_ENTROPY_TOKENS]) = { 0, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 0 }; -DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) = { +DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d[16]) = { 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15, }; -DECLARE_ALIGNED(16, const int, vp8_col_scan[16]) = { +DECLARE_ALIGNED(16, const int, vp9_col_scan[16]) = { 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 }; -DECLARE_ALIGNED(16, const int, vp8_row_scan[16]) = { +DECLARE_ALIGNED(16, const int, vp9_row_scan[16]) = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, @@ -78,7 +78,7 @@ DECLARE_ALIGNED(16, const int, vp8_row_scan[16]) = { }; -DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5, +DECLARE_ALIGNED(64, const int, vp9_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6, 6, 6, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, @@ -87,7 +87,7 @@ DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }; -DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]) = { +DECLARE_ALIGNED(64, const int, vp9_default_zig_zag1d_8x8[64]) = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, @@ -95,7 +95,7 @@ DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]) = { }; // Table can be optimized. 
-DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]) = { +DECLARE_ALIGNED(16, const int, vp9_coef_bands_16x16[256]) = { 0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6, 6, 6, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, @@ -113,7 +113,7 @@ DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]) = { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, }; -DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]) = { +DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]) = { 0, 1, 16, 32, 17, 2, 3, 18, 33, 48, 64, 49, 34, 19, 4, 5, 20, 35, 50, 65, 80, 96, 81, 66, 51, 36, 21, 6, 7, 22, 37, 52, 67, 82, 97, 112, 128, 113, 98, 83, 68, 53, 38, 23, 8, 9, 24, 39, @@ -135,7 +135,7 @@ DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]) = { /* Array indices are identical to previously-existing CONTEXT_NODE indices */ -const vp8_tree_index vp8_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */ +const vp8_tree_index vp9_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */ { -DCT_EOB_TOKEN, 2, /* 0 = EOB */ -ZERO_TOKEN, 4, /* 1 = ZERO */ @@ -150,7 +150,7 @@ const vp8_tree_index vp8_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */ -DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */ }; -struct vp8_token_struct vp8_coef_encodings[MAX_ENTROPY_TOKENS]; +struct vp8_token_struct vp9_coef_encodings[MAX_ENTROPY_TOKENS]; /* Trees for extra bits. Probabilities are constant and do not depend on previously encoded bits */ @@ -185,7 +185,7 @@ static void init_bit_trees() { init_bit_tree(cat6, 13); } -vp8_extra_bit_struct vp8_extra_bits[12] = { +vp8_extra_bit_struct vp9_extra_bits[12] = { { 0, 0, 0, 0}, { 0, 0, 0, 1}, { 0, 0, 0, 2}, @@ -222,7 +222,7 @@ void vp9_default_coef_probs(VP8_COMMON *pc) { void vp9_coef_tree_initialize() { init_bit_trees(); - vp9_tokens_from_tree(vp8_coef_encodings, vp8_coef_tree); + vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree); } // #define COEF_COUNT_TESTING @@ -319,7 +319,7 @@ void vp9_adapt_coef_probs(VP8_COMMON *cm) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, cm->fc.coef_counts [i][j][k], 256, 1); for (t = 0; t < ENTROPY_NODES; ++t) { @@ -341,7 +341,7 @@ void vp9_adapt_coef_probs(VP8_COMMON *cm) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, cm->fc.hybrid_coef_counts [i][j][k], 256, 1); for (t = 0; t < ENTROPY_NODES; ++t) { @@ -363,7 +363,7 @@ void vp9_adapt_coef_probs(VP8_COMMON *cm) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, cm->fc.coef_counts_8x8 [i][j][k], 256, 1); for (t = 0; t < ENTROPY_NODES; ++t) { @@ -385,7 +385,7 @@ void vp9_adapt_coef_probs(VP8_COMMON *cm) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, cm->fc.hybrid_coef_counts_8x8 [i][j][k], 256, 1); for (t = 0; t < 
ENTROPY_NODES; ++t) { @@ -408,7 +408,7 @@ void vp9_adapt_coef_probs(VP8_COMMON *cm) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, cm->fc.coef_counts_16x16[i][j][k], 256, 1); for (t = 0; t < ENTROPY_NODES; ++t) { int prob; @@ -430,7 +430,7 @@ void vp9_adapt_coef_probs(VP8_COMMON *cm) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, cm->fc.hybrid_coef_counts_16x16[i][j][k], 256, 1); for (t = 0; t < ENTROPY_NODES; ++t) { int prob; diff --git a/vp8/common/entropy.h b/vp8/common/entropy.h index 30c9ab11cf30c4683b91a7eb54a49c74df5a849a..a96b4044bc08e4d2ae2732ebde92938c70e19fca 100644 --- a/vp8/common/entropy.h +++ b/vp8/common/entropy.h @@ -21,7 +21,7 @@ //#define SUBMVREF_COUNT 5 //#define VP8_NUMMBSPLITS 4 -extern const int vp8_i8x8_block[4]; +extern const int vp9_i8x8_block[4]; /* Coefficient token alphabet */ @@ -41,9 +41,9 @@ extern const int vp8_i8x8_block[4]; #define ENTROPY_NODES 11 #define EOSB_TOKEN 127 /* Not signalled, encoder only */ -extern const vp8_tree_index vp8_coef_tree[]; +extern const vp8_tree_index vp9_coef_tree[]; -extern struct vp8_token_struct vp8_coef_encodings[MAX_ENTROPY_TOKENS]; +extern struct vp8_token_struct vp9_coef_encodings[MAX_ENTROPY_TOKENS]; typedef struct { vp8_tree_p tree; @@ -52,7 +52,7 @@ typedef struct { int base_val; } vp8_extra_bit_struct; -extern vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */ +extern vp8_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */ #define PROB_UPDATE_BASELINE_COST 7 @@ -72,9 +72,9 @@ extern vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */ position within the 4x4 DCT. */ #define COEF_BANDS 8 -extern DECLARE_ALIGNED(16, const int, vp8_coef_bands[16]); -extern DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]); -extern DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]); +extern DECLARE_ALIGNED(16, const int, vp9_coef_bands[16]); +extern DECLARE_ALIGNED(64, const int, vp9_coef_bands_8x8[64]); +extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_16x16[256]); /* Inside dimension is 3-valued measure of nearby complexity, that is, the extent to which nearby coefficients are nonzero. 
For the first @@ -98,20 +98,20 @@ extern DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]); #define SUBEXP_PARAM 4 /* Subexponential code parameter */ #define MODULUS_PARAM 13 /* Modulus parameter */ -extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]); +extern DECLARE_ALIGNED(16, const unsigned char, vp9_prev_token_class[MAX_ENTROPY_TOKENS]); struct VP8Common; void vp9_default_coef_probs(struct VP8Common *); -extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]); +extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d[16]); -extern DECLARE_ALIGNED(16, const int, vp8_col_scan[16]); -extern DECLARE_ALIGNED(16, const int, vp8_row_scan[16]); +extern DECLARE_ALIGNED(16, const int, vp9_col_scan[16]); +extern DECLARE_ALIGNED(16, const int, vp9_row_scan[16]); extern short vp8_default_zig_zag_mask[16]; -extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]); +extern DECLARE_ALIGNED(64, const int, vp9_default_zig_zag1d_8x8[64]); void vp9_coef_tree_initialize(void); -extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]); +extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]); void vp9_adapt_coef_probs(struct VP8Common *); #endif diff --git a/vp8/common/entropymode.c b/vp8/common/entropymode.c index 41982036dff64c2f6ca95f1e23df4e9d5237946b..4578b4a236399cf6bf332bdfdf2a037e50ed5b3f 100644 --- a/vp8/common/entropymode.c +++ b/vp8/common/entropymode.c @@ -14,7 +14,7 @@ #include "vpx_mem/vpx_mem.h" -const unsigned int kf_y_mode_cts[8][VP8_YMODES] = { +static const unsigned int kf_y_mode_cts[8][VP8_YMODES] = { /* DC V H D45 135 117 153 D27 D63 TM i8x8 BPRED */ {12, 6, 5, 5, 5, 5, 5, 5, 5, 2, 22, 200}, {25, 13, 13, 7, 7, 7, 7, 7, 7, 6, 27, 160}, @@ -101,9 +101,9 @@ int vp9_mv_cont(const int_mv *l, const int_mv *a) { return SUBMVREF_NORMAL; } -const vp8_prob vp8_sub_mv_ref_prob [VP8_SUBMVREFS - 1] = { 180, 162, 25}; +const vp8_prob vp9_sub_mv_ref_prob [VP8_SUBMVREFS - 1] = { 180, 162, 25}; -const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1] = { +const vp8_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1] = { { 147, 136, 18 }, { 106, 145, 1 }, { 179, 121, 1 }, @@ -113,7 +113,7 @@ const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1] = { -vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS] = { +vp9_mbsplit vp9_mbsplits [VP8_NUMMBSPLITS] = { { 0, 0, 0, 0, 0, 0, 0, 0, @@ -140,14 +140,14 @@ vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS] = { }, }; -const int vp8_mbsplit_count [VP8_NUMMBSPLITS] = { 2, 2, 4, 16}; +const int vp9_mbsplit_count [VP8_NUMMBSPLITS] = { 2, 2, 4, 16}; -const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS - 1] = { 110, 111, 150}; +const vp8_prob vp9_mbsplit_probs [VP8_NUMMBSPLITS - 1] = { 110, 111, 150}; /* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */ -const vp8_tree_index vp8_bmode_tree[VP8_BINTRAMODES * 2 - 2] = /* INTRAMODECONTEXTNODE value */ +const vp8_tree_index vp9_bmode_tree[VP8_BINTRAMODES * 2 - 2] = /* INTRAMODECONTEXTNODE value */ { -B_DC_PRED, 2, /* 0 = DC_NODE */ -B_TM_PRED, 4, /* 1 = TM_NODE */ @@ -162,7 +162,7 @@ const vp8_tree_index vp8_bmode_tree[VP8_BINTRAMODES * 2 - 2] = /* INTRAMODECONTE /* Again, these trees use the same probability indices as their explicitly-programmed predecessors. 
*/ -const vp8_tree_index vp8_ymode_tree[VP8_YMODES * 2 - 2] = { +const vp8_tree_index vp9_ymode_tree[VP8_YMODES * 2 - 2] = { 2, 14, -DC_PRED, 4, 6, 8, @@ -176,7 +176,7 @@ const vp8_tree_index vp8_ymode_tree[VP8_YMODES * 2 - 2] = { -B_PRED, -I8X8_PRED }; -const vp8_tree_index vp8_kf_ymode_tree[VP8_YMODES * 2 - 2] = { +const vp8_tree_index vp9_kf_ymode_tree[VP8_YMODES * 2 - 2] = { 2, 14, -DC_PRED, 4, 6, 8, @@ -190,7 +190,7 @@ const vp8_tree_index vp8_kf_ymode_tree[VP8_YMODES * 2 - 2] = { -B_PRED, -I8X8_PRED }; -const vp8_tree_index vp8_i8x8_mode_tree[VP8_I8X8_MODES * 2 - 2] = { +const vp8_tree_index vp9_i8x8_mode_tree[VP8_I8X8_MODES * 2 - 2] = { 2, 14, -DC_PRED, 4, 6, 8, @@ -202,7 +202,7 @@ const vp8_tree_index vp8_i8x8_mode_tree[VP8_I8X8_MODES * 2 - 2] = { -H_PRED, -TM_PRED }; -const vp8_tree_index vp8_uv_mode_tree[VP8_UV_MODES * 2 - 2] = { +const vp8_tree_index vp9_uv_mode_tree[VP8_UV_MODES * 2 - 2] = { 2, 14, -DC_PRED, 4, 6, 8, @@ -214,13 +214,13 @@ const vp8_tree_index vp8_uv_mode_tree[VP8_UV_MODES * 2 - 2] = { -H_PRED, -TM_PRED }; -const vp8_tree_index vp8_mbsplit_tree[6] = { +const vp8_tree_index vp9_mbsplit_tree[6] = { -PARTITIONING_4X4, 2, -PARTITIONING_8X8, 4, -PARTITIONING_16X8, -PARTITIONING_8X16, }; -const vp8_tree_index vp8_mv_ref_tree[8] = { +const vp8_tree_index vp9_mv_ref_tree[8] = { -ZEROMV, 2, -NEARESTMV, 4, -NEARMV, 6, @@ -228,53 +228,53 @@ const vp8_tree_index vp8_mv_ref_tree[8] = { }; #if CONFIG_SUPERBLOCKS -const vp8_tree_index vp8_sb_mv_ref_tree[6] = { +const vp8_tree_index vp9_sb_mv_ref_tree[6] = { -ZEROMV, 2, -NEARESTMV, 4, -NEARMV, -NEWMV }; #endif -const vp8_tree_index vp8_sub_mv_ref_tree[6] = { +const vp8_tree_index vp9_sub_mv_ref_tree[6] = { -LEFT4X4, 2, -ABOVE4X4, 4, -ZERO4X4, -NEW4X4 }; -struct vp8_token_struct vp8_bmode_encodings [VP8_BINTRAMODES]; -struct vp8_token_struct vp8_ymode_encodings [VP8_YMODES]; +struct vp8_token_struct vp9_bmode_encodings [VP8_BINTRAMODES]; +struct vp8_token_struct vp9_ymode_encodings [VP8_YMODES]; #if CONFIG_SUPERBLOCKS -struct vp8_token_struct vp8_sb_kf_ymode_encodings [VP8_I32X32_MODES]; +struct vp8_token_struct vp9_sb_kf_ymode_encodings [VP8_I32X32_MODES]; #endif -struct vp8_token_struct vp8_kf_ymode_encodings [VP8_YMODES]; -struct vp8_token_struct vp8_uv_mode_encodings [VP8_UV_MODES]; -struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_I8X8_MODES]; -struct vp8_token_struct vp8_mbsplit_encodings [VP8_NUMMBSPLITS]; +struct vp8_token_struct vp9_kf_ymode_encodings [VP8_YMODES]; +struct vp8_token_struct vp9_uv_mode_encodings [VP8_UV_MODES]; +struct vp8_token_struct vp9_i8x8_mode_encodings [VP8_I8X8_MODES]; +struct vp8_token_struct vp9_mbsplit_encodings [VP8_NUMMBSPLITS]; -struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS]; +struct vp8_token_struct vp9_mv_ref_encoding_array [VP8_MVREFS]; #if CONFIG_SUPERBLOCKS -struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS]; +struct vp8_token_struct vp9_sb_mv_ref_encoding_array [VP8_MVREFS]; #endif -struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS]; +struct vp8_token_struct vp9_sub_mv_ref_encoding_array [VP8_SUBMVREFS]; void vp9_init_mbmode_probs(VP8_COMMON *x) { unsigned int bct [VP8_YMODES] [2]; /* num Ymodes > num UV modes */ - vp9_tree_probs_from_distribution(VP8_YMODES, vp8_ymode_encodings, - vp8_ymode_tree, x->fc.ymode_prob, bct, y_mode_cts, 256, 1); + vp9_tree_probs_from_distribution(VP8_YMODES, vp9_ymode_encodings, + vp9_ymode_tree, x->fc.ymode_prob, bct, y_mode_cts, 256, 1); { int i; for (i = 0; i < 8; i++) { 
vp9_tree_probs_from_distribution( - VP8_YMODES, vp8_kf_ymode_encodings, vp8_kf_ymode_tree, + VP8_YMODES, vp9_kf_ymode_encodings, vp9_kf_ymode_tree, x->kf_ymode_prob[i], bct, kf_y_mode_cts[i], 256, 1); #if CONFIG_SUPERBLOCKS vp9_tree_probs_from_distribution( - VP8_I32X32_MODES, vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree, + VP8_I32X32_MODES, vp9_sb_kf_ymode_encodings, vp8_sb_ymode_tree, x->sb_kf_ymode_prob[i], bct, kf_y_mode_cts[i], 256, 1); #endif @@ -284,25 +284,25 @@ void vp9_init_mbmode_probs(VP8_COMMON *x) { int i; for (i = 0; i < VP8_YMODES; i++) { vp9_tree_probs_from_distribution( - VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree, + VP8_UV_MODES, vp9_uv_mode_encodings, vp9_uv_mode_tree, x->kf_uv_mode_prob[i], bct, kf_uv_mode_cts[i], 256, 1); vp9_tree_probs_from_distribution( - VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree, + VP8_UV_MODES, vp9_uv_mode_encodings, vp9_uv_mode_tree, x->fc.uv_mode_prob[i], bct, uv_mode_cts[i], 256, 1); } } vp9_tree_probs_from_distribution( - VP8_I8X8_MODES, vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree, + VP8_I8X8_MODES, vp9_i8x8_mode_encodings, vp9_i8x8_mode_tree, x->fc.i8x8_mode_prob, bct, i8x8_mode_cts, 256, 1); - vpx_memcpy(x->fc.sub_mv_ref_prob, vp8_sub_mv_ref_prob2, sizeof(vp8_sub_mv_ref_prob2)); - vpx_memcpy(x->fc.mbsplit_prob, vp8_mbsplit_probs, sizeof(vp8_mbsplit_probs)); - vpx_memcpy(x->fc.switchable_interp_prob, vp8_switchable_interp_prob, - sizeof(vp8_switchable_interp_prob)); + vpx_memcpy(x->fc.sub_mv_ref_prob, vp9_sub_mv_ref_prob2, sizeof(vp9_sub_mv_ref_prob2)); + vpx_memcpy(x->fc.mbsplit_prob, vp9_mbsplit_probs, sizeof(vp9_mbsplit_probs)); + vpx_memcpy(x->fc.switchable_interp_prob, vp9_switchable_interp_prob, + sizeof(vp9_switchable_interp_prob)); } @@ -310,8 +310,8 @@ static void intra_bmode_probs_from_distribution( vp8_prob p [VP8_BINTRAMODES - 1], unsigned int branch_ct [VP8_BINTRAMODES - 1] [2], const unsigned int events [VP8_BINTRAMODES]) { - vp9_tree_probs_from_distribution(VP8_BINTRAMODES, vp8_bmode_encodings, - vp8_bmode_tree, p, branch_ct, events, 256, 1); + vp9_tree_probs_from_distribution(VP8_BINTRAMODES, vp9_bmode_encodings, + vp9_bmode_tree, p, branch_ct, events, 256, 1); } void vp9_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES - 1]) { @@ -329,62 +329,62 @@ void vp9_kf_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES] [VP8_BINTRAMODES] [ do { intra_bmode_probs_from_distribution( - p[i][j], branch_ct, vp8_kf_default_bmode_counts[i][j]); + p[i][j], branch_ct, vp9_kf_default_bmode_counts[i][j]); } while (++j < VP8_BINTRAMODES); } while (++i < VP8_BINTRAMODES); } #if VP8_SWITCHABLE_FILTERS == 3 -const vp8_tree_index vp8_switchable_interp_tree[VP8_SWITCHABLE_FILTERS*2-2] = { +const vp8_tree_index vp9_switchable_interp_tree[VP8_SWITCHABLE_FILTERS*2-2] = { -0, 2, -1, -2 }; -struct vp8_token_struct vp8_switchable_interp_encodings[VP8_SWITCHABLE_FILTERS]; -const INTERPOLATIONFILTERTYPE vp8_switchable_interp[VP8_SWITCHABLE_FILTERS] = { +struct vp8_token_struct vp9_switchable_interp_encodings[VP8_SWITCHABLE_FILTERS]; +const INTERPOLATIONFILTERTYPE vp9_switchable_interp[VP8_SWITCHABLE_FILTERS] = { EIGHTTAP, SIXTAP, EIGHTTAP_SHARP}; -const int vp8_switchable_interp_map[SWITCHABLE+1] = {1, -1, 0, 2, -1}; -const vp8_prob vp8_switchable_interp_prob [VP8_SWITCHABLE_FILTERS+1] +const int vp9_switchable_interp_map[SWITCHABLE+1] = {1, -1, 0, 2, -1}; +const vp8_prob vp9_switchable_interp_prob [VP8_SWITCHABLE_FILTERS+1] [VP8_SWITCHABLE_FILTERS-1] = { {248, 192}, { 32, 248}, { 32, 32}, {192, 160} }; #elif VP8_SWITCHABLE_FILTERS == 2 
-const vp8_tree_index vp8_switchable_interp_tree[VP8_SWITCHABLE_FILTERS*2-2] = { +const vp8_tree_index vp9_switchable_interp_tree[VP8_SWITCHABLE_FILTERS*2-2] = { -0, -1, }; -struct vp8_token_struct vp8_switchable_interp_encodings[VP8_SWITCHABLE_FILTERS]; -const vp8_prob vp8_switchable_interp_prob [VP8_SWITCHABLE_FILTERS+1] +struct vp8_token_struct vp9_switchable_interp_encodings[VP8_SWITCHABLE_FILTERS]; +const vp8_prob vp9_switchable_interp_prob [VP8_SWITCHABLE_FILTERS+1] [VP8_SWITCHABLE_FILTERS-1] = { {248}, { 64}, {192}, }; -const INTERPOLATIONFILTERTYPE vp8_switchable_interp[VP8_SWITCHABLE_FILTERS] = { +const INTERPOLATIONFILTERTYPE vp9_switchable_interp[VP8_SWITCHABLE_FILTERS] = { EIGHTTAP, EIGHTTAP_SHARP}; -const int vp8_switchable_interp_map[SWITCHABLE+1] = {-1, -1, 0, 1, -1}; //8, 8s +const int vp9_switchable_interp_map[SWITCHABLE+1] = {-1, -1, 0, 1, -1}; //8, 8s #endif void vp9_entropy_mode_init() { - vp9_tokens_from_tree(vp8_bmode_encodings, vp8_bmode_tree); - vp9_tokens_from_tree(vp8_ymode_encodings, vp8_ymode_tree); - vp9_tokens_from_tree(vp8_kf_ymode_encodings, vp8_kf_ymode_tree); + vp9_tokens_from_tree(vp9_bmode_encodings, vp9_bmode_tree); + vp9_tokens_from_tree(vp9_ymode_encodings, vp9_ymode_tree); + vp9_tokens_from_tree(vp9_kf_ymode_encodings, vp9_kf_ymode_tree); #if CONFIG_SUPERBLOCKS - vp9_tokens_from_tree(vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree); + vp9_tokens_from_tree(vp9_sb_kf_ymode_encodings, vp8_sb_ymode_tree); #endif - vp9_tokens_from_tree(vp8_uv_mode_encodings, vp8_uv_mode_tree); - vp9_tokens_from_tree(vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree); - vp9_tokens_from_tree(vp8_mbsplit_encodings, vp8_mbsplit_tree); - vp9_tokens_from_tree(vp8_switchable_interp_encodings, - vp8_switchable_interp_tree); - - vp9_tokens_from_tree_offset(vp8_mv_ref_encoding_array, - vp8_mv_ref_tree, NEARESTMV); + vp9_tokens_from_tree(vp9_uv_mode_encodings, vp9_uv_mode_tree); + vp9_tokens_from_tree(vp9_i8x8_mode_encodings, vp9_i8x8_mode_tree); + vp9_tokens_from_tree(vp9_mbsplit_encodings, vp9_mbsplit_tree); + vp9_tokens_from_tree(vp9_switchable_interp_encodings, + vp9_switchable_interp_tree); + + vp9_tokens_from_tree_offset(vp9_mv_ref_encoding_array, + vp9_mv_ref_tree, NEARESTMV); #if CONFIG_SUPERBLOCKS - vp9_tokens_from_tree_offset(vp8_sb_mv_ref_encoding_array, - vp8_sb_mv_ref_tree, NEARESTMV); + vp9_tokens_from_tree_offset(vp9_sb_mv_ref_encoding_array, + vp9_sb_mv_ref_tree, NEARESTMV); #endif - vp9_tokens_from_tree_offset(vp8_sub_mv_ref_encoding_array, - vp8_sub_mv_ref_tree, LEFT4X4); + vp9_tokens_from_tree_offset(vp9_sub_mv_ref_encoding_array, + vp9_sub_mv_ref_tree, LEFT4X4); } void vp9_init_mode_contexts(VP8_COMMON *pc) { @@ -392,10 +392,10 @@ void vp9_init_mode_contexts(VP8_COMMON *pc) { vpx_memset(pc->fc.mv_ref_ct_a, 0, sizeof(pc->fc.mv_ref_ct_a)); vpx_memcpy(pc->fc.mode_context, - default_vp8_mode_contexts, + vp9_default_mode_contexts, sizeof(pc->fc.mode_context)); vpx_memcpy(pc->fc.mode_context_a, - default_vp8_mode_contexts, + vp9_default_mode_contexts_a, sizeof(pc->fc.mode_context_a)); } @@ -533,7 +533,7 @@ void vp9_adapt_mode_probs(VP8_COMMON *cm) { printf("};\n"); #endif vp9_tree_probs_from_distribution( - VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree, + VP8_YMODES, vp9_ymode_encodings, vp9_ymode_tree, ymode_probs, branch_ct, cm->fc.ymode_counts, 256, 1); for (t = 0; t < VP8_YMODES - 1; ++t) { @@ -549,7 +549,7 @@ void vp9_adapt_mode_probs(VP8_COMMON *cm) { } for (i = 0; i < VP8_YMODES; ++i) { vp9_tree_probs_from_distribution( - VP8_UV_MODES, vp8_uv_mode_encodings, 
vp8_uv_mode_tree, + VP8_UV_MODES, vp9_uv_mode_encodings, vp9_uv_mode_tree, uvmode_probs, branch_ct, cm->fc.uv_mode_counts[i], 256, 1); for (t = 0; t < VP8_UV_MODES - 1; ++t) { @@ -565,7 +565,7 @@ void vp9_adapt_mode_probs(VP8_COMMON *cm) { } } vp9_tree_probs_from_distribution( - VP8_BINTRAMODES, vp8_bmode_encodings, vp8_bmode_tree, + VP8_BINTRAMODES, vp9_bmode_encodings, vp9_bmode_tree, bmode_probs, branch_ct, cm->fc.bmode_counts, 256, 1); for (t = 0; t < VP8_BINTRAMODES - 1; ++t) { @@ -580,7 +580,7 @@ void vp9_adapt_mode_probs(VP8_COMMON *cm) { else cm->fc.bmode_prob[t] = prob; } vp9_tree_probs_from_distribution( - VP8_I8X8_MODES, vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree, + VP8_I8X8_MODES, vp9_i8x8_mode_encodings, vp9_i8x8_mode_tree, i8x8_mode_probs, branch_ct, cm->fc.i8x8_mode_counts, 256, 1); for (t = 0; t < VP8_I8X8_MODES - 1; ++t) { @@ -596,7 +596,7 @@ void vp9_adapt_mode_probs(VP8_COMMON *cm) { } for (i = 0; i < SUBMVREF_COUNT; ++i) { vp9_tree_probs_from_distribution( - VP8_SUBMVREFS, vp8_sub_mv_ref_encoding_array, vp8_sub_mv_ref_tree, + VP8_SUBMVREFS, vp9_sub_mv_ref_encoding_array, vp9_sub_mv_ref_tree, sub_mv_ref_probs, branch_ct, cm->fc.sub_mv_ref_counts[i], 256, 1); for (t = 0; t < VP8_SUBMVREFS - 1; ++t) { @@ -612,7 +612,7 @@ void vp9_adapt_mode_probs(VP8_COMMON *cm) { } } vp9_tree_probs_from_distribution( - VP8_NUMMBSPLITS, vp8_mbsplit_encodings, vp8_mbsplit_tree, + VP8_NUMMBSPLITS, vp9_mbsplit_encodings, vp9_mbsplit_tree, mbsplit_probs, branch_ct, cm->fc.mbsplit_counts, 256, 1); for (t = 0; t < VP8_NUMMBSPLITS - 1; ++t) { diff --git a/vp8/common/entropymode.h b/vp8/common/entropymode.h index c66d6caaeab7ea602e28daf0c0dba047943b4034..ff22fa014979eabb0072e4820efa0d109509dad5 100644 --- a/vp8/common/entropymode.h +++ b/vp8/common/entropymode.h @@ -18,48 +18,48 @@ #define SUBMVREF_COUNT 5 #define VP8_NUMMBSPLITS 4 -typedef const int vp8_mbsplit[16]; +typedef const int vp9_mbsplit[16]; -extern vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS]; +extern vp9_mbsplit vp9_mbsplits [VP8_NUMMBSPLITS]; -extern const int vp8_mbsplit_count [VP8_NUMMBSPLITS]; /* # of subsets */ +extern const int vp9_mbsplit_count [VP8_NUMMBSPLITS]; /* # of subsets */ -extern const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS - 1]; +extern const vp8_prob vp9_mbsplit_probs [VP8_NUMMBSPLITS - 1]; extern int vp9_mv_cont(const int_mv *l, const int_mv *a); -extern const vp8_prob vp8_sub_mv_ref_prob [VP8_SUBMVREFS - 1]; -extern const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1]; +extern const vp8_prob vp9_sub_mv_ref_prob [VP8_SUBMVREFS - 1]; +extern const vp8_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1]; -extern const unsigned int vp8_kf_default_bmode_counts [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES]; +extern const unsigned int vp9_kf_default_bmode_counts[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES]; -extern const vp8_tree_index vp8_bmode_tree[]; +extern const vp8_tree_index vp9_bmode_tree[]; -extern const vp8_tree_index vp8_ymode_tree[]; -extern const vp8_tree_index vp8_kf_ymode_tree[]; -extern const vp8_tree_index vp8_uv_mode_tree[]; -#define vp8_sb_ymode_tree vp8_uv_mode_tree -extern const vp8_tree_index vp8_i8x8_mode_tree[]; -extern const vp8_tree_index vp8_mbsplit_tree[]; -extern const vp8_tree_index vp8_mv_ref_tree[]; -extern const vp8_tree_index vp8_sb_mv_ref_tree[]; -extern const vp8_tree_index vp8_sub_mv_ref_tree[]; +extern const vp8_tree_index vp9_ymode_tree[]; +extern const vp8_tree_index vp9_kf_ymode_tree[]; +extern const vp8_tree_index vp9_uv_mode_tree[]; 
+#define vp8_sb_ymode_tree vp9_uv_mode_tree +extern const vp8_tree_index vp9_i8x8_mode_tree[]; +extern const vp8_tree_index vp9_mbsplit_tree[]; +extern const vp8_tree_index vp9_mv_ref_tree[]; +extern const vp8_tree_index vp9_sb_mv_ref_tree[]; +extern const vp8_tree_index vp9_sub_mv_ref_tree[]; -extern struct vp8_token_struct vp8_bmode_encodings [VP8_BINTRAMODES]; -extern struct vp8_token_struct vp8_ymode_encodings [VP8_YMODES]; -extern struct vp8_token_struct vp8_sb_kf_ymode_encodings [VP8_I32X32_MODES]; -extern struct vp8_token_struct vp8_kf_ymode_encodings [VP8_YMODES]; -extern struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_I8X8_MODES]; -extern struct vp8_token_struct vp8_uv_mode_encodings [VP8_UV_MODES]; -extern struct vp8_token_struct vp8_mbsplit_encodings [VP8_NUMMBSPLITS]; +extern struct vp8_token_struct vp9_bmode_encodings [VP8_BINTRAMODES]; +extern struct vp8_token_struct vp9_ymode_encodings [VP8_YMODES]; +extern struct vp8_token_struct vp9_sb_kf_ymode_encodings [VP8_I32X32_MODES]; +extern struct vp8_token_struct vp9_kf_ymode_encodings [VP8_YMODES]; +extern struct vp8_token_struct vp9_i8x8_mode_encodings [VP8_I8X8_MODES]; +extern struct vp8_token_struct vp9_uv_mode_encodings [VP8_UV_MODES]; +extern struct vp8_token_struct vp9_mbsplit_encodings [VP8_NUMMBSPLITS]; /* Inter mode values do not start at zero */ -extern struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS]; -extern struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS]; -extern struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS]; +extern struct vp8_token_struct vp9_mv_ref_encoding_array [VP8_MVREFS]; +extern struct vp8_token_struct vp9_sb_mv_ref_encoding_array [VP8_MVREFS]; +extern struct vp8_token_struct vp9_sub_mv_ref_encoding_array [VP8_SUBMVREFS]; void vp9_entropy_mode_init(void); @@ -77,13 +77,13 @@ void vp9_kf_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES] [VP8_BINTRAMODES void vp9_adapt_mode_probs(struct VP8Common *); #define VP8_SWITCHABLE_FILTERS 2 /* number of switchable filters */ -extern const INTERPOLATIONFILTERTYPE vp8_switchable_interp +extern const INTERPOLATIONFILTERTYPE vp9_switchable_interp [VP8_SWITCHABLE_FILTERS]; -extern const int vp8_switchable_interp_map[SWITCHABLE + 1]; -extern const vp8_tree_index vp8_switchable_interp_tree +extern const int vp9_switchable_interp_map[SWITCHABLE + 1]; +extern const vp8_tree_index vp9_switchable_interp_tree [2*(VP8_SWITCHABLE_FILTERS - 1)]; -extern struct vp8_token_struct vp8_switchable_interp_encodings +extern struct vp8_token_struct vp9_switchable_interp_encodings [VP8_SWITCHABLE_FILTERS]; -extern const vp8_prob vp8_switchable_interp_prob +extern const vp8_prob vp9_switchable_interp_prob [VP8_SWITCHABLE_FILTERS + 1][VP8_SWITCHABLE_FILTERS - 1]; #endif diff --git a/vp8/common/entropymv.c b/vp8/common/entropymv.c index 4af3e59faec2e07b9f7de76d40509fdf7aeb6627..a393e1a994752abf79a6acd0b4a3a5490b212283 100644 --- a/vp8/common/entropymv.c +++ b/vp8/common/entropymv.c @@ -28,14 +28,14 @@ /* Smooth or bias the mv-counts before prob computation */ /* #define SMOOTH_MV_COUNTS */ -const vp8_tree_index vp8_mv_joint_tree[2 * MV_JOINTS - 2] = { +const vp8_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2] = { -MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ }; -struct vp8_token_struct vp8_mv_joint_encodings[MV_JOINTS]; +struct vp8_token_struct vp9_mv_joint_encodings[MV_JOINTS]; -const vp8_tree_index vp8_mv_class_tree[2 * MV_CLASSES - 2] = { +const vp8_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2] = { 
-MV_CLASS_0, 2, -MV_CLASS_1, 4, 6, 8, @@ -44,21 +44,21 @@ const vp8_tree_index vp8_mv_class_tree[2 * MV_CLASSES - 2] = { -MV_CLASS_4, -MV_CLASS_5, -MV_CLASS_6, -MV_CLASS_7, }; -struct vp8_token_struct vp8_mv_class_encodings[MV_CLASSES]; +struct vp8_token_struct vp9_mv_class_encodings[MV_CLASSES]; -const vp8_tree_index vp8_mv_class0_tree [2 * CLASS0_SIZE - 2] = { +const vp8_tree_index vp9_mv_class0_tree [2 * CLASS0_SIZE - 2] = { -0, -1, }; -struct vp8_token_struct vp8_mv_class0_encodings[CLASS0_SIZE]; +struct vp8_token_struct vp9_mv_class0_encodings[CLASS0_SIZE]; -const vp8_tree_index vp8_mv_fp_tree [2 * 4 - 2] = { +const vp8_tree_index vp9_mv_fp_tree [2 * 4 - 2] = { -0, 2, -1, 4, -2, -3 }; -struct vp8_token_struct vp8_mv_fp_encodings[4]; +struct vp8_token_struct vp9_mv_fp_encodings[4]; -const nmv_context vp8_default_nmv_context = { +const nmv_context vp9_default_nmv_context = { {32, 64, 96}, { { /* vert component */ @@ -243,8 +243,8 @@ void vp9_counts_to_nmv_context( counts_to_context(&NMVcount->comps[0], usehp); counts_to_context(&NMVcount->comps[1], usehp); vp9_tree_probs_from_distribution(MV_JOINTS, - vp8_mv_joint_encodings, - vp8_mv_joint_tree, + vp9_mv_joint_encodings, + vp9_mv_joint_tree, prob->joints, branch_ct_joint, NMVcount->joints, @@ -255,15 +255,15 @@ void vp9_counts_to_nmv_context( branch_ct_sign[i][0] = NMVcount->comps[i].sign[0]; branch_ct_sign[i][1] = NMVcount->comps[i].sign[1]; vp9_tree_probs_from_distribution(MV_CLASSES, - vp8_mv_class_encodings, - vp8_mv_class_tree, + vp9_mv_class_encodings, + vp9_mv_class_tree, prob->comps[i].classes, branch_ct_classes[i], NMVcount->comps[i].classes, 256, 1); vp9_tree_probs_from_distribution(CLASS0_SIZE, - vp8_mv_class0_encodings, - vp8_mv_class0_tree, + vp9_mv_class0_encodings, + vp9_mv_class0_tree, prob->comps[i].class0, branch_ct_class0[i], NMVcount->comps[i].class0, @@ -278,16 +278,16 @@ void vp9_counts_to_nmv_context( for (i = 0; i < 2; ++i) { for (k = 0; k < CLASS0_SIZE; ++k) { vp9_tree_probs_from_distribution(4, - vp8_mv_fp_encodings, - vp8_mv_fp_tree, + vp9_mv_fp_encodings, + vp9_mv_fp_tree, prob->comps[i].class0_fp[k], branch_ct_class0_fp[i][k], NMVcount->comps[i].class0_fp[k], 256, 1); } vp9_tree_probs_from_distribution(4, - vp8_mv_fp_encodings, - vp8_mv_fp_tree, + vp9_mv_fp_encodings, + vp9_mv_fp_tree, prob->comps[i].fp, branch_ct_fp[i], NMVcount->comps[i].fp, @@ -454,12 +454,12 @@ void vp9_adapt_nmv_probs(VP8_COMMON *cm, int usehp) { } void vp9_entropy_mv_init() { - vp9_tokens_from_tree(vp8_mv_joint_encodings, vp8_mv_joint_tree); - vp9_tokens_from_tree(vp8_mv_class_encodings, vp8_mv_class_tree); - vp9_tokens_from_tree(vp8_mv_class0_encodings, vp8_mv_class0_tree); - vp9_tokens_from_tree(vp8_mv_fp_encodings, vp8_mv_fp_tree); + vp9_tokens_from_tree(vp9_mv_joint_encodings, vp9_mv_joint_tree); + vp9_tokens_from_tree(vp9_mv_class_encodings, vp9_mv_class_tree); + vp9_tokens_from_tree(vp9_mv_class0_encodings, vp9_mv_class0_tree); + vp9_tokens_from_tree(vp9_mv_fp_encodings, vp9_mv_fp_tree); } void vp9_init_mv_probs(VP8_COMMON *cm) { - vpx_memcpy(&cm->fc.nmvc, &vp8_default_nmv_context, sizeof(nmv_context)); + vpx_memcpy(&cm->fc.nmvc, &vp9_default_nmv_context, sizeof(nmv_context)); } diff --git a/vp8/common/entropymv.h b/vp8/common/entropymv.h index 1509e20dc8ff8728e8f21b6d8e629ebfc01d258d..a64ac74e40a6313afbda9d416a90596aefdbe4e6 100644 --- a/vp8/common/entropymv.h +++ b/vp8/common/entropymv.h @@ -40,8 +40,8 @@ typedef enum { MV_JOINT_HNZVNZ = 3, /* Both components nonzero */ } MV_JOINT_TYPE; -extern const vp8_tree_index 
vp8_mv_joint_tree[2 * MV_JOINTS - 2]; -extern struct vp8_token_struct vp8_mv_joint_encodings [MV_JOINTS]; +extern const vp8_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2]; +extern struct vp8_token_struct vp9_mv_joint_encodings [MV_JOINTS]; /* Symbols for coding magnitude class of nonzero components */ #define MV_CLASSES 8 @@ -56,8 +56,8 @@ typedef enum { MV_CLASS_7 = 7, /* (128, 256] integer pel */ } MV_CLASS_TYPE; -extern const vp8_tree_index vp8_mv_class_tree[2 * MV_CLASSES - 2]; -extern struct vp8_token_struct vp8_mv_class_encodings [MV_CLASSES]; +extern const vp8_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2]; +extern struct vp8_token_struct vp9_mv_class_encodings [MV_CLASSES]; #define CLASS0_BITS 1 /* bits at integer precision for class 0 */ #define CLASS0_SIZE (1 << CLASS0_BITS) @@ -67,11 +67,11 @@ extern struct vp8_token_struct vp8_mv_class_encodings [MV_CLASSES]; #define MV_MAX ((1 << MV_MAX_BITS) - 1) #define MV_VALS ((MV_MAX << 1) + 1) -extern const vp8_tree_index vp8_mv_class0_tree[2 * CLASS0_SIZE - 2]; -extern struct vp8_token_struct vp8_mv_class0_encodings[CLASS0_SIZE]; +extern const vp8_tree_index vp9_mv_class0_tree[2 * CLASS0_SIZE - 2]; +extern struct vp8_token_struct vp9_mv_class0_encodings[CLASS0_SIZE]; -extern const vp8_tree_index vp8_mv_fp_tree[2 * 4 - 2]; -extern struct vp8_token_struct vp8_mv_fp_encodings[4]; +extern const vp8_tree_index vp9_mv_fp_tree[2 * 4 - 2]; +extern struct vp8_token_struct vp9_mv_fp_encodings[4]; typedef struct { vp8_prob sign; @@ -113,7 +113,7 @@ typedef struct { void vp9_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx, int usehp); -extern const nmv_context vp8_default_nmv_context; +extern const nmv_context vp9_default_nmv_context; void vp9_counts_to_nmv_context( nmv_context_counts *NMVcount, nmv_context *prob, diff --git a/vp8/common/filter.c b/vp8/common/filter.c index a44d15acff67c22840c04da8eabc18c80f221009..c2a20e1a630c1c28313f01f2426a1e0f1f1774be 100644 --- a/vp8/common/filter.c +++ b/vp8/common/filter.c @@ -14,7 +14,7 @@ #include "vpx_ports/mem.h" #include "vpx_rtcd.h" -DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[SUBPEL_SHIFTS][2]) = { +DECLARE_ALIGNED(16, const short, vp9_bilinear_filters[SUBPEL_SHIFTS][2]) = { { 128, 0 }, { 120, 8 }, { 112, 16 }, @@ -35,7 +35,7 @@ DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[SUBPEL_SHIFTS][2]) = { #define FILTER_ALPHA 0 #define FILTER_ALPHA_SHARP 1 -DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = { +DECLARE_ALIGNED(16, const short, vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = { #if FILTER_ALPHA == 0 /* Lagrangian interpolation filter */ { 0, 0, 0, 128, 0, 0, 0, 0}, @@ -81,7 +81,7 @@ DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = { #endif /* FILTER_ALPHA */ }; -DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = { +DECLARE_ALIGNED(16, const short, vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = { #if FILTER_ALPHA_SHARP == 1 /* dct based filter */ {0, 0, 0, 128, 0, 0, 0, 0}, @@ -121,7 +121,7 @@ DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = { #endif /* FILTER_ALPHA_SHARP */ }; -DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6]) = { +DECLARE_ALIGNED(16, const short, vp9_sub_pel_filters_6[SUBPEL_SHIFTS][6]) = { {0, 0, 128, 0, 0, 0}, {1, -5, 125, 8, -2, 1}, {1, -8, 122, 17, -5, 1}, @@ -308,8 +308,8 @@ void vp9_sixtap_predict_c const short *HFilter; const short *VFilter; - HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */ - VFilter = 
vp8_sub_pel_filters_6[yoffset]; /* 6 tap */ + HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */ + VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */ filter_block2d_6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter); } @@ -354,8 +354,8 @@ void vp9_sixtap_predict_avg_c const short *HFilter; const short *VFilter; - HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */ + HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */ + VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */ filter_block2d_avg_6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter); @@ -375,8 +375,8 @@ void vp9_sixtap_predict8x8_c // int FData[(7+Interp_Extend*2)*16]; /* Temp data buffer used in filtering */ int FData[(7 + Interp_Extend * 2) * 8]; /* Temp data buffer used in filtering */ - HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */ + HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */ + VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */ /* First filter 1-D horizontally... */ filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, src_pixels_per_line, 1, @@ -402,8 +402,8 @@ void vp9_sixtap_predict_avg8x8_c // int FData[(7+Interp_Extend*2)*16]; /* Temp data buffer used in filtering */ int FData[(7 + Interp_Extend * 2) * 8]; /* Temp data buffer used in filtering */ - HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */ + HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */ + VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */ /* First filter 1-D horizontally... */ filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, src_pixels_per_line, 1, @@ -427,8 +427,8 @@ void vp9_sixtap_predict8x4_c // int FData[(7+Interp_Extend*2)*16]; /* Temp data buffer used in filtering */ int FData[(3 + Interp_Extend * 2) * 8]; /* Temp data buffer used in filtering */ - HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */ + HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */ + VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */ /* First filter 1-D horizontally... */ filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, src_pixels_per_line, 1, @@ -455,8 +455,8 @@ void vp9_sixtap_predict16x16_c int FData[(15 + Interp_Extend * 2) * 16]; /* Temp data buffer used in filtering */ - HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */ + HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */ + VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */ /* First filter 1-D horizontally... */ filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, src_pixels_per_line, 1, @@ -481,8 +481,8 @@ void vp9_sixtap_predict_avg16x16_c // int FData[(15+Interp_Extend*2)*24]; /* Temp data buffer used in filtering */ int FData[(15 + Interp_Extend * 2) * 16]; /* Temp data buffer used in filtering */ - HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */ + HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */ + VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */ /* First filter 1-D horizontally... 
*/ filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, @@ -685,8 +685,8 @@ void vp9_eighttap_predict_c const short *HFilter; const short *VFilter; - HFilter = vp8_sub_pel_filters_8[xoffset]; - VFilter = vp8_sub_pel_filters_8[yoffset]; + HFilter = vp9_sub_pel_filters_8[xoffset]; + VFilter = vp9_sub_pel_filters_8[yoffset]; vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -702,8 +702,8 @@ void vp9_eighttap_predict_avg4x4_c unsigned char *dst_ptr, int dst_pitch ) { - const short *HFilter = vp8_sub_pel_filters_8[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8[yoffset]; unsigned char tmp[4 * 4]; vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line, @@ -724,8 +724,8 @@ void vp9_eighttap_predict_sharp_c const short *HFilter; const short *VFilter; - HFilter = vp8_sub_pel_filters_8s[xoffset]; - VFilter = vp8_sub_pel_filters_8s[yoffset]; + HFilter = vp9_sub_pel_filters_8s[xoffset]; + VFilter = vp9_sub_pel_filters_8s[yoffset]; vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -741,8 +741,8 @@ void vp9_eighttap_predict_avg4x4_sharp_c unsigned char *dst_ptr, int dst_pitch ) { - const short *HFilter = vp8_sub_pel_filters_8s[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8s[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8s[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8s[yoffset]; unsigned char tmp[4 * 4]; vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line, @@ -760,8 +760,8 @@ void vp9_eighttap_predict8x8_c unsigned char *dst_ptr, int dst_pitch ) { - const short *HFilter = vp8_sub_pel_filters_8[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8[yoffset]; vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -777,8 +777,8 @@ void vp9_eighttap_predict8x8_sharp_c unsigned char *dst_ptr, int dst_pitch ) { - const short *HFilter = vp8_sub_pel_filters_8s[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8s[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8s[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8s[yoffset]; vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -795,8 +795,8 @@ void vp9_eighttap_predict_avg8x8_c int dst_pitch ) { unsigned char tmp[8 * 8]; - const short *HFilter = vp8_sub_pel_filters_8[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8[yoffset]; vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -814,8 +814,8 @@ void vp9_eighttap_predict_avg8x8_sharp_c int dst_pitch ) { unsigned char tmp[8 * 8]; - const short *HFilter = vp8_sub_pel_filters_8s[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8s[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8s[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8s[yoffset]; vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -833,8 +833,8 @@ void vp9_eighttap_predict8x4_c int dst_pitch ) { - const short *HFilter = vp8_sub_pel_filters_8[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8[yoffset]; vp9_filter_block2d_8x4_8(src_ptr, 
src_pixels_per_line, HFilter, VFilter, @@ -850,8 +850,8 @@ void vp9_eighttap_predict8x4_sharp_c unsigned char *dst_ptr, int dst_pitch ) { - const short *HFilter = vp8_sub_pel_filters_8s[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8s[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8s[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8s[yoffset]; vp9_filter_block2d_8x4_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -867,8 +867,8 @@ void vp9_eighttap_predict16x16_c unsigned char *dst_ptr, int dst_pitch ) { - const short *HFilter = vp8_sub_pel_filters_8[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8[yoffset]; vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -884,8 +884,8 @@ void vp9_eighttap_predict16x16_sharp_c unsigned char *dst_ptr, int dst_pitch ) { - const short *HFilter = vp8_sub_pel_filters_8s[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8s[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8s[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8s[yoffset]; vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -902,8 +902,8 @@ void vp9_eighttap_predict_avg16x16_c int dst_pitch ) { DECLARE_ALIGNED_ARRAY(16, unsigned char, tmp, 16 * 16); - const short *HFilter = vp8_sub_pel_filters_8[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8[yoffset]; vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -921,8 +921,8 @@ void vp9_eighttap_predict_avg16x16_sharp_c int dst_pitch ) { DECLARE_ALIGNED_ARRAY(16, unsigned char, tmp, 16 * 16); - const short *HFilter = vp8_sub_pel_filters_8s[xoffset]; - const short *VFilter = vp8_sub_pel_filters_8s[yoffset]; + const short *HFilter = vp9_sub_pel_filters_8s[xoffset]; + const short *VFilter = vp9_sub_pel_filters_8s[yoffset]; vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line, HFilter, VFilter, @@ -1139,8 +1139,8 @@ void vp9_bilinear_predict4x4_c const short *HFilter; const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; #if 0 { int i; @@ -1174,8 +1174,8 @@ void vp9_bilinear_predict_avg4x4_c const short *HFilter; const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; filter_block2d_bil_avg(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 4, 4); @@ -1193,8 +1193,8 @@ void vp9_bilinear_predict8x8_c const short *HFilter; const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 8); @@ -1212,8 +1212,8 @@ void vp9_bilinear_predict_avg8x8_c const short *HFilter; const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; filter_block2d_bil_avg(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 8); @@ -1231,8 +1231,8 @@ void 
vp9_bilinear_predict8x4_c const short *HFilter; const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 4); @@ -1250,8 +1250,8 @@ void vp9_bilinear_predict16x16_c const short *HFilter; const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 16, 16); } @@ -1268,8 +1268,8 @@ void vp9_bilinear_predict_avg16x16_c const short *HFilter; const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; filter_block2d_bil_avg(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 16, 16); diff --git a/vp8/common/filter.h b/vp8/common/filter.h index fdae128e8e501424b8656a3ac27f9b3c5129abaa..142c784283f6efb784ba3ae95713d4caf7cfeb23 100644 --- a/vp8/common/filter.h +++ b/vp8/common/filter.h @@ -20,9 +20,9 @@ #define SUBPEL_SHIFTS 16 -extern const short vp8_bilinear_filters[SUBPEL_SHIFTS][2]; -extern const short vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6]; -extern const short vp8_sub_pel_filters_8[SUBPEL_SHIFTS][8]; -extern const short vp8_sub_pel_filters_8s[SUBPEL_SHIFTS][8]; +extern const short vp9_bilinear_filters[SUBPEL_SHIFTS][2]; +extern const short vp9_sub_pel_filters_6[SUBPEL_SHIFTS][6]; +extern const short vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8]; +extern const short vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8]; #endif // FILTER_H diff --git a/vp8/common/findnearmv.c b/vp8/common/findnearmv.c index 20d869e6abc6ce7feeb55417d592fb5b12cb4b54..cffa77e39fb2591d7fa072113a3751897832fda4 100644 --- a/vp8/common/findnearmv.c +++ b/vp8/common/findnearmv.c @@ -13,7 +13,7 @@ #include "vp8/common/sadmxn.h" #include <limits.h> -const unsigned char vp8_mbsplit_offset[4][16] = { +const unsigned char vp9_mbsplit_offset[4][16] = { { 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, { 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, { 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, diff --git a/vp8/common/findnearmv.h b/vp8/common/findnearmv.h index 18d746b41065b8e7b3baae1f7ee3558454d9ef45..c429be7c7dadd334b003460f1e378eecb6a033df 100644 --- a/vp8/common/findnearmv.h +++ b/vp8/common/findnearmv.h @@ -100,7 +100,7 @@ vp8_prob *vp9_mv_ref_probs(VP8_COMMON *pc, vp8_prob p[VP8_MVREFS - 1], const int near_mv_ref_ct[4] ); -extern const unsigned char vp8_mbsplit_offset[4][16]; +extern const unsigned char vp9_mbsplit_offset[4][16]; static int left_block_mv(const MODE_INFO *cur_mb, int b) { diff --git a/vp8/common/idctllm.c b/vp8/common/idctllm.c index fdc12fd504fab6d22b9999ab521bc534745dea61..279ec585fb08b4c6d9031e191a8463682efd969c 100644 --- a/vp8/common/idctllm.c +++ b/vp8/common/idctllm.c @@ -36,21 +36,21 @@ static const int rounding = 0; // TODO: these transforms can be further converted into integer forms // for complexity optimization -float idct_4[16] = { +static const float idct_4[16] = { 0.500000000000000, 0.653281482438188, 0.500000000000000, 0.270598050073099, 0.500000000000000, 0.270598050073099, -0.500000000000000, -0.653281482438188, 0.500000000000000, -0.270598050073099, -0.500000000000000, 0.653281482438188, 0.500000000000000, 
-0.653281482438188, 0.500000000000000, -0.270598050073099 }; -float iadst_4[16] = { +static const float iadst_4[16] = { 0.228013428883779, 0.577350269189626, 0.656538502008139, 0.428525073124360, 0.428525073124360, 0.577350269189626, -0.228013428883779, -0.656538502008139, 0.577350269189626, 0, -0.577350269189626, 0.577350269189626, 0.656538502008139, -0.577350269189626, 0.428525073124359, -0.228013428883779 }; -float idct_8[64] = { +static const float idct_8[64] = { 0.353553390593274, 0.490392640201615, 0.461939766255643, 0.415734806151273, 0.353553390593274, 0.277785116509801, 0.191341716182545, 0.097545161008064, 0.353553390593274, 0.415734806151273, 0.191341716182545, -0.097545161008064, @@ -69,7 +69,7 @@ float idct_8[64] = { 0.353553390593274, -0.277785116509801, 0.191341716182545, -0.097545161008064 }; -float iadst_8[64] = { +static const float iadst_8[64] = { 0.089131608307533, 0.255357107325376, 0.387095214016349, 0.466553967085785, 0.483002021635509, 0.434217976756762, 0.326790388032145, 0.175227946595735, 0.175227946595735, 0.434217976756762, 0.466553967085785, 0.255357107325376, @@ -88,21 +88,21 @@ float iadst_8[64] = { 0.326790388032145, -0.255357107325375, 0.175227946595736, -0.089131608307532 }; -const int16_t idct_i4[16] = { +static const int16_t idct_i4[16] = { 8192, 10703, 8192, 4433, 8192, 4433, -8192, -10703, 8192, -4433, -8192, 10703, 8192, -10703, 8192, -4433 }; -const int16_t iadst_i4[16] = { +static const int16_t iadst_i4[16] = { 3736, 9459, 10757, 7021, 7021, 9459, -3736, -10757, 9459, 0, -9459, 9459, 10757, -9459, 7021, -3736 }; -const int16_t idct_i8[64] = { +static const int16_t idct_i8[64] = { 5793, 8035, 7568, 6811, 5793, 4551, 3135, 1598, 5793, 6811, 3135, -1598, @@ -121,7 +121,7 @@ const int16_t idct_i8[64] = { 5793, -4551, 3135, -1598 }; -const int16_t iadst_i8[64] = { +static const int16_t iadst_i8[64] = { 1460, 4184, 6342, 7644, 7914, 7114, 5354, 2871, 2871, 7114, 7644, 4184, @@ -140,7 +140,7 @@ const int16_t iadst_i8[64] = { 5354, -4184, 2871, -1460 }; -float idct_16[256] = { +static float idct_16[256] = { 0.250000, 0.351851, 0.346760, 0.338330, 0.326641, 0.311806, 0.293969, 0.273300, 0.250000, 0.224292, 0.196424, 0.166664, 0.135299, 0.102631, 0.068975, 0.034654, 0.250000, 0.338330, 0.293969, 0.224292, 0.135299, 0.034654, -0.068975, -0.166664, @@ -175,7 +175,7 @@ float idct_16[256] = { 0.250000, -0.224292, 0.196424, -0.166664, 0.135299, -0.102631, 0.068975, -0.034654 }; -float iadst_16[256] = { +static float iadst_16[256] = { 0.033094, 0.098087, 0.159534, 0.215215, 0.263118, 0.301511, 0.329007, 0.344612, 0.347761, 0.338341, 0.316693, 0.283599, 0.240255, 0.188227, 0.129396, 0.065889, 0.065889, 0.188227, 0.283599, 0.338341, 0.344612, 0.301511, 0.215215, 0.098087, @@ -210,7 +210,7 @@ float iadst_16[256] = { 0.240255, -0.215215, 0.188227, -0.159534, 0.129396, -0.098087, 0.065889, -0.033094 }; -const int16_t idct_i16[256] = { +static const int16_t idct_i16[256] = { 4096, 5765, 5681, 5543, 5352, 5109, 4816, 4478, 4096, 3675, 3218, 2731, 2217, 1682, 1130, 568, 4096, 5543, 4816, 3675, 2217, 568, -1130, -2731, @@ -245,7 +245,7 @@ const int16_t idct_i16[256] = { 4096, -3675, 3218, -2731, 2217, -1682, 1130, -568 }; -const int16_t iadst_i16[256] = { +static const int16_t iadst_i16[256] = { 542, 1607, 2614, 3526, 4311, 4940, 5390, 5646, 5698, 5543, 5189, 4646, 3936, 3084, 2120, 1080, 1080, 3084, 4646, 5543, 5646, 4940, 3526, 1607, @@ -304,7 +304,7 @@ void vp9_ihtllm_float_c(const int16_t *input, int16_t *output, int pitch, float *pfb = &bufb[0]; // pointers to 
vertical and horizontal transforms - float *ptv, *pth; + const float *ptv, *pth; assert(tx_type != DCT_DCT); // load and convert residual array into floating-point diff --git a/vp8/common/modecont.c b/vp8/common/modecont.c index 5995f5732ce9777893e34e35a197362d7d92cb28..023d201f1b313eae1708ed8d676a832e5a2b41c8 100644 --- a/vp8/common/modecont.c +++ b/vp8/common/modecont.c @@ -10,7 +10,7 @@ #include "entropy.h" -const int default_vp8_mode_contexts[6][4] = { +const int vp9_default_mode_contexts[6][4] = { { /* 0 */ 7, 1, 1, 183 @@ -36,7 +36,7 @@ const int default_vp8_mode_contexts[6][4] = { 234, 188, 128, 28 }, }; -const int default_vp8_mode_contexts_a[6][4] = { +const int vp9_default_mode_contexts_a[6][4] = { { /* 0 */ 4, 1, 1, 143 diff --git a/vp8/common/modecont.h b/vp8/common/modecont.h index f219d7594f2270021683e53ea5474ab4c65c600b..c13c14f6c1a3c6b90bc0cf72679ca23278474365 100644 --- a/vp8/common/modecont.h +++ b/vp8/common/modecont.h @@ -12,6 +12,6 @@ #ifndef __INC_MODECONT_H #define __INC_MODECONT_H -extern const int default_vp8_mode_contexts[6][4]; -extern const int default_vp8_mode_contexts_a[6][4]; +extern const int vp9_default_mode_contexts[6][4]; +extern const int vp9_default_mode_contexts_a[6][4]; #endif diff --git a/vp8/common/modecontext.c b/vp8/common/modecontext.c index 47b4596edc7622ee6959903c855b0575a038ff1f..412f5586dda3f19a907556ae556aad34c617656f 100644 --- a/vp8/common/modecontext.c +++ b/vp8/common/modecontext.c @@ -11,7 +11,7 @@ #include "entropymode.h" -const unsigned int vp8_kf_default_bmode_counts [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] = { +const unsigned int vp9_kf_default_bmode_counts[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES] = { { /*Above Mode : 0*/ { 43438, 2195, 470, 316, 615, 171, 217, 412, 124, 160, }, /* left_mode 0 */ diff --git a/vp8/common/postproc.c b/vp8/common/postproc.c index 17404239dcea433de38fa4b6c9ba06418ee7d1aa..ea6bd9229522932fe4583b0634ab68c893b3d289 100644 --- a/vp8/common/postproc.c +++ b/vp8/common/postproc.c @@ -76,7 +76,7 @@ static const short kernel5[] = { 1, 1, 4, 1, 1 }; -const short vp8_rv[] = { +const short vp9_rv[] = { 8, 5, 2, 2, 8, 12, 4, 9, 8, 3, 0, 3, 9, 0, 0, 0, 8, 3, 14, 4, 10, 1, 11, 14, 1, 14, 9, 6, 12, 11, @@ -256,7 +256,7 @@ void vp9_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int co void vp9_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, int flimit) { int r, c, i; - const short *rv3 = &vp8_rv[63 & rand()]; + const short *rv3 = &vp9_rv[63 & rand()]; for (c = 0; c < cols; c++) { unsigned char *s = &dst[c]; diff --git a/vp8/common/pred_common.c b/vp8/common/pred_common.c index 5c927f1ba79e9037787e1116ff430729d1f2136b..2fb8c44b1a4999910cbc42992b2f09cff85514d9 100644 --- a/vp8/common/pred_common.c +++ b/vp8/common/pred_common.c @@ -71,11 +71,11 @@ unsigned char vp9_get_pred_context(const VP8_COMMON *const cm, int above_mode = (m - cm->mode_info_stride)->mbmi.mode; int left_interp, above_interp; if (left_in_image && left_mode >= NEARESTMV && left_mode <= SPLITMV) - left_interp = vp8_switchable_interp_map[(m - 1)->mbmi.interp_filter]; + left_interp = vp9_switchable_interp_map[(m - 1)->mbmi.interp_filter]; else left_interp = VP8_SWITCHABLE_FILTERS; if (above_in_image && above_mode >= NEARESTMV && above_mode <= SPLITMV) - above_interp = vp8_switchable_interp_map[ + above_interp = vp9_switchable_interp_map[ (m - cm->mode_info_stride)->mbmi.interp_filter]; else above_interp = VP8_SWITCHABLE_FILTERS; diff --git a/vp8/common/seg_common.c b/vp8/common/seg_common.c 
index 8d23fd153cc96fc25d668de7ccb433aff117246b..0cc80b4d2241a0783664eb71e09f5c7d57cfd4aa 100644 --- a/vp8/common/seg_common.c +++ b/vp8/common/seg_common.c @@ -10,9 +10,8 @@ #include "vp8/common/seg_common.h" -const int segfeaturedata_signed[SEG_LVL_MAX] = {1, 1, 0, 0, 0, 0}; -const int vp8_seg_feature_data_bits[SEG_LVL_MAX] = -{QINDEX_BITS, 6, 4, 4, 6, 2}; +static const int segfeaturedata_signed[SEG_LVL_MAX] = { 1, 1, 0, 0, 0, 0 }; +static const int seg_feature_data_bits[SEG_LVL_MAX] = { QINDEX_BITS, 6, 4, 4, 6, 2 }; // These functions provide access to new segment level features. // Eventually these function may be "optimized out" but for the moment, @@ -46,7 +45,7 @@ void vp9_disable_segfeature(MACROBLOCKD *xd, } int vp9_seg_feature_data_bits(SEG_LVL_FEATURES feature_id) { - return vp8_seg_feature_data_bits[feature_id]; + return seg_feature_data_bits[feature_id]; } int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id) { diff --git a/vp8/common/x86/postproc_mmx.asm b/vp8/common/x86/postproc_mmx.asm index 529ea9ab4cf8d95d8419d35a03b8049bf327767a..3e511b873a50c1bc55cac2da454bb4c1622372af 100644 --- a/vp8/common/x86/postproc_mmx.asm +++ b/vp8/common/x86/postproc_mmx.asm @@ -264,7 +264,7 @@ sym(vp9_post_proc_down_and_across_mmx): ;void vp9_mbpost_proc_down_mmx(unsigned char *dst, ; int pitch, int rows, int cols,int flimit) -extern sym(vp8_rv) +extern sym(vp9_rv) global sym(vp9_mbpost_proc_down_mmx) sym(vp9_mbpost_proc_down_mmx): push rbp @@ -286,7 +286,7 @@ sym(vp9_mbpost_proc_down_mmx): %define flimit2 [rsp+128] %if ABI_IS_32BIT=0 - lea r8, [GLOBAL(sym(vp8_rv))] + lea r8, [GLOBAL(sym(vp9_rv))] %endif ;rows +=8; @@ -404,13 +404,13 @@ sym(vp9_mbpost_proc_down_mmx): and rcx, 127 %if ABI_IS_32BIT=1 && CONFIG_PIC=1 push rax - lea rax, [GLOBAL(sym(vp8_rv))] - movq mm4, [rax + rcx*2] ;vp8_rv[rcx*2] + lea rax, [GLOBAL(sym(vp9_rv))] + movq mm4, [rax + rcx*2] ;vp9_rv[rcx*2] pop rax %elif ABI_IS_32BIT=0 - movq mm4, [r8 + rcx*2] ;vp8_rv[rcx*2] + movq mm4, [r8 + rcx*2] ;vp9_rv[rcx*2] %else - movq mm4, [sym(vp8_rv) + rcx*2] + movq mm4, [sym(vp9_rv) + rcx*2] %endif paddw mm1, mm4 ;paddw xmm1, eight8s diff --git a/vp8/common/x86/postproc_sse2.asm b/vp8/common/x86/postproc_sse2.asm index 0aeab52599cbfa2b3408ab1313693160c5aae294..91758e62d78473a7f90b9985186cc924b684a881 100644 --- a/vp8/common/x86/postproc_sse2.asm +++ b/vp8/common/x86/postproc_sse2.asm @@ -250,7 +250,7 @@ sym(vp9_post_proc_down_and_across_xmm): ;void vp9_mbpost_proc_down_xmm(unsigned char *dst, ; int pitch, int rows, int cols,int flimit) -extern sym(vp8_rv) +extern sym(vp9_rv) global sym(vp9_mbpost_proc_down_xmm) sym(vp9_mbpost_proc_down_xmm): push rbp @@ -275,7 +275,7 @@ sym(vp9_mbpost_proc_down_xmm): %define flimit4 [rsp+128] %if ABI_IS_32BIT=0 - lea r8, [GLOBAL(sym(vp8_rv))] + lea r8, [GLOBAL(sym(vp9_rv))] %endif ;rows +=8; @@ -393,13 +393,13 @@ sym(vp9_mbpost_proc_down_xmm): and rcx, 127 %if ABI_IS_32BIT=1 && CONFIG_PIC=1 push rax - lea rax, [GLOBAL(sym(vp8_rv))] - movdqu xmm4, [rax + rcx*2] ;vp8_rv[rcx*2] + lea rax, [GLOBAL(sym(vp9_rv))] + movdqu xmm4, [rax + rcx*2] ;vp9_rv[rcx*2] pop rax %elif ABI_IS_32BIT=0 - movdqu xmm4, [r8 + rcx*2] ;vp8_rv[rcx*2] + movdqu xmm4, [r8 + rcx*2] ;vp9_rv[rcx*2] %else - movdqu xmm4, [sym(vp8_rv) + rcx*2] + movdqu xmm4, [sym(vp9_rv) + rcx*2] %endif paddw xmm1, xmm4 diff --git a/vp8/common/x86/subpixel_mmx.asm b/vp8/common/x86/subpixel_mmx.asm index 0497187ed6c182ae1f0f64d643c1f8e978ef0ceb..919892d353286a9c7154e5ba915387ef3fc337d3 100644 --- a/vp8/common/x86/subpixel_mmx.asm +++ 
b/vp8/common/x86/subpixel_mmx.asm @@ -229,7 +229,7 @@ sym(vp9_bilinear_predict8x8_mmx): mov rdi, arg(4) ;dst_ptr ; shl rax, 5 ; offset * 32 - lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))] + lea rcx, [GLOBAL(sym(vp9_bilinear_filters_8x_mmx))] add rax, rcx ; HFilter mov rsi, arg(0) ;src_ptr ; @@ -385,7 +385,7 @@ sym(vp9_bilinear_predict8x4_mmx): movsxd rax, dword ptr arg(2) ;xoffset mov rdi, arg(4) ;dst_ptr ; - lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))] + lea rcx, [GLOBAL(sym(vp9_bilinear_filters_8x_mmx))] shl rax, 5 mov rsi, arg(0) ;src_ptr ; @@ -540,7 +540,7 @@ sym(vp9_bilinear_predict4x4_mmx): movsxd rax, dword ptr arg(2) ;xoffset mov rdi, arg(4) ;dst_ptr ; - lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))] + lea rcx, [GLOBAL(sym(vp9_bilinear_filters_8x_mmx))] shl rax, 5 add rax, rcx ; HFilter @@ -700,8 +700,8 @@ sym(vp9_six_tap_mmx): align 16 -global HIDDEN_DATA(sym(vp9_bilinear_filters_mmx)) -sym(vp9_bilinear_filters_mmx): +global HIDDEN_DATA(sym(vp9_bilinear_filters_8x_mmx)) +sym(vp9_bilinear_filters_8x_mmx): times 8 dw 128 times 8 dw 0 diff --git a/vp8/common/x86/vp8_asm_stubs.c b/vp8/common/x86/vp8_asm_stubs.c index 8e26fc6c90d88d3d1fd901248cb12c524c06d70d..8e4fd96c48d5f7c7d89f433eddc29f7cc8aa6a0b 100644 --- a/vp8/common/x86/vp8_asm_stubs.c +++ b/vp8/common/x86/vp8_asm_stubs.c @@ -14,7 +14,7 @@ #include "vp8/common/subpixel.h" extern const short vp9_six_tap_mmx[16][6 * 8]; -extern const short vp9_bilinear_filters_mmx[16][2 * 8]; +extern const short vp9_bilinear_filters_8x_mmx[16][2 * 8]; extern void vp9_filter_block1d_h6_mmx ( diff --git a/vp8/decoder/asm_dec_offsets.c b/vp8/decoder/asm_dec_offsets.c index 8551bab103fd5cb19a0c6bb7dd1ec870689fa1c7..270a4e0bce007216fad1748a2286da8f57494052 100644 --- a/vp8/decoder/asm_dec_offsets.c +++ b/vp8/decoder/asm_dec_offsets.c @@ -16,7 +16,7 @@ BEGIN DEFINE(detok_scan, offsetof(DETOK, scan)); DEFINE(detok_ptr_block2leftabove, offsetof(DETOK, ptr_block2leftabove)); -DEFINE(detok_coef_tree_ptr, offsetof(DETOK, vp8_coef_tree_ptr)); +DEFINE(detok_coef_tree_ptr, offsetof(DETOK, vp9_coef_tree_ptr)); DEFINE(detok_norm_ptr, offsetof(DETOK, norm_ptr)); DEFINE(detok_ptr_coef_bands_x, offsetof(DETOK, ptr_coef_bands_x)); diff --git a/vp8/decoder/dboolhuff.h b/vp8/decoder/dboolhuff.h index e6ca1a657ee7a1cb71deb30835ccccab2de18734..f6af5c8815cc3405e766b459f3857c053be947b6 100644 --- a/vp8/decoder/dboolhuff.h +++ b/vp8/decoder/dboolhuff.h @@ -33,7 +33,7 @@ typedef struct { unsigned int range; } BOOL_DECODER; -DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); +DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]); int vp9_start_decode(BOOL_DECODER *br, const unsigned char *source, @@ -103,7 +103,7 @@ static int vp8dx_decode_bool(BOOL_DECODER *br, int probability) { } { - register unsigned int shift = vp8_norm[range]; + register unsigned int shift = vp9_norm[range]; range <<= shift; value <<= shift; count -= shift; diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c index 98a0cc886d7de5f02e06dbf664b3e302ef3d635c..b2c51f5f694b2d929029f434581e28c6ab412af5 100644 --- a/vp8/decoder/decodemv.c +++ b/vp8/decoder/decodemv.c @@ -29,31 +29,31 @@ int dec_mvcount = 0; #endif static int vp8_read_bmode(vp8_reader *bc, const vp8_prob *p) { - return vp8_treed_read(bc, vp8_bmode_tree, p); + return vp8_treed_read(bc, vp9_bmode_tree, p); } static int vp8_read_ymode(vp8_reader *bc, const vp8_prob *p) { - return vp8_treed_read(bc, vp8_ymode_tree, p); + return vp8_treed_read(bc, vp9_ymode_tree, p); } #if CONFIG_SUPERBLOCKS static int 
vp8_sb_kfread_ymode(vp8_reader *bc, const vp8_prob *p) { - return vp8_treed_read(bc, vp8_uv_mode_tree, p); + return vp8_treed_read(bc, vp9_uv_mode_tree, p); } #endif static int vp8_kfread_ymode(vp8_reader *bc, const vp8_prob *p) { - return vp8_treed_read(bc, vp8_kf_ymode_tree, p); + return vp8_treed_read(bc, vp9_kf_ymode_tree, p); } static int vp8_read_i8x8_mode(vp8_reader *bc, const vp8_prob *p) { - return vp8_treed_read(bc, vp8_i8x8_mode_tree, p); + return vp8_treed_read(bc, vp9_i8x8_mode_tree, p); } static int vp8_read_uv_mode(vp8_reader *bc, const vp8_prob *p) { - return vp8_treed_read(bc, vp8_uv_mode_tree, p); + return vp8_treed_read(bc, vp9_uv_mode_tree, p); } // This function reads the current macro block's segnent id from the bitstream @@ -89,7 +89,7 @@ int vp8_read_mv_ref_id(vp8_reader *r, } #endif -extern const int vp8_i8x8_block[4]; +extern const int vp9_i8x8_block[4]; static void kfread_modes(VP8D_COMP *pbi, MODE_INFO *m, int mb_row, @@ -168,7 +168,7 @@ static void kfread_modes(VP8D_COMP *pbi, int i; int mode8x8; for (i = 0; i < 4; i++) { - int ib = vp8_i8x8_block[i]; + int ib = vp9_i8x8_block[i]; mode8x8 = vp8_read_i8x8_mode(bc, pbi->common.fc.i8x8_mode_prob); m->bmi[ib + 0].as_mode.first = mode8x8; m->bmi[ib + 1].as_mode.first = mode8x8; @@ -213,9 +213,9 @@ static int read_nmv_component(vp8_reader *r, const nmv_component *mvcomp) { int v, s, z, c, o, d; s = vp8_read(r, mvcomp->sign); - c = vp8_treed_read(r, vp8_mv_class_tree, mvcomp->classes); + c = vp8_treed_read(r, vp9_mv_class_tree, mvcomp->classes); if (c == MV_CLASS_0) { - d = vp8_treed_read(r, vp8_mv_class0_tree, mvcomp->class0); + d = vp8_treed_read(r, vp9_mv_class0_tree, mvcomp->class0); } else { int i, b; d = 0; @@ -244,9 +244,9 @@ static int read_nmv_component_fp(vp8_reader *r, d = o >> 3; if (c == MV_CLASS_0) { - f = vp8_treed_read(r, vp8_mv_fp_tree, mvcomp->class0_fp[d]); + f = vp8_treed_read(r, vp9_mv_fp_tree, mvcomp->class0_fp[d]); } else { - f = vp8_treed_read(r, vp8_mv_fp_tree, mvcomp->fp); + f = vp8_treed_read(r, vp9_mv_fp_tree, mvcomp->fp); } o += (f << 1); @@ -267,7 +267,7 @@ static int read_nmv_component_fp(vp8_reader *r, static void read_nmv(vp8_reader *r, MV *mv, const MV *ref, const nmv_context *mvctx) { - MV_JOINT_TYPE j = vp8_treed_read(r, vp8_mv_joint_tree, mvctx->joints); + MV_JOINT_TYPE j = vp8_treed_read(r, vp9_mv_joint_tree, mvctx->joints); mv->row = mv-> col = 0; if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) { mv->row = read_nmv_component(r, ref->row, &mvctx->comps[0]); @@ -468,16 +468,16 @@ static MV_REFERENCE_FRAME read_ref_frame(VP8D_COMP *pbi, #if CONFIG_SUPERBLOCKS static MB_PREDICTION_MODE read_sb_mv_ref(vp8_reader *bc, const vp8_prob *p) { - return (MB_PREDICTION_MODE) vp8_treed_read(bc, vp8_sb_mv_ref_tree, p); + return (MB_PREDICTION_MODE) vp8_treed_read(bc, vp9_sb_mv_ref_tree, p); } #endif static MB_PREDICTION_MODE read_mv_ref(vp8_reader *bc, const vp8_prob *p) { - return (MB_PREDICTION_MODE) vp8_treed_read(bc, vp8_mv_ref_tree, p); + return (MB_PREDICTION_MODE) vp8_treed_read(bc, vp9_mv_ref_tree, p); } static B_PREDICTION_MODE sub_mv_ref(vp8_reader *bc, const vp8_prob *p) { - return (B_PREDICTION_MODE) vp8_treed_read(bc, vp8_sub_mv_ref_tree, p); + return (B_PREDICTION_MODE) vp8_treed_read(bc, vp9_sub_mv_ref_tree, p); } #ifdef VPX_MODE_COUNT @@ -789,8 +789,8 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, if (mbmi->mode >= NEARESTMV && mbmi->mode <= SPLITMV) { if (cm->mcomp_filter_type == SWITCHABLE) { - mbmi->interp_filter = vp8_switchable_interp[ - 
vp8_treed_read(bc, vp8_switchable_interp_tree, + mbmi->interp_filter = vp9_switchable_interp[ + vp8_treed_read(bc, vp9_switchable_interp_tree, vp9_get_pred_probs(cm, xd, PRED_SWITCHABLE_INTERP))]; } else { mbmi->interp_filter = cm->mcomp_filter_type; @@ -862,8 +862,8 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, switch (mbmi->mode) { case SPLITMV: { const int s = mbmi->partitioning = - vp8_treed_read(bc, vp8_mbsplit_tree, cm->fc.mbsplit_prob); - const int num_p = vp8_mbsplit_count [s]; + vp8_treed_read(bc, vp9_mbsplit_tree, cm->fc.mbsplit_prob); + const int num_p = vp9_mbsplit_count [s]; int j = 0; cm->fc.mbsplit_counts[s]++; @@ -875,7 +875,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, int mv_contz; int blockmode; - k = vp8_mbsplit_offset[s][j]; + k = vp9_mbsplit_offset[s][j]; leftmv.as_int = left_block_mv(mi, k); abovemv.as_int = above_block_mv(mi, k, mis); @@ -1121,7 +1121,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, int i; int mode8x8; for (i = 0; i < 4; i++) { - int ib = vp8_i8x8_block[i]; + int ib = vp9_i8x8_block[i]; mode8x8 = vp8_read_i8x8_mode(bc, pbi->common.fc.i8x8_mode_prob); mi->bmi[ib + 0].as_mode.first = mode8x8; mi->bmi[ib + 1].as_mode.first = mode8x8; diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c index 8bc40d78422a60c39ebce9044f2b63639530aea7..504ac642459bfe09b52a014c6d5b6d430edbe923 100644 --- a/vp8/decoder/decodframe.c +++ b/vp8/decoder/decodframe.c @@ -84,7 +84,7 @@ void vp9_init_de_quantizer(VP8D_COMP *pbi) { /* all the ac values =; */ for (i = 1; i < 16; i++) { - int rc = vp8_default_zig_zag1d[i]; + int rc = vp9_default_zig_zag1d[i]; pc->Y1dequant[Q][rc] = (short)vp9_ac_yquant(Q); pc->Y2dequant[Q][rc] = (short)vp9_ac2quant(Q, pc->y2ac_delta_q); @@ -309,7 +309,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, /* dequantization and idct */ if (mode == I8X8_PRED) { for (i = 0; i < 4; i++) { - int ib = vp8_i8x8_block[i]; + int ib = vp9_i8x8_block[i]; const int iblock[4] = {0, 1, 4, 5}; int j; int i8x8mode; diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c index 00797ed34db33177e3019eff4d0af340402361bd..37dfa6a5788010bc29ce418897224cdac369a5e7 100644 --- a/vp8/decoder/detokenize.c +++ b/vp8/decoder/detokenize.c @@ -22,13 +22,13 @@ #define OCB_X PREV_COEF_CONTEXTS * ENTROPY_NODES -DECLARE_ALIGNED(16, const int, coef_bands_x[16]) = { +DECLARE_ALIGNED(16, static const int, coef_bands_x[16]) = { 0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 6 * OCB_X, 4 * OCB_X, 5 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X }; -DECLARE_ALIGNED(16, const int, coef_bands_x_8x8[64]) = { +DECLARE_ALIGNED(16, static const int, coef_bands_x_8x8[64]) = { 0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X, 5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 5 * OCB_X, 5 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, @@ -39,7 +39,7 @@ DECLARE_ALIGNED(16, const int, coef_bands_x_8x8[64]) = { 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, }; -DECLARE_ALIGNED(16, const int, coef_bands_x_16x16[256]) = { +DECLARE_ALIGNED(16, static const int, coef_bands_x_16x16[256]) = { 0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X, 5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 
* OCB_X, 5 * OCB_X, 5 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, @@ -113,11 +113,11 @@ void vp9_reset_mb_tokens_context(MACROBLOCKD *xd) { } } -DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); +DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]); // #define PREV_CONTEXT_INC(val) (2+((val)>2)) -// #define PREV_CONTEXT_INC(val) (vp8_prev_token_class[(val)]) -#define PREV_CONTEXT_INC(val) (vp8_prev_token_class[(val)>10?10:(val)]) +// #define PREV_CONTEXT_INC(val) (vp9_prev_token_class[(val)]) +#define PREV_CONTEXT_INC(val) (vp9_prev_token_class[(val)>10?10:(val)]) static int get_token(int v) { if (v < 0) v = -v; @@ -145,15 +145,15 @@ void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr, switch(tx_type) { case ADST_DCT : - scan = vp8_row_scan; + scan = vp9_row_scan; break; case DCT_ADST : - scan = vp8_col_scan; + scan = vp9_col_scan; break; default : - scan = vp8_default_zig_zag1d; + scan = vp9_default_zig_zag1d; break; } @@ -161,17 +161,17 @@ void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr, for (c = !type; c < eob; ++c) { int rc = scan[c]; int v = qcoeff_ptr[rc]; - band = vp8_coef_bands[c]; + band = vp9_coef_bands[c]; token = get_token(v); if (tx_type != DCT_DCT) fc->hybrid_coef_counts[type][band][pt][token]++; else fc->coef_counts[type][band][pt][token]++; - pt = vp8_prev_token_class[token]; + pt = vp9_prev_token_class[token]; } if (eob < seg_eob) { - band = vp8_coef_bands[c]; + band = vp9_coef_bands[c]; if (tx_type != DCT_DCT) fc->hybrid_coef_counts[type][band][pt][DCT_EOB_TOKEN]++; else @@ -185,15 +185,15 @@ void static count_tokens(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, int c, pt, token, band; VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); for (c = !type; c < eob; ++c) { - int rc = vp8_default_zig_zag1d[c]; + int rc = vp9_default_zig_zag1d[c]; int v = qcoeff_ptr[rc]; - band = vp8_coef_bands[c]; + band = vp9_coef_bands[c]; token = get_token(v); fc->coef_counts[type][band][pt][token]++; - pt = vp8_prev_token_class[token]; + pt = vp9_prev_token_class[token]; } if (eob < seg_eob) { - band = vp8_coef_bands[c]; + band = vp9_coef_bands[c]; fc->coef_counts[type][band][pt][DCT_EOB_TOKEN]++; } } @@ -205,18 +205,18 @@ void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, int c, pt, token, band; VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); for (c = !type; c < eob; ++c) { - int rc = (type == 1 ? vp8_default_zig_zag1d[c] : vp8_default_zig_zag1d_8x8[c]); + int rc = (type == 1 ? vp9_default_zig_zag1d[c] : vp9_default_zig_zag1d_8x8[c]); int v = qcoeff_ptr[rc]; - band = (type == 1 ? vp8_coef_bands[c] : vp8_coef_bands_8x8[c]); + band = (type == 1 ? vp9_coef_bands[c] : vp9_coef_bands_8x8[c]); token = get_token(v); if (tx_type != DCT_DCT) fc->hybrid_coef_counts_8x8[type][band][pt][token]++; else fc->coef_counts_8x8[type][band][pt][token]++; - pt = vp8_prev_token_class[token]; + pt = vp9_prev_token_class[token]; } if (eob < seg_eob) { - band = (type == 1 ? vp8_coef_bands[c] : vp8_coef_bands_8x8[c]); + band = (type == 1 ? 
vp9_coef_bands[c] : vp9_coef_bands_8x8[c]); if (tx_type != DCT_DCT) fc->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN]++; else @@ -231,18 +231,18 @@ void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, PLANE_TYPE type, int c, pt, token; VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); for (c = !type; c < eob; ++c) { - int rc = vp8_default_zig_zag1d_16x16[c]; + int rc = vp9_default_zig_zag1d_16x16[c]; int v = qcoeff_ptr[rc]; - int band = vp8_coef_bands_16x16[c]; + int band = vp9_coef_bands_16x16[c]; token = get_token(v); if (tx_type != DCT_DCT) fc->hybrid_coef_counts_16x16[type][band][pt][token]++; else fc->coef_counts_16x16[type][band][pt][token]++; - pt = vp8_prev_token_class[token]; + pt = vp9_prev_token_class[token]; } if (eob < seg_eob) { - int band = vp8_coef_bands_16x16[c]; + int band = vp9_coef_bands_16x16[c]; if (tx_type != DCT_DCT) fc->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN]++; else @@ -436,7 +436,7 @@ int vp9_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd, // Luma block { - const int* const scan = vp8_default_zig_zag1d_16x16; + const int* const scan = vp9_default_zig_zag1d_16x16; c = decode_coefs(pbi, xd, bc, A, L, type, tx_type, seg_eob, qcoeff_ptr, @@ -457,9 +457,9 @@ int vp9_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd, else seg_eob = 64; for (i = 16; i < 24; i += 4) { - ENTROPY_CONTEXT* const a = A + vp8_block2above_8x8[i]; - ENTROPY_CONTEXT* const l = L + vp8_block2left_8x8[i]; - const int* const scan = vp8_default_zig_zag1d_8x8; + ENTROPY_CONTEXT* const a = A + vp9_block2above_8x8[i]; + ENTROPY_CONTEXT* const l = L + vp9_block2left_8x8[i]; + const int* const scan = vp9_default_zig_zag1d_8x8; c = decode_coefs(pbi, xd, bc, a, l, type, tx_type, @@ -495,9 +495,9 @@ int vp9_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV && xd->mode_info_context->mbmi.mode != I8X8_PRED) { - ENTROPY_CONTEXT *const a = A + vp8_block2above_8x8[24]; - ENTROPY_CONTEXT *const l = L + vp8_block2left_8x8[24]; - const int *const scan = vp8_default_zig_zag1d; + ENTROPY_CONTEXT *const a = A + vp9_block2above_8x8[24]; + ENTROPY_CONTEXT *const l = L + vp9_block2left_8x8[24]; + const int *const scan = vp9_default_zig_zag1d; type = PLANE_TYPE_Y2; if (seg_active) @@ -522,9 +522,9 @@ int vp9_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, seg_eob = 64; for (i = 0; i < bufthred ; i += 4) { - ENTROPY_CONTEXT *const a = A + vp8_block2above_8x8[i]; - ENTROPY_CONTEXT *const l = L + vp8_block2left_8x8[i]; - const int *const scan = vp8_default_zig_zag1d_8x8; + ENTROPY_CONTEXT *const a = A + vp9_block2above_8x8[i]; + ENTROPY_CONTEXT *const l = L + vp9_block2left_8x8[i]; + const int *const scan = vp9_default_zig_zag1d_8x8; tx_type = DCT_DCT; if (i == 16) @@ -552,9 +552,9 @@ int vp9_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, // use 4x4 transform for U, V components in I8X8 prediction mode for (i = 16; i < 24; i++) { - ENTROPY_CONTEXT *const a = A + vp8_block2above[i]; - ENTROPY_CONTEXT *const l = L + vp8_block2left[i]; - const int *scan = vp8_default_zig_zag1d; + ENTROPY_CONTEXT *const a = A + vp9_block2above[i]; + ENTROPY_CONTEXT *const l = L + vp9_block2left[i]; + const int *scan = vp9_default_zig_zag1d; c = decode_coefs(pbi, xd, bc, a, l, type, tx_type, @@ -577,7 +577,7 @@ int vp9_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd, ENTROPY_CONTEXT *const L = (ENTROPY_CONTEXT *)xd->left_context; char *const eobs = xd->eobs; - const int *scan = vp8_default_zig_zag1d; + 
const int *scan = vp9_default_zig_zag1d; PLANE_TYPE type; int c, i, eobtotal = 0, seg_eob = 16; INT16 *qcoeff_ptr = &xd->qcoeff[0]; @@ -589,8 +589,8 @@ int vp9_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd, if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != I8X8_PRED && xd->mode_info_context->mbmi.mode != SPLITMV) { - ENTROPY_CONTEXT *const a = A + vp8_block2above[24]; - ENTROPY_CONTEXT *const l = L + vp8_block2left[24]; + ENTROPY_CONTEXT *const a = A + vp9_block2above[24]; + ENTROPY_CONTEXT *const l = L + vp9_block2left[24]; type = PLANE_TYPE_Y2; c = decode_coefs(dx, xd, bc, a, l, type, @@ -606,8 +606,8 @@ int vp9_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd, } for (i = 0; i < 24; ++i) { - ENTROPY_CONTEXT *const a = A + vp8_block2above[i]; - ENTROPY_CONTEXT *const l = L + vp8_block2left[i]; + ENTROPY_CONTEXT *const a = A + vp9_block2above[i]; + ENTROPY_CONTEXT *const l = L + vp9_block2left[i]; TX_TYPE tx_type = DCT_DCT; if (i == 16) type = PLANE_TYPE_UV; @@ -615,15 +615,15 @@ int vp9_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd, tx_type = get_tx_type(xd, &xd->block[i]); switch(tx_type) { case ADST_DCT : - scan = vp8_row_scan; + scan = vp9_row_scan; break; case DCT_ADST : - scan = vp8_col_scan; + scan = vp9_col_scan; break; default : - scan = vp8_default_zig_zag1d; + scan = vp9_default_zig_zag1d; break; } diff --git a/vp8/decoder/onyxd_int.h b/vp8/decoder/onyxd_int.h index 6d542a327405bc81dc5b1079bb207a8c7756e654..437c1926e4f382c877b97b7519a5b242b1b14ef8 100644 --- a/vp8/decoder/onyxd_int.h +++ b/vp8/decoder/onyxd_int.h @@ -36,7 +36,7 @@ typedef struct { int const *scan; int const *scan_8x8; UINT8 const *ptr_block2leftabove; - vp8_tree_index const *vp8_coef_tree_ptr; + vp8_tree_index const *vp9_coef_tree_ptr; unsigned char *norm_ptr; UINT8 *ptr_coef_bands_x; UINT8 *ptr_coef_bands_x_8x8; diff --git a/vp8/encoder/asm_enc_offsets.c b/vp8/encoder/asm_enc_offsets.c index 8e74901b32550c605d6be4d565c6df9bd4b77b49..9749f05b4aed3147a9c3a4afe6ee189a0469f4d9 100644 --- a/vp8/encoder/asm_enc_offsets.c +++ b/vp8/encoder/asm_enc_offsets.c @@ -20,35 +20,35 @@ BEGIN /* regular quantize */ -DEFINE(vp8_block_coeff, offsetof(BLOCK, coeff)); -DEFINE(vp8_block_zbin, offsetof(BLOCK, zbin)); -DEFINE(vp8_block_round, offsetof(BLOCK, round)); -DEFINE(vp8_block_quant, offsetof(BLOCK, quant)); -DEFINE(vp8_block_quant_fast, offsetof(BLOCK, quant_fast)); -DEFINE(vp8_block_zbin_extra, offsetof(BLOCK, zbin_extra)); -DEFINE(vp8_block_zrun_zbin_boost, offsetof(BLOCK, zrun_zbin_boost)); -DEFINE(vp8_block_quant_shift, offsetof(BLOCK, quant_shift)); - -DEFINE(vp8_blockd_qcoeff, offsetof(BLOCKD, qcoeff)); -DEFINE(vp8_blockd_dequant, offsetof(BLOCKD, dequant)); -DEFINE(vp8_blockd_dqcoeff, offsetof(BLOCKD, dqcoeff)); -DEFINE(vp8_blockd_eob, offsetof(BLOCKD, eob)); +DEFINE(vp9_block_coeff, offsetof(BLOCK, coeff)); +DEFINE(vp9_block_zbin, offsetof(BLOCK, zbin)); +DEFINE(vp9_block_round, offsetof(BLOCK, round)); +DEFINE(vp9_block_quant, offsetof(BLOCK, quant)); +DEFINE(vp9_block_quant_fast, offsetof(BLOCK, quant_fast)); +DEFINE(vp9_block_zbin_extra, offsetof(BLOCK, zbin_extra)); +DEFINE(vp9_block_zrun_zbin_boost, offsetof(BLOCK, zrun_zbin_boost)); +DEFINE(vp9_block_quant_shift, offsetof(BLOCK, quant_shift)); + +DEFINE(vp9_blockd_qcoeff, offsetof(BLOCKD, qcoeff)); +DEFINE(vp9_blockd_dequant, offsetof(BLOCKD, dequant)); +DEFINE(vp9_blockd_dqcoeff, offsetof(BLOCKD, dqcoeff)); +DEFINE(vp9_blockd_eob, offsetof(BLOCKD, eob)); /* subtract */ -DEFINE(vp8_block_base_src, offsetof(BLOCK, 
base_src)); -DEFINE(vp8_block_src, offsetof(BLOCK, src)); -DEFINE(vp8_block_src_diff, offsetof(BLOCK, src_diff)); -DEFINE(vp8_block_src_stride, offsetof(BLOCK, src_stride)); +DEFINE(vp9_block_base_src, offsetof(BLOCK, base_src)); +DEFINE(vp9_block_src, offsetof(BLOCK, src)); +DEFINE(vp9_block_src_diff, offsetof(BLOCK, src_diff)); +DEFINE(vp9_block_src_stride, offsetof(BLOCK, src_stride)); -DEFINE(vp8_blockd_predictor, offsetof(BLOCKD, predictor)); +DEFINE(vp9_blockd_predictor, offsetof(BLOCKD, predictor)); /* pack tokens */ -DEFINE(vp8_writer_lowvalue, offsetof(vp8_writer, lowvalue)); -DEFINE(vp8_writer_range, offsetof(vp8_writer, range)); -DEFINE(vp8_writer_value, offsetof(vp8_writer, value)); -DEFINE(vp8_writer_count, offsetof(vp8_writer, count)); -DEFINE(vp8_writer_pos, offsetof(vp8_writer, pos)); -DEFINE(vp8_writer_buffer, offsetof(vp8_writer, buffer)); +DEFINE(vp9_writer_lowvalue, offsetof(vp8_writer, lowvalue)); +DEFINE(vp9_writer_range, offsetof(vp8_writer, range)); +DEFINE(vp9_writer_value, offsetof(vp8_writer, value)); +DEFINE(vp9_writer_count, offsetof(vp8_writer, count)); +DEFINE(vp9_writer_pos, offsetof(vp8_writer, pos)); +DEFINE(vp9_writer_buffer, offsetof(vp8_writer, buffer)); DEFINE(tokenextra_token, offsetof(TOKENEXTRA, Token)); DEFINE(tokenextra_extra, offsetof(TOKENEXTRA, Extra)); @@ -56,24 +56,24 @@ DEFINE(tokenextra_context_tree, offsetof(TOKENEXTRA, context_tre DEFINE(tokenextra_skip_eob_node, offsetof(TOKENEXTRA, skip_eob_node)); DEFINE(TOKENEXTRA_SZ, sizeof(TOKENEXTRA)); -DEFINE(vp8_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct)); +DEFINE(vp9_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct)); -DEFINE(vp8_token_value, offsetof(vp8_token, value)); -DEFINE(vp8_token_len, offsetof(vp8_token, Len)); +DEFINE(vp9_token_value, offsetof(vp8_token, value)); +DEFINE(vp9_token_len, offsetof(vp8_token, Len)); -DEFINE(vp8_extra_bit_struct_tree, offsetof(vp8_extra_bit_struct, tree)); -DEFINE(vp8_extra_bit_struct_prob, offsetof(vp8_extra_bit_struct, prob)); -DEFINE(vp8_extra_bit_struct_len, offsetof(vp8_extra_bit_struct, Len)); -DEFINE(vp8_extra_bit_struct_base_val, offsetof(vp8_extra_bit_struct, base_val)); +DEFINE(vp9_extra_bit_struct_tree, offsetof(vp8_extra_bit_struct, tree)); +DEFINE(vp9_extra_bit_struct_prob, offsetof(vp8_extra_bit_struct, prob)); +DEFINE(vp9_extra_bit_struct_len, offsetof(vp8_extra_bit_struct, Len)); +DEFINE(vp9_extra_bit_struct_base_val, offsetof(vp8_extra_bit_struct, base_val)); -DEFINE(vp8_comp_tplist, offsetof(VP8_COMP, tplist)); -DEFINE(vp8_comp_common, offsetof(VP8_COMP, common)); +DEFINE(vp9_comp_tplist, offsetof(VP8_COMP, tplist)); +DEFINE(vp9_comp_common, offsetof(VP8_COMP, common)); DEFINE(tokenlist_start, offsetof(TOKENLIST, start)); DEFINE(tokenlist_stop, offsetof(TOKENLIST, stop)); DEFINE(TOKENLIST_SZ, sizeof(TOKENLIST)); -DEFINE(vp8_common_mb_rows, offsetof(VP8_COMMON, mb_rows)); +DEFINE(vp9_common_mb_rows, offsetof(VP8_COMMON, mb_rows)); END @@ -86,5 +86,5 @@ END #if HAVE_ARMV5TE ct_assert(TOKENEXTRA_SZ, sizeof(TOKENEXTRA) == 8) -ct_assert(vp8_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct) == 16) +ct_assert(vp9_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct) == 16) #endif diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c index 5fe47923802dd0b59da854b091fccb3cdefbb8ce..c50c59f2aa569335a346f1f9eec2e9df53763090 100644 --- a/vp8/encoder/bitstream.c +++ b/vp8/encoder/bitstream.c @@ -164,7 +164,7 @@ static void update_mbintra_mode_probs(VP8_COMP* const cpi, unsigned int bct [VP8_YMODES - 1] [2]; update_mode( - bc, VP8_YMODES, 
vp8_ymode_encodings, vp8_ymode_tree, + bc, VP8_YMODES, vp9_ymode_encodings, vp9_ymode_tree, Pnew, cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count ); } @@ -205,7 +205,7 @@ static void update_switchable_interp_probs(VP8_COMP *cpi, for (j = 0; j <= VP8_SWITCHABLE_FILTERS; ++j) { vp9_tree_probs_from_distribution( VP8_SWITCHABLE_FILTERS, - vp8_switchable_interp_encodings, vp8_switchable_interp_tree, + vp9_switchable_interp_encodings, vp9_switchable_interp_tree, pc->fc.switchable_interp_prob[j], branch_ct, cpi->switchable_interp_count[j], 256, 1); for (i = 0; i < VP8_SWITCHABLE_FILTERS - 1; ++i) { @@ -325,35 +325,35 @@ static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, } static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) { - vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m); + vp8_write_token(bc, vp9_ymode_tree, p, vp9_ymode_encodings + m); } static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) { - vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m); + vp8_write_token(bc, vp9_kf_ymode_tree, p, vp9_kf_ymode_encodings + m); } #if CONFIG_SUPERBLOCKS static void sb_kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) { - vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_sb_kf_ymode_encodings + m); + vp8_write_token(bc, vp9_uv_mode_tree, p, vp9_sb_kf_ymode_encodings + m); } #endif static void write_i8x8_mode(vp8_writer *bc, int m, const vp8_prob *p) { - vp8_write_token(bc, vp8_i8x8_mode_tree, p, vp8_i8x8_mode_encodings + m); + vp8_write_token(bc, vp9_i8x8_mode_tree, p, vp9_i8x8_mode_encodings + m); } static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) { - vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m); + vp8_write_token(bc, vp9_uv_mode_tree, p, vp9_uv_mode_encodings + m); } static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) { - vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m); + vp8_write_token(bc, vp9_bmode_tree, p, vp9_bmode_encodings + m); } static void write_split(vp8_writer *bc, int x, const vp8_prob *p) { vp8_write_token( - bc, vp8_mbsplit_tree, p, vp8_mbsplit_encodings + x + bc, vp9_mbsplit_tree, p, vp9_mbsplit_encodings + x ); } @@ -412,8 +412,8 @@ static void pack_mb_tokens(vp8_writer* const bc, while (p < stop) { const int t = p->Token; - vp8_token *const a = vp8_coef_encodings + t; - const vp8_extra_bit_struct *const b = vp8_extra_bits + t; + vp8_token *const a = vp9_coef_encodings + t; + const vp8_extra_bit_struct *const b = vp9_extra_bits + t; int i = 0; const unsigned char *pp = p->context_tree; int v = a->value; @@ -434,7 +434,7 @@ static void pack_mb_tokens(vp8_writer* const bc, do { const int bb = (v >> --n) & 1; split = 1 + (((range - 1) * pp[i >> 1]) >> 8); - i = vp8_coef_tree[i + bb]; + i = vp9_coef_tree[i + bb]; if (bb) { lowvalue += split; @@ -443,7 +443,7 @@ static void pack_mb_tokens(vp8_writer* const bc, range = split; } - shift = vp8_norm[range]; + shift = vp9_norm[range]; range <<= shift; count += shift; @@ -493,7 +493,7 @@ static void pack_mb_tokens(vp8_writer* const bc, range = split; } - shift = vp8_norm[range]; + shift = vp9_norm[range]; range <<= shift; count += shift; @@ -586,8 +586,8 @@ static void write_mv_ref #if CONFIG_DEBUG assert(NEARESTMV <= m && m <= SPLITMV); #endif - vp8_write_token(bc, vp8_mv_ref_tree, p, - vp8_mv_ref_encoding_array - NEARESTMV + m); + vp8_write_token(bc, vp9_mv_ref_tree, p, + vp9_mv_ref_encoding_array - NEARESTMV + m); } #if CONFIG_SUPERBLOCKS @@ -596,8 +596,8 @@ static void 
write_sb_mv_ref(vp8_writer *bc, MB_PREDICTION_MODE m, #if CONFIG_DEBUG assert(NEARESTMV <= m && m < SPLITMV); #endif - vp8_write_token(bc, vp8_sb_mv_ref_tree, p, - vp8_sb_mv_ref_encoding_array - NEARESTMV + m); + vp8_write_token(bc, vp9_sb_mv_ref_tree, p, + vp9_sb_mv_ref_encoding_array - NEARESTMV + m); } #endif @@ -608,8 +608,8 @@ static void write_sub_mv_ref #if CONFIG_DEBUG assert(LEFT4X4 <= m && m <= NEW4X4); #endif - vp8_write_token(bc, vp8_sub_mv_ref_tree, p, - vp8_sub_mv_ref_encoding_array - LEFT4X4 + m); + vp8_write_token(bc, vp9_sub_mv_ref_tree, p, + vp9_sub_mv_ref_encoding_array - LEFT4X4 + m); } static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref, @@ -1110,11 +1110,11 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) { if (mode >= NEARESTMV && mode <= SPLITMV) { if (cpi->common.mcomp_filter_type == SWITCHABLE) { - vp8_write_token(bc, vp8_switchable_interp_tree, + vp8_write_token(bc, vp9_switchable_interp_tree, vp9_get_pred_probs(&cpi->common, xd, PRED_SWITCHABLE_INTERP), - vp8_switchable_interp_encodings + - vp8_switchable_interp_map[mi->interp_filter]); + vp9_switchable_interp_encodings + + vp9_switchable_interp_map[mi->interp_filter]); } else { assert (mi->interp_filter == cpi->common.mcomp_filter_type); @@ -1207,7 +1207,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) { B_PREDICTION_MODE blockmode; int_mv blockmv; const int *const L = - vp8_mbsplits [mi->partitioning]; + vp9_mbsplits [mi->partitioning]; int k = -1; /* first block in subset j */ int mv_contz; int_mv leftmv, abovemv; @@ -1524,7 +1524,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, cpi->frame_coef_probs [i][j][k], cpi->frame_branch_ct [i][j][k], cpi->coef_counts [i][j][k], @@ -1544,7 +1544,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, cpi->frame_hybrid_coef_probs [i][j][k], cpi->frame_hybrid_branch_ct [i][j][k], cpi->hybrid_coef_counts [i][j][k], @@ -1570,7 +1570,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, cpi->frame_coef_probs_8x8 [i][j][k], cpi->frame_branch_ct_8x8 [i][j][k], cpi->coef_counts_8x8 [i][j][k], @@ -1594,7 +1594,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, cpi->frame_hybrid_coef_probs_8x8 [i][j][k], cpi->frame_hybrid_branch_ct_8x8 [i][j][k], cpi->hybrid_coef_counts_8x8 [i][j][k], @@ -1617,7 +1617,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, cpi->frame_coef_probs_16x16[i][j][k], cpi->frame_branch_ct_16x16[i][j][k], 
cpi->coef_counts_16x16[i][j][k], 256, 1); @@ -1636,7 +1636,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) { if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0))) continue; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, cpi->frame_hybrid_coef_probs_16x16[i][j][k], cpi->frame_hybrid_branch_ct_16x16[i][j][k], cpi->hybrid_coef_counts_16x16[i][j][k], 256, 1); @@ -1820,7 +1820,7 @@ static void decide_kf_ymode_entropy(VP8_COMP *cpi) { int i, j; for (i = 0; i < 8; i++) { - vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree); + vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp9_kf_ymode_tree); cost = 0; for (j = 0; j < VP8_YMODES; j++) { cost += mode_cost[j] * cpi->ymode_count[j]; @@ -2163,7 +2163,7 @@ void vp9_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, /* Only one filter is used. So set the filter at frame level */ for (i = 0; i < VP8_SWITCHABLE_FILTERS; ++i) { if (count[i]) { - pc->mcomp_filter_type = vp8_switchable_interp[i]; + pc->mcomp_filter_type = vp9_switchable_interp[i]; break; } } diff --git a/vp8/encoder/bitstream.h b/vp8/encoder/bitstream.h index 5156bf4e7ecd1c249705f11cd4aa6cd79268f769..87f0aa80b6bed8046f6c6eca3d029a91e1c0e257 100644 --- a/vp8/encoder/bitstream.h +++ b/vp8/encoder/bitstream.h @@ -12,16 +12,6 @@ #ifndef __INC_BITSTREAM_H #define __INC_BITSTREAM_H -#if HAVE_ARMV5TE -void vp8cx_pack_tokens_armv5(vp8_writer *w, const TOKENEXTRA *p, int xcount, - vp8_token *, - vp8_extra_bit_struct *, - const vp8_tree_index *); -# define pack_tokens(a,b,c) \ - vp8cx_pack_tokens_armv5(a,b,c,vp8_coef_encodings,vp8_extra_bits,vp8_coef_tree) -#else -# define pack_tokens(a,b,c) pack_tokens_c(a,b,c) -#endif -#endif - void vp9_update_skip_probs(VP8_COMP *cpi); + +#endif diff --git a/vp8/encoder/boolhuff.c b/vp8/encoder/boolhuff.c index ef03d8b143f7b70200ee628abe8087a95bff9563..1f885d8c5f7e43042272b8466d19eb547e014d16 100644 --- a/vp8/encoder/boolhuff.c +++ b/vp8/encoder/boolhuff.c @@ -20,7 +20,7 @@ unsigned __int64 Sectionbits[500]; unsigned int active_section = 0; #endif -const unsigned int vp8_prob_cost[256] = { +const unsigned int vp9_prob_cost[256] = { 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046, 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778, 767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625, diff --git a/vp8/encoder/boolhuff.h b/vp8/encoder/boolhuff.h index 8516aa219d56953b89f2ca5aa6480a6e516308f3..3fbe41ecc03e3c4d13dc5fbb13fa275db11c206c 100644 --- a/vp8/encoder/boolhuff.h +++ b/vp8/encoder/boolhuff.h @@ -38,7 +38,7 @@ extern void vp9_start_encode(BOOL_CODER *bc, unsigned char *buffer); extern void vp9_encode_value(BOOL_CODER *br, int data, int bits); extern void vp9_stop_encode(BOOL_CODER *bc); -extern const unsigned int vp8_prob_cost[256]; +extern const unsigned int vp9_prob_cost[256]; extern void vp9_encode_uniform(BOOL_CODER *bc, int v, int n); extern void vp9_encode_term_subexp(BOOL_CODER *bc, int v, int k, int n); @@ -46,7 +46,7 @@ extern int vp9_count_uniform(int v, int n); extern int vp9_count_term_subexp(int v, int k, int n); extern int vp9_recenter_nonneg(int v, int m); -DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); +DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]); static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) { @@ -60,9 +60,9 @@ static void 
vp8_encode_bool(BOOL_CODER *br, int bit, int probability) { #if defined(SECTIONBITS_OUTPUT) if (bit) - Sectionbits[active_section] += vp8_prob_cost[255 - probability]; + Sectionbits[active_section] += vp9_prob_cost[255 - probability]; else - Sectionbits[active_section] += vp8_prob_cost[probability]; + Sectionbits[active_section] += vp9_prob_cost[probability]; #endif #endif @@ -76,7 +76,7 @@ static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) { range = br->range - split; } - shift = vp8_norm[range]; + shift = vp9_norm[range]; range <<= shift; count += shift; diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c index b56b6113c0a84554cedf121008a32b0973b78c0f..18d782c5254d6818bfec5c0bb22694112498f43f 100644 --- a/vp8/encoder/dct.c +++ b/vp8/encoder/dct.c @@ -19,21 +19,21 @@ // TODO: these transforms can be converted into integer forms to reduce // the complexity -float dct_4[16] = { +static const float dct_4[16] = { 0.500000000000000, 0.500000000000000, 0.500000000000000, 0.500000000000000, 0.653281482438188, 0.270598050073099, -0.270598050073099, -0.653281482438188, 0.500000000000000, -0.500000000000000, -0.500000000000000, 0.500000000000000, 0.270598050073099, -0.653281482438188, 0.653281482438188, -0.270598050073099 }; -float adst_4[16] = { +static const float adst_4[16] = { 0.228013428883779, 0.428525073124360, 0.577350269189626, 0.656538502008139, 0.577350269189626, 0.577350269189626, 0.000000000000000, -0.577350269189626, 0.656538502008139, -0.228013428883779, -0.577350269189626, 0.428525073124359, 0.428525073124360, -0.656538502008139, 0.577350269189626, -0.228013428883779 }; -float dct_8[64] = { +static const float dct_8[64] = { 0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274, 0.490392640201615, 0.415734806151273, 0.277785116509801, 0.097545161008064, @@ -52,7 +52,7 @@ float dct_8[64] = { 0.490392640201615, -0.415734806151273, 0.277785116509801, -0.097545161008064 }; -float adst_8[64] = { +static const float adst_8[64] = { 0.089131608307533, 0.175227946595735, 0.255357107325376, 0.326790388032145, 0.387095214016349, 0.434217976756762, 0.466553967085785, 0.483002021635509, 0.255357107325376, 0.434217976756762, 0.483002021635509, 0.387095214016349, @@ -72,21 +72,21 @@ float adst_8[64] = { }; /* Converted the transforms to integers. 
*/ -const int16_t dct_i4[16] = { +static const int16_t dct_i4[16] = { 16384, 16384, 16384, 16384, 21407, 8867, -8867, -21407, 16384, -16384, -16384, 16384, 8867, -21407, 21407, -8867 }; -const int16_t adst_i4[16] = { +static const int16_t adst_i4[16] = { 7472, 14042, 18919, 21513, 18919, 18919, 0, -18919, 21513, -7472, -18919, 14042, 14042, -21513, 18919, -7472 }; -const int16_t dct_i8[64] = { +static const int16_t dct_i8[64] = { 11585, 11585, 11585, 11585, 11585, 11585, 11585, 11585, 16069, 13623, 9102, 3196, @@ -105,7 +105,7 @@ const int16_t dct_i8[64] = { 16069, -13623, 9102, -3196 }; -const int16_t adst_i8[64] = { +static const int16_t adst_i8[64] = { 2921, 5742, 8368, 10708, 12684, 14228, 15288, 15827, 8368, 14228, 15827, 12684, @@ -124,7 +124,7 @@ const int16_t adst_i8[64] = { 15288, -12684, 8368, -2921 }; -float dct_16[256] = { +static const float dct_16[256] = { 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.351851, 0.338330, 0.311806, 0.273300, 0.224292, 0.166664, 0.102631, 0.034654, @@ -159,7 +159,7 @@ float dct_16[256] = { 0.351851, -0.338330, 0.311806, -0.273300, 0.224292, -0.166664, 0.102631, -0.034654 }; -float adst_16[256] = { +static const float adst_16[256] = { 0.033094, 0.065889, 0.098087, 0.129396, 0.159534, 0.188227, 0.215215, 0.240255, 0.263118, 0.283599, 0.301511, 0.316693, 0.329007, 0.338341, 0.344612, 0.347761, 0.098087, 0.188227, 0.263118, 0.316693, 0.344612, 0.344612, 0.316693, 0.263118, @@ -195,7 +195,7 @@ float adst_16[256] = { }; /* Converted the transforms to integers. */ -const int16_t dct_i16[256] = { +static const int16_t dct_i16[256] = { 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 11529, 11086, 10217, 8955, 7350, 5461, 3363, 1136, @@ -230,7 +230,7 @@ const int16_t dct_i16[256] = { 11529, -11086, 10217, -8955, 7350, -5461, 3363, -1136 }; -const int16_t adst_i16[256] = { +static const int16_t adst_i16[256] = { 1084, 2159, 3214, 4240, 5228, 6168, 7052, 7873, 8622, 9293, 9880, 10377, 10781, 11087, 11292, 11395, 3214, 6168, 8622, 10377, 11292, 11292, 10377, 8622, @@ -542,7 +542,7 @@ void vp9_fht_float_c(const int16_t *input, int pitch, int16_t *output, float *pfb = &bufb[0]; // pointers to vertical and horizontal transforms - float *ptv, *pth; + const float *ptv, *pth; assert(tx_type != DCT_DCT); // load and convert residual array into floating-point diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c index 3cd45053335a24191176906f2b9f5baf4a7372dc..60cf60256fce015d76ad7402a8b3487b4d923de7 100644 --- a/vp8/encoder/encodeintra.c +++ b/vp8/encoder/encodeintra.c @@ -238,7 +238,7 @@ void vp9_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { int i, ib; for (i = 0; i < 4; i++) { - ib = vp8_i8x8_block[i]; + ib = vp9_i8x8_block[i]; vp9_encode_intra8x8(rtcd, x, ib); } } @@ -273,7 +273,7 @@ void vp9_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { BLOCKD *b; for (i = 0; i < 4; i++) { - ib = vp8_i8x8_block[i]; + ib = vp9_i8x8_block[i]; b = &x->e_mbd.block[ib]; mode = b->bmi.as_mode.first; #if CONFIG_COMP_INTRA_PRED diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c index a87fd7c2ec71186b47f910d7713b3fc9ecce0374..456468dca8a94f8f3fd36d6d44368dd048efd70f 100644 --- a/vp8/encoder/encodemb.c +++ b/vp8/encoder/encodemb.c @@ -303,8 +303,8 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type, switch (tx_size) { default: case TX_4X4: - scan = 
vp8_default_zig_zag1d; - bands = vp8_coef_bands; + scan = vp9_default_zig_zag1d; + bands = vp9_coef_bands; default_eob = 16; // TODO: this isn't called (for intra4x4 modes), but will be left in // since it could be used later @@ -313,25 +313,25 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type, if (tx_type != DCT_DCT) { switch (tx_type) { case ADST_DCT: - scan = vp8_row_scan; + scan = vp9_row_scan; break; case DCT_ADST: - scan = vp8_col_scan; + scan = vp9_col_scan; break; default: - scan = vp8_default_zig_zag1d; + scan = vp9_default_zig_zag1d; break; } } else { - scan = vp8_default_zig_zag1d; + scan = vp9_default_zig_zag1d; } } break; case TX_8X8: - scan = vp8_default_zig_zag1d_8x8; - bands = vp8_coef_bands_8x8; + scan = vp9_default_zig_zag1d_8x8; + bands = vp9_coef_bands_8x8; default_eob = 64; break; } @@ -372,11 +372,11 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type, /* Evaluate the first possibility for this state. */ rate0 = tokens[next][0].rate; rate1 = tokens[next][1].rate; - t0 = (vp8_dct_value_tokens_ptr + x)->Token; + t0 = (vp9_dct_value_tokens_ptr + x)->Token; /* Consider both possible successor states. */ if (next < default_eob) { band = bands[i + 1]; - pt = vp8_prev_token_class[t0]; + pt = vp9_prev_token_class[t0]; rate0 += mb->token_costs[tx_size][type][band][pt][tokens[next][0].token]; rate1 += @@ -385,7 +385,7 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type, UPDATE_RD_COST(); /* And pick the best. */ best = rd_cost1 < rd_cost0; - base_bits = *(vp8_dct_value_cost_ptr + x); + base_bits = *(vp9_dct_value_cost_ptr + x); dx = dqcoeff_ptr[rc] - coeff_ptr[rc]; d2 = dx * dx; tokens[i][0].rate = base_bits + (best ? rate1 : rate0); @@ -419,17 +419,17 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type, t1 = tokens[next][1].token == DCT_EOB_TOKEN ? DCT_EOB_TOKEN : ZERO_TOKEN; } else { - t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token; + t0 = t1 = (vp9_dct_value_tokens_ptr + x)->Token; } if (next < default_eob) { band = bands[i + 1]; if (t0 != DCT_EOB_TOKEN) { - pt = vp8_prev_token_class[t0]; + pt = vp9_prev_token_class[t0]; rate0 += mb->token_costs[tx_size][type][band][pt][ tokens[next][0].token]; } if (t1 != DCT_EOB_TOKEN) { - pt = vp8_prev_token_class[t1]; + pt = vp9_prev_token_class[t1]; rate1 += mb->token_costs[tx_size][type][band][pt][ tokens[next][1].token]; } @@ -438,7 +438,7 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type, UPDATE_RD_COST(); /* And pick the best. */ best = rd_cost1 < rd_cost0; - base_bits = *(vp8_dct_value_cost_ptr + x); + base_bits = *(vp9_dct_value_cost_ptr + x); if (shortcut) { dx -= (dequant_ptr[rc != 0] + sz) ^ sz; @@ -524,7 +524,7 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *xd, return; for (i = 0; i < bd->eob; i++) { - int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]]; + int coef = bd->dqcoeff[vp9_default_zig_zag1d[i]]; sum += (coef >= 0) ? 
coef : -coef; if (sum >= SUM_2ND_COEFF_THRESH) return; @@ -532,7 +532,7 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *xd, if (sum < SUM_2ND_COEFF_THRESH) { for (i = 0; i < bd->eob; i++) { - int rc = vp8_default_zig_zag1d[i]; + int rc = vp9_default_zig_zag1d[i]; bd->qcoeff[rc] = 0; bd->dqcoeff[rc] = 0; } @@ -594,15 +594,15 @@ void vp9_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { for (b = 0; b < 16; b++) { optimize_b(x, b, type, - ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4); + ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4); } if (has_2nd_order) { b = 24; optimize_b(x, b, PLANE_TYPE_Y2, - ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4); + ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4); check_reset_2nd_coeffs(&x->e_mbd, - ta + vp8_block2above[b], tl + vp8_block2left[b]); + ta + vp9_block2above[b], tl + vp9_block2left[b]); } } @@ -623,7 +623,7 @@ void vp9_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { for (b = 16; b < 24; b++) { optimize_b(x, b, PLANE_TYPE_UV, - ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4); + ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4); } } @@ -651,17 +651,17 @@ void vp9_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC; for (b = 0; b < 16; b += 4) { optimize_b(x, b, type, - ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b], + ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b], rtcd, TX_8X8); - ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]]; - tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]]; + ta[vp9_block2above_8x8[b] + 1] = ta[vp9_block2above_8x8[b]]; + tl[vp9_block2left_8x8[b] + 1] = tl[vp9_block2left_8x8[b]]; } // 8x8 always have 2nd roder haar block if (has_2nd_order) { check_reset_8x8_2nd_coeffs(&x->e_mbd, - ta + vp8_block2above_8x8[24], - tl + vp8_block2left_8x8[24]); + ta + vp9_block2above_8x8[24], + tl + vp9_block2left_8x8[24]); } } @@ -682,10 +682,10 @@ void vp9_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { for (b = 16; b < 24; b += 4) { optimize_b(x, b, PLANE_TYPE_UV, - ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b], + ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b], rtcd, TX_8X8); - ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]]; - tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]]; + ta[vp9_block2above_8x8[b] + 1] = ta[vp9_block2above_8x8[b]]; + tl[vp9_block2left_8x8[b] + 1] = tl[vp9_block2left_8x8[b]]; } } @@ -728,7 +728,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type, for (i = eob; i-- > 0;) { int base_bits, d2, dx; - rc = vp8_default_zig_zag1d_16x16[i]; + rc = vp9_default_zig_zag1d_16x16[i]; x = qcoeff_ptr[rc]; /* Only add a trellis state for non-zero coefficients. */ if (x) { @@ -738,18 +738,18 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type, /* Evaluate the first possibility for this state. */ rate0 = tokens[next][0].rate; rate1 = tokens[next][1].rate; - t0 = (vp8_dct_value_tokens_ptr + x)->Token; + t0 = (vp9_dct_value_tokens_ptr + x)->Token; /* Consider both possible successor states. 
*/ if (next < 256) { - band = vp8_coef_bands_16x16[i + 1]; - pt = vp8_prev_token_class[t0]; + band = vp9_coef_bands_16x16[i + 1]; + pt = vp9_prev_token_class[t0]; rate0 += mb->token_costs[TX_16X16][type][band][pt][tokens[next][0].token]; rate1 += mb->token_costs[TX_16X16][type][band][pt][tokens[next][1].token]; } UPDATE_RD_COST(); /* And pick the best. */ best = rd_cost1 < rd_cost0; - base_bits = *(vp8_dct_value_cost_ptr + x); + base_bits = *(vp9_dct_value_cost_ptr + x); dx = dqcoeff_ptr[rc] - coeff_ptr[rc]; d2 = dx*dx; tokens[i][0].rate = base_bits + (best ? rate1 : rate0); @@ -784,16 +784,16 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type, DCT_EOB_TOKEN : ZERO_TOKEN; } else - t0=t1 = (vp8_dct_value_tokens_ptr + x)->Token; + t0=t1 = (vp9_dct_value_tokens_ptr + x)->Token; if (next < 256) { - band = vp8_coef_bands_16x16[i + 1]; + band = vp9_coef_bands_16x16[i + 1]; if (t0 != DCT_EOB_TOKEN) { - pt = vp8_prev_token_class[t0]; + pt = vp9_prev_token_class[t0]; rate0 += mb->token_costs[TX_16X16][type][band][pt] [tokens[next][0].token]; } if (t1!=DCT_EOB_TOKEN) { - pt = vp8_prev_token_class[t1]; + pt = vp9_prev_token_class[t1]; rate1 += mb->token_costs[TX_16X16][type][band][pt] [tokens[next][1].token]; } @@ -801,7 +801,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type, UPDATE_RD_COST(); /* And pick the best. */ best = rd_cost1 < rd_cost0; - base_bits = *(vp8_dct_value_cost_ptr + x); + base_bits = *(vp9_dct_value_cost_ptr + x); if(shortcut) { dx -= (dequant_ptr[rc!=0] + sz) ^ sz; @@ -820,7 +820,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type, * add a new trellis node, but we do need to update the costs. */ else { - band = vp8_coef_bands_16x16[i + 1]; + band = vp9_coef_bands_16x16[i + 1]; t0 = tokens[next][0].token; t1 = tokens[next][1].token; /* Update the cost of each path if we're past the EOB token. */ @@ -837,7 +837,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type, } /* Now pick the best path through the whole trellis. 
*/ - band = vp8_coef_bands_16x16[i + 1]; + band = vp9_coef_bands_16x16[i + 1]; VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); rate0 = tokens[next][0].rate; rate1 = tokens[next][1].rate; @@ -855,7 +855,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type, x = tokens[i][best].qc; if (x) final_eob = i; - rc = vp8_default_zig_zag1d_16x16[i]; + rc = vp9_default_zig_zag1d_16x16[i]; qcoeff_ptr[rc] = x; dqcoeff_ptr[rc] = (x * dequant_ptr[rc!=0]); diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c index 970e89f57913b7f5159e23589b25dabc9436e3d9..972255459c56f8589f34e3be5e2cb7779c56872e 100644 --- a/vp8/encoder/encodemv.c +++ b/vp8/encoder/encodemv.c @@ -38,14 +38,14 @@ static void encode_nmv_component(vp8_writer* const bc, c = vp9_get_mv_class(z, &o); - vp8_write_token(bc, vp8_mv_class_tree, mvcomp->classes, - vp8_mv_class_encodings + c); + vp8_write_token(bc, vp9_mv_class_tree, mvcomp->classes, + vp9_mv_class_encodings + c); d = (o >> 3); /* int mv data */ if (c == MV_CLASS_0) { - vp8_write_token(bc, vp8_mv_class0_tree, mvcomp->class0, - vp8_mv_class0_encodings + d); + vp8_write_token(bc, vp9_mv_class0_tree, mvcomp->class0, + vp9_mv_class0_encodings + d); } else { int i, b; b = c + CLASS0_BITS - 1; /* number of bits */ @@ -72,11 +72,11 @@ static void encode_nmv_component_fp(vp8_writer *bc, /* Code the fractional pel bits */ if (c == MV_CLASS_0) { - vp8_write_token(bc, vp8_mv_fp_tree, mvcomp->class0_fp[d], - vp8_mv_fp_encodings + f); + vp8_write_token(bc, vp9_mv_fp_tree, mvcomp->class0_fp[d], + vp9_mv_fp_encodings + f); } else { - vp8_write_token(bc, vp8_mv_fp_tree, mvcomp->fp, - vp8_mv_fp_encodings + f); + vp8_write_token(bc, vp9_mv_fp_tree, mvcomp->fp, + vp9_mv_fp_encodings + f); } /* Code the high precision bit */ if (usehp) { @@ -99,16 +99,16 @@ static void build_nmv_component_cost_table(int *mvcost, sign_cost[0] = vp8_cost_zero(mvcomp->sign); sign_cost[1] = vp8_cost_one(mvcomp->sign); - vp9_cost_tokens(class_cost, mvcomp->classes, vp8_mv_class_tree); - vp9_cost_tokens(class0_cost, mvcomp->class0, vp8_mv_class0_tree); + vp9_cost_tokens(class_cost, mvcomp->classes, vp9_mv_class_tree); + vp9_cost_tokens(class0_cost, mvcomp->class0, vp9_mv_class0_tree); for (i = 0; i < MV_OFFSET_BITS; ++i) { bits_cost[i][0] = vp8_cost_zero(mvcomp->bits[i]); bits_cost[i][1] = vp8_cost_one(mvcomp->bits[i]); } for (i = 0; i < CLASS0_SIZE; ++i) - vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp8_mv_fp_tree); - vp9_cost_tokens(fp_cost, mvcomp->fp, vp8_mv_fp_tree); + vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp9_mv_fp_tree); + vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree); if (usehp) { class0_hp_cost[0] = vp8_cost_zero(mvcomp->class0_hp); @@ -511,8 +511,8 @@ void vp9_write_nmvprobs(VP8_COMP* const cpi, int usehp, vp8_writer* const bc) { void vp9_encode_nmv(vp8_writer* const bc, const MV* const mv, const MV* const ref, const nmv_context* const mvctx) { MV_JOINT_TYPE j = vp9_get_mv_joint(*mv); - vp8_write_token(bc, vp8_mv_joint_tree, mvctx->joints, - vp8_mv_joint_encodings + j); + vp8_write_token(bc, vp9_mv_joint_tree, mvctx->joints, + vp9_mv_joint_encodings + j); if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) { encode_nmv_component(bc, mv->row, ref->col, &mvctx->comps[0]); } @@ -541,7 +541,7 @@ void vp9_build_nmv_cost_table(int *mvjoint, int mvc_flag_v, int mvc_flag_h) { vp8_clear_system_state(); - vp9_cost_tokens(mvjoint, mvctx->joints, vp8_mv_joint_tree); + vp9_cost_tokens(mvjoint, mvctx->joints, vp9_mv_joint_tree); if (mvc_flag_v) 
build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp); if (mvc_flag_h) diff --git a/vp8/encoder/modecosts.c b/vp8/encoder/modecosts.c index 5111a81f7c7f328cf927a47c02b9d44d22f07c6c..523dfc2416856ba751577c334708732eb4f585c4 100644 --- a/vp8/encoder/modecosts.c +++ b/vp8/encoder/modecosts.c @@ -17,7 +17,7 @@ void vp9_init_mode_costs(VP8_COMP *c) { VP8_COMMON *x = &c->common; - const vp8_tree_p T = vp8_bmode_tree; + const vp8_tree_p T = vp9_bmode_tree; int i, j; for (i = 0; i < VP8_BINTRAMODES; i++) { @@ -29,21 +29,21 @@ void vp9_init_mode_costs(VP8_COMP *c) { vp9_cost_tokens((int *)c->mb.inter_bmode_costs, x->fc.bmode_prob, T); vp9_cost_tokens((int *)c->mb.inter_bmode_costs, - x->fc.sub_mv_ref_prob[0], vp8_sub_mv_ref_tree); + x->fc.sub_mv_ref_prob[0], vp9_sub_mv_ref_tree); - vp9_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree); + vp9_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp9_ymode_tree); vp9_cost_tokens(c->mb.mbmode_cost[0], x->kf_ymode_prob[c->common.kf_ymode_probs_index], - vp8_kf_ymode_tree); + vp9_kf_ymode_tree); vp9_cost_tokens(c->mb.intra_uv_mode_cost[1], - x->fc.uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree); + x->fc.uv_mode_prob[VP8_YMODES - 1], vp9_uv_mode_tree); vp9_cost_tokens(c->mb.intra_uv_mode_cost[0], - x->kf_uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree); + x->kf_uv_mode_prob[VP8_YMODES - 1], vp9_uv_mode_tree); vp9_cost_tokens(c->mb.i8x8_mode_costs, - x->fc.i8x8_mode_prob, vp8_i8x8_mode_tree); + x->fc.i8x8_mode_prob, vp9_i8x8_mode_tree); for (i = 0; i <= VP8_SWITCHABLE_FILTERS; ++i) vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i], x->fc.switchable_interp_prob[i], - vp8_switchable_interp_tree); + vp9_switchable_interp_tree); } diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c index 86b469fa40b5c00442746482019ed7dad3664c4c..bc17432d3777b1aedd89aa3c0f25a2ce7f375982 100644 --- a/vp8/encoder/onyx_if.c +++ b/vp8/encoder/onyx_if.c @@ -182,7 +182,7 @@ extern unsigned int inter_b_modes[B_MODE_COUNT]; extern void vp9_init_quantizer(VP8_COMP *cpi); -int vp8cx_base_skip_false_prob[QINDEX_RANGE][3]; +static int base_skip_false_prob[QINDEX_RANGE][3]; // Tables relating active max Q to active min Q static int kf_low_motion_minq[QINDEX_RANGE]; @@ -271,21 +271,21 @@ static void init_base_skip_probs(void) { skip_prob = 1; else if (skip_prob > 255) skip_prob = 255; - vp8cx_base_skip_false_prob[i][1] = skip_prob; + base_skip_false_prob[i][1] = skip_prob; skip_prob = t * 0.75; if (skip_prob < 1) skip_prob = 1; else if (skip_prob > 255) skip_prob = 255; - vp8cx_base_skip_false_prob[i][2] = skip_prob; + base_skip_false_prob[i][2] = skip_prob; skip_prob = t * 1.25; if (skip_prob < 1) skip_prob = 1; else if (skip_prob > 255) skip_prob = 255; - vp8cx_base_skip_false_prob[i][0] = skip_prob; + base_skip_false_prob[i][0] = skip_prob; } } @@ -1762,7 +1762,7 @@ VP8_PTR vp9_create_compressor(VP8_CONFIG *oxcf) { init_config((VP8_PTR)cpi, oxcf); - memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob)); + memcpy(cpi->base_skip_false_prob, base_skip_false_prob, sizeof(base_skip_false_prob)); cpi->common.current_video_frame = 0; cpi->kf_overspend_bits = 0; cpi->kf_bitrate_adjustment = 0; @@ -3484,7 +3484,7 @@ static void encode_frame_to_data_rate /* Mostly one filter is used. 
So set the filter at frame level */ for (i = 0; i < VP8_SWITCHABLE_FILTERS; ++i) { if (count[i]) { - cm->mcomp_filter_type = vp8_switchable_interp[i]; + cm->mcomp_filter_type = vp9_switchable_interp[i]; Loop = TRUE; /* Make sure to loop since the filter changed */ break; } diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c index 0bee9ec50f4eeb9cd3f4364f5e5e507c8798f64d..b5476b69890f1dc26e85c7184430b1c73354419c 100644 --- a/vp8/encoder/quantize.c +++ b/vp8/encoder/quantize.c @@ -40,15 +40,15 @@ void vp9_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) { switch (tx_type) { case ADST_DCT : - pt_scan = vp8_row_scan; + pt_scan = vp9_row_scan; break; case DCT_ADST : - pt_scan = vp8_col_scan; + pt_scan = vp9_col_scan; break; default : - pt_scan = vp8_default_zig_zag1d; + pt_scan = vp9_default_zig_zag1d; break; } @@ -106,7 +106,7 @@ void vp9_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) { eob = -1; for (i = 0; i < b->eob_max_offset; i++) { - rc = vp8_default_zig_zag1d[i]; + rc = vp9_default_zig_zag1d[i]; z = coeff_ptr[rc]; zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value; @@ -179,7 +179,7 @@ void vp9_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) { eob = -1; for (i = 0; i < b->eob_max_offset_8x8; i++) { - rc = vp8_default_zig_zag1d[i]; + rc = vp9_default_zig_zag1d[i]; z = coeff_ptr[rc]; zbin_boost_ptr = &b->zrun_zbin_boost[zbin_zrun_index]; @@ -228,7 +228,7 @@ void vp9_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) { eob = -1; for (i = 0; i < b->eob_max_offset_8x8; i++) { - rc = vp8_default_zig_zag1d_8x8[i]; + rc = vp9_default_zig_zag1d_8x8[i]; z = coeff_ptr[rc]; zbin = (zbin_ptr[rc != 0] + *zbin_boost_ptr + zbin_oq_value); @@ -318,7 +318,7 @@ void vp9_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) { eob = -1; for (i = 0; i < b->eob_max_offset_16x16; i++) { - rc = vp8_default_zig_zag1d_16x16[i]; + rc = vp9_default_zig_zag1d_16x16[i]; z = coeff_ptr[rc]; zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value); @@ -460,7 +460,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) { // all the 4x4 ac values =; for (i = 1; i < 16; i++) { - int rc = vp8_default_zig_zag1d[i]; + int rc = vp9_default_zig_zag1d[i]; quant_val = vp9_ac_yquant(Q); invert_quant(cpi->Y1quant[Q] + rc, @@ -494,7 +494,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) { // This needs cleaning up for 8x8 especially if we are to add // support for non flat Q matices for (i = 1; i < 64; i++) { - int rc = vp8_default_zig_zag1d_8x8[i]; + int rc = vp9_default_zig_zag1d_8x8[i]; quant_val = vp9_ac_yquant(Q); cpi->Y1zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7; @@ -514,7 +514,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) { // 16x16 structures. Same comment above applies. 
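/*
 * A minimal sketch (not part of the patch) of the fill pattern vp9_init_quantizer()
 * uses below: walk the coefficients in scan order, map each position i through the
 * zig-zag table to its raster index rc, and store the rounded zero-bin value
 * ((factor * quant) + 64) >> 7 at rc. The function and parameter names here are
 * illustrative stand-ins, not symbols from the tree.
 */
static void fill_zbin_in_scan_order(const int *scan, int n, int quant_val,
                                    int qzbin_factor, short *zbin) {
  int i;
  for (i = 0; i < n; i++) {
    const int rc = scan[i];  /* scan position -> raster coefficient index */
    zbin[rc] = (short)(((qzbin_factor * quant_val) + 64) >> 7);
  }
}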
for (i = 1; i < 256; i++) { - int rc = vp8_default_zig_zag1d_16x16[i]; + int rc = vp9_default_zig_zag1d_16x16[i]; quant_val = vp9_ac_yquant(Q); cpi->Y1zbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7; diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c index 8197bfd78bf4958da7899ae5baae1f3fbf9579b8..3191ab802d664cbd0dd00add85f64986b1b72344 100644 --- a/vp8/encoder/ratectrl.c +++ b/vp8/encoder/ratectrl.c @@ -28,9 +28,6 @@ #define MIN_BPB_FACTOR 0.005 #define MAX_BPB_FACTOR 50 -extern const MODE_DEFINITION vp8_mode_order[MAX_MODES]; - - #ifdef MODE_STATS extern unsigned int y_modes[VP8_YMODES]; extern unsigned int uv_modes[VP8_UV_MODES]; diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c index aa955baa5a9b0b22629c68508705da7a5b9322c0..02076b28059997ea42303aec65f3db3101c9f93d 100644 --- a/vp8/encoder/rdopt.c +++ b/vp8/encoder/rdopt.c @@ -83,7 +83,7 @@ static const int auto_speed_thresh[17] = { }; #if CONFIG_PRED_FILTER -const MODE_DEFINITION vp8_mode_order[MAX_MODES] = { +const MODE_DEFINITION vp9_mode_order[MAX_MODES] = { {ZEROMV, LAST_FRAME, 0, 0}, {ZEROMV, LAST_FRAME, 0, 1}, {DC_PRED, INTRA_FRAME, 0, 0}, @@ -155,7 +155,7 @@ const MODE_DEFINITION vp8_mode_order[MAX_MODES] = { {SPLITMV, GOLDEN_FRAME, ALTREF_FRAME, 0} }; #else -const MODE_DEFINITION vp8_mode_order[MAX_MODES] = { +const MODE_DEFINITION vp9_mode_order[MAX_MODES] = { {ZEROMV, LAST_FRAME, 0}, {DC_PRED, INTRA_FRAME, 0}, @@ -228,11 +228,11 @@ static void fill_token_costs( if (k == 0 && ((j > 0 && i > 0) || (j > 1 && i == 0))) vp9_cost_tokens_skip((int *)(c[i][j][k]), p[i][j][k], - vp8_coef_tree); + vp9_coef_tree); else vp9_cost_tokens((int *)(c[i][j][k]), p[i][j][k], - vp8_coef_tree); + vp9_coef_tree); } } @@ -553,15 +553,15 @@ static int cost_coeffs_2x2(MACROBLOCK *mb, assert(eob <= 4); for (; c < eob; c++) { - int v = qcoeff_ptr[vp8_default_zig_zag1d[c]]; - int t = vp8_dct_value_tokens_ptr[v].Token; - cost += mb->token_costs[TX_8X8][type][vp8_coef_bands[c]][pt][t]; - cost += vp8_dct_value_cost_ptr[v]; - pt = vp8_prev_token_class[t]; + int v = qcoeff_ptr[vp9_default_zig_zag1d[c]]; + int t = vp9_dct_value_tokens_ptr[v].Token; + cost += mb->token_costs[TX_8X8][type][vp9_coef_bands[c]][pt][t]; + cost += vp9_dct_value_cost_ptr[v]; + pt = vp9_prev_token_class[t]; } if (c < 4) - cost += mb->token_costs[TX_8X8][type][vp8_coef_bands[c]] + cost += mb->token_costs[TX_8X8][type][vp9_coef_bands[c]] [pt] [DCT_EOB_TOKEN]; pt = (c != !type); // is eob first coefficient; @@ -585,23 +585,23 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type, switch (tx_size) { case TX_4X4: - scan = vp8_default_zig_zag1d; - band = vp8_coef_bands; + scan = vp9_default_zig_zag1d; + band = vp9_coef_bands; default_eob = 16; if (type == PLANE_TYPE_Y_WITH_DC) { tx_type = get_tx_type_4x4(xd, b); if (tx_type != DCT_DCT) { switch (tx_type) { case ADST_DCT: - scan = vp8_row_scan; + scan = vp9_row_scan; break; case DCT_ADST: - scan = vp8_col_scan; + scan = vp9_col_scan; break; default: - scan = vp8_default_zig_zag1d; + scan = vp9_default_zig_zag1d; break; } } @@ -609,8 +609,8 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type, break; case TX_8X8: - scan = vp8_default_zig_zag1d_8x8; - band = vp8_coef_bands_8x8; + scan = vp9_default_zig_zag1d_8x8; + band = vp9_coef_bands_8x8; default_eob = 64; if (type == PLANE_TYPE_Y_WITH_DC) { BLOCKD *bb; @@ -623,8 +623,8 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type, } break; case TX_16X16: - scan = vp8_default_zig_zag1d_16x16; - band = vp8_coef_bands_16x16; + 
scan = vp9_default_zig_zag1d_16x16; + band = vp9_coef_bands_16x16; default_eob = 256; if (type == PLANE_TYPE_Y_WITH_DC) { tx_type = get_tx_type_16x16(xd, b); @@ -643,10 +643,10 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type, if (tx_type != DCT_DCT) { for (; c < eob; c++) { int v = qcoeff_ptr[scan[c]]; - int t = vp8_dct_value_tokens_ptr[v].Token; + int t = vp9_dct_value_tokens_ptr[v].Token; cost += mb->hybrid_token_costs[tx_size][type][band[c]][pt][t]; - cost += vp8_dct_value_cost_ptr[v]; - pt = vp8_prev_token_class[t]; + cost += vp9_dct_value_cost_ptr[v]; + pt = vp9_prev_token_class[t]; } if (c < seg_eob) cost += mb->hybrid_token_costs[tx_size][type][band[c]] @@ -654,10 +654,10 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type, } else { for (; c < eob; c++) { int v = qcoeff_ptr[scan[c]]; - int t = vp8_dct_value_tokens_ptr[v].Token; + int t = vp9_dct_value_tokens_ptr[v].Token; cost += mb->token_costs[tx_size][type][band[c]][pt][t]; - cost += vp8_dct_value_cost_ptr[v]; - pt = vp8_prev_token_class[t]; + cost += vp9_dct_value_cost_ptr[v]; + pt = vp9_prev_token_class[t]; } if (c < seg_eob) cost += mb->token_costs[tx_size][type][band[c]] @@ -685,11 +685,11 @@ static int vp8_rdcost_mby(MACROBLOCK *mb) { for (b = 0; b < 16; b++) cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_NO_DC, - ta + vp8_block2above[b], tl + vp8_block2left[b], + ta + vp9_block2above[b], tl + vp9_block2left[b], TX_4X4); cost += cost_coeffs(mb, xd->block + 24, PLANE_TYPE_Y2, - ta + vp8_block2above[24], tl + vp8_block2left[24], + ta + vp9_block2above[24], tl + vp9_block2left[24], TX_4X4); return cost; @@ -761,11 +761,11 @@ static int vp8_rdcost_mby_8x8(MACROBLOCK *mb, int backup) { for (b = 0; b < 16; b += 4) cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_NO_DC, - ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b], + ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b], TX_8X8); cost += cost_coeffs_2x2(mb, xd->block + 24, PLANE_TYPE_Y2, - ta + vp8_block2above[24], tl + vp8_block2left[24]); + ta + vp9_block2above[24], tl + vp9_block2left[24]); return cost; } @@ -1221,8 +1221,8 @@ static int64_t rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rat #if CONFIG_COMP_INTRA_PRED & best_second_mode, allow_comp, #endif - bmode_costs, ta + vp8_block2above[i], - tl + vp8_block2left[i], &r, &ry, &d); + bmode_costs, ta + vp9_block2above[i], + tl + vp9_block2left[i], &r, &ry, &d); cost += r; distortion += d; @@ -1448,8 +1448,8 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib, // compute quantization mse of 8x8 block distortion = vp9_block_error_c((x->block + idx)->coeff, (xd->block + idx)->dqcoeff, 64); - ta0 = a[vp8_block2above_8x8[idx]]; - tl0 = l[vp8_block2left_8x8[idx]]; + ta0 = a[vp9_block2above_8x8[idx]]; + tl0 = l[vp9_block2left_8x8[idx]]; rate_t = cost_coeffs(x, xd->block + idx, PLANE_TYPE_Y_WITH_DC, &ta0, &tl0, TX_8X8); @@ -1475,10 +1475,10 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib, distortion += vp9_block_error_c((x->block + ib + 5)->coeff, (xd->block + ib + 5)->dqcoeff, 16); - ta0 = a[vp8_block2above[ib]]; - ta1 = a[vp8_block2above[ib + 1]]; - tl0 = l[vp8_block2left[ib]]; - tl1 = l[vp8_block2left[ib + 4]]; + ta0 = a[vp9_block2above[ib]]; + ta1 = a[vp9_block2above[ib + 1]]; + tl0 = l[vp9_block2left[ib]]; + tl1 = l[vp9_block2left[ib + 4]]; rate_t = cost_coeffs(x, xd->block + ib, PLANE_TYPE_Y_WITH_DC, &ta0, &tl0, TX_4X4); rate_t += cost_coeffs(x, xd->block + ib + 1, PLANE_TYPE_Y_WITH_DC, @@ -1520,15 
+1520,15 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib, vp9_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib); if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) { - a[vp8_block2above_8x8[idx]] = besta0; - a[vp8_block2above_8x8[idx] + 1] = besta1; - l[vp8_block2left_8x8[idx]] = bestl0; - l[vp8_block2left_8x8[idx] + 1] = bestl1; + a[vp9_block2above_8x8[idx]] = besta0; + a[vp9_block2above_8x8[idx] + 1] = besta1; + l[vp9_block2left_8x8[idx]] = bestl0; + l[vp9_block2left_8x8[idx] + 1] = bestl1; } else { - a[vp8_block2above[ib]] = besta0; - a[vp8_block2above[ib + 1]] = besta1; - l[vp8_block2left[ib]] = bestl0; - l[vp8_block2left[ib + 4]] = bestl1; + a[vp9_block2above[ib]] = besta0; + a[vp9_block2above[ib + 1]] = besta1; + l[vp9_block2left[ib]] = bestl0; + l[vp9_block2left[ib + 4]] = bestl1; } return best_rd; @@ -1564,7 +1564,7 @@ static int64_t rd_pick_intra8x8mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, #endif int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d); - ib = vp8_i8x8_block[i]; + ib = vp9_i8x8_block[i]; total_rd += rd_pick_intra8x8block( cpi, mb, ib, &best_mode, #if CONFIG_COMP_INTRA_PRED @@ -1600,7 +1600,7 @@ static int rd_cost_mbuv(MACROBLOCK *mb) { for (b = 16; b < 24; b++) cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV, - ta + vp8_block2above[b], tl + vp8_block2left[b], + ta + vp9_block2above[b], tl + vp9_block2left[b], TX_4X4); return cost; @@ -1642,8 +1642,8 @@ static int rd_cost_mbuv_8x8(MACROBLOCK *mb, int backup) { for (b = 16; b < 24; b += 4) cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV, - ta + vp8_block2above_8x8[b], - tl + vp8_block2left_8x8[b], TX_8X8); + ta + vp9_block2above_8x8[b], + tl + vp9_block2left_8x8[b], TX_8X8); return cost; } @@ -1959,8 +1959,8 @@ int vp9_cost_mv_ref(VP8_COMP *cpi, vp8_prob p [VP8_MVREFS - 1]; assert(NEARESTMV <= m && m <= SPLITMV); vp9_mv_ref_probs(pc, p, near_mv_ref_ct); - return vp8_cost_token(vp8_mv_ref_tree, p, - vp8_mv_ref_encoding_array - NEARESTMV + m); + return vp8_cost_token(vp9_mv_ref_tree, p, + vp9_mv_ref_encoding_array - NEARESTMV + m); } else return 0; } @@ -2101,8 +2101,8 @@ static int64_t encode_inter_mb_segment(MACROBLOCK *x, thisdistortion = vp9_block_error(be->coeff, bd->dqcoeff, 16); *distortion += thisdistortion; *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC, - ta + vp8_block2above[i], - tl + vp8_block2left[i], TX_4X4); + ta + vp9_block2above[i], + tl + vp9_block2left[i], TX_4X4); } } *distortion >>= 2; @@ -2134,7 +2134,7 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x, *distortion = 0; *labelyrate = 0; for (i = 0; i < 4; i++) { - int ib = vp8_i8x8_block[i]; + int ib = vp9_i8x8_block[i]; if (labels[ib] == which_label) { int idx = (ib & 8) + ((ib & 2) << 1); @@ -2154,8 +2154,8 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x, thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64); otherdist += thisdistortion; othercost += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC, - tacp + vp8_block2above_8x8[idx], - tlcp + vp8_block2left_8x8[idx], TX_8X8); + tacp + vp9_block2above_8x8[idx], + tlcp + vp9_block2left_8x8[idx], TX_8X8); } for (j = 0; j < 4; j += 2) { bd = &xd->block[ib + iblock[j]]; @@ -2165,12 +2165,12 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x, thisdistortion = vp9_block_error_c(be->coeff, bd->dqcoeff, 32); *distortion += thisdistortion; *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC, - ta + vp8_block2above[ib + iblock[j]], - tl + vp8_block2left[ib + iblock[j]], + ta + vp9_block2above[ib + 
iblock[j]], + tl + vp9_block2left[ib + iblock[j]], TX_4X4); *labelyrate += cost_coeffs(x, bd + 1, PLANE_TYPE_Y_WITH_DC, - ta + vp8_block2above[ib + iblock[j] + 1], - tl + vp8_block2left[ib + iblock[j]], + ta + vp9_block2above[ib + iblock[j] + 1], + tl + vp9_block2left[ib + iblock[j]], TX_4X4); } } else /* 8x8 */ { @@ -2183,12 +2183,12 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x, thisdistortion = vp9_block_error_c(be3->coeff, bd3->dqcoeff, 32); otherdist += thisdistortion; othercost += cost_coeffs(x, bd3, PLANE_TYPE_Y_WITH_DC, - tacp + vp8_block2above[ib + iblock[j]], - tlcp + vp8_block2left[ib + iblock[j]], + tacp + vp9_block2above[ib + iblock[j]], + tlcp + vp9_block2left[ib + iblock[j]], TX_4X4); othercost += cost_coeffs(x, bd3 + 1, PLANE_TYPE_Y_WITH_DC, - tacp + vp8_block2above[ib + iblock[j] + 1], - tlcp + vp8_block2left[ib + iblock[j]], + tacp + vp9_block2above[ib + iblock[j] + 1], + tlcp + vp9_block2left[ib + iblock[j]], TX_4X4); } } @@ -2197,8 +2197,8 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x, thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64); *distortion += thisdistortion; *labelyrate += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC, - ta + vp8_block2above_8x8[idx], - tl + vp8_block2left_8x8[idx], TX_8X8); + ta + vp9_block2above_8x8[idx], + tl + vp9_block2left_8x8[idx], TX_8X8); } } } @@ -2283,8 +2283,8 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x, tl_b = (ENTROPY_CONTEXT *)&t_left_b; v_fn_ptr = &cpi->fn_ptr[segmentation]; - labels = vp8_mbsplits[segmentation]; - label_count = vp8_mbsplit_count[segmentation]; + labels = vp9_mbsplits[segmentation]; + label_count = vp9_mbsplit_count[segmentation]; // 64 makes this threshold really big effectively // making it so that we very rarely check mvs on @@ -2293,8 +2293,8 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x, label_mv_thresh = 1 * bsi->mvthresh / label_count; // Segmentation method overheads - rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, - vp8_mbsplit_encodings + segmentation); + rate = vp8_cost_token(vp9_mbsplit_tree, vp9_mbsplit_probs, + vp9_mbsplit_encodings + segmentation); rate += vp9_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts); this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0); br += rate; @@ -2365,7 +2365,7 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x, mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3; // find first label - n = vp8_mbsplit_offset[segmentation][i]; + n = vp9_mbsplit_offset[segmentation][i]; c = &x->block[n]; e = &x->e_mbd.block[n]; @@ -2457,7 +2457,7 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x, best_eobs[j] = x->e_mbd.block[j].eob; } else { for (j = 0; j < 4; j++) { - int ib = vp8_i8x8_block[j], idx = j * 4; + int ib = vp9_i8x8_block[j], idx = j * 4; if (labels[ib] == i) best_eobs[idx] = x->e_mbd.block[idx].eob; @@ -2521,7 +2521,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, /* 16 = n_blocks */ int_mv seg_mvs[16][MAX_REF_FRAMES - 1], int64_t txfm_cache[NB_TXFM_MODES]) { - int i, n, c = vp8_mbsplit_count[segmentation]; + int i, n, c = vp9_mbsplit_count[segmentation]; if (segmentation == PARTITIONING_4X4) { int64_t rd[16]; @@ -2750,12 +2750,12 @@ static int rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, /* save partitions */ mbmi->txfm_size = bsi.txfm_size; mbmi->partitioning = bsi.segment_num; - x->partition_info->count = vp8_mbsplit_count[bsi.segment_num]; + x->partition_info->count = vp9_mbsplit_count[bsi.segment_num]; for (i = 0; i < 
x->partition_info->count; i++) { int j; - j = vp8_mbsplit_offset[bsi.segment_num][i]; + j = vp9_mbsplit_offset[bsi.segment_num][i]; x->partition_info->bmi[i].mode = bsi.modes[j]; x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv; @@ -3021,7 +3021,7 @@ static void set_i8x8_block_modes(MACROBLOCK *x, int modes[2][4]) { int i; MACROBLOCKD *xd = &x->e_mbd; for (i = 0; i < 4; i++) { - int ib = vp8_i8x8_block[i]; + int ib = vp9_i8x8_block[i]; xd->mode_info_context->bmi[ib + 0].as_mode.first = modes[0][i]; xd->mode_info_context->bmi[ib + 1].as_mode.first = modes[0][i]; xd->mode_info_context->bmi[ib + 4].as_mode.first = modes[0][i]; @@ -3402,13 +3402,13 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, #if CONFIG_PRED_FILTER // Filtered prediction: - mbmi->pred_filter_enabled = vp8_mode_order[mode_index].pred_filter_flag; + mbmi->pred_filter_enabled = vp9_mode_order[mode_index].pred_filter_flag; *rate2 += vp8_cost_bit(cpi->common.prob_pred_filter_off, mbmi->pred_filter_enabled); #endif if (cpi->common.mcomp_filter_type == SWITCHABLE) { const int c = vp9_get_pred_context(cm, xd, PRED_SWITCHABLE_INTERP); - const int m = vp8_switchable_interp_map[mbmi->interp_filter]; + const int m = vp9_switchable_interp_map[mbmi->interp_filter]; *rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m]; } @@ -3668,18 +3668,18 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, rate_y = 0; rate_uv = 0; - this_mode = vp8_mode_order[mode_index].mode; + this_mode = vp9_mode_order[mode_index].mode; mbmi->mode = this_mode; mbmi->uv_mode = DC_PRED; - mbmi->ref_frame = vp8_mode_order[mode_index].ref_frame; - mbmi->second_ref_frame = vp8_mode_order[mode_index].second_ref_frame; + mbmi->ref_frame = vp9_mode_order[mode_index].ref_frame; + mbmi->second_ref_frame = vp9_mode_order[mode_index].second_ref_frame; #if CONFIG_PRED_FILTER mbmi->pred_filter_enabled = 0; #endif if (cpi->common.mcomp_filter_type == SWITCHABLE && this_mode >= NEARESTMV && this_mode <= SPLITMV) { mbmi->interp_filter = - vp8_switchable_interp[switchable_filter_index++]; + vp9_switchable_interp[switchable_filter_index++]; if (switchable_filter_index == VP8_SWITCHABLE_FILTERS) switchable_filter_index = 0; } else { @@ -3747,15 +3747,15 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, // Experimental code. Special case for gf and arf zeromv modes. 
// Increase zbin size to suppress noise if (cpi->zbin_mode_boost_enabled) { - if (vp8_mode_order[mode_index].ref_frame == INTRA_FRAME) + if (vp9_mode_order[mode_index].ref_frame == INTRA_FRAME) cpi->zbin_mode_boost = 0; else { - if (vp8_mode_order[mode_index].mode == ZEROMV) { - if (vp8_mode_order[mode_index].ref_frame != LAST_FRAME) + if (vp9_mode_order[mode_index].mode == ZEROMV) { + if (vp9_mode_order[mode_index].ref_frame != LAST_FRAME) cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; else cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; - } else if (vp8_mode_order[mode_index].mode == SPLITMV) + } else if (vp9_mode_order[mode_index].mode == SPLITMV) cpi->zbin_mode_boost = 0; else cpi->zbin_mode_boost = MV_ZBIN_BOOST; @@ -3946,7 +3946,7 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, if (cpi->common.mcomp_filter_type == SWITCHABLE) rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs [vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)] - [vp8_switchable_interp_map[mbmi->interp_filter]]; + [vp9_switchable_interp_map[mbmi->interp_filter]]; // If even the 'Y' rd value of split is higher than best so far // then dont bother looking at UV if (tmp_rd < best_yrd) { @@ -4183,7 +4183,7 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, best_mbmode.mode <= SPLITMV) { ++cpi->switchable_interp_count [vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)] - [vp8_switchable_interp_map[best_mbmode.interp_filter]]; + [vp9_switchable_interp_map[best_mbmode.interp_filter]]; } // Reduce the activation RD thresholds for the best choice mode @@ -4530,10 +4530,10 @@ int64_t vp9_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x, continue; } - this_mode = vp8_mode_order[mode_index].mode; - ref_frame = vp8_mode_order[mode_index].ref_frame; + this_mode = vp9_mode_order[mode_index].mode; + ref_frame = vp9_mode_order[mode_index].ref_frame; mbmi->ref_frame = ref_frame; - comp_pred = vp8_mode_order[mode_index].second_ref_frame != INTRA_FRAME; + comp_pred = vp9_mode_order[mode_index].second_ref_frame != INTRA_FRAME; mbmi->mode = this_mode; mbmi->uv_mode = DC_PRED; #if CONFIG_COMP_INTRA_PRED diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c index 9482b5f473206aeadd901a31fb5ce9326c26f7e5..1a30deb95e16e791f6d9f7b5b784aa31e7562e8f 100644 --- a/vp8/encoder/tokenize.c +++ b/vp8/encoder/tokenize.c @@ -52,14 +52,14 @@ void vp9_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run); void vp9_fix_contexts(MACROBLOCKD *xd); static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2]; -const TOKENVALUE *vp8_dct_value_tokens_ptr; +const TOKENVALUE *vp9_dct_value_tokens_ptr; static int dct_value_cost[DCT_MAX_VALUE * 2]; -const int *vp8_dct_value_cost_ptr; +const int *vp9_dct_value_cost_ptr; static void fill_value_tokens() { TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE; - vp8_extra_bit_struct *const e = vp8_extra_bits; + vp8_extra_bit_struct *const e = vp9_extra_bits; int i = -DCT_MAX_VALUE; int sign = 1; @@ -88,7 +88,7 @@ static void fill_value_tokens() { // initialize the cost for extra bits for all possible coefficient value. 
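/*
 * A rough sketch (illustrative, not from the patch) of how the loop below can
 * accumulate the extra-bit cost of a coefficient token: each magnitude bit is
 * charged the cost of its branch probability from a 256-entry vp9_prob_cost-style
 * table of 1/256th-bit costs, assuming the usual cost_one(p) == cost_zero(255 - p)
 * convention. The helper name and its parameters are hypothetical.
 */
static int cost_extra_bits(const int *prob_cost, const unsigned char *probs,
                           int num_bits, int bits) {
  int cost = 0, b;
  for (b = 0; b < num_bits; b++) {
    const int bit = (bits >> (num_bits - 1 - b)) & 1;  /* most significant bit first */
    cost += prob_cost[bit ? 255 - probs[b] : probs[b]];
  }
  return cost;
}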
{ int cost = 0; - vp8_extra_bit_struct *p = vp8_extra_bits + t[i].Token; + vp8_extra_bit_struct *p = vp9_extra_bits + t[i].Token; if (p->base_val) { const int extra = t[i].Extra; @@ -105,8 +105,8 @@ static void fill_value_tokens() { } while (++i < DCT_MAX_VALUE); - vp8_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE; - vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE; + vp9_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE; + vp9_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE; } static void tokenize_b(VP8_COMP *cpi, @@ -136,15 +136,15 @@ static void tokenize_b(VP8_COMP *cpi, default: case TX_4X4: seg_eob = 16; - bands = vp8_coef_bands; - scan = vp8_default_zig_zag1d; + bands = vp9_coef_bands; + scan = vp9_default_zig_zag1d; if (tx_type != DCT_DCT) { counts = cpi->hybrid_coef_counts; probs = cpi->common.fc.hybrid_coef_probs; if (tx_type == ADST_DCT) { - scan = vp8_row_scan; + scan = vp9_row_scan; } else if (tx_type == DCT_ADST) { - scan = vp8_col_scan; + scan = vp9_col_scan; } } else { counts = cpi->coef_counts; @@ -154,12 +154,12 @@ static void tokenize_b(VP8_COMP *cpi, case TX_8X8: if (type == PLANE_TYPE_Y2) { seg_eob = 4; - bands = vp8_coef_bands; - scan = vp8_default_zig_zag1d; + bands = vp9_coef_bands; + scan = vp9_default_zig_zag1d; } else { seg_eob = 64; - bands = vp8_coef_bands_8x8; - scan = vp8_default_zig_zag1d_8x8; + bands = vp9_coef_bands_8x8; + scan = vp9_default_zig_zag1d_8x8; } if (tx_type != DCT_DCT) { counts = cpi->hybrid_coef_counts_8x8; @@ -171,8 +171,8 @@ static void tokenize_b(VP8_COMP *cpi, break; case TX_16X16: seg_eob = 256; - bands = vp8_coef_bands_16x16; - scan = vp8_default_zig_zag1d_16x16; + bands = vp9_coef_bands_16x16; + scan = vp9_default_zig_zag1d_16x16; if (tx_type != DCT_DCT) { counts = cpi->hybrid_coef_counts_16x16; probs = cpi->common.fc.hybrid_coef_probs_16x16; @@ -196,8 +196,8 @@ static void tokenize_b(VP8_COMP *cpi, assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE); - t->Extra = vp8_dct_value_tokens_ptr[v].Extra; - token = vp8_dct_value_tokens_ptr[v].Token; + t->Extra = vp9_dct_value_tokens_ptr[v].Extra; + token = vp9_dct_value_tokens_ptr[v].Token; } else { token = DCT_EOB_TOKEN; } @@ -206,11 +206,11 @@ static void tokenize_b(VP8_COMP *cpi, t->context_tree = probs[type][band][pt]; t->skip_eob_node = (pt == 0) && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) || (band > 1 && type == PLANE_TYPE_Y_NO_DC)); - assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0); + assert(vp9_coef_encodings[t->Token].Len - t->skip_eob_node > 0); if (!dry_run) { ++counts[type][band][pt][token]; } - pt = vp8_prev_token_class[token]; + pt = vp9_prev_token_class[token]; ++t; } while (c < eob && ++c < seg_eob); @@ -352,11 +352,11 @@ void vp9_tokenize_mb(VP8_COMP *cpi, if (has_y2_block) { if (tx_size == TX_8X8) { tokenize_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, - A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24], + A + vp9_block2above_8x8[24], L + vp9_block2left_8x8[24], TX_8X8, dry_run); } else { tokenize_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, - A + vp8_block2above[24], L + vp8_block2left[24], + A + vp9_block2above[24], L + vp9_block2left[24], TX_4X4, dry_run); } @@ -372,47 +372,47 @@ void vp9_tokenize_mb(VP8_COMP *cpi, for (b = 16; b < 24; b += 4) { tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, - A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], + A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b], TX_8X8, dry_run); - A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]]; - L[vp8_block2left_8x8[b] + 1] = 
L[vp8_block2left_8x8[b]]; + A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]]; + L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]]; } vpx_memset(&A[8], 0, sizeof(A[8])); vpx_memset(&L[8], 0, sizeof(L[8])); } else if (tx_size == TX_8X8) { for (b = 0; b < 16; b += 4) { tokenize_b(cpi, xd, xd->block + b, t, plane_type, - A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], + A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b], TX_8X8, dry_run); - A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]]; - L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]]; + A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]]; + L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]]; } if (xd->mode_info_context->mbmi.mode == I8X8_PRED || xd->mode_info_context->mbmi.mode == SPLITMV) { for (b = 16; b < 24; b++) { tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, - A + vp8_block2above[b], L + vp8_block2left[b], + A + vp9_block2above[b], L + vp9_block2left[b], TX_4X4, dry_run); } } else { for (b = 16; b < 24; b += 4) { tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, - A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], + A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b], TX_8X8, dry_run); - A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]]; - L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]]; + A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]]; + L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]]; } } } else { for (b = 0; b < 16; b++) { tokenize_b(cpi, xd, xd->block + b, t, plane_type, - A + vp8_block2above[b], L + vp8_block2left[b], + A + vp9_block2above[b], L + vp9_block2left[b], TX_4X4, dry_run); } for (b = 16; b < 24; b++) { tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, - A + vp8_block2above[b], L + vp8_block2left[b], + A + vp9_block2above[b], L + vp9_block2left[b], TX_4X4, dry_run); } } @@ -569,7 +569,7 @@ void print_context_counters() { for (t = 0; t < MAX_ENTROPY_TOKENS; ++t) coef_counts[t] = context_counters [type] [band] [pt] [t]; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, coef_counts, 256, 1); fprintf(f, "%s\n {", Comma(pt)); @@ -604,7 +604,7 @@ void print_context_counters() { for (t = 0; t < MAX_ENTROPY_TOKENS; ++t) coef_counts[t] = context_counters_8x8[type] [band] [pt] [t]; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, coef_counts, 256, 1); fprintf(f, "%s\n {", Comma(pt)); @@ -637,7 +637,7 @@ void print_context_counters() { for (t = 0; t < MAX_ENTROPY_TOKENS; ++t) coef_counts[t] = context_counters_16x16[type] [band] [pt] [t]; vp9_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree, coef_probs, branch_ct, coef_counts, 256, 1); fprintf(f, "%s\n {", Comma(pt)); @@ -688,7 +688,7 @@ static __inline void stuff_b(VP8_COMP *cpi, switch (tx_size) { default: case TX_4X4: - bands = vp8_coef_bands; + bands = vp9_coef_bands; if (tx_type != DCT_DCT) { counts = cpi->hybrid_coef_counts; probs = cpi->common.fc.hybrid_coef_probs; @@ -698,7 +698,7 @@ static __inline void stuff_b(VP8_COMP *cpi, } break; case TX_8X8: - bands = vp8_coef_bands_8x8; + bands = vp9_coef_bands_8x8; if (tx_type != DCT_DCT) { counts = cpi->hybrid_coef_counts_8x8; probs = cpi->common.fc.hybrid_coef_probs_8x8; @@ -708,7 +708,7 @@ 
static __inline void stuff_b(VP8_COMP *cpi, } break; case TX_16X16: - bands = vp8_coef_bands_16x16; + bands = vp9_coef_bands_16x16; if (tx_type != DCT_DCT) { counts = cpi->hybrid_coef_counts_16x16; probs = cpi->common.fc.hybrid_coef_probs_16x16; @@ -742,7 +742,7 @@ static void vp9_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd, if (has_y2_block) { stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, - A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24], + A + vp9_block2above_8x8[24], L + vp9_block2left_8x8[24], TX_8X8, dry_run); plane_type = PLANE_TYPE_Y_NO_DC; } else { @@ -750,18 +750,18 @@ static void vp9_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd, } for (b = 0; b < 16; b += 4) { - stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp8_block2above_8x8[b], - L + vp8_block2left_8x8[b], TX_8X8, dry_run); - A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]]; - L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]]; + stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp9_block2above_8x8[b], + L + vp9_block2left_8x8[b], TX_8X8, dry_run); + A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]]; + L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]]; } for (b = 16; b < 24; b += 4) { stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, - A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], + A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b], TX_8X8, dry_run); - A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]]; - L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]]; + A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]]; + L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]]; } } @@ -775,10 +775,10 @@ static void vp9_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd, A[1] = A[2] = A[3] = A[0]; L[1] = L[2] = L[3] = L[0]; for (b = 16; b < 24; b += 4) { - stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b], - L + vp8_block2above_8x8[b], TX_8X8, dry_run); - A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]]; - L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]]; + stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b], + L + vp9_block2above_8x8[b], TX_8X8, dry_run); + A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]]; + L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]]; } vpx_memset(&A[8], 0, sizeof(A[8])); vpx_memset(&L[8], 0, sizeof(L[8])); @@ -795,20 +795,20 @@ static void vp9_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd, xd->mode_info_context->mbmi.mode != SPLITMV); if (has_y2_block) { - stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, A + vp8_block2above[24], - L + vp8_block2left[24], TX_4X4, dry_run); + stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, A + vp9_block2above[24], + L + vp9_block2left[24], TX_4X4, dry_run); plane_type = PLANE_TYPE_Y_NO_DC; } else { plane_type = PLANE_TYPE_Y_WITH_DC; } for (b = 0; b < 16; b++) - stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp8_block2above[b], - L + vp8_block2left[b], TX_4X4, dry_run); + stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp9_block2above[b], + L + vp9_block2left[b], TX_4X4, dry_run); for (b = 16; b < 24; b++) - stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b], - L + vp8_block2left[b], TX_4X4, dry_run); + stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b], + L + vp9_block2left[b], TX_4X4, dry_run); } static void vp9_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd, @@ -819,15 +819,15 @@ static void vp9_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd, for (b = 0; b < 16; b += 4) { 
     stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_Y_WITH_DC,
-            A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+            A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
             TX_8X8, dry_run);
-    A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
-    L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+    A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+    L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
   }
 
   for (b = 16; b < 24; b++)
-    stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b],
-            L + vp8_block2left[b], TX_4X4, dry_run);
+    stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b],
+            L + vp9_block2left[b], TX_4X4, dry_run);
 }
 
 void vp9_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
diff --git a/vp8/encoder/tokenize.h b/vp8/encoder/tokenize.h
index 7e36561e0ebf81ae81c285f055ce8d67456e945b..e02f002ffeff5fdc2f008bbe60b6a6faae194340 100644
--- a/vp8/encoder/tokenize.h
+++ b/vp8/encoder/tokenize.h
@@ -49,11 +49,11 @@ extern INT64 context_counters_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
                                     [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
 #endif
 
-extern const int *vp8_dct_value_cost_ptr;
+extern const int *vp9_dct_value_cost_ptr;
 /* TODO: The Token field should be broken out into a separate char array to
  *  improve cache locality, since it's needed for costing when the rest of the
  *  fields are not.
  */
-extern const TOKENVALUE *vp8_dct_value_tokens_ptr;
+extern const TOKENVALUE *vp9_dct_value_tokens_ptr;
 
 #endif  /* tokenize_h */
diff --git a/vp8/encoder/treewriter.h b/vp8/encoder/treewriter.h
index 4c34db4f1ee49076aefaf47125c432d3302ffcef..1a97f5a5101015a772489edf09b210dd581aa88b 100644
--- a/vp8/encoder/treewriter.h
+++ b/vp8/encoder/treewriter.h
@@ -32,7 +32,7 @@ typedef BOOL_CODER vp8_writer;
 
 /* Approximate length of an encoded bool in 256ths of a bit at given prob */
 
-#define vp8_cost_zero( x) ( vp8_prob_cost[x])
+#define vp8_cost_zero( x) ( vp9_prob_cost[x])
 #define vp8_cost_one( x)  vp8_cost_zero( vp8_complement(x))
 
 #define vp8_cost_bit( x, b) vp8_cost_zero( (b)?
vp8_complement(x) : (x) ) diff --git a/vp8/encoder/variance_c.c b/vp8/encoder/variance_c.c index 2684df7b282ae4752ec6831307caabbc15704414..760c519eafbb0c53f85229eefa386abde7120a7c 100644 --- a/vp8/encoder/variance_c.c +++ b/vp8/encoder/variance_c.c @@ -264,8 +264,8 @@ unsigned int vp9_sub_pixel_variance4x4_c(const unsigned char *src_ptr, const short *HFilter, *VFilter; unsigned short FData3[5 * 4]; // Temp data bufffer used in filtering - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; // First filter 1d Horizontal var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter); @@ -288,8 +288,8 @@ unsigned int vp9_sub_pixel_variance8x8_c(const unsigned char *src_ptr, unsigned char temp2[20 * 16]; const short *HFilter, *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter); var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter); @@ -308,8 +308,8 @@ unsigned int vp9_sub_pixel_variance16x16_c(const unsigned char *src_ptr, unsigned char temp2[20 * 16]; const short *HFilter, *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter); var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter); @@ -329,8 +329,8 @@ unsigned int vp9_sub_pixel_variance32x32_c(const unsigned char *src_ptr, unsigned char temp2[36 * 32]; const short *HFilter, *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 33, 32, HFilter); var_filter_block2d_bil_second_pass(FData3, temp2, 32, 32, 32, 32, VFilter); @@ -439,8 +439,8 @@ unsigned int vp9_sub_pixel_variance16x8_c(const unsigned char *src_ptr, unsigned char temp2[20 * 16]; const short *HFilter, *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter); var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter); @@ -459,8 +459,8 @@ unsigned int vp9_sub_pixel_variance8x16_c(const unsigned char *src_ptr, unsigned char temp2[20 * 16]; const short *HFilter, *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter); @@ -507,8 +507,8 @@ unsigned int vp8_sub_pixel_variance16x2_c(const unsigned char *src_ptr, unsigned char temp2[20 * 16]; const short *HFilter, *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 3, 16, HFilter); @@ -528,8 
+528,8 @@ unsigned int vp8_sub_pixel_variance2x16_c(const unsigned char *src_ptr, unsigned char temp2[2 * 16]; const short *HFilter, *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + HFilter = vp9_bilinear_filters[xoffset]; + VFilter = vp9_bilinear_filters[yoffset]; var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 2, HFilter); diff --git a/vp8/encoder/x86/quantize_sse2.asm b/vp8/encoder/x86/quantize_sse2.asm index b12a5850a81681259e9cc76756d9410cf2d62b9e..153060e74558a8c09daa650ee69c32a03753e5df 100644 --- a/vp8/encoder/x86/quantize_sse2.asm +++ b/vp8/encoder/x86/quantize_sse2.asm @@ -55,14 +55,14 @@ sym(vp9_regular_quantize_b_sse2): %endif %endif - mov rdx, [rdi + vp8_block_coeff] ; coeff_ptr - mov rcx, [rdi + vp8_block_zbin] ; zbin_ptr - movd xmm7, [rdi + vp8_block_zbin_extra] ; zbin_oq_value + mov rdx, [rdi + vp9_block_coeff] ; coeff_ptr + mov rcx, [rdi + vp9_block_zbin] ; zbin_ptr + movd xmm7, [rdi + vp9_block_zbin_extra] ; zbin_oq_value ; z movdqa xmm0, [rdx] movdqa xmm4, [rdx + 16] - mov rdx, [rdi + vp8_block_round] ; round_ptr + mov rdx, [rdi + vp9_block_round] ; round_ptr pshuflw xmm7, xmm7, 0 punpcklwd xmm7, xmm7 ; duplicated zbin_oq_value @@ -84,7 +84,7 @@ sym(vp9_regular_quantize_b_sse2): movdqa xmm2, [rcx] movdqa xmm3, [rcx + 16] - mov rcx, [rdi + vp8_block_quant] ; quant_ptr + mov rcx, [rdi + vp9_block_quant] ; quant_ptr ; *zbin_ptr + zbin_oq_value paddw xmm2, xmm7 @@ -126,8 +126,8 @@ sym(vp9_regular_quantize_b_sse2): movdqa [rsp + qcoeff], xmm6 movdqa [rsp + qcoeff + 16], xmm6 - mov rdx, [rdi + vp8_block_zrun_zbin_boost] ; zbin_boost_ptr - mov rax, [rdi + vp8_block_quant_shift] ; quant_shift_ptr + mov rdx, [rdi + vp9_block_zrun_zbin_boost] ; zbin_boost_ptr + mov rax, [rdi + vp9_block_quant_shift] ; quant_shift_ptr mov [rsp + zrun_zbin_boost], rdx %macro ZIGZAG_LOOP 1 @@ -149,7 +149,7 @@ sym(vp9_regular_quantize_b_sse2): mov rdx, [rsp + zrun_zbin_boost] ; reset to b->zrun_zbin_boost .rq_zigzag_loop_%1: %endmacro -; in vp8_default_zig_zag1d order: see vp8/common/entropy.c +; in vp9_default_zig_zag1d order: see vp8/common/entropy.c ZIGZAG_LOOP 0 ZIGZAG_LOOP 1 ZIGZAG_LOOP 4 @@ -170,8 +170,8 @@ ZIGZAG_LOOP 15 movdqa xmm2, [rsp + qcoeff] movdqa xmm3, [rsp + qcoeff + 16] - mov rcx, [rsi + vp8_blockd_dequant] ; dequant_ptr - mov rdi, [rsi + vp8_blockd_dqcoeff] ; dqcoeff_ptr + mov rcx, [rsi + vp9_blockd_dequant] ; dequant_ptr + mov rdi, [rsi + vp9_blockd_dqcoeff] ; dqcoeff_ptr ; y ^ sz pxor xmm2, xmm0 @@ -184,7 +184,7 @@ ZIGZAG_LOOP 15 movdqa xmm0, [rcx] movdqa xmm1, [rcx + 16] - mov rcx, [rsi + vp8_blockd_qcoeff] ; qcoeff_ptr + mov rcx, [rsi + vp9_blockd_qcoeff] ; qcoeff_ptr pmullw xmm0, xmm2 pmullw xmm1, xmm3 @@ -214,7 +214,7 @@ ZIGZAG_LOOP 15 pmaxsw xmm2, xmm3 movd eax, xmm2 and eax, 0xff - mov [rsi + vp8_blockd_eob], eax + mov [rsi + vp9_blockd_eob], eax ; begin epilog add rsp, stack_size @@ -270,9 +270,9 @@ sym(vp9_fast_quantize_b_sse2): %endif %endif - mov rax, [rdi + vp8_block_coeff] - mov rcx, [rdi + vp8_block_round] - mov rdx, [rdi + vp8_block_quant_fast] + mov rax, [rdi + vp9_block_coeff] + mov rcx, [rdi + vp9_block_round] + mov rdx, [rdi + vp9_block_quant_fast] ; z = coeff movdqa xmm0, [rax] @@ -296,9 +296,9 @@ sym(vp9_fast_quantize_b_sse2): paddw xmm1, [rcx] paddw xmm5, [rcx + 16] - mov rax, [rsi + vp8_blockd_qcoeff] - mov rcx, [rsi + vp8_blockd_dequant] - mov rdi, [rsi + vp8_blockd_dqcoeff] + mov rax, [rsi + vp9_blockd_qcoeff] + mov rcx, [rsi + vp9_blockd_dequant] + mov rdi, [rsi + 
vp9_blockd_dqcoeff] ; y = x * quant >> 16 pmulhw xmm1, [rdx] @@ -354,7 +354,7 @@ sym(vp9_fast_quantize_b_sse2): movd eax, xmm1 and eax, 0xff - mov [rsi + vp8_blockd_eob], eax + mov [rsi + vp9_blockd_eob], eax ; begin epilog %if ABI_IS_32BIT diff --git a/vp8/encoder/x86/quantize_sse4.asm b/vp8/encoder/x86/quantize_sse4.asm index 76f7a9278768dd5e1e38d2176b236b229dca31f7..8ce1b7cffaa2a1dd3fed9215f043d29224402f75 100644 --- a/vp8/encoder/x86/quantize_sse4.asm +++ b/vp8/encoder/x86/quantize_sse4.asm @@ -52,10 +52,10 @@ sym(vp9_regular_quantize_b_sse4): %endif %endif - mov rax, [rdi + vp8_block_coeff] - mov rcx, [rdi + vp8_block_zbin] - mov rdx, [rdi + vp8_block_round] - movd xmm7, [rdi + vp8_block_zbin_extra] + mov rax, [rdi + vp9_block_coeff] + mov rcx, [rdi + vp9_block_zbin] + mov rdx, [rdi + vp9_block_round] + movd xmm7, [rdi + vp9_block_zbin_extra] ; z movdqa xmm0, [rax] @@ -99,9 +99,9 @@ sym(vp9_regular_quantize_b_sse4): movdqa xmm4, [rdx] movdqa xmm5, [rdx + 16] - mov rax, [rdi + vp8_block_quant_shift] - mov rcx, [rdi + vp8_block_quant] - mov rdx, [rdi + vp8_block_zrun_zbin_boost] + mov rax, [rdi + vp9_block_quant_shift] + mov rcx, [rdi + vp9_block_quant] + mov rdx, [rdi + vp9_block_zrun_zbin_boost] ; x + round paddw xmm2, xmm4 @@ -156,7 +156,7 @@ sym(vp9_regular_quantize_b_sse4): mov rdx, rax ; reset to b->zrun_zbin_boost .rq_zigzag_loop_%1: %endmacro -; in vp8_default_zig_zag1d order: see vp8/common/entropy.c +; in vp9_default_zig_zag1d order: see vp8/common/entropy.c ZIGZAG_LOOP 0, 0, xmm2, xmm6, xmm4 ZIGZAG_LOOP 1, 1, xmm2, xmm6, xmm4 ZIGZAG_LOOP 4, 4, xmm2, xmm6, xmm4 @@ -174,8 +174,8 @@ ZIGZAG_LOOP 11, 3, xmm3, xmm7, xmm8 ZIGZAG_LOOP 14, 6, xmm3, xmm7, xmm8 ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8 - mov rcx, [rsi + vp8_blockd_dequant] - mov rdi, [rsi + vp8_blockd_dqcoeff] + mov rcx, [rsi + vp9_blockd_dequant] + mov rdi, [rsi + vp9_blockd_dqcoeff] %if ABI_IS_32BIT movdqa xmm4, [rsp + qcoeff] @@ -195,7 +195,7 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8 movdqa xmm0, [rcx] movdqa xmm1, [rcx + 16] - mov rcx, [rsi + vp8_blockd_qcoeff] + mov rcx, [rsi + vp9_blockd_qcoeff] pmullw xmm0, xmm4 pmullw xmm1, xmm5 @@ -225,7 +225,7 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8 add eax, 1 and eax, edi - mov [rsi + vp8_blockd_eob], eax + mov [rsi + vp9_blockd_eob], eax ; begin epilog %if ABI_IS_32BIT @@ -249,6 +249,6 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8 SECTION_RODATA align 16 -; vp8/common/entropy.c: vp8_default_zig_zag1d +; vp8/common/entropy.c: vp9_default_zig_zag1d zig_zag1d: db 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 diff --git a/vp8/encoder/x86/quantize_ssse3.asm b/vp8/encoder/x86/quantize_ssse3.asm index 018161e7c8ee5baa080f5b3c25b3244f90596932..14a9912d244000ab889ea09d3e6df6961bfda5de 100644 --- a/vp8/encoder/x86/quantize_ssse3.asm +++ b/vp8/encoder/x86/quantize_ssse3.asm @@ -47,9 +47,9 @@ sym(vp9_fast_quantize_b_ssse3): %endif %endif - mov rax, [rdi + vp8_block_coeff] - mov rcx, [rdi + vp8_block_round] - mov rdx, [rdi + vp8_block_quant_fast] + mov rax, [rdi + vp9_block_coeff] + mov rcx, [rdi + vp9_block_round] + mov rdx, [rdi + vp9_block_quant_fast] ; coeff movdqa xmm0, [rax] @@ -76,9 +76,9 @@ sym(vp9_fast_quantize_b_ssse3): pmulhw xmm1, [rdx] pmulhw xmm5, [rdx + 16] - mov rax, [rsi + vp8_blockd_qcoeff] - mov rdi, [rsi + vp8_blockd_dequant] - mov rcx, [rsi + vp8_blockd_dqcoeff] + mov rax, [rsi + vp9_blockd_qcoeff] + mov rdi, [rsi + vp9_blockd_dequant] + mov rcx, [rsi + vp9_blockd_dqcoeff] pxor xmm1, xmm0 pxor xmm5, xmm4 @@ -115,7 +115,7 @@ sym(vp9_fast_quantize_b_ssse3): add eax, 1 and eax, 
edi ;if the bit mask was all zero, ;then eob = 0 - mov [rsi + vp8_blockd_eob], eax + mov [rsi + vp9_blockd_eob], eax ; begin epilog %if ABI_IS_32BIT diff --git a/vp8/encoder/x86/variance_mmx.c b/vp8/encoder/x86/variance_mmx.c index 58aae066fa0b0830e96273b8d5879d977d634cd8..2d72d50a8f2d8dc65f847582461e1436fc9cbb4d 100644 --- a/vp8/encoder/x86/variance_mmx.c +++ b/vp8/encoder/x86/variance_mmx.c @@ -198,7 +198,7 @@ unsigned int vp9_variance8x16_mmx( // the mmx function that does the bilinear filtering and var calculation // // int one pass // /////////////////////////////////////////////////////////////////////////// -DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) = { +DECLARE_ALIGNED(16, const short, vp9_bilinear_filters_mmx[16][8]) = { { 128, 128, 128, 128, 0, 0, 0, 0 }, { 120, 120, 120, 120, 8, 8, 8, 8 }, { 112, 112, 112, 112, 16, 16, 16, 16 }, @@ -233,7 +233,7 @@ unsigned int vp9_sub_pixel_variance4x4_mmx vp9_filter_block2d_bil4x4_var_mmx( src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, - vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset], + vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset], &xsum, &xxsum ); *sse = xxsum; @@ -257,7 +257,7 @@ unsigned int vp9_sub_pixel_variance8x8_mmx vp9_filter_block2d_bil_var_mmx( src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, 8, - vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset], + vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset], &xsum, &xxsum ); *sse = xxsum; @@ -281,14 +281,14 @@ unsigned int vp9_sub_pixel_variance16x16_mmx vp9_filter_block2d_bil_var_mmx( src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, 16, - vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset], + vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset], &xsum0, &xxsum0 ); vp9_filter_block2d_bil_var_mmx( src_ptr + 8, src_pixels_per_line, dst_ptr + 8, dst_pixels_per_line, 16, - vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset], + vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset], &xsum1, &xxsum1 ); @@ -331,7 +331,7 @@ unsigned int vp9_sub_pixel_variance16x8_mmx vp9_filter_block2d_bil_var_mmx( src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, 8, - vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset], + vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset], &xsum0, &xxsum0 ); @@ -339,7 +339,7 @@ unsigned int vp9_sub_pixel_variance16x8_mmx vp9_filter_block2d_bil_var_mmx( src_ptr + 8, src_pixels_per_line, dst_ptr + 8, dst_pixels_per_line, 8, - vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset], + vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset], &xsum1, &xxsum1 ); @@ -365,7 +365,7 @@ unsigned int vp9_sub_pixel_variance8x16_mmx vp9_filter_block2d_bil_var_mmx( src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, 16, - vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset], + vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset], &xsum, &xxsum ); *sse = xxsum; diff --git a/vp8/encoder/x86/variance_sse2.c b/vp8/encoder/x86/variance_sse2.c index 79e13005eafd2b7e6009f8930dd191b6e8a61034..f3b0b600a647b23e479a6e73282a1f5d8e758276 100644 --- a/vp8/encoder/x86/variance_sse2.c +++ b/vp8/encoder/x86/variance_sse2.c @@ -137,7 +137,7 @@ void vp9_half_vert_variance16x_h_sse2 unsigned int *sumsquared ); -DECLARE_ALIGNED(16, extern short, 
vp8_vp7_bilinear_filters_mmx[16][8]);
+DECLARE_ALIGNED(16, extern short, vp9_bilinear_filters_mmx[16][8]);
 
 unsigned int vp9_variance4x4_wmt(
   const unsigned char *src_ptr,
@@ -257,7 +257,7 @@ unsigned int vp9_sub_pixel_variance4x4_wmt
   vp9_filter_block2d_bil4x4_var_mmx(
     src_ptr, src_pixels_per_line,
     dst_ptr, dst_pixels_per_line,
-    vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+    vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
     &xsum, &xxsum
   );
   *sse = xxsum;
diff --git a/vp8/vp8_dx_iface.c b/vp8/vp8_dx_iface.c
index 791fa0fc56f33eca8b793fd49b9b9d85741f5d19..599df7dec698cafbfdeaeb1094c586fb5306eb52 100644
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -641,7 +641,7 @@ static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
 }
 
 
-vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] = {
+static vpx_codec_ctrl_fn_map_t ctf_maps[] = {
   {VP8_SET_REFERENCE,             vp9_set_reference},
   {VP8_COPY_REFERENCE,            vp9_get_reference},
   {VP8_SET_POSTPROC,              vp8_set_postproc},
@@ -666,7 +666,7 @@ CODEC_INTERFACE(vpx_codec_vp8_dx) = {
   /* vpx_codec_caps_t          caps; */
   vp8_init,         /* vpx_codec_init_fn_t       init; */
   vp8_destroy,      /* vpx_codec_destroy_fn_t    destroy; */
-  vp8_ctf_maps,     /* vpx_codec_ctrl_fn_map_t  *ctrl_maps; */
+  ctf_maps,         /* vpx_codec_ctrl_fn_map_t  *ctrl_maps; */
   vp8_xma_get_mmap, /* vpx_codec_get_mmap_fn_t   get_mmap; */
   vp8_xma_set_mmap, /* vpx_codec_set_mmap_fn_t   set_mmap; */
   {
@@ -696,7 +696,7 @@ vpx_codec_iface_t vpx_codec_vp8_algo = {
   /* vpx_codec_caps_t          caps; */
   vp8_init,         /* vpx_codec_init_fn_t       init; */
   vp8_destroy,      /* vpx_codec_destroy_fn_t    destroy; */
-  vp8_ctf_maps,     /* vpx_codec_ctrl_fn_map_t  *ctrl_maps; */
+  ctf_maps,         /* vpx_codec_ctrl_fn_map_t  *ctrl_maps; */
   vp8_xma_get_mmap, /* vpx_codec_get_mmap_fn_t   get_mmap; */
   vp8_xma_set_mmap, /* vpx_codec_set_mmap_fn_t   set_mmap; */
   {