diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index f52adfc972dae2f193914053fa5ada6bf51fc5ba..575eb3136cf1cdfba8b8c0feee6ff5d47d871f1c 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -215,11 +215,6 @@ typedef struct macroblockd {
 
   int corrupted;
 
-  unsigned char sb_index;   // index of 32x32 block inside the 64x64 block
-  unsigned char mb_index;   // index of 16x16 block inside the 32x32 block
-  unsigned char b_index;    // index of 8x8 block inside the 16x16 block
-  unsigned char ab_index;   // index of 4x4 block inside the 8x8 block
-
   int q_index;
 
   /* Y,U,V,(A) */
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index 8033a4d157b37180d09ef645a5159bfc060e61d4..fd5b3416e61c0a2d01b06de1260c61f648826c0c 100644
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -120,6 +120,11 @@ struct macroblock {
   int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES];
   int switchable_interp_costs[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];
 
+  unsigned char sb_index;   // index of 32x32 block inside the 64x64 block
+  unsigned char mb_index;   // index of 16x16 block inside the 32x32 block
+  unsigned char b_index;    // index of 8x8 block inside the 16x16 block
+  unsigned char ab_index;   // index of 4x4 block inside the 8x8 block
+
   // These define limits to motion vector components to prevent them
   // from extending outside the UMV borders
   int mv_col_min;
@@ -179,35 +184,33 @@ struct macroblock {
 // refactoring on organizing the temporary buffers, when recursive
 // partition down to 4x4 block size is enabled.
 static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x, BLOCK_SIZE bsize) {
-  MACROBLOCKD *const xd = &x->e_mbd;
-
   switch (bsize) {
     case BLOCK_64X64:
       return &x->sb64_context;
     case BLOCK_64X32:
-      return &x->sb64x32_context[xd->sb_index];
+      return &x->sb64x32_context[x->sb_index];
     case BLOCK_32X64:
-      return &x->sb32x64_context[xd->sb_index];
+      return &x->sb32x64_context[x->sb_index];
     case BLOCK_32X32:
-      return &x->sb32_context[xd->sb_index];
+      return &x->sb32_context[x->sb_index];
     case BLOCK_32X16:
-      return &x->sb32x16_context[xd->sb_index][xd->mb_index];
+      return &x->sb32x16_context[x->sb_index][x->mb_index];
     case BLOCK_16X32:
-      return &x->sb16x32_context[xd->sb_index][xd->mb_index];
+      return &x->sb16x32_context[x->sb_index][x->mb_index];
     case BLOCK_16X16:
-      return &x->mb_context[xd->sb_index][xd->mb_index];
+      return &x->mb_context[x->sb_index][x->mb_index];
     case BLOCK_16X8:
-      return &x->sb16x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+      return &x->sb16x8_context[x->sb_index][x->mb_index][x->b_index];
     case BLOCK_8X16:
-      return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
+      return &x->sb8x16_context[x->sb_index][x->mb_index][x->b_index];
     case BLOCK_8X8:
-      return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+      return &x->sb8x8_context[x->sb_index][x->mb_index][x->b_index];
     case BLOCK_8X4:
-      return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
+      return &x->sb8x4_context[x->sb_index][x->mb_index][x->b_index];
     case BLOCK_4X8:
-      return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+      return &x->sb4x8_context[x->sb_index][x->mb_index][x->b_index];
     case BLOCK_4X4:
-      return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
+      return &x->ab4x4_context[x->sb_index][x->mb_index][x->b_index];
     default:
       assert(0);
       return NULL;
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 702fc70bb7f8f423e430b134adc64b9891f6ca52..5c8f152a42effe062f757894f1cbd99933649db5 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -50,25 +50,25 @@ int enc_debug = 0;
 #endif
 
-static INLINE uint8_t *get_sb_index(MACROBLOCKD *xd, BLOCK_SIZE subsize) {
+static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) {
   switch (subsize) {
     case BLOCK_64X64:
     case BLOCK_64X32:
     case BLOCK_32X64:
     case BLOCK_32X32:
-      return &xd->sb_index;
+      return &x->sb_index;
     case BLOCK_32X16:
     case BLOCK_16X32:
     case BLOCK_16X16:
-      return &xd->mb_index;
+      return &x->mb_index;
     case BLOCK_16X8:
     case BLOCK_8X16:
     case BLOCK_8X8:
-      return &xd->b_index;
+      return &x->b_index;
     case BLOCK_8X4:
     case BLOCK_4X8:
     case BLOCK_4X4:
-      return &xd->ab_index;
+      return &x->ab_index;
     default:
       assert(0);
       return NULL;
@@ -590,7 +590,7 @@ static void pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
   if (bsize < BLOCK_8X8) {
     // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
-    if (xd->ab_index != 0) {
+    if (x->ab_index != 0) {
       *totalrate = 0;
       *totaldist = 0;
       return;
@@ -687,16 +687,15 @@ static void update_stats(VP9_COMP *cpi) {
 }
 
 static BLOCK_SIZE *get_sb_partitioning(MACROBLOCK *x, BLOCK_SIZE bsize) {
-  MACROBLOCKD *const xd = &x->e_mbd;
   switch (bsize) {
     case BLOCK_64X64:
       return &x->sb64_partitioning;
     case BLOCK_32X32:
-      return &x->sb_partitioning[xd->sb_index];
+      return &x->sb_partitioning[x->sb_index];
     case BLOCK_16X16:
-      return &x->mb_partitioning[xd->sb_index][xd->mb_index];
+      return &x->mb_partitioning[x->sb_index][x->mb_index];
     case BLOCK_8X8:
-      return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
+      return &x->b_partitioning[x->sb_index][x->mb_index][x->b_index];
     default:
       assert(0);
       return NULL;
@@ -769,20 +768,19 @@ static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize, int sub_index) {
-  VP9_COMMON * const cm = &cpi->common;
-  MACROBLOCK * const x = &cpi->mb;
-  MACROBLOCKD * const xd = &x->e_mbd;
+  VP9_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &cpi->mb;
 
   if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
     return;
 
   if (sub_index != -1)
-    *get_sb_index(xd, bsize) = sub_index;
+    *get_sb_index(x, bsize) = sub_index;
 
   if (bsize < BLOCK_8X8) {
     // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
     // there is nothing to be done.
-    if (xd->ab_index > 0)
+    if (x->ab_index > 0)
       return;
   }
   set_offsets(cpi, tile, mi_row, mi_col, bsize);
@@ -800,9 +798,8 @@ static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
 static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
                       TOKENEXTRA **tp, int mi_row, int mi_col,
                       int output_enabled, BLOCK_SIZE bsize) {
-  VP9_COMMON * const cm = &cpi->common;
-  MACROBLOCK * const x = &cpi->mb;
-  MACROBLOCKD * const xd = &x->e_mbd;
+  VP9_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &cpi->mb;
   BLOCK_SIZE c1 = BLOCK_8X8;
   const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
   int pl = 0;
@@ -848,7 +845,7 @@ static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
     for (i = 0; i < 4; i++) {
       const int x_idx = i & 1, y_idx = i >> 1;
 
-      *get_sb_index(xd, subsize) = i;
+      *get_sb_index(x, subsize) = i;
       encode_sb(cpi, tile, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
                 output_enabled, subsize);
     }
@@ -975,9 +972,8 @@ static void rd_use_partition(VP9_COMP *cpi,
                              TOKENEXTRA **tp, int mi_row, int mi_col,
                              BLOCK_SIZE bsize, int *rate, int64_t *dist,
                              int do_recon) {
-  VP9_COMMON * const cm = &cpi->common;
-  MACROBLOCK * const x = &cpi->mb;
-  MACROBLOCKD *xd = &cpi->mb.e_mbd;
+  VP9_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &cpi->mb;
   const int mis = cm->mode_info_stride;
   int bsl = b_width_log2(bsize);
   const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
@@ -1012,7 +1008,7 @@ static void rd_use_partition(VP9_COMP *cpi,
   if (bsize < BLOCK_8X8) {
     // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
     // there is nothing to be done.
-    if (xd->ab_index != 0) {
+    if (x->ab_index != 0) {
       *rate = 0;
       *dist = 0;
       return;
@@ -1070,7 +1066,7 @@ static void rd_use_partition(VP9_COMP *cpi,
                     bsize, get_block_context(x, bsize), INT64_MAX);
       break;
     case PARTITION_HORZ:
-      *get_sb_index(xd, subsize) = 0;
+      *get_sb_index(x, subsize) = 0;
      pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    subsize, get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
@@ -1079,7 +1075,7 @@ static void rd_use_partition(VP9_COMP *cpi,
         int64_t dt = 0;
         update_state(cpi, get_block_context(x, subsize), subsize, 0);
         encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
-        *get_sb_index(xd, subsize) = 1;
+        *get_sb_index(x, subsize) = 1;
         pick_sb_modes(cpi, tile, mi_row + (ms >> 1), mi_col, &rt, &dt, subsize,
                       get_block_context(x, subsize), INT64_MAX);
         if (rt == INT_MAX || dt == INT_MAX) {
@@ -1093,7 +1089,7 @@ static void rd_use_partition(VP9_COMP *cpi,
       }
       break;
     case PARTITION_VERT:
-      *get_sb_index(xd, subsize) = 0;
+      *get_sb_index(x, subsize) = 0;
      pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    subsize, get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
@@ -1102,7 +1098,7 @@ static void rd_use_partition(VP9_COMP *cpi,
         int64_t dt = 0;
         update_state(cpi, get_block_context(x, subsize), subsize, 0);
         encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
-        *get_sb_index(xd, subsize) = 1;
+        *get_sb_index(x, subsize) = 1;
         pick_sb_modes(cpi, tile, mi_row, mi_col + (ms >> 1), &rt, &dt, subsize,
                       get_block_context(x, subsize), INT64_MAX);
         if (rt == INT_MAX || dt == INT_MAX) {
@@ -1128,7 +1124,7 @@ static void rd_use_partition(VP9_COMP *cpi,
         if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
           continue;
 
-        *get_sb_index(xd, subsize) = i;
+        *get_sb_index(x, subsize) = i;
         rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
                          mi_row + y_idx, mi_col + x_idx, subsize, &rt,
                          &dt,
@@ -1169,11 +1165,10 @@ static void rd_use_partition(VP9_COMP *cpi,
       ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
       PARTITION_CONTEXT sl[8], sa[8];
 
-      if ((mi_row + y_idx >= cm->mi_rows)
-          || (mi_col + x_idx >= cm->mi_cols))
+      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
         continue;
 
-      *get_sb_index(xd, split_subsize) = i;
+      *get_sb_index(x, split_subsize) = i;
       *get_sb_partitioning(x, bsize) = split_subsize;
       *get_sb_partitioning(x, split_subsize) = split_subsize;
 
@@ -1353,7 +1348,6 @@ static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
 static void compute_fast_motion_search_level(VP9_COMP *cpi, BLOCK_SIZE bsize) {
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
-  MACROBLOCKD *const xd = &x->e_mbd;
 
   // Only use 8x8 result for non HD videos.
   // int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0;
@@ -1366,9 +1360,9 @@ static void compute_fast_motion_search_level(VP9_COMP *cpi, BLOCK_SIZE bsize) {
     PICK_MODE_CONTEXT *block_context = NULL;
 
     if (bsize == BLOCK_16X16) {
-      block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
+      block_context = x->sb8x8_context[x->sb_index][x->mb_index];
     } else if (bsize == BLOCK_32X32) {
-      block_context = x->mb_context[xd->sb_index];
+      block_context = x->mb_context[x->sb_index];
     } else if (bsize == BLOCK_64X64) {
       block_context = x->sb32_context;
     }
@@ -1456,9 +1450,8 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
                               TOKENEXTRA **tp, int mi_row, int mi_col,
                               BLOCK_SIZE bsize, int *rate, int64_t *dist,
                               int do_recon, int64_t best_rd) {
-  VP9_COMMON * const cm = &cpi->common;
-  MACROBLOCK * const x = &cpi->mb;
-  MACROBLOCKD * const xd = &x->e_mbd;
+  VP9_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &cpi->mb;
   const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
   ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
   PARTITION_CONTEXT sl[8], sa[8];
@@ -1484,7 +1477,7 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
   if (bsize < BLOCK_8X8) {
     // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
     // there is nothing to be done.
-    if (xd->ab_index != 0) {
+    if (x->ab_index != 0) {
       *rate = 0;
       *dist = 0;
       return;
@@ -1582,7 +1575,7 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
       if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
         continue;
 
-      *get_sb_index(xd, subsize) = i;
+      *get_sb_index(x, subsize) = i;
       if (cpi->sf.adaptive_motion_search)
         load_pred_mv(x, get_block_context(x, bsize));
       rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, subsize,
@@ -1629,7 +1622,7 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
   // PARTITION_HORZ
   if (partition_horz_allowed && do_rect) {
     subsize = get_subsize(bsize, PARTITION_HORZ);
-    *get_sb_index(xd, subsize) = 0;
+    *get_sb_index(x, subsize) = 0;
     if (cpi->sf.adaptive_motion_search)
       load_pred_mv(x, get_block_context(x, bsize));
     pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
@@ -1640,7 +1633,7 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
       update_state(cpi, get_block_context(x, subsize), subsize, 0);
       encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
 
-      *get_sb_index(xd, subsize) = 1;
+      *get_sb_index(x, subsize) = 1;
       if (cpi->sf.adaptive_motion_search)
         load_pred_mv(x, get_block_context(x, bsize));
       pick_sb_modes(cpi, tile, mi_row + ms, mi_col, &this_rate,
@@ -1674,7 +1667,7 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
 
   if (partition_vert_allowed && do_rect) {
     subsize = get_subsize(bsize, PARTITION_VERT);
-    *get_sb_index(xd, subsize) = 0;
+    *get_sb_index(x, subsize) = 0;
     if (cpi->sf.adaptive_motion_search)
       load_pred_mv(x, get_block_context(x, bsize));
     pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
@@ -1684,7 +1677,7 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
       update_state(cpi, get_block_context(x, subsize), subsize, 0);
       encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
 
-      *get_sb_index(xd, subsize) = 1;
+      *get_sb_index(x, subsize) = 1;
       if (cpi->sf.adaptive_motion_search)
         load_pred_mv(x, get_block_context(x, bsize));
       pick_sb_modes(cpi, tile, mi_row, mi_col + ms, &this_rate,
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index f922f900ad42eec2cf8678b596ef33004ec6ff1b..f674296700214922dd7e638f08408025b304183c 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -1438,18 +1438,18 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
 
 static void init_pick_mode_context(VP9_COMP *cpi) {
   int i;
-  MACROBLOCK *x = &cpi->mb;
-  MACROBLOCKD *xd = &x->e_mbd;
-  VP9_COMMON *cm = &cpi->common;
+  VP9_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &cpi->mb;
+
   for (i = 0; i < BLOCK_SIZES; ++i) {
     const int num_4x4_w = num_4x4_blocks_wide_lookup[i];
     const int num_4x4_h = num_4x4_blocks_high_lookup[i];
     const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h);
 
     if (i < BLOCK_16X16) {
-      for (xd->sb_index = 0; xd->sb_index < 4; ++xd->sb_index) {
-        for (xd->mb_index = 0; xd->mb_index < 4; ++xd->mb_index) {
-          for (xd->b_index = 0; xd->b_index < 16 / num_4x4_blk; ++xd->b_index) {
+      for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) {
+        for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index) {
+          for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index) {
             PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
             ctx->num_4x4_blk = num_4x4_blk;
             CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
@@ -1458,9 +1458,8 @@ static void init_pick_mode_context(VP9_COMP *cpi) {
         }
       }
     } else if (i < BLOCK_32X32) {
-      for (xd->sb_index = 0; xd->sb_index < 4; ++xd->sb_index) {
-        for (xd->mb_index = 0; xd->mb_index < 64 / num_4x4_blk;
-             ++xd->mb_index) {
+      for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) {
+        for (x->mb_index = 0; x->mb_index < 64 / num_4x4_blk; ++x->mb_index) {
           PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
           ctx->num_4x4_blk = num_4x4_blk;
           CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
@@ -1468,7 +1467,7 @@ static void init_pick_mode_context(VP9_COMP *cpi) {
         }
       }
     } else if (i < BLOCK_64X64) {
-      for (xd->sb_index = 0; xd->sb_index < 256 / num_4x4_blk; ++xd->sb_index) {
+      for (x->sb_index = 0; x->sb_index < 256 / num_4x4_blk; ++x->sb_index) {
         PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
         ctx->num_4x4_blk = num_4x4_blk;
         CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
@@ -1485,16 +1484,15 @@ static void init_pick_mode_context(VP9_COMP *cpi) {
 
 static void free_pick_mode_context(MACROBLOCK *x) {
   int i;
-  MACROBLOCKD *xd = &x->e_mbd;
   for (i = 0; i < BLOCK_SIZES; ++i) {
     const int num_4x4_w = num_4x4_blocks_wide_lookup[i];
     const int num_4x4_h = num_4x4_blocks_high_lookup[i];
     const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h);
 
     if (i < BLOCK_16X16) {
-      for (xd->sb_index = 0; xd->sb_index < 4; ++xd->sb_index) {
-        for (xd->mb_index = 0; xd->mb_index < 4; ++xd->mb_index) {
-          for (xd->b_index = 0; xd->b_index < 16 / num_4x4_blk; ++xd->b_index) {
+      for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) {
+        for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index) {
+          for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index) {
             PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
             vpx_free(ctx->zcoeff_blk);
             ctx->zcoeff_blk = 0;
@@ -1502,16 +1500,15 @@ static void free_pick_mode_context(MACROBLOCK *x) {
         }
       }
     } else if (i < BLOCK_32X32) {
-      for (xd->sb_index = 0; xd->sb_index < 4; ++xd->sb_index) {
-        for (xd->mb_index = 0; xd->mb_index < 64 / num_4x4_blk;
-             ++xd->mb_index) {
+      for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) {
+        for (x->mb_index = 0; x->mb_index < 64 / num_4x4_blk; ++x->mb_index) {
           PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
           vpx_free(ctx->zcoeff_blk);
           ctx->zcoeff_blk = 0;
         }
       }
     } else if (i < BLOCK_64X64) {
-      for (xd->sb_index = 0; xd->sb_index < 256 / num_4x4_blk; ++xd->sb_index) {
+      for (x->sb_index = 0; x->sb_index < 256 / num_4x4_blk; ++x->sb_index) {
        PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
        vpx_free(ctx->zcoeff_blk);
        ctx->zcoeff_blk = 0;
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 6aaa2c77d15081eef91322a4fc83497ca83fca03..597d063ffc574b50b419daa8aca65a4164916d06 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -3196,8 +3196,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
     case BLOCK_32X32:
       for (i = 0; i < 4; i++) {
         ref_frame_mask |=
-            x->mb_context[xd->sb_index][i].frames_with_high_error;
-        mode_mask |= x->mb_context[xd->sb_index][i].modes_with_high_error;
+            x->mb_context[x->sb_index][i].frames_with_high_error;
+        mode_mask |= x->mb_context[x->sb_index][i].modes_with_high_error;
       }
       break;
     default: