/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_onyx_int.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_invtrans.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"
#include <limits.h>
#include "vpx_ports/vpx_timer.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_mvref_common.h"
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
                              int output_enabled, int mi_row, int mi_col,
                              BLOCK_SIZE_TYPE bsize);
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
/* activity_avg must be positive, or flat regions could get a zero weight
 *  (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 *  vp9_activity_masking().
 */
#define VP9_ACTIVITY_AVG_MIN (64)
/* This is used as a reference when computing the source variance for the
* purposes of activity masking.
* Eventually this should be replaced by custom no-reference routines,
* which will be faster.
*/
static const uint8_t VP9_VAR_OFFS[16] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};

// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
unsigned int act;
unsigned int sse;
/* TODO: This could also be done over smaller areas (8x8), but that would
* require extensive changes elsewhere, as lambda is assumed to be fixed
* over an entire MB in most of the code.
* Another option is to compute four 8x8 variances, and pick a single
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
act = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
VP9_VAR_OFFS, 0, &sse);
/* If the region is flat, lower the activity some more. */
if (act < 8 << 12)
act = act < 5 << 12 ? act : 5 << 12;
  return act;
}
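// Editor's note (illustrative): because VP9_VAR_OFFS is a constant-128 block,
// the variance computed against it reduces to the (block-size-scaled) variance
// of the source block itself -- the constant offset cancels out of the
// variance term -- giving a no-reference measure of how busy the 16x16 is.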
#define ALT_ACT_MEASURE 1

// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(VP9_COMP *cpi,
                                         MACROBLOCK *x, int use_dc_pred) {
  return vp9_encode_intra(cpi, x, use_dc_pred);
}
DECLARE_ALIGNED(16, static const uint8_t, vp9_64x64_zeros[64*64]) = { 0 };
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
                                        int mb_row, int mb_col) {
  unsigned int mb_activity;
if (ALT_ACT_MEASURE) {
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
    // Or use an alternative.
mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
} else {
// Original activity measure from Tim T's code.
mb_activity = tt_activity_measure(cpi, x);
}
  // Lower limit on activity to avoid instability in RD cost calculation.
  if (mb_activity < VP9_ACTIVITY_AVG_MIN)
    mb_activity = VP9_ACTIVITY_AVG_MIN;

  return mb_activity;
}
// Calculate an "average" mb activity value for the frame
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
// Find median: Simple n^2 algorithm for experimentation
{
unsigned int median;
unsigned int i, j;
unsigned int *sortlist;
unsigned int tmp;
// Create a list to sort to
CHECK_MEM_ERROR(sortlist,
vpx_calloc(sizeof(unsigned int),
cpi->common.MBs));
// Copy map to sort list
vpx_memcpy(sortlist, cpi->mb_activity_map,
sizeof(unsigned int) * cpi->common.MBs);
// Ripple each value down to its correct position
for (i = 1; i < cpi->common.MBs; i ++) {
for (j = i; j > 0; j --) {
if (sortlist[j] < sortlist[j - 1]) {
// Swap values
tmp = sortlist[j - 1];
sortlist[j - 1] = sortlist[j];
sortlist[j] = tmp;
} else
break;
}
}
// Even number MBs so estimate median as mean of two either side.
median = (1 + sortlist[cpi->common.MBs >> 1] +
              sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;

    cpi->activity_avg = median;

    vpx_free(sortlist);
  }
#else
// Simple mean for now
  cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif  // ACT_MEDIAN
if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;
// Experimental code: return fixed value normalized for several clips
if (ALT_ACT_MEASURE)
    cpi->activity_avg = 100000;
}
#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0

#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_row, mb_col;

  int64_t act;
  int64_t a;
  int64_t b;
#if OUTPUT_NORM_ACT_STATS
  FILE *f = fopen("norm_act.stt", "a");
  fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif
// Reset pointers to start of activity map
x->mb_activity_ptr = cpi->mb_activity_map;
// Calculate normalized mb activity number.
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
// Read activity from the map
act = *(x->mb_activity_ptr);
// Calculate a normalized activity number
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
if (b >= a)
*(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
else
*(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
      // Increment activity map pointers
      x->mb_activity_ptr++;
      x->activity_ptr++;
    }
  }

#if OUTPUT_NORM_ACT_STATS
  fclose(f);
#endif
}
#endif  // USE_ACT_INDEX
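// Editor's note (illustrative): the index above is a rounded ratio of MB
// activity to the frame average. With act == activity_avg, a == b and the
// index is 0; an MB four times as active as the average maps to roughly +1,
// and one a quarter as active to roughly -1.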
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;
  YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
int recon_yoffset;
int recon_y_stride = new_yv12->y_stride;
int mb_row, mb_col;
unsigned int mb_activity;
int64_t activity_sum = 0;
x->mb_activity_ptr = cpi->mb_activity_map;
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// reset above block coeffs
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
// measure activity
      mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);

      // Keep a running total for the frame average.
      activity_sum += mb_activity;

      // Store MB level activity details.
      *x->mb_activity_ptr = mb_activity;
// Increment activity map pointer
x->mb_activity_ptr++;
      // adjust to the next column of source macroblocks
      x->plane[0].src.buf += 16;
    }

    // adjust to the next row of mbs
    x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
  }
// Calculate an "average" MB activity
calc_av_activity(cpi, activity_sum);
#if USE_ACT_INDEX
  // Calculate an activity index number for each mb
  calc_activity_index(cpi, x);
#endif
}
void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  // Apply the masking to the RD multiplier.
  a = act + (2 * cpi->activity_avg);
  b = (2 * act) + cpi->activity_avg;

  x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#endif

  // Activity based Zbin adjustment
  adjust_act_zbin(cpi, x);
}
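// Editor's note (illustrative): the masking factor b / a =
// (2 * act + avg) / (act + 2 * avg) lies in (1/2, 2): a flat MB (act near 0)
// roughly halves rdmult, a very busy MB roughly doubles it, and act == avg
// leaves it unchanged.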
static void update_state(VP9_COMP *cpi,
                         PICK_MODE_CONTEXT *ctx,
                         BLOCK_SIZE_TYPE bsize,
                         int output_enabled) {
  int i, x_idx, y;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
#if CONFIG_DEBUG || CONFIG_INTERNAL_STATS
MB_PREDICTION_MODE mb_mode = mi->mbmi.mode;
#endif
  int mb_mode_index = ctx->best_mode_index;
  const int mis = cpi->common.mode_info_stride;
const int bh = 1 << mi_height_log2(bsize), bw = 1 << mi_width_log2(bsize);
assert(mb_mode < MB_MODE_COUNT);
assert(mb_mode_index < MAX_MODES);
assert(mi->mbmi.ref_frame[0] < MAX_REF_FRAMES);
assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES);
assert(mi->mbmi.sb_type == bsize);
// Restore the coding context of the MB to that that was in place
// when the mode was picked for it
for (y = 0; y < bh; y++) {
for (x_idx = 0; x_idx < bw; x_idx++) {
if ((xd->mb_to_right_edge >> (3 + LOG2_MI_SIZE)) + bw > x_idx &&
(xd->mb_to_bottom_edge >> (3 + LOG2_MI_SIZE)) + bh > y) {
        MODE_INFO *mi_addr = xd->mode_info_context + x_idx + y * mis;

        *mi_addr = *mi;
      }
    }
  }

  if (bsize < BLOCK_SIZE_SB32X32) {
    if (bsize < BLOCK_SIZE_MB16X16)
      ctx->txfm_rd_diff[ALLOW_16X16] = ctx->txfm_rd_diff[ALLOW_8X8];
    ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
  }
if (mbmi->ref_frame[0] != INTRA_FRAME && mbmi->sb_type < BLOCK_SIZE_SB8X8) {
*x->partition_info = ctx->partition_info;
mbmi->mv[0].as_int = x->partition_info->bmi[3].mv.as_int;
    mbmi->mv[1].as_int = x->partition_info->bmi[3].second_mv.as_int;
  }

  x->skip = ctx->skip;
  if (!output_enabled)
    return;

  if (!vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
    for (i = 0; i < NB_TXFM_MODES; i++) {
      cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
    }
  }
if (cpi->common.frame_type == KEY_FRAME) {
// Restore the coding modes to that held in the coding context
// for (i = 0; i < 16; i++)
// {
// xd->block[i].bmi.as_mode =
// xd->mode_info_context->bmi[i].as_mode;
// assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
// }
#if CONFIG_INTERNAL_STATS
static const int kf_mode_index[] = {
THR_DC /*DC_PRED*/,
THR_V_PRED /*V_PRED*/,
THR_H_PRED /*H_PRED*/,
THR_D45_PRED /*D45_PRED*/,
THR_D135_PRED /*D135_PRED*/,
THR_D117_PRED /*D117_PRED*/,
THR_D153_PRED /*D153_PRED*/,
THR_D27_PRED /*D27_PRED*/,
THR_D63_PRED /*D63_PRED*/,
      THR_TM /*TM_PRED*/,
      THR_B_PRED /*I4X4_PRED*/,
    };
    cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
#endif
} else {
/*
// Reduce the activation RD thresholds for the best choice mode
if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
(cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
{
int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
cpi->rd_thresh_mult[mb_mode_index] =
(cpi->rd_thresh_mult[mb_mode_index]
>= (MIN_THRESHMULT + best_adjustment)) ?
cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
MIN_THRESHMULT;
cpi->rd_threshes[mb_mode_index] =
(cpi->rd_baseline_thresh[mb_mode_index] >> 7)
* cpi->rd_thresh_mult[mb_mode_index];
}
*/
// Note how often each mode chosen as best
    cpi->mode_chosen_counts[mb_mode_index]++;
  }

  if (mbmi->ref_frame[0] != INTRA_FRAME &&
      (mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
    int_mv best_mv, best_second_mv;
const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0];
const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
best_mv.as_int = ctx->best_ref_mv.as_int;
best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
if (mbmi->mode == NEWMV) {
best_mv.as_int = mbmi->ref_mvs[rf1][0].as_int;
best_second_mv.as_int = mbmi->ref_mvs[rf2][0].as_int;
}
mbmi->best_mv.as_int = best_mv.as_int;
mbmi->best_second_mv.as_int = best_second_mv.as_int;
    vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
  }
if (bsize > BLOCK_SIZE_SB8X8 && mbmi->mode == NEWMV) {
int i, j;
for (j = 0; j < bh; ++j)
for (i = 0; i < bw; ++i)
if ((xd->mb_to_right_edge >> (3 + LOG2_MI_SIZE)) + bw > i &&
(xd->mb_to_bottom_edge >> (3 + LOG2_MI_SIZE)) + bh > j)
          xd->mode_info_context[mis * j + i].mbmi = *mbmi;
  }

  if (cpi->common.mcomp_filter_type == SWITCHABLE &&
      is_inter_mode(mbmi->mode)) {
++cpi->common.fc.switchable_interp_count
[vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp9_switchable_interp_map[mbmi->interp_filter]];
}
cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
  cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
}
static unsigned find_seg_id(VP9_COMMON *cm, uint8_t *buf, BLOCK_SIZE_TYPE bsize,
int start_y, int height, int start_x, int width) {
const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize);
const int end_x = MIN(start_x + bw, width);
const int end_y = MIN(start_y + bh, height);
int x, y;
unsigned seg_id = -1;
buf += width * start_y;
assert(start_y < cm->mi_rows && start_x < cm->cur_tile_mi_col_end);
for (y = start_y; y < end_y; y++, buf += width) {
for (x = start_x; x < end_x; x++) {
seg_id = MIN(seg_id, buf[x]);
}
}
return seg_id;
}
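// Editor's note (illustrative): seg_id starts at (unsigned)-1, i.e. UINT_MAX,
// so the first map value always wins the MIN(); the function therefore
// returns the smallest segment id present anywhere in the block.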
void vp9_setup_src_planes(MACROBLOCK *x,
const YV12_BUFFER_CONFIG *src,
int mb_row, int mb_col) {
uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
src->alpha_buffer};
int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
src->alpha_stride};
int i;
for (i = 0; i < MAX_MB_PLANE; i++) {
setup_pred_plane(&x->plane[i].src,
buffers[i], strides[i],
mb_row, mb_col, NULL,
x->e_mbd.plane[i].subsampling_x,
x->e_mbd.plane[i].subsampling_y);
}
}

static void set_offsets(VP9_COMP *cpi,
                        int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize) {
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const int dst_fb_idx = cm->new_fb_idx;
const int idx_str = xd->mode_info_stride * mi_row + mi_col;
const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize);
const int mb_row = mi_row >> 1;
const int mb_col = mi_col >> 1;
  const int idx_map = mb_row * cm->mb_cols + mb_col;
  int i;
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].above_context = cm->above_context[i] +
(mi_col * 2 >> xd->plane[i].subsampling_x);
xd->plane[i].left_context = cm->left_context[i] +
        (((mi_row * 2) & 15) >> xd->plane[i].subsampling_y);
  }

  // partition contexts
  set_partition_seg_context(cm, xd, mi_row, mi_col);
// Activity map pointer
x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
x->active_ptr = cpi->active_map + idx_map;
/* pointers to mode info contexts */
x->partition_info = x->pi + idx_str;
xd->mode_info_context = cm->mi + idx_str;
mbmi = &xd->mode_info_context->mbmi;
// Special case: if prev_mi is NULL, the previous mode info context
// cannot be used.
xd->prev_mode_info_context = cm->prev_mi ?
cm->prev_mi + idx_str : NULL;
setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
/* Set up limit values for MV components to prevent them from
* extending beyond the UMV borders assuming 16x16 block size */
x->mv_row_min = -((mi_row * MI_SIZE) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_col_min = -((mi_col * MI_SIZE) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_row_max = ((cm->mi_rows - mi_row) * MI_SIZE +
(VP9BORDERINPIXELS - MI_SIZE * bh - VP9_INTERP_EXTEND));
x->mv_col_max = ((cm->mi_cols - mi_col) * MI_SIZE +
(VP9BORDERINPIXELS - MI_SIZE * bw - VP9_INTERP_EXTEND));
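  // Editor's note (illustrative): with MI_SIZE = 8, VP9BORDERINPIXELS = 160
  // and VP9_INTERP_EXTEND = 4, a block at mi_row 0 gets mv_row_min = -156:
  // motion vectors may reach into the extended border, minus the pixels the
  // interpolation filter needs beyond the prediction block.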
// Set up distance of MB to edge of frame in 1/8th pel units
assert(!(mi_col & (bw - 1)) && !(mi_row & (bh - 1)));
set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
/* R/D setup */
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
/* segment ID */
if (xd->segmentation_enabled) {
uint8_t *map = xd->update_mb_segmentation_map ? cpi->segmentation_map
: cm->last_frame_seg_map;
mbmi->segment_id = find_seg_id(cm, map, bsize, mi_row,
cm->mi_rows, mi_col, cm->mi_cols);
assert(mbmi->segment_id <= (MAX_MB_SEGMENTS-1));
vp9_mb_init_quantizer(cpi, x);
if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
!vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
} else {
const int y = mb_row & ~3;
const int x = mb_col & ~3;
const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
      const int tile_progress =
          cm->cur_tile_mi_col_start * cm->mb_rows >> 1;
      const int mb_cols =
          (cm->cur_tile_mi_col_end - cm->cur_tile_mi_col_start) >> 1;

      cpi->seg0_progress =
          ((y * mb_cols + x * 4 + p32 + p16 + tile_progress) << 16) / cm->MBs;
}
} else {
mbmi->segment_id = 0;
}
}
static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
TOKENEXTRA **tp, int *totalrate, int *totaldist,
BLOCK_SIZE_TYPE bsize, PICK_MODE_CONTEXT *ctx) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
set_offsets(cpi, mi_row, mi_col, bsize);
xd->mode_info_context->mbmi.sb_type = bsize;
vp9_activity_masking(cpi, x);
/* Find best coding mode & reconstruct the MB so it is available
* as a predictor for MBs that follow in the SB */
  if (cm->frame_type == KEY_FRAME) {
    vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx);
  } else {
    vp9_rd_pick_inter_mode_sb(cpi, x, mi_row, mi_col, totalrate, totaldist,
                              bsize, ctx);
  }
}
static void update_stats(VP9_COMP *cpi, int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
  int segment_id, seg_ref_active;

  segment_id = mbmi->segment_id;
seg_ref_active = vp9_segfeature_active(xd, segment_id,
SEG_LVL_REF_FRAME);
if (!seg_ref_active)
cpi->intra_inter_count[vp9_get_pred_context(cm, xd, PRED_INTRA_INTER)]
[mbmi->ref_frame[0] > INTRA_FRAME]++;
// If the segment reference feature is enabled we have only a single
// reference frame allowed for the segment so exclude it from
// the reference frame counts used to work out probabilities.
if ((mbmi->ref_frame[0] > INTRA_FRAME) && !seg_ref_active) {
if (cm->comp_pred_mode == HYBRID_PREDICTION)
cpi->comp_inter_count[vp9_get_pred_context(cm, xd,
PRED_COMP_INTER_INTER)]
[mbmi->ref_frame[1] > INTRA_FRAME]++;
if (mbmi->ref_frame[1] > INTRA_FRAME) {
cpi->comp_ref_count[vp9_get_pred_context(cm, xd, PRED_COMP_REF_P)]
[mbmi->ref_frame[0] == GOLDEN_FRAME]++;
} else {
cpi->single_ref_count[vp9_get_pred_context(cm, xd, PRED_SINGLE_REF_P1)]
[0][mbmi->ref_frame[0] != LAST_FRAME]++;
if (mbmi->ref_frame[0] != LAST_FRAME)
cpi->single_ref_count[vp9_get_pred_context(cm, xd,
PRED_SINGLE_REF_P2)]
[1][mbmi->ref_frame[0] != GOLDEN_FRAME]++;
}
  }

  // Count of last ref frame 0,0 usage.
  if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame[0] == LAST_FRAME))
    cpi->inter_zz_count++;
}
// TODO(jingning): the variables used here are a little complicated. Further
// refactoring is needed to organize the temporary buffers when recursive
// partitioning down to 4x4 block size is enabled.
static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
switch (bsize) {
case BLOCK_SIZE_SB64X64:
return &x->sb64_context;
case BLOCK_SIZE_SB64X32:
return &x->sb64x32_context[xd->sb_index];
case BLOCK_SIZE_SB32X64:
return &x->sb32x64_context[xd->sb_index];
case BLOCK_SIZE_SB32X32:
return &x->sb32_context[xd->sb_index];
case BLOCK_SIZE_SB32X16:
return &x->sb32x16_context[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_SB16X32:
return &x->sb16x32_context[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_MB16X16:
return &x->mb_context[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_SB16X8:
return &x->sb16x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X16:
return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X8:
return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X4:
return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB4X8:
return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_AB4X4:
return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
default:
assert(0);
return NULL;
}
}
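// Editor's note (illustrative): this tree of contexts mirrors the recursive
// partition search: sb_index picks the 32x32 inside the 64x64, mb_index the
// 16x16 inside the 32x32, and b_index the 8x8 inside the 16x16, giving every
// level of the search its own scratch PICK_MODE_CONTEXT.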
static BLOCK_SIZE_TYPE *get_sb_partitioning(MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD *xd = &x->e_mbd;
switch (bsize) {
case BLOCK_SIZE_SB64X64:
return &x->sb64_partitioning;
case BLOCK_SIZE_SB32X32:
return &x->sb_partitioning[xd->sb_index];
case BLOCK_SIZE_MB16X16:
return &x->mb_partitioning[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_SB8X8:
return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
default:
assert(0);
return NULL;
}
}
static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
PARTITION_CONTEXT sa[8],
PARTITION_CONTEXT sl[8],
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int p;
int bwl = b_width_log2(bsize), bw = 1 << bwl;
int bhl = b_height_log2(bsize), bh = 1 << bhl;
int mwl = mi_width_log2(bsize), mw = 1 << mwl;
int mhl = mi_height_log2(bsize), mh = 1 << mhl;
for (p = 0; p < MAX_MB_PLANE; p++) {
vpx_memcpy(cm->above_context[p] +
((mi_col * 2) >> xd->plane[p].subsampling_x),
a + bw * p,
sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[p].subsampling_x);
vpx_memcpy(cm->left_context[p] +
((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
l + bh * p,
sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[p].subsampling_y);
}
vpx_memcpy(cm->above_seg_context + mi_col, sa,
sizeof(PARTITION_CONTEXT) * mw);
vpx_memcpy(cm->left_seg_context + (mi_row & MI_MASK), sl,
sizeof(PARTITION_CONTEXT) * mh);
}
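// Editor's note (illustrative): restore_context() above and save_context()
// below are used as a pair around each trial partition in the RD search, so
// entropy and partition contexts mutated while coding one candidate do not
// leak into the evaluation of the next.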
static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
PARTITION_CONTEXT sa[8],
PARTITION_CONTEXT sl[8],
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int p;
int bwl = b_width_log2(bsize), bw = 1 << bwl;
int bhl = b_height_log2(bsize), bh = 1 << bhl;
int mwl = mi_width_log2(bsize), mw = 1 << mwl;
int mhl = mi_height_log2(bsize), mh = 1 << mhl;
// buffer the above/left context information of the block in search.
for (p = 0; p < MAX_MB_PLANE; ++p) {
vpx_memcpy(a + bw * p, cm->above_context[p] +
(mi_col * 2 >> xd->plane[p].subsampling_x),
sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[p].subsampling_x);
vpx_memcpy(l + bh * p, cm->left_context[p] +
((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[p].subsampling_y);
}
vpx_memcpy(sa, cm->above_seg_context + mi_col,
sizeof(PARTITION_CONTEXT) * mw);
vpx_memcpy(sl, cm->left_seg_context + (mi_row & MI_MASK),
sizeof(PARTITION_CONTEXT) * mh);
}
static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp,
int mi_row, int mi_col, int output_enabled,
BLOCK_SIZE_TYPE bsize, int sub_index) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (sub_index != -1)
*(get_sb_index(xd, bsize)) = sub_index;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index > 0)
return;
set_offsets(cpi, mi_row, mi_col, bsize);
update_state(cpi, get_block_context(x, bsize), bsize, output_enabled);
encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);
if (output_enabled) {
update_stats(cpi, mi_row, mi_col);
(*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}
static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
                      int mi_row, int mi_col, int output_enabled,
                      BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
  const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
  int bwl, bhl;
  int pl = -1;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize >= BLOCK_SIZE_SB8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
    c1 = *(get_sb_partitioning(x, bsize));
  }

  bwl = b_width_log2(c1), bhl = b_height_log2(c1);
if (bsl == bwl && bsl == bhl) {
if (output_enabled && bsize >= BLOCK_SIZE_SB8X8)
cpi->partition_count[pl][PARTITION_NONE]++;
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
} else if (bsl == bhl && bsl > bwl) {
if (output_enabled)
cpi->partition_count[pl][PARTITION_VERT]++;
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
encode_b(cpi, tp, mi_row, mi_col + bs, output_enabled, c1, 1);
} else if (bsl == bwl && bsl > bhl) {
if (output_enabled)
cpi->partition_count[pl][PARTITION_HORZ]++;
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
encode_b(cpi, tp, mi_row + bs, mi_col, output_enabled, c1, 1);
} else {
BLOCK_SIZE_TYPE subsize;
int i;
assert(bwl < bsl && bhl < bsl);
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (output_enabled)
cpi->partition_count[pl][PARTITION_SPLIT]++;
    for (i = 0; i < 4; i++) {
      const int x_idx = i & 1, y_idx = i >> 1;

      *(get_sb_index(xd, subsize)) = i;
      encode_sb(cpi, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
                output_enabled, subsize);
    }
  }
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || bsl == bwl || bsl == bhl)) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
    update_partition_context(xd, c1, bsize);
  }
}
static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int bsl = b_width_log2(bsize);
  int bs = (1 << bsl) / 2;  // block width in 8-pel mode-info units
int block_row, block_col;
int row, col;
// this test function sets the entire macroblock to the same bsize
for (block_row = 0; block_row < 8; block_row += bs) {
for (block_col = 0; block_col < 8; block_col += bs) {
for (row = 0; row < bs; row++) {
for (col = 0; col < bs; col++) {
m[(block_row+row)*mis + block_col+col].mbmi.sb_type = bsize;
}
}
}
}
}
static void set_block_size(VP9_COMMON *const cm,
MODE_INFO *m, BLOCK_SIZE_TYPE bsize, int mis,
int mi_row, int mi_col) {
int row, col;
int bwl = b_width_log2(bsize);
int bhl = b_height_log2(bsize);
int bsl = (bwl > bhl ? bwl : bhl);
  int bs = (1 << bsl) / 2;  // block width in 8-pel mode-info units
MODE_INFO *m2 = m + mi_row * mis + mi_col;
for (row = 0; row < bs; row++) {
for (col = 0; col < bs; col++) {
if (mi_row + row >= cm->mi_rows || mi_col + col >= cm->mi_cols)
        continue;
m2[row*mis+col].mbmi.sb_type = bsize;
}
}
}
typedef struct {
int64_t sum_square_error;
int64_t sum_error;
int count;
int variance;
} var;
#define VT(TYPE, BLOCKSIZE) \
typedef struct { \
var none; \
var horz[2]; \
var vert[2]; \
BLOCKSIZE split[4]; } TYPE;
VT(v8x8, var)
VT(v16x16, v8x8)
VT(v32x32, v16x16)
VT(v64x64, v32x32)
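// Editor's note (illustrative): the VT macro nests these types recursively: a
// v64x64 holds four v32x32 splits, each holding four v16x16s, down to v8x8
// whose four split entries are plain 'var' leaves, one record per level of
// the variance tree.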
typedef enum {
V16X16,
V32X32,
V64X64,
} TREE_LEVEL;
// Set variance values given sum square error, sum error, count.
static void fill_variance(var *v, int64_t s2, int64_t s, int c) {
v->sum_square_error = s2;
v->sum_error = s;
v->count = c;
v->variance = 256
* (v->sum_square_error - v->sum_error * v->sum_error / v->count)
/ v->count;
}
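#if 0
// Editor's illustration (excluded from the build): a minimal check of
// fill_variance(). With s2 = 20, s = 8, c = 4 the mean is 2 and E[x^2] is 5,
// so the population variance is 1 and the 256-scaled result is 256.
static void check_fill_variance(void) {
  var v;
  fill_variance(&v, 20, 8, 4);
  assert(v.variance == 256);
}
#endif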
// Combine 2 variance structures by summing the sum_error, sum_square_error,
// and counts and then calculating the new variance.
void sum_2_variances(var *r, var *a, var *b) {
fill_variance(r, a->sum_square_error + b->sum_square_error,
a->sum_error + b->sum_error, a->count + b->count);
}
// Fill one level of our variance tree, by summing the split sums into each of
// the horizontal, vertical and none from split and recalculating variance.
#define fill_variance_tree(VT) \
  sum_2_variances(&VT.horz[0], &VT.split[0].none, &VT.split[1].none); \
  sum_2_variances(&VT.horz[1], &VT.split[2].none, &VT.split[3].none); \
  sum_2_variances(&VT.vert[0], &VT.split[0].none, &VT.split[2].none); \
  sum_2_variances(&VT.vert[1], &VT.split[1].none, &VT.split[3].none); \
  sum_2_variances(&VT.none, &VT.vert[0], &VT.vert[1]);
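// Editor's note (illustrative): split[] is in raster order (0 top-left, 1
// top-right, 2 bottom-left, 3 bottom-right), so horz[0]/horz[1] become the
// top/bottom halves of a horizontal split, vert[0]/vert[1] the left/right
// halves of a vertical split, and 'none' the whole block.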
// Set the blocksize in the macroblock info structure if the variance is less
// than our threshold to one of none, horz, vert.
#define set_vt_size(VT, BLOCKSIZE, R, C, ACTION) \
if (VT.none.variance < threshold) { \
set_block_size(cm, m, BLOCKSIZE, mis, R, C); \
ACTION; \
} \
if (VT.horz[0].variance < threshold && VT.horz[1].variance < threshold ) { \
set_block_size(cm, m, get_subsize(BLOCKSIZE, PARTITION_HORZ), mis, R, C); \
ACTION; \
} \
if (VT.vert[0].variance < threshold && VT.vert[1].variance < threshold ) { \
set_block_size(cm, m, get_subsize(BLOCKSIZE, PARTITION_VERT), mis, R, C); \
ACTION; \
}
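// Editor's note (illustrative): ACTION is presumably a flow-control statement
// such as 'return' or 'continue'; the first partition whose pieces all fall
// under the variance threshold sets the block size and stops further
// splitting.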
static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
// TODO(JBB): More experimentation or testing of this threshold;
int64_t threshold = 4;
int i, j, k;
v64x64 vt;
unsigned char * s;
int sp;
const unsigned char * d = xd->plane[0].pre->buf;
int dp = xd->plane[0].pre->stride;
int pixels_wide = 64, pixels_high = 64;
vpx_memset(&vt, 0, sizeof(vt));