/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_onyx_int.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"
#include <limits.h>
#include "vpx_ports/vpx_timer.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_mvref_common.h"
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize);
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
/* activity_avg must be positive, or flat regions could get a zero weight
* (infinite lambda), which confounds analysis.
* This also avoids the need for divide by zero checks in
 * vp9_activity_masking().
 */
/* This is used as a reference when computing the source variance for the
* purposes of activity masking.
* Eventually this should be replaced by custom no-reference routines,
* which will be faster.
*/
static const uint8_t VP9_VAR_OFFS[16] = {128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128};
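// Note on the flat reference: vp9_variance16x16() below is given this
// constant-128 block with stride 0. Assuming the usual sse - sum^2/N
// variance kernel, the constant offset cancels out, so 'act' ends up being
// the source block's own variance about its mean -- small for flat regions,
// large for textured ones.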
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
unsigned int act;
unsigned int sse;
/* TODO: This could also be done over smaller areas (8x8), but that would
* require extensive changes elsewhere, as lambda is assumed to be fixed
* over an entire MB in most of the code.
* Another option is to compute four 8x8 variances, and pick a single
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
act = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
VP9_VAR_OFFS, 0, &sse);
/* If the region is flat, lower the activity some more. */
if (act < 8 << 12)
act = act < 5 << 12 ? act : 5 << 12;
return act;
}
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
int use_dc_pred) {
return vp9_encode_intra(cpi, x, use_dc_pred);
}
DECLARE_ALIGNED(16, static const uint8_t, vp9_64x64_zeros[64*64]) = {0};
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
int mb_row, int mb_col) {
unsigned int mb_activity;
if (ALT_ACT_MEASURE) {
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
// Or use an alternative.
mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
} else {
// Original activity measure from Tim T's code.
mb_activity = tt_activity_measure(cpi, x);
}
if (mb_activity < VP9_ACTIVITY_AVG_MIN)
mb_activity = VP9_ACTIVITY_AVG_MIN;
return mb_activity;
}
// Calculate an "average" mb activity value for the frame
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
// Find median: Simple n^2 algorithm for experimentation
{
unsigned int median;
unsigned int i, j;
unsigned int *sortlist;
unsigned int tmp;
// Create a list to sort to
CHECK_MEM_ERROR(&cpi->common, sortlist, vpx_calloc(sizeof(unsigned int),
cpi->common.MBs));
// Copy map to sort list
vpx_memcpy(sortlist, cpi->mb_activity_map,
// Ripple each value down to its correct position
for (i = 1; i < cpi->common.MBs; i ++) {
for (j = i; j > 0; j --) {
if (sortlist[j] < sortlist[j - 1]) {
// Swap values
tmp = sortlist[j - 1];
sortlist[j - 1] = sortlist[j];
sortlist[j] = tmp;
} else
// Even number of MBs, so estimate the median as the mean of the two either side.
median = (1 + sortlist[cpi->common.MBs >> 1] +
cpi->activity_avg = (unsigned int) (activity_sum / cpi->common.MBs);
if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;
// Experimental code: return fixed value normalized for several clips
if (ALT_ACT_MEASURE)
cpi->activity_avg = 100000;
// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
VP9_COMMON *const cm = &cpi->common;
FILE *f = fopen("norm_act.stt", "a");
fprintf(f, "\n%12d\n", cpi->activity_avg);
// Reset pointers to start of activity map
x->mb_activity_ptr = cpi->mb_activity_map;
// Calculate normalized mb activity number.
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
// Read activity from the map
act = *(x->mb_activity_ptr);
// Calculate a normalized activity number
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
*(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
*(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
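// Both expressions above evaluate to 0 when act equals activity_avg and move
// away from 0 as the block's activity deviates from the frame average; the
// first form spans roughly -1..+3, the second roughly -3..+1, and presumably
// only one of the two assignments is enabled in the complete source. The
// (x >> 1) terms merely round the integer divisions.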
// Increment activity map pointers
x->mb_activity_ptr++;
}
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map(VP9_COMP *cpi) {
YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
int recon_yoffset;
int recon_y_stride = new_yv12->y_stride;
int mb_row, mb_col;
unsigned int mb_activity;
int64_t activity_sum = 0;
x->mb_activity_ptr = cpi->mb_activity_map;
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// reset above block coeffs
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
// measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
// Store MB level activity details.
*x->mb_activity_ptr = mb_activity;
// Increment activity map pointer
x->mb_activity_ptr++;
x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
// Calculate an "average" MB activity
calc_av_activity(cpi, activity_sum);
// Calculate an activity index number of each mb
calc_activity_index(cpi, x);
void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
x->errorperbit += (x->errorperbit == 0);
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + (2 * cpi->activity_avg);
b = (2 * act) + cpi->activity_avg;
x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a);
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
x->errorperbit += (x->errorperbit == 0);
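// Rough effect of the scaling above: rdmult is multiplied by
// (2 * act + avg) / (act + 2 * avg), which is 1.0 when this block's activity
// matches the frame average, tends towards 0.5 for very flat blocks (rate
// becomes relatively cheap, so they receive more bits) and approaches 2.0 for
// very busy blocks, which can hide distortion and so receive fewer bits.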
// Activity based Zbin adjustment
adjust_act_zbin(cpi, x);
static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
BLOCK_SIZE_TYPE bsize, int output_enabled) {
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
MB_MODE_INFO * const mbmi = &xd->mode_info_context->mbmi;
const int mis = cpi->common.mode_info_stride;
const int bh = 1 << mi_height_log2(bsize), bw = 1 << mi_width_log2(bsize);
const MB_PREDICTION_MODE mb_mode = mi->mbmi.mode;
assert(mb_mode < MB_MODE_COUNT);
assert(mb_mode_index < MAX_MODES);
assert(mi->mbmi.ref_frame[0] < MAX_REF_FRAMES);
assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES);
assert(mi->mbmi.sb_type == bsize);
// Restore the coding context of the MB to that which was in place
// when the mode was picked for it
for (y = 0; y < bh; y++) {
for (x_idx = 0; x_idx < bw; x_idx++) {
if ((xd->mb_to_right_edge >> (3 + LOG2_MI_SIZE)) + bw > x_idx
&& (xd->mb_to_bottom_edge >> (3 + LOG2_MI_SIZE)) + bh > y) {
MODE_INFO *mi_addr = xd->mode_info_context + x_idx + y * mis;
if (bsize < BLOCK_SIZE_MB16X16)
ctx->txfm_rd_diff[ALLOW_16X16] = ctx->txfm_rd_diff[ALLOW_8X8];
ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
if (mbmi->ref_frame[0] != INTRA_FRAME && mbmi->sb_type < BLOCK_SIZE_SB8X8) {
*x->partition_info = ctx->partition_info;
mbmi->mv[0].as_int = x->partition_info->bmi[3].mv.as_int;
mbmi->mv[1].as_int = x->partition_info->bmi[3].second_mv.as_int;
x->skip = ctx->skip;
if (!vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
for (i = 0; i < NB_TXFM_MODES; i++) {
cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
if (cpi->common.frame_type == KEY_FRAME) {
// Restore the coding modes to that held in the coding context
// for (i = 0; i < 16; i++)
// {
// xd->block[i].bmi.as_mode =
// xd->mode_info_context->bmi[i].as_mode;
// assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
// }
#if CONFIG_INTERNAL_STATS
static const int kf_mode_index[] = {
THR_DC /*DC_PRED*/,
THR_V_PRED /*V_PRED*/,
THR_H_PRED /*H_PRED*/,
THR_D45_PRED /*D45_PRED*/,
THR_D135_PRED /*D135_PRED*/,
THR_D117_PRED /*D117_PRED*/,
THR_D153_PRED /*D153_PRED*/,
THR_D27_PRED /*D27_PRED*/,
THR_D63_PRED /*D63_PRED*/,
THR_TM /*TM_PRED*/,
} else {
// Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
if (mbmi->ref_frame[0] != INTRA_FRAME
&& (mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0];
const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
best_mv.as_int = ctx->best_ref_mv.as_int;
best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
if (mbmi->mode == NEWMV) {
best_mv.as_int = mbmi->ref_mvs[rf1][0].as_int;
best_second_mv.as_int = mbmi->ref_mvs[rf2][0].as_int;
}
mbmi->best_mv.as_int = best_mv.as_int;
mbmi->best_second_mv.as_int = best_second_mv.as_int;
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
if (bsize > BLOCK_SIZE_SB8X8 && mbmi->mode == NEWMV) {
int i, j;
for (j = 0; j < bh; ++j)
for (i = 0; i < bw; ++i)
if ((xd->mb_to_right_edge >> (3 + LOG2_MI_SIZE)) + bw > i
&& (xd->mb_to_bottom_edge >> (3 + LOG2_MI_SIZE)) + bh > j)
xd->mode_info_context[mis * j + i].mbmi = *mbmi;
if (cpi->common.mcomp_filter_type == SWITCHABLE
&& is_inter_mode(mbmi->mode)) {
++cpi->common.fc.switchable_interp_count[vp9_get_pred_context(
&cpi->common, xd, PRED_SWITCHABLE_INTERP)][vp9_switchable_interp_map[mbmi
->interp_filter]];
cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, src
->alpha_buffer};
int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, src
->alpha_stride};
int i;
for (i = 0; i < MAX_MB_PLANE; i++) {
setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mb_row, mb_col,
NULL, x->e_mbd.plane[i].subsampling_x,
x->e_mbd.plane[i].subsampling_y);
}
static void set_offsets(VP9_COMP *cpi, int mi_row, int mi_col,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCK * const x = &cpi->mb;
VP9_COMMON * const cm = &cpi->common;
MACROBLOCKD * const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const int dst_fb_idx = cm->new_fb_idx;
const int idx_str = xd->mode_info_stride * mi_row + mi_col;
const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize);
const int mb_row = mi_row >> 1;
const int mb_col = mi_col >> 1;
const int idx_map = mb_row * cm->mb_cols + mb_col;
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].above_context = cm->above_context[i]
+ (mi_col * 2 >> xd->plane[i].subsampling_x);
xd->plane[i].left_context = cm->left_context[i]
+ (((mi_row * 2) & 15) >> xd->plane[i].subsampling_y);
set_partition_seg_context(cm, xd, mi_row, mi_col);
// Activity map pointer
x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
x->active_ptr = cpi->active_map + idx_map;
/* pointers to mode info contexts */
x->partition_info = x->pi + idx_str;
xd->mode_info_context = cm->mi + idx_str;
// Special case: if prev_mi is NULL, the previous mode info context
// cannot be used.
xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + idx_str : NULL;
setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
/* Set up limit values for MV components to prevent them from
* extending beyond the UMV borders assuming 16x16 block size */
x->mv_row_min = -((mi_row * MI_SIZE)+ VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_col_min = -((mi_col * MI_SIZE)+ VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_row_max = ((cm->mi_rows - mi_row) * MI_SIZE
+ (VP9BORDERINPIXELS - MI_SIZE * bh - VP9_INTERP_EXTEND));
x->mv_col_max = ((cm->mi_cols - mi_col) * MI_SIZE
+ (VP9BORDERINPIXELS - MI_SIZE * bw - VP9_INTERP_EXTEND));
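// The min limits let the predictor slide up/left until only VP9_INTERP_EXTEND
// pixels of the extended (UMV) border remain for the sub-pel filter taps; the
// max limits do the same past the bottom/right edge after accounting for the
// block's own size (MI_SIZE * bh / bw), keeping every interpolation read
// inside the padded reference frame.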
// Set up distance of MB to edge of frame in 1/8th pel units
assert(!(mi_col & (bw - 1)) && !(mi_row & (bh - 1)));
set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
/* R/D setup */
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
/* segment ID */
if (xd->segmentation_enabled) {
uint8_t *map = xd->update_mb_segmentation_map ? cpi->segmentation_map
: cm->last_frame_seg_map;
mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
if (xd->segmentation_enabled && cpi->seg0_cnt > 0
&& !vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME)
&& vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
} else {
const int y = mb_row & ~3;
const int x = mb_col & ~3;
const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
const int tile_progress = cm->cur_tile_mi_col_start * cm->mb_rows >> 1;
const int mb_cols = (cm->cur_tile_mi_col_end - cm->cur_tile_mi_col_start)
>> 1;
cpi->seg0_progress = ((y * mb_cols + x * 4 + p32 + p16 + tile_progress)
<< 16) / cm->MBs;
}
} else {
mbmi->segment_id = 0;
}
}
static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
TOKENEXTRA **tp, int *totalrate, int64_t *totaldist,
BLOCK_SIZE_TYPE bsize, PICK_MODE_CONTEXT *ctx) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
set_offsets(cpi, mi_row, mi_col, bsize);
xd->mode_info_context->mbmi.sb_type = bsize;
vp9_activity_masking(cpi, x);
/* Find best coding mode & reconstruct the MB so it is available
* as a predictor for MBs that follow in the SB */
vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx);
vp9_rd_pick_inter_mode_sb(cpi, x, mi_row, mi_col, totalrate, totaldist,
static void update_stats(VP9_COMP *cpi, int mi_row, int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
segment_id = mbmi->segment_id;
seg_ref_active = vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME);
if (!seg_ref_active)
cpi->intra_inter_count[vp9_get_pred_context(cm, xd, PRED_INTRA_INTER)][mbmi
->ref_frame[0] > INTRA_FRAME]++;
// If the segment reference feature is enabled we have only a single
// reference frame allowed for the segment so exclude it from
// the reference frame counts used to work out probabilities.
if ((mbmi->ref_frame[0] > INTRA_FRAME) && !seg_ref_active) {
if (cm->comp_pred_mode == HYBRID_PREDICTION)
cpi->comp_inter_count[vp9_get_pred_context(cm, xd,
PRED_COMP_INTER_INTER)][mbmi
->ref_frame[1] > INTRA_FRAME]++;
cpi->comp_ref_count[vp9_get_pred_context(cm, xd, PRED_COMP_REF_P)][mbmi
->ref_frame[0] == GOLDEN_FRAME]++;
cpi->single_ref_count[vp9_get_pred_context(cm, xd, PRED_SINGLE_REF_P1)][0][mbmi
->ref_frame[0] != LAST_FRAME]++;
cpi->single_ref_count[vp9_get_pred_context(cm, xd, PRED_SINGLE_REF_P2)][1][mbmi
->ref_frame[0] != GOLDEN_FRAME]++;
if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame[0] == LAST_FRAME))
// TODO(jingning): the variables used here are a little complicated. Further
// refactoring is needed to organize the temporary buffers, once recursive
// partitioning down to 4x4 block size is enabled.
static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
switch (bsize) {
case BLOCK_SIZE_SB64X64:
return &x->sb64_context;
case BLOCK_SIZE_SB64X32:
return &x->sb64x32_context[xd->sb_index];
case BLOCK_SIZE_SB32X64:
return &x->sb32x64_context[xd->sb_index];
case BLOCK_SIZE_SB32X32:
return &x->sb32_context[xd->sb_index];
case BLOCK_SIZE_SB32X16:
return &x->sb32x16_context[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_SB16X32:
return &x->sb16x32_context[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_MB16X16:
return &x->mb_context[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_SB16X8:
return &x->sb16x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X16:
return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X8:
return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X4:
return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB4X8:
return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_AB4X4:
return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
default:
assert(0);
}
}
static BLOCK_SIZE_TYPE *get_sb_partitioning(MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD *xd = &x->e_mbd;
switch (bsize) {
case BLOCK_SIZE_SB64X64:
return &x->sb64_partitioning;
case BLOCK_SIZE_SB32X32:
return &x->sb_partitioning[xd->sb_index];
case BLOCK_SIZE_MB16X16:
return &x->mb_partitioning[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_SB8X8:
return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
}
}
static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
int p;
int bwl = b_width_log2(bsize), bw = 1 << bwl;
int bhl = b_height_log2(bsize), bh = 1 << bhl;
int mwl = mi_width_log2(bsize), mw = 1 << mwl;
int mhl = mi_height_log2(bsize), mh = 1 << mhl;
for (p = 0; p < MAX_MB_PLANE; p++) {
vpx_memcpy(
cm->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
a + bw * p, sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[p].subsampling_x);
vpx_memcpy(
cm->left_context[p]
+ ((mi_row & MI_MASK)* 2 >> xd->plane[p].subsampling_y),l + bh * p,
sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[p].subsampling_y);
}
vpx_memcpy(cm->above_seg_context + mi_col, sa,
sizeof(PARTITION_CONTEXT) * mw);
vpx_memcpy(cm->left_seg_context + (mi_row & MI_MASK), sl,
static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
int p;
int bwl = b_width_log2(bsize), bw = 1 << bwl;
int bhl = b_height_log2(bsize), bh = 1 << bhl;
int mwl = mi_width_log2(bsize), mw = 1 << mwl;
int mhl = mi_height_log2(bsize), mh = 1 << mhl;
// buffer the above/left context information of the block in search.
for (p = 0; p < MAX_MB_PLANE; ++p) {
vpx_memcpy(
a + bw * p,
cm->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[p].subsampling_x);
vpx_memcpy(
l + bh * p,
cm->left_context[p]
+ ((mi_row & MI_MASK)* 2 >> xd->plane[p].subsampling_y),sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[p].subsampling_y);
}
vpx_memcpy(sa, cm->above_seg_context + mi_col,
sizeof(PARTITION_CONTEXT) * mw);
vpx_memcpy(sl, cm->left_seg_context + (mi_row & MI_MASK),
static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE_TYPE bsize, int sub_index) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (sub_index != -1)
*(get_sb_index(xd, bsize)) = sub_index;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index > 0)
return;
set_offsets(cpi, mi_row, mi_col, bsize);
update_state(cpi, get_block_context(x, bsize), bsize, output_enabled);
encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);
if (output_enabled) {
update_stats(cpi, mi_row, mi_col);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE_TYPE bsize) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize >= BLOCK_SIZE_SB8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
c1 = *(get_sb_partitioning(x, bsize));
bwl = b_width_log2(c1), bhl = b_height_log2(c1);
if (bsl == bwl && bsl == bhl) {
if (output_enabled && bsize >= BLOCK_SIZE_SB8X8)
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
} else if (bsl == bhl && bsl > bwl) {
if (output_enabled)
cpi->partition_count[pl][PARTITION_VERT]++;
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
encode_b(cpi, tp, mi_row, mi_col + bs, output_enabled, c1, 1);
} else if (bsl == bwl && bsl > bhl) {
if (output_enabled)
cpi->partition_count[pl][PARTITION_HORZ]++;
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
encode_b(cpi, tp, mi_row + bs, mi_col, output_enabled, c1, 1);
} else {
BLOCK_SIZE_TYPE subsize;
int i;
assert(bwl < bsl && bhl < bsl);
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (output_enabled)
cpi->partition_count[pl][PARTITION_SPLIT]++;
const int x_idx = i & 1, y_idx = i >> 1;
*(get_sb_index(xd, subsize)) = i;
encode_sb(cpi, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
if (bsize >= BLOCK_SIZE_SB8X8
&& (bsize == BLOCK_SIZE_SB8X8 || bsl == bwl || bsl == bhl)) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
update_partition_context(xd, c1, bsize);
static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int block_row, block_col;
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
m[block_row * mis + block_col].mbmi.sb_type = bsize;
static void copy_partitioning(VP9_COMP *cpi, MODE_INFO *m, MODE_INFO *p) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int block_row, block_col;
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
m[block_row * mis + block_col].mbmi.sb_type =
p[block_row * mis + block_col].mbmi.sb_type;
}
}
}
static void set_block_size(VP9_COMMON * const cm, MODE_INFO *m,
BLOCK_SIZE_TYPE bsize, int mis, int mi_row,
int mi_col) {
int bwl = b_width_log2(bsize);
int bhl = b_height_log2(bsize);
int bsl = (bwl > bhl ? bwl : bhl);
int bs = (1 << bsl) / 2;
MODE_INFO *m2 = m + mi_row * mis + mi_col;
for (row = 0; row < bs; row++) {
for (col = 0; col < bs; col++) {
if (mi_row + row >= cm->mi_rows || mi_col + col >= cm->mi_cols)
typedef struct {
int64_t sum_square_error;
int64_t sum_error;
int count;
int variance;
} var;
typedef struct {
var none;
var horz[2];
var vert[2];
} partition_variance;
#define VT(TYPE, BLOCKSIZE) \
typedef struct { \
partition_variance vt; \
BLOCKSIZE split[4]; \
} TYPE;
VT(v8x8, var)
VT(v16x16, v8x8)
VT(v32x32, v16x16)
VT(v64x64, v32x32)
typedef struct {
partition_variance *vt;
var *split[4];
} vt_node;
typedef enum {
V16X16,
V32X32,
V64X64,
} TREE_LEVEL;
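// The VT() typedefs above build a 4-ary variance tree: a v64x64 holds four
// v32x32 children, each v32x32 four v16x16, and each v16x16 four v8x8
// leaves. Every node carries a partition_variance ('vt') with the
// none/horz/vert aggregates that fill_variance_tree() derives from its four
// children; choose_partitioning() fills the 8x8 leaves directly from
// vp9_get_sse_sum_8x8().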
static void tree_to_node(void *data, BLOCK_SIZE_TYPE block_size, vt_node *node) {
int i;
switch (block_size) {
case BLOCK_SIZE_SB64X64: {
v64x64 *vt = (v64x64 *) data;
node->vt = &vt->vt;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].vt.none;
break;
}
case BLOCK_SIZE_SB32X32: {
v32x32 *vt = (v32x32 *) data;
node->vt = &vt->vt;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].vt.none;
break;
}
case BLOCK_SIZE_MB16X16: {
v16x16 *vt = (v16x16 *) data;
node->vt = &vt->vt;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].vt.none;
break;
}
case BLOCK_SIZE_SB8X8: {
v8x8 *vt = (v8x8 *) data;
node->vt = &vt->vt;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i];
break;
}
default:
node->vt = 0;
for (i = 0; i < 4; i++)
node->split[i] = 0;
assert(0);
}
}
// Set variance values given sum square error, sum error, count.
static void fill_variance(var *v, int64_t s2, int64_t s, int c) {
v->sum_square_error = s2;
v->sum_error = s;
v->count = c;
if (c > 0)
v->variance = 256
* (v->sum_square_error - v->sum_error * v->sum_error / v->count)
/ v->count;
else
v->variance = 0;
}
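// In other words, variance is stored as 256 * (E[x^2] - E[x]^2) over the
// counted samples; the fixed 256 scale just preserves precision through the
// integer divisions. For example, an 8x8 leaf (c == 64) with s2 == 6400 and
// s == 0 stores 256 * 6400 / 64 == 25600.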
// Combine 2 variance structures by summing the sum_error, sum_square_error,
// and counts and then calculating the new variance.
void sum_2_variances(var *r, var *a, var *b) {
fill_variance(r, a->sum_square_error + b->sum_square_error,
a->sum_error + b->sum_error, a->count + b->count);
}
static void fill_variance_tree(void *data, BLOCK_SIZE_TYPE block_size) {
vt_node node;
tree_to_node(data, block_size, &node);
sum_2_variances(&node.vt->horz[0], node.split[0], node.split[1]);
sum_2_variances(&node.vt->horz[1], node.split[2], node.split[3]);
sum_2_variances(&node.vt->vert[0], node.split[0], node.split[2]);
sum_2_variances(&node.vt->vert[1], node.split[1], node.split[3]);
sum_2_variances(&node.vt->none, &node.vt->vert[0], &node.vt->vert[1]);
}
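// Each call aggregates one level of the tree: the horz[] pair, the vert[]
// pair and the whole-node "none" statistics are all derived from the same
// four children, so none comes for free as vert[0] + vert[1].
// choose_partitioning() below applies this from the 16x16 level up to 64x64
// once the 8x8 leaves have been filled.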
#if PERFORM_RANDOM_PARTITIONING
static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
BLOCK_SIZE_TYPE block_size, int mi_row,
int mi_col, int mi_size) {
VP9_COMMON * const cm = &cpi->common;
vt_node vt;
const int mis = cm->mode_info_stride;
int64_t threshold = 4 * cpi->common.base_qindex * cpi->common.base_qindex;
tree_to_node(data, block_size, &vt);
// split none is available only if we have more than half a block size
// in width and height inside the visible image
if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows &&
(rand() & 3) < 1) {
set_block_size(cm, m, block_size, mis, mi_row, mi_col);
return 1;
}
// vertical split is available on all but the bottom border
if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
&& (rand() & 3) < 1) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_VERT), mis, mi_row,
mi_col);
return 1;
}
// horizontal split is available on all but the right border
if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
&& (rand() & 3) < 1) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_HORZ), mis, mi_row,
mi_col);
return 1;
}
return 0;
}
#else
static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
BLOCK_SIZE_TYPE block_size, int mi_row,
int mi_col, int mi_size) {
VP9_COMMON * const cm = &cpi->common;
vt_node vt;
const int mis = cm->mode_info_stride;
int64_t threshold = 50 * cpi->common.base_qindex;
tree_to_node(data, block_size, &vt);
// split none is available only if we have more than half a block size
// in width and height inside the visible image
if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows
&& vt.vt->none.variance < threshold) {
set_block_size(cm, m, block_size, mis, mi_row, mi_col);
return 1;
}
// vertical split is available on all but the bottom border
if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
&& vt.vt->vert[1].variance < threshold) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_VERT), mis, mi_row,
mi_col);
return 1;
}
// horizontal split is available on all but the right border
if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
&& vt.vt->horz[1].variance < threshold) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_HORZ), mis, mi_row,
mi_col);
return 1;
}
return 0;
}
#endif
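// In the default (non-random) build, set_vt_partitioning() is tried
// largest-block-first by choose_partitioning(): keep the whole block if at
// least half of it lies inside the visible frame and its combined variance is
// under the threshold, otherwise try the PARTITION_VERT pair and then the
// PARTITION_HORZ pair, each half tested against the same threshold. A return
// of 0 means none of these passed and the caller recurses one level down.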
static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
// TODO(JBB): More experimentation or testing of this threshold;
int64_t threshold = 4;
int i, j, k;
v64x64 vt;
unsigned char * s;
int sp;
int pixels_wide = 64, pixels_high = 64;
vpx_memset(&vt, 0, sizeof(vt));
set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
if (xd->mb_to_right_edge < 0)
pixels_wide += (xd->mb_to_right_edge >> 3);
if (xd->mb_to_bottom_edge < 0)
pixels_high += (xd->mb_to_bottom_edge >> 3);
s = x->plane[0].src.buf;
sp = x->plane[0].src.stride;
// TODO(JBB): Clearly the higher the quantizer the fewer partitions we want
// but this needs more experimentation.
threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;
d = vp9_64x64_zeros;
dp = 64;
YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[0];
YV12_BUFFER_CONFIG *second_ref_fb = NULL;
setup_pre_planes(xd, ref_fb, second_ref_fb, mi_row, mi_col,
xd->scale_factor, xd->scale_factor_uv);
xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
&nearest_mv, &near_mv);
xd->mode_info_context->mbmi.mv[0] = nearest_mv;
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_SIZE_SB64X64);
d = xd->plane[0].dst.buf;
dp = xd->plane[0].dst.stride;
}
// Fill in the entire tree of 8x8 variances for splits.
for (i = 0; i < 4; i++) {
const int x32_idx = ((i & 1) << 5);
const int y32_idx = ((i >> 1) << 5);
for (j = 0; j < 4; j++) {
const int x16_idx = x32_idx + ((j & 1) << 4);
const int y16_idx = y32_idx + ((j >> 1) << 4);
v16x16 *vst = &vt.split[i].split[j];
for (k = 0; k < 4; k++) {
int x_idx = x16_idx + ((k & 1) << 3);
int y_idx = y16_idx + ((k >> 1) << 3);
unsigned int sse = 0;
int sum = 0;
if (x_idx < pixels_wide && y_idx < pixels_high)
vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp,
d + y_idx * dp + x_idx, dp, &sse, &sum);
fill_variance(&vst->split[k].vt.none, sse, sum, 64);
}
}
}
// Fill the rest of the variance tree by summing the split partition
// values.
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
fill_variance_tree(&vt.split[i].split[j], BLOCK_SIZE_MB16X16);
fill_variance_tree(&vt.split[i], BLOCK_SIZE_SB32X32);
fill_variance_tree(&vt, BLOCK_SIZE_SB64X64);
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold, or we
// hit 8x8.
if (!set_vt_partitioning(cpi, &vt, m, BLOCK_SIZE_SB64X64, mi_row, mi_col,
4)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_SIZE_SB32X32,
(mi_row + y32_idx), (mi_col + x32_idx), 2)) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
BLOCK_SIZE_MB16X16,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx), 1)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
set_block_size(cm, m, BLOCK_SIZE_SB8X8, mis,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx));
}
}
}
}
}
}
}
static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize,
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
int bwl = b_width_log2(m->mbmi.sb_type);
int bhl = b_height_log2(m->mbmi.sb_type);
int bsl = b_width_log2(bsize);
int bs = (1 << bsl);
int bh = (1 << bhl);
int ms = bs / 2;
int mh = bh / 2;
int i, pl;
PARTITION_TYPE partition;
BLOCK_SIZE_TYPE subsize;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
int last_part_rate = INT_MAX;
int64_t last_part_dist = INT_MAX;
int split_rate = INT_MAX;
int64_t split_dist = INT_MAX;
int none_rate = INT_MAX;
int64_t none_dist = INT_MAX;
int chosen_rate = INT_MAX;
int64_t chosen_dist = INT_MAX;
BLOCK_SIZE_TYPE sub_subsize = BLOCK_SIZE_AB4X4;
int splits_below = 0;
BLOCK_SIZE_TYPE bs_type = m->mbmi.sb_type;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
// parse the partition type
if ((bwl == bsl) && (bhl == bsl))
partition = PARTITION_NONE;
else if ((bwl == bsl) && (bhl < bsl))
partition = PARTITION_HORZ;
else if ((bwl < bsl) && (bhl == bsl))
partition = PARTITION_VERT;
else if ((bwl < bsl) && (bhl < bsl))
partition = PARTITION_SPLIT;
else
assert(0);
subsize = get_subsize(bsize, partition);
if (bsize < BLOCK_SIZE_SB8X8) {
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
} else {
*(get_sb_partitioning(x, bsize)) = subsize;
}
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (cpi->sf.adjust_partitioning_from_last_frame) {
// Check if any of the sub blocks are further split.
if (partition == PARTITION_SPLIT && subsize > BLOCK_SIZE_SB8X8) {
sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
}
// If the partition is not none, try none unless each of the 4 splits is
// split even further.
if (partition != PARTITION_NONE && !splits_below &&
mi_row + (ms >> 1) < cm->mi_rows &&
mi_col + (ms >> 1) < cm->mi_cols) {
*(get_sb_partitioning(x, bsize)) = bsize;
pick_sb_modes(cpi, mi_row, mi_col, tp, &none_rate, &none_dist, bsize,
get_block_context(x, bsize));
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
none_rate += x->partition_cost[pl][PARTITION_NONE];
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
m->mbmi.sb_type = bs_type;
*(get_sb_partitioning(x, bsize)) = subsize;
}
}
pick_sb_modes(cpi, mi_row, mi_col, tp, &last_part_rate, &last_part_dist,
bsize, get_block_context(x, bsize));
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
last_part_rate += x->partition_cost[pl][PARTITION_NONE];
break;
case PARTITION_HORZ:
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize));
if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, tp, &rt, &dt, subsize,
last_part_rate += rt;
last_part_dist += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
last_part_rate += x->partition_cost[pl][PARTITION_HORZ];
break;
case PARTITION_VERT:
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize));
if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), tp, &rt, &dt, subsize,
last_part_rate += rt;
last_part_dist += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
last_part_rate += x->partition_cost[pl][PARTITION_VERT];
// Split partition.
last_part_rate = 0;
last_part_dist = 0;
int x_idx = (i & 1) * (ms >> 1);
int y_idx = (i >> 1) * (ms >> 1);
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
mi_col + x_idx, subsize, &rt, &dt);
last_part_rate += rt;
last_part_dist += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
last_part_rate += x->partition_cost[pl][PARTITION_SPLIT];
if (cpi->sf.adjust_partitioning_from_last_frame
&& partition != PARTITION_SPLIT && bsize > BLOCK_SIZE_SB8X8
&& (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
&& (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
BLOCK_SIZE_TYPE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
split_rate = 0;
split_dist = 0;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// Split partition.
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (bs >> 2);
int y_idx = (i >> 1) * (bs >> 2);
int rt = 0;
int64_t dt = 0;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
if ((mi_row + y_idx >= cm->mi_rows)
|| (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, split_subsize)) = i;
*(get_sb_partitioning(x, bsize)) = split_subsize;
*(get_sb_partitioning(x, split_subsize)) = split_subsize;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
pick_sb_modes(cpi, mi_row + y_idx, mi_col + x_idx, tp, &rt, &dt,
split_subsize, get_block_context(x, split_subsize));
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (rt < INT_MAX && dt < INT_MAX)
encode_sb(cpi, tp, mi_row + y_idx, mi_col + x_idx, 0,
split_subsize);
split_rate += rt;
split_dist += dt;
set_partition_seg_context(cm, xd, mi_row + y_idx, mi_col + x_idx);
pl = partition_plane_context(xd, bsize);
split_rate += x->partition_cost[pl][PARTITION_NONE];
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
split_rate += x->partition_cost[pl][PARTITION_SPLIT];
chosen_rate = split_rate;
chosen_dist = split_dist;
}
// If last_part is better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
< RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
m->mbmi.sb_type = bsize;
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = subsize;
chosen_rate = last_part_rate;
chosen_dist = last_part_dist;
}
// If none was better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
> RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
chosen_rate = none_rate;
chosen_dist = none_dist;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// We must have chosen a partitioning and encoding or we'll fail later on.
// No other opportunities for success.
assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
*rate = chosen_rate;
*dist = chosen_dist;
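// Net effect of rd_use_partition(): the block is rated with the partitioning
// already recorded in the mode info (copied from the last frame, fixed, or
// chosen by variance), optionally also with PARTITION_NONE and with a forced
// split, and whichever gives the lowest RDCOST is written back and encoded;
// chosen_rate/chosen_dist return that winner to the caller.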
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
int mi_col, BLOCK_SIZE_TYPE bsize, int *rate,
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
int bsl = b_width_log2(bsize), bs = 1 << bsl;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
TOKENEXTRA *tp_orig = *tp;
int srate = INT_MAX;
int64_t sdist = INT_MAX;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
assert(mi_height_log2(bsize) == mi_width_log2(bsize));
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (!cpi->sf.use_partitions_greater_than
|| (cpi->sf.use_partitions_greater_than
&& bsize > cpi->sf.greater_than_block_size)) {
if (bsize >= BLOCK_SIZE_SB8X8) {
subsize = get_subsize(bsize, PARTITION_SPLIT);
*(get_sb_partitioning(x, bsize)) = subsize;
for (i = 0; i < 4; ++i) {
int x_idx = (i & 1) * (ms >> 1);
int y_idx = (i >> 1) * (ms >> 1);
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize, &r,
&d);
r4 += r;
d4 += d;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r4 < INT_MAX)
r4 += x->partition_cost[pl][PARTITION_SPLIT];
assert(r4 >= 0);
assert(d4 >= 0);
srate = r4;
sdist = d4;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (!cpi->sf.use_partitions_less_than
|| (cpi->sf.use_partitions_less_than
&& bsize <= cpi->sf.less_than_block_size)) {
// PARTITION_NONE
if ((mi_row + (ms >> 1) < cm->mi_rows) &&
(mi_col + (ms >> 1) < cm->mi_cols)) {
pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
get_block_context(x, bsize));
if (bsize >= BLOCK_SIZE_SB8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_NONE];
}
if (RDCOST(x->rdmult, x->rddiv, r, d)
< RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r;
sdist = d;
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
}
}
if (!cpi->sf.use_square_partition_only &&
(!cpi->sf.less_rectangular_check ||!larger_is_better)) {
// PARTITION_HORZ
if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
int r2, r = 0;
int64_t d2, d = 0;
subsize = get_subsize(bsize, PARTITION_HORZ);
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
get_block_context(x, subsize));
if (mi_row + (ms >> 1) < cm->mi_rows) {
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, tp, &r, &d, subsize,
get_block_context(x, subsize));
r2 += r;
d2 += d;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r2 < INT_MAX)
r2 += x->partition_cost[pl][PARTITION_HORZ];
if (RDCOST(x->rdmult, x->rddiv, r2, d2)
< RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r2;
sdist = d2;
*(get_sb_partitioning(x, bsize)) = subsize;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
// PARTITION_VERT
if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
int r2;
int64_t d2;
subsize = get_subsize(bsize, PARTITION_VERT);
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
get_block_context(x, subsize));
if (mi_col + (ms >> 1) < cm->mi_cols) {
int r = 0;
int64_t d = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), tp, &r, &d, subsize,
get_block_context(x, subsize));
r2 += r;
d2 += d;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r2 < INT_MAX)
r2 += x->partition_cost[pl][PARTITION_VERT];
if (RDCOST(x->rdmult, x->rddiv, r2, d2)
< RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r2;
sdist = d2;
*(get_sb_partitioning(x, bsize)) = subsize;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
}
}
*rate = srate;
*dist = sdist;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (srate < INT_MAX && sdist < INT_MAX)
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
if (bsize == BLOCK_SIZE_SB64X64) {
assert(srate < INT_MAX);
assert(sdist < INT_MAX);
} else {
// Examines 64x64 block and chooses a best reference frame
static void rd_pick_reference_frame(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
int mi_col, int *rate, int64_t *dist) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
int bsl = b_width_log2(BLOCK_SIZE_SB64X64), bs = 1 << bsl;
int ms = bs / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
int pl;
int r;
int64_t d;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
// Default is no mask (all reference frames allowed).
cpi->ref_frame_mask = 0;
// Do RD search for 64x64.
if ((mi_row + (ms >> 1) < cm->mi_rows) &&
(mi_col + (ms >> 1) < cm->mi_cols)) {
cpi->set_ref_frame_mask = 1;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, BLOCK_SIZE_SB64X64,
get_block_context(x, BLOCK_SIZE_SB64X64));
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, BLOCK_SIZE_SB64X64);
r += x->partition_cost[pl][PARTITION_NONE];
*(get_sb_partitioning(x, BLOCK_SIZE_SB64X64)) = BLOCK_SIZE_SB64X64;
cpi->set_ref_frame_mask = 0;
}
*rate = r;
*dist = d;
// RDCOST(x->rdmult, x->rddiv, r, d)
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
/*if (srate < INT_MAX && sdist < INT_MAX)
encode_sb(cpi, tp, mi_row, mi_col, 1, BLOCK_SIZE_SB64X64);
if (bsize == BLOCK_SIZE_SB64X64) {
assert(tp_orig < *tp);
assert(srate < INT_MAX);
assert(sdist < INT_MAX);
} else {
assert(tp_orig == *tp);
}
*/
}
static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
int *totalrate) {
VP9_COMMON * const cm = &cpi->common;
// Initialize the left context for the new SB row
vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));
// Code each SB in the row
for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
int dummy_rate;
int64_t dummy_dist;
// Initialize a mask of modes that we will not consider;
// cpi->unused_mode_skip_mask = 0x0000000AAE17F800 (test no golden)
if (cpi->common.frame_type == KEY_FRAME)
cpi->unused_mode_skip_mask = 0;
else
cpi->unused_mode_skip_mask = 0xFFFFFFFFFFFFFE00;
if (cpi->sf.reference_masking) {
rd_pick_reference_frame(cpi, tp, mi_row, mi_col,
&dummy_rate, &dummy_dist);
}
if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
cpi->sf.use_one_partition_size_always ) {
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
MODE_INFO *m = cm->mi + idx_str;
MODE_INFO *p = cm->prev_mi + idx_str;
if (cpi->sf.use_one_partition_size_always) {
set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
set_partitioning(cpi, m, cpi->sf.always_this_block_size);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
} else if (cpi->sf.partition_by_variance) {
choose_partitioning(cpi, cm->mi, mi_row, mi_col);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
if ((cpi->common.current_video_frame
% cpi->sf.last_partitioning_redo_frequency) == 0
|| cm->prev_mi == 0
|| cpi->common.show_frame == 0
|| cpi->common.frame_type == KEY_FRAME
|| cpi->is_src_frame_alt_ref) {
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
} else {
copy_partitioning(cpi, m, p);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
}
} else {
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
MACROBLOCK * const x = &cpi->mb;
VP9_COMMON * const cm = &cpi->common;
MACROBLOCKD * const xd = &x->e_mbd;
xd->mode_info_stride = cm->mode_info_stride;
xd->frame_type = cm->frame_type;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp9_init_mbmode_probs(cm);
// TODO(jkoleszar): are these initializations required?
setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->y_mode_count);
vp9_zero(cm->fc.inter_mode_counts);
vp9_zero(cpi->intra_inter_count);
vp9_zero(cpi->comp_inter_count);
vp9_zero(cpi->single_ref_count);
vp9_zero(cpi->comp_ref_count);
vp9_zero(cm->fc.tx_count_32x32p);
vp9_zero(cm->fc.tx_count_16x16p);
vp9_zero(cm->fc.tx_count_8x8p);
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
vpx_memset(
cm->above_context[0], 0,
sizeof(ENTROPY_CONTEXT) * 2 * MAX_MB_PLANE * mi_cols_aligned_to_sb(cm));
vpx_memset(cm->above_seg_context, 0,
sizeof(PARTITION_CONTEXT) * mi_cols_aligned_to_sb(cm));
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
if (lossless) {
cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
cpi->mb.optimize = 0;
cpi->common.filter_level = 0;
cpi->zbin_mode_boost_enabled = 0;
cpi->common.txfm_mode = ONLY_4X4;
} else {
cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
}
}
static void switch_txfm_mode(VP9_COMP *cpi) {
if (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
cpi->common.txfm_mode >= ALLOW_32X32)
cpi->common.txfm_mode = ALLOW_32X32;
}
static void encode_frame_internal(VP9_COMP *cpi) {
MACROBLOCK * const x = &cpi->mb;
VP9_COMMON * const cm = &cpi->common;
MACROBLOCKD * const xd = &x->e_mbd;
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
}
totalrate = 0;
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
vp9_zero(cm->fc.switchable_interp_count);
vp9_zero(cpi->best_switchable_interp_count);
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp9_zero(cpi->coef_counts);
vp9_zero(cm->fc.eob_branch_counts);
cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
&& cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
vp9_frame_init_quantizer(cpi);
vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
vp9_initialize_me_consts(cpi, cm->base_qindex);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Initialize encode frame context.
// Build a frame level activity map
build_activity_map(cpi);
}
// Re-initialize the encode frame context.
init_encode_frame_mb_context(cpi);
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
vpx_memset(cpi->rd_tx_select_threshes, 0, sizeof(cpi->rd_tx_select_threshes));
set_prev_mi(cm);
// Take tiles into account and give start/end MB
int tile_col, tile_row;
for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
vp9_get_tile_row_offsets(cm, tile_row);
for (tile_col = 0; tile_col < cm->tile_columns; tile_col++) {
TOKENEXTRA *tp_old = tp;
// For each row of SBs in the frame
vp9_get_tile_col_offsets(cm, tile_col);
for (mi_row = cm->cur_tile_mi_row_start;
mi_row < cm->cur_tile_mi_row_end; mi_row += 8)
encode_sb_row(cpi, mi_row, &tp, &totalrate);
cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
assert(tp - cpi->tok <=
get_token_alloc(cm->mb_rows, cm->mb_cols));
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
}
// 256 rate units to the bit,
// projected_frame_size in units of BYTES
cpi->projected_frame_size = totalrate >> 8;
// Keep record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
static int check_dual_ref_flags(VP9_COMP *cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int ref_flags = cpi->ref_frame_flags;
if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
+ !!(ref_flags & VP9_ALT_FLAG)) >= 2;
}
static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
if (!mi[y * mis + x].mbmi.mb_skip_coeff)
return 0;
}
}
return 1;
}
static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
TX_SIZE txfm_size) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++)
mi[y * mis + x].mbmi.txfm_size = txfm_size;
}
}
static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi, int mis,
TX_SIZE txfm_max, int bw, int bh, int mi_row,
int mi_col, BLOCK_SIZE_TYPE bsize) {
VP9_COMMON * const cm = &cpi->common;
MB_MODE_INFO * const mbmi = &mi->mbmi;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
const int ymbs = MIN(bh, cm->mi_rows - mi_row);
const int xmbs = MIN(bw, cm->mi_cols - mi_col);
assert(
vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) || get_skip_flag(mi, mis, ymbs, xmbs));
set_txfm_flag(mi, mis, ymbs, xmbs, txfm_max);
}
}
static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
BLOCK_SIZE_TYPE bsize) {
const int mis = cm->mode_info_stride;
int bwl, bhl;
const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
bwl = mi_width_log2(mi->mbmi.sb_type);
bhl = mi_height_log2(mi->mbmi.sb_type);
if (bwl == bsl && bhl == bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, 1 << bsl, mi_row,
mi_col, bsize);
} else if (bwl == bsl && bhl < bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, bs, mi_row, mi_col,
bsize);
reset_skip_txfm_size_b(cpi, mi + bs * mis, mis, txfm_max, 1 << bsl, bs,
mi_row + bs, mi_col, bsize);
} else if (bwl < bsl && bhl == bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, bs, 1 << bsl, mi_row, mi_col,
bsize);
reset_skip_txfm_size_b(cpi, mi + bs, mis, txfm_max, bs, 1 << bsl, mi_row,
mi_col + bs, bsize);
} else {
BLOCK_SIZE_TYPE subsize;
int n;
assert(bwl < bsl && bhl < bsl);
if (bsize == BLOCK_SIZE_SB64X64) {
subsize = BLOCK_SIZE_SB32X32;
} else if (bsize == BLOCK_SIZE_SB32X32) {
subsize = BLOCK_SIZE_MB16X16;
} else {
assert(bsize == BLOCK_SIZE_MB16X16);
subsize = BLOCK_SIZE_SB8X8;
}
for (n = 0; n < 4; n++) {
const int y_idx = n >> 1, x_idx = n & 0x01;
reset_skip_txfm_size_sb(cpi, mi + y_idx * bs * mis + x_idx * bs, txfm_max,
mi_row + y_idx * bs, mi_col + x_idx * bs,
subsize);
}
}
}
static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
const int mis = cm->mode_info_stride;
MODE_INFO *mi, *mi_ptr = cm->mi;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
mi = mi_ptr;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8) {
reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col,
BLOCK_SIZE_SB64X64);
}
}
}
static int get_frame_type(VP9_COMP *cpi) {
int frame_type;
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
frame_type = 3;
else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
frame_type = 1;
else
frame_type = 2;
return frame_type;
}
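// Frame type indices used by the threshold arrays below: 0 = key frame,
// 1 = golden/alt-ref update, 2 = ordinary inter frame, 3 = alt-ref overlay
// (the source frame is the alt-ref and golden is being refreshed).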
static void select_txfm_mode(VP9_COMP *cpi) {
if (cpi->oxcf.lossless) {
cpi->common.txfm_mode = ONLY_4X4;
} else if (cpi->common.current_video_frame == 0) {
cpi->common.txfm_mode = TX_MODE_SELECT;
} else {
if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
int frame_type = get_frame_type(cpi);
cpi->common.txfm_mode =
cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32]
> cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_32X32 : TX_MODE_SELECT;
} else if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
cpi->common.txfm_mode = ALLOW_32X32;
} else {
unsigned int total = 0;
int i;
for (i = 0; i < TX_SIZE_MAX_SB; ++i)
total += cpi->txfm_stepdown_count[i];
if (total) {
double fraction = (double)cpi->txfm_stepdown_count[0] / total;
cpi->common.txfm_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
// printf("fraction = %f\n", fraction);
} // else keep unchanged
}
}
}
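// Summary of the selection above: lossless forces 4x4-only and the first
// frame always uses per-block selection; afterwards USE_FULL_RD compares the
// accumulated per-frame-type thresholds for ALLOW_32X32 against
// TX_MODE_SELECT, USE_LARGESTALL always takes ALLOW_32X32, and otherwise
// ALLOW_32X32 is chosen once more than 90% of the recorded transform size
// step-downs fall in the first bucket.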
void vp9_encode_frame(VP9_COMP *cpi) {
// In the longer term the encoder should be generalized to match the
// decoder such that we allow compound where one of the 3 buffers has a
// different sign bias and that buffer is then the fixed ref. However, this
// requires further work in the rd loop. For now the only supported encoder
// side behaviour is where the ALT ref buffer has opposite sign bias to
// the other two.
== cm->ref_frame_sign_bias[GOLDEN_FRAME])
|| (cm->ref_frame_sign_bias[ALTREF_FRAME]
== cm->ref_frame_sign_bias[LAST_FRAME])) {
cm->allow_comp_inter_inter = 0;
} else {
cm->allow_comp_inter_inter = 1;
cm->comp_fixed_ref = ALTREF_FRAME;
cm->comp_var_ref[0] = LAST_FRAME;
cm->comp_var_ref[1] = GOLDEN_FRAME;
}
/*
* This code does a single RD pass over the whole frame assuming
* either compound, single or hybrid prediction as per whatever has
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
 * better than this coding mode. If that is the case, it remembers
* that for subsequent frames.
 * It does the same analysis for transform size selection also.
 */
/* prediction (compound, single or hybrid) mode selection */
if (frame_type == 3 || !cm->allow_comp_inter_inter)
else if (cpi->rd_prediction_type_threshes[frame_type][1]
> cpi->rd_prediction_type_threshes[frame_type][0]
&& cpi->rd_prediction_type_threshes[frame_type][1]
> cpi->rd_prediction_type_threshes[frame_type][2]
&& check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
else if (cpi->rd_prediction_type_threshes[frame_type][0]
> cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = SINGLE_PREDICTION_ONLY;
else
pred_type = HYBRID_PREDICTION;