/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_onyx_int.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/common/vp9_setupintrarecon.h"
#include "vp9/common/vp9_reconintra4x4.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_invtrans.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/encoder/vp9_tokenize.h"
#include <limits.h>
#include "vpx_ports/vpx_timer.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_mvref_common.h"
extern void select_interp_filter_type(VP9_COMP *cpi);
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col);
static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col);
static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col);
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
// Mode usage counters, compiled in only when MODE_STATS is defined.
#ifdef MODE_STATS
unsigned int inter_y_modes[MB_MODE_COUNT];
unsigned int inter_uv_modes[VP9_UV_MODES];
unsigned int inter_b_modes[B_MODE_COUNT];
unsigned int y_modes[VP9_YMODES];
unsigned int i8x8_modes[VP9_I8X8_MODES];
unsigned int uv_modes[VP9_UV_MODES];
unsigned int uv_modes_y[VP9_YMODES][VP9_UV_MODES];
unsigned int b_modes[B_MODE_COUNT];
#endif
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp9_activity_masking().
 */
#define VP9_ACTIVITY_AVG_MIN (64)

// Select the alternative (intra-coding cost based) activity measure.
#define ALT_ACT_MEASURE 1
/* This is used as a reference when computing the source variance for the
* purposes of activity masking.
* Eventually this should be replaced by custom no-reference routines,
* which will be faster.
*/
static const uint8_t VP9_VAR_OFFS[16] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
unsigned int act;
unsigned int sse;
/* TODO: This could also be done over smaller areas (8x8), but that would
* require extensive changes elsewhere, as lambda is assumed to be fixed
* over an entire MB in most of the code.
* Another option is to compute four 8x8 variances, and pick a single
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
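  /* Taking the variance of the source block against the constant VP9_VAR_OFFS
   * block yields the variance of the source block itself; the result is then
   * scaled up by 16 below. */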
  act = vp9_variance16x16(x->src.y_buffer, x->src.y_stride, VP9_VAR_OFFS, 0,
                          &sse);
  act = act << 4;
/* If the region is flat, lower the activity some more. */
if (act < 8 << 12)
act = act < 5 << 12 ? act : 5 << 12;
  return act;
}
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(VP9_COMP *cpi,
                                         MACROBLOCK *x, int use_dc_pred) {
return vp9_encode_intra(cpi, x, use_dc_pred);
}
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
                                        int mb_row, int mb_col) {
  unsigned int mb_activity = VP9_ACTIVITY_AVG_MIN;
if (ALT_ACT_MEASURE) {
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
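    // use_dc_pred is true only for macroblocks on the top row or left column
    // (excluding the top-left MB), where a single prediction edge is
    // available.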
    // Or use an alternative.
mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
} else {
// Original activity measure from Tim T's code.
mb_activity = tt_activity_measure(cpi, x);
}
  if (mb_activity < VP9_ACTIVITY_AVG_MIN)
    mb_activity = VP9_ACTIVITY_AVG_MIN;

  return mb_activity;
}
// Calculate an "average" mb activity value for the frame
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
  // Find median: Simple n^2 algorithm for experimentation
{
unsigned int median;
unsigned int i, j;
unsigned int *sortlist;
unsigned int tmp;
// Create a list to sort to
CHECK_MEM_ERROR(sortlist,
vpx_calloc(sizeof(unsigned int),
cpi->common.MBs));
// Copy map to sort list
vpx_memcpy(sortlist, cpi->mb_activity_map,
sizeof(unsigned int) * cpi->common.MBs);
// Ripple each value down to its correct position
for (i = 1; i < cpi->common.MBs; i ++) {
for (j = i; j > 0; j --) {
if (sortlist[j] < sortlist[j - 1]) {
// Swap values
tmp = sortlist[j - 1];
sortlist[j - 1] = sortlist[j];
sortlist[j] = tmp;
} else
break;
}
}
    // Even number MBs so estimate median as mean of two either side.
    median = (1 + sortlist[cpi->common.MBs >> 1] +
              sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;

    cpi->activity_avg = median;

    vpx_free(sortlist);
  }
#else
  // Simple mean for now
  cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif
if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;
// Experimental code: return fixed value normalized for several clips
if (ALT_ACT_MEASURE)
    cpi->activity_avg = 100000;
}
#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_row, mb_col;
  int64_t act;
  int64_t a;
  int64_t b;
FILE *f = fopen("norm_act.stt", "a");
fprintf(f, "\n%12d\n", cpi->activity_avg);
// Reset pointers to start of activity map
x->mb_activity_ptr = cpi->mb_activity_map;
// Calculate normalized mb activity number.
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
// Read activity from the map
act = *(x->mb_activity_ptr);
// Calculate a normalized activity number
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
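      // The resulting index is 0 when act equals activity_avg, positive for
      // macroblocks more active than average and negative for less active
      // ones, saturating at roughly +/-3.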
if (b >= a)
*(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
else
*(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
// Increment activity map pointers
x->mb_activity_ptr++;
    }
  }

  fclose(f);
}
#endif  // USE_ACT_INDEX
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;
  YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
int recon_yoffset;
int recon_y_stride = new_yv12->y_stride;
int mb_row, mb_col;
unsigned int mb_activity;
int64_t activity_sum = 0;
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// reset above block coeffs
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
#if !CONFIG_SUPERBLOCKS
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
// measure activity
      mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);

      // Keep frame sum
      activity_sum += mb_activity;
// Store MB level activity details.
*x->mb_activity_ptr = mb_activity;
// Increment activity map pointer
x->mb_activity_ptr++;
// adjust to the next column of source macroblocks
x->src.y_buffer += 16;
}
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
    vp9_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
                      xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
  }
// Calculate an "average" MB activity
calc_av_activity(cpi, activity_sum);
#if USE_ACT_INDEX
  // Calculate an activity index for each mb
  calc_activity_index(cpi, x);
#endif
}
void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#else
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
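  // The scale factor b / a = (2 * act + activity_avg) / (act + 2 * activity_avg)
  // lies between 1/2 and 2, so high-activity macroblocks get a larger rdmult
  // (more distortion tolerated) and flat macroblocks a smaller one.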
a = act + (2 * cpi->activity_avg);
b = (2 * act) + cpi->activity_avg;
x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#endif
// Activity based Zbin adjustment
  adjust_act_zbin(cpi, x);
}
#if CONFIG_NEW_MVREF
static int vp9_cost_mv_ref_id(vp9_prob * ref_id_probs, int mv_ref_id) {
int cost;
// Encode the index for the MV reference.
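  // The index is coded as a simple unary-style tree: id 0 uses one branch,
  // ids 1 and 2 add one branch each, and id 3 shares the three-branch
  // length of id 2.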
switch (mv_ref_id) {
case 0:
cost = vp9_cost_zero(ref_id_probs[0]);
break;
case 1:
cost = vp9_cost_one(ref_id_probs[0]);
cost += vp9_cost_zero(ref_id_probs[1]);
break;
case 2:
cost = vp9_cost_one(ref_id_probs[0]);
cost += vp9_cost_one(ref_id_probs[1]);
cost += vp9_cost_zero(ref_id_probs[2]);
break;
case 3:
cost = vp9_cost_one(ref_id_probs[0]);
cost += vp9_cost_one(ref_id_probs[1]);
cost += vp9_cost_one(ref_id_probs[2]);
break;
// TRAP.. This should not happen
default:
assert(0);
break;
}
return cost;
}
// Estimate the cost of coding the target vector against each reference candidate
static unsigned int pick_best_mv_ref(MACROBLOCK *x,
MV_REFERENCE_FRAME ref_frame,
int_mv target_mv,
int_mv * mv_ref_list,
int_mv * best_ref) {
int i;
int best_index = 0;
int cost, cost2;
int zero_seen = (mv_ref_list[0].as_int) ? FALSE : TRUE;
MACROBLOCKD *xd = &x->e_mbd;
int max_mv = MV_MAX;
cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_probs[ref_frame], 0) +
vp9_mv_bit_cost(&target_mv, &mv_ref_list[0], x->nmvjointcost,
x->mvcost, 96, xd->allow_high_precision_mv);
for (i = 1; i < MAX_MV_REF_CANDIDATES; ++i) {
    // If we see a 0,0 reference vector for a second time we have reached
    // the end of the list of valid candidate vectors.
    if (!mv_ref_list[i].as_int) {
      if (zero_seen)
        break;
      else
        zero_seen = TRUE;
    }
// Check for cases where the reference choice would give rise to an
// uncodable/out of range residual for row or col.
if ((abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv) ||
(abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)) {
continue;
}
cost2 = vp9_cost_mv_ref_id(xd->mb_mv_ref_probs[ref_frame], i) +
vp9_mv_bit_cost(&target_mv, &mv_ref_list[i], x->nmvjointcost,
x->mvcost, 96, xd->allow_high_precision_mv);
if (cost2 < cost) {
cost = cost2;
best_index = i;
}
}
best_ref->as_int = mv_ref_list[best_index].as_int;
return best_index;
}
#endif
static void update_state(VP9_COMP *cpi, MACROBLOCK *x,
PICK_MODE_CONTEXT *ctx, int block_size,
int output_enabled) {
int i, x_idx, y;
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
int mb_mode = mi->mbmi.mode;
int mb_mode_index = ctx->best_mode_index;
const int mis = cpi->common.mode_info_stride;
#if CONFIG_SUPERBLOCKS
int mb_block_size = 1 << mi->mbmi.sb_type;
#else
int mb_block_size = 1;
#endif
assert(mb_mode < MB_MODE_COUNT);
assert(mb_mode_index < MAX_MODES);
assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
#if CONFIG_SUPERBLOCKS
assert(mi->mbmi.sb_type == (block_size >> 5));
#endif
  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it
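  // The loop below copies the mode info to every macroblock position covered
  // by this block that lies inside the visible frame; mb_to_right_edge and
  // mb_to_bottom_edge are in 1/8-pel units, so >> 7 converts them to whole
  // macroblocks.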
for (y = 0; y < mb_block_size; y++) {
for (x_idx = 0; x_idx < mb_block_size; x_idx++) {
if ((xd->mb_to_right_edge >> 7) + mb_block_size > x_idx &&
(xd->mb_to_bottom_edge >> 7) + mb_block_size > y) {
MODE_INFO *mi_addr = xd->mode_info_context + x_idx + y * mis;
vpx_memcpy(mi_addr, mi, sizeof(MODE_INFO));
      }
    }
  }

#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
  if (block_size == 16) {
    ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
  }
#endif
if (mb_mode == B_PRED) {
for (i = 0; i < 16; i++) {
xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
      assert(xd->block[i].bmi.as_mode.first < B_MODE_COUNT);
    }
} else if (mb_mode == I8X8_PRED) {
for (i = 0; i < 16; i++) {
      xd->block[i].bmi = xd->mode_info_context->bmi[i];
    }
} else if (mb_mode == SPLITMV) {
vpx_memcpy(x->partition_info, &ctx->partition_info,
sizeof(PARTITION_INFO));
mbmi->mv[0].as_int = x->partition_info->bmi[15].mv.as_int;
    mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
  }
{
int segment_id = mbmi->segment_id;
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB)) {
for (i = 0; i < NB_TXFM_MODES; i++) {
cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
}
}
}
if (cpi->common.frame_type == KEY_FRAME) {
// Restore the coding modes to that held in the coding context
// if (mb_mode == B_PRED)
// for (i = 0; i < 16; i++)
// {
// xd->block[i].bmi.as_mode =
// xd->mode_info_context->bmi[i].as_mode;
// assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
// }
#if CONFIG_INTERNAL_STATS
static const int kf_mode_index[] = {
THR_DC /*DC_PRED*/,
THR_V_PRED /*V_PRED*/,
THR_H_PRED /*H_PRED*/,
THR_D45_PRED /*D45_PRED*/,
THR_D135_PRED /*D135_PRED*/,
THR_D117_PRED /*D117_PRED*/,
THR_D153_PRED /*D153_PRED*/,
THR_D27_PRED /*D27_PRED*/,
THR_D63_PRED /*D63_PRED*/,
THR_TM /*TM_PRED*/,
THR_I8X8_PRED /*I8X8_PRED*/,
THR_B_PRED /*B_PRED*/,
};
    cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
#endif
} else {
/*
// Reduce the activation RD thresholds for the best choice mode
if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
(cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
{
int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
cpi->rd_thresh_mult[mb_mode_index] =
(cpi->rd_thresh_mult[mb_mode_index]
>= (MIN_THRESHMULT + best_adjustment)) ?
cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
MIN_THRESHMULT;
cpi->rd_threshes[mb_mode_index] =
(cpi->rd_baseline_thresh[mb_mode_index] >> 7)
* cpi->rd_thresh_mult[mb_mode_index];
}
*/
// Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
int_mv best_mv, best_second_mv;
MV_REFERENCE_FRAME rf = mbmi->ref_frame;
MV_REFERENCE_FRAME sec_ref_frame = mbmi->second_ref_frame;
best_mv.as_int = ctx->best_ref_mv.as_int;
best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
if (mbmi->mode == NEWMV) {
best_mv.as_int = mbmi->ref_mvs[rf][0].as_int;
best_second_mv.as_int = mbmi->ref_mvs[mbmi->second_ref_frame][0].as_int;
#if CONFIG_NEW_MVREF
      unsigned int best_index;
best_index = pick_best_mv_ref(x, rf, mbmi->mv[0],
mbmi->ref_mvs[rf], &best_mv);
mbmi->best_index = best_index;
++cpi->mb_mv_ref_count[rf][best_index];
if (mbmi->second_ref_frame > 0) {
unsigned int best_index;
best_index =
pick_best_mv_ref(x, sec_ref_frame, mbmi->mv[1],
mbmi->ref_mvs[sec_ref_frame],
&best_second_mv);
mbmi->best_second_index = best_index;
++cpi->mb_mv_ref_count[sec_ref_frame][best_index];
}
#endif
}
mbmi->best_mv.as_int = best_mv.as_int;
mbmi->best_second_mv.as_int = best_second_mv.as_int;
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
}
#if CONFIG_COMP_INTERINTRA_PRED
if (mbmi->mode >= NEARESTMV && mbmi->mode < SPLITMV &&
mbmi->second_ref_frame <= INTRA_FRAME) {
if (mbmi->second_ref_frame == INTRA_FRAME) {
++cpi->interintra_count[1];
++cpi->ymode_count[mbmi->interintra_mode];
#if SEPARATE_INTERINTRA_UV
++cpi->y_uv_mode_count[mbmi->interintra_mode][mbmi->interintra_uv_mode];
#endif
} else {
++cpi->interintra_count[0];
}
}
if (cpi->common.mcomp_filter_type == SWITCHABLE &&
mbmi->mode >= NEARESTMV &&
mbmi->mode <= SPLITMV) {
++cpi->switchable_interp_count
[vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp9_switchable_interp_map[mbmi->interp_filter]];
}
cpi->prediction_error += ctx->distortion;
cpi->intra_error += ctx->intra_error;
cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
static unsigned find_seg_id(uint8_t *buf, int block_size,
int start_y, int height, int start_x, int width) {
const int end_x = MIN(start_x + block_size, width);
const int end_y = MIN(start_y + block_size, height);
int x, y;
unsigned seg_id = -1;
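  // seg_id starts at UINT_MAX (the unsigned wrap of -1), so the first map
  // entry always replaces it; the minimum segment id covered by the block
  // is returned.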
buf += width * start_y;
for (y = start_y; y < end_y; y++, buf += width) {
for (x = start_x; x < end_x; x++) {
seg_id = MIN(seg_id, buf[x]);
}
}
return seg_id;
}
static void set_offsets(VP9_COMP *cpi,
int mb_row, int mb_col, int block_size,
int *ref_yoffset, int *ref_uvoffset) {
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const int dst_fb_idx = cm->new_fb_idx;
const int recon_y_stride = cm->yv12_fb[dst_fb_idx].y_stride;
const int recon_uv_stride = cm->yv12_fb[dst_fb_idx].uv_stride;
const int recon_yoffset = 16 * mb_row * recon_y_stride + 16 * mb_col;
const int recon_uvoffset = 8 * mb_row * recon_uv_stride + 8 * mb_col;
const int src_y_stride = x->src.y_stride;
const int src_uv_stride = x->src.uv_stride;
const int src_yoffset = 16 * mb_row * src_y_stride + 16 * mb_col;
const int src_uvoffset = 8 * mb_row * src_uv_stride + 8 * mb_col;
const int ref_fb_idx = cm->lst_fb_idx;
const int ref_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
const int ref_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
const int idx_map = mb_row * cm->mb_cols + mb_col;
const int idx_str = xd->mode_info_stride * mb_row + mb_col;
// entropy context structures
xd->above_context = cm->above_context + mb_col;
xd->left_context = cm->left_context + (mb_row & 3);
// GF active flags data structure
x->gf_active_ptr = (signed char *)&cpi->gf_active_flags[idx_map];
// Activity map pointer
x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
x->active_ptr = cpi->active_map + idx_map;
/* pointers to mode info contexts */
x->partition_info = x->pi + idx_str;
xd->mode_info_context = cm->mi + idx_str;
mbmi = &xd->mode_info_context->mbmi;
xd->prev_mode_info_context = cm->prev_mi + idx_str;
// Set up destination pointers
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
/* Set up limit values for MV components to prevent them from
* extending beyond the UMV borders assuming 16x16 block size */
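  /* The limits are full-pel values: the search may reach up to
   * (VP9BORDERINPIXELS - VP9_INTERP_EXTEND) pixels into the extended border,
   * and the max values are reduced by block_size so the whole block stays
   * inside that border. */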
x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
(VP9BORDERINPIXELS - block_size - VP9_INTERP_EXTEND));
x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
(VP9BORDERINPIXELS - block_size - VP9_INTERP_EXTEND));
// Set up distance of MB to edge of frame in 1/8th pel units
block_size >>= 4; // in macroblock units
assert(!(mb_col & (block_size - 1)) && !(mb_row & (block_size - 1)));
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - block_size - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((cm->mb_cols - block_size - mb_col) * 16) << 3;
// Are edges available for intra prediction?
xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0);
/* Reference buffer offsets */
*ref_yoffset = (mb_row * ref_y_stride * 16) + (mb_col * 16);
*ref_uvoffset = (mb_row * ref_uv_stride * 8) + (mb_col * 8);
/* set up source buffers */
x->src.y_buffer = cpi->Source->y_buffer + src_yoffset;
x->src.u_buffer = cpi->Source->u_buffer + src_uvoffset;
x->src.v_buffer = cpi->Source->v_buffer + src_uvoffset;
/* R/D setup */
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
/* segment ID */
if (xd->segmentation_enabled) {
if (xd->update_mb_segmentation_map) {
mbmi->segment_id = find_seg_id(cpi->segmentation_map, block_size,
mb_row, cm->mb_rows, mb_col, cm->mb_cols);
} else {
mbmi->segment_id = find_seg_id(cm->last_frame_seg_map, block_size,
mb_row, cm->mb_rows, mb_col, cm->mb_cols);
}
assert(mbmi->segment_id <= 3);
vp9_mb_init_quantizer(cpi, x);
if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
!vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME) &&
vp9_check_segref(xd, 1, INTRA_FRAME) +
vp9_check_segref(xd, 1, LAST_FRAME) +
vp9_check_segref(xd, 1, GOLDEN_FRAME) +
vp9_check_segref(xd, 1, ALTREF_FRAME) == 1) {
cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
} else {
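      // The expression below counts the macroblocks coded before this one
      // when walking 64x64, 32x32 and 16x16 blocks in raster order, and
      // expresses it as a Q16 fraction of the frame total.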
const int y = mb_row & ~3;
const int x = mb_col & ~3;
const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
cpi->seg0_progress =
((y * cm->mb_cols + x * 4 + p32 + p16) << 16) / cm->MBs;
}
} else {
mbmi->segment_id = 0;
}
}
static void pick_mb_modes(VP9_COMP *cpi,
VP9_COMMON *cm,
int mb_row,
int mb_col,
MACROBLOCK *x,
MACROBLOCKD *xd,
                          TOKENEXTRA **tp,
                          int *totalrate,
                          int *totaldist) {
int i;
int recon_yoffset, recon_uvoffset;
ENTROPY_CONTEXT_PLANES left_context[2];
ENTROPY_CONTEXT_PLANES above_context[2];
ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
+ mb_col;
/* Function should not modify L & A contexts; save and restore on exit */
  vpx_memcpy(left_context,
             cm->left_context + (mb_row & 2),
             sizeof(left_context));
vpx_memcpy(above_context,
initial_above_context_ptr,
sizeof(above_context));
/* Encode MBs in raster order within the SB */
for (i = 0; i < 4; i++) {
const int x_idx = i & 1, y_idx = i >> 1;
MB_MODE_INFO *mbmi;
    if ((mb_row + y_idx >= cm->mb_rows) || (mb_col + x_idx >= cm->mb_cols)) {
      // MB lies outside frame, move on
      continue;
    }

set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16,
                &recon_yoffset, &recon_uvoffset);
    mbmi = &xd->mode_info_context->mbmi;
#if !CONFIG_SUPERBLOCKS
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
vp9_activity_masking(cpi, x);
vp9_intra_prediction_down_copy(xd);
// Find best coding mode & reconstruct the MB so it is available
// as a predictor for MBs that follow in the SB
    if (cm->frame_type == KEY_FRAME) {
      int r, d;
#ifdef ENC_DEBUG
if (enc_debug)
printf("intra pick_mb_modes %d %d\n", mb_row, mb_col);
#endif
      vp9_rd_pick_intra_mode(cpi, x, &r, &d);
      *totalrate += r;
      *totaldist += d;
encode_macroblock(cpi, tp, recon_yoffset, recon_uvoffset, 0,
mb_row + y_idx, mb_col + x_idx);
      vpx_memcpy(&x->mb_context[xd->sb_index][i].mic, xd->mode_info_context,
                 sizeof(MODE_INFO));
    } else {
      int seg_id, r, d;
#ifdef ENC_DEBUG
if (enc_debug)
printf("inter pick_mb_modes %d %d\n", mb_row, mb_col);
vp9_pick_mode_inter_macroblock(cpi, x, recon_yoffset,
                                     recon_uvoffset, &r, &d);
      *totalrate += r;
      *totaldist += d;
encode_macroblock(cpi, tp, recon_yoffset, recon_uvoffset, 0,
mb_row + y_idx, mb_col + x_idx);
      seg_id = mbmi->segment_id;
      if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0) {
cpi->seg0_idx++;
}
if (!xd->segmentation_enabled ||
!vp9_segfeature_active(xd, seg_id, SEG_LVL_REF_FRAME) ||
vp9_check_segref(xd, seg_id, INTRA_FRAME) +
vp9_check_segref(xd, seg_id, LAST_FRAME) +
vp9_check_segref(xd, seg_id, GOLDEN_FRAME) +
vp9_check_segref(xd, seg_id, ALTREF_FRAME) > 1) {
int pred_flag = vp9_get_pred_flag(xd, PRED_REF);
int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);
// Count prediction success
cpi->ref_pred_count[pred_context][pred_flag]++;
}
}
}
/* Restore L & A coding context to those in place on entry */
  vpx_memcpy(cm->left_context + (mb_row & 2),
             left_context,
             sizeof(left_context));
sizeof(left_context));
vpx_memcpy(initial_above_context_ptr,
above_context,
             sizeof(above_context));
}
static void pick_sb_modes(VP9_COMP *cpi,
VP9_COMMON *cm,
int mb_row,
int mb_col,
MACROBLOCK *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
int *totalrate,
                          int *totaldist) {
  int recon_yoffset, recon_uvoffset;
set_offsets(cpi, mb_row, mb_col, 32, &recon_yoffset, &recon_uvoffset);
xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB32X32;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
cpi->update_context = 0; // TODO Do we need this now??
/* Find best coding mode & reconstruct the MB so it is available
* as a predictor for MBs that follow in the SB */
if (cm->frame_type == KEY_FRAME) {
vp9_rd_pick_intra_mode_sb32(cpi, x,
totalrate,
totaldist);
    vpx_memcpy(&x->sb32_context[xd->sb_index].mic, xd->mode_info_context,
               sizeof(MODE_INFO));
  } else {
vp9_rd_pick_inter_mode_sb32(cpi, x,
recon_yoffset,
recon_uvoffset,
totalrate,
                                totaldist);
  }
}
#if CONFIG_SUPERBLOCKS64
static void pick_sb64_modes(VP9_COMP *cpi,
VP9_COMMON *cm,
int mb_row,
int mb_col,
MACROBLOCK *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
int *totalrate,
                            int *totaldist) {
  int recon_yoffset, recon_uvoffset;
set_offsets(cpi, mb_row, mb_col, 64, &recon_yoffset, &recon_uvoffset);
xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
cpi->update_context = 0; // TODO(rbultje) Do we need this now??
/* Find best coding mode & reconstruct the MB so it is available
* as a predictor for MBs that follow in the SB */
if (cm->frame_type == KEY_FRAME) {
vp9_rd_pick_intra_mode_sb64(cpi, x,
totalrate,
totaldist);
/* Save the coding context */
vpx_memcpy(&x->sb64_context.mic, xd->mode_info_context,
sizeof(MODE_INFO));
} else {
vp9_rd_pick_inter_mode_sb64(cpi, x,
recon_yoffset,
recon_uvoffset,
totalrate,
totaldist);
}
}
#endif // CONFIG_SUPERBLOCKS64
#endif // CONFIG_SUPERBLOCKS
static void update_stats(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
if (cm->frame_type == KEY_FRAME) {
#ifdef MODE_STATS
    y_modes[mbmi->mode]++;
#endif
} else {
int segment_id, seg_ref_active;
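    // Note, per prediction context, how often single and compound prediction
    // were chosen; these counts feed the compound prediction probability
    // estimates.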
if (mbmi->ref_frame) {
int pred_context = vp9_get_pred_context(cm, xd, PRED_COMP);
if (mbmi->second_ref_frame <= INTRA_FRAME)
cpi->single_pred_count[pred_context]++;
else
        cpi->comp_pred_count[pred_context]++;
    }
#ifdef MODE_STATS
inter_y_modes[mbmi->mode]++;
    if (mbmi->mode == SPLITMV) {
      int b;

      for (b = 0; b < x->partition_info->count; b++) {
inter_b_modes[x->partition_info->bmi[b].mode]++;
}
}
#endif
// If we have just a single reference frame coded for a segment then
// exclude from the reference frame counts used to work out
// probabilities. NOTE: At the moment we dont support custom trees
// for the reference frame coding for each segment but this is a
// possible future action.
segment_id = mbmi->segment_id;
seg_ref_active = vp9_segfeature_active(xd, segment_id,
SEG_LVL_REF_FRAME);
if (!seg_ref_active ||
((vp9_check_segref(xd, segment_id, INTRA_FRAME) +
vp9_check_segref(xd, segment_id, LAST_FRAME) +
vp9_check_segref(xd, segment_id, GOLDEN_FRAME) +
vp9_check_segref(xd, segment_id, ALTREF_FRAME)) > 1)) {
cpi->count_mb_ref_frame_usage[mbmi->ref_frame]++;
// Count of last ref frame 0,0 usage
if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
cpi->inter_zz_count++;
}
}