static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
unsigned char ref_pred_flag;
assert(!xd->mode_info_context->mbmi.sb_type);
#ifdef ENC_DEBUG
enc_debug = (cpi->common.current_video_frame == 46 &&
mb_row == 5 && mb_col == 2);
if (enc_debug)
printf("Encode MB %d %d output %d\n", mb_row, mb_col, output_enabled);
#endif
if (cm->frame_type == KEY_FRAME) {
if (cpi->oxcf.tuning == VP8_TUNE_SSIM && output_enabled) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
} else {
vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
}
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled) {
if (mbmi->ref_frame != INTRA_FRAME) {
if (mbmi->mode == ZEROMV) {
if (mbmi->ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else if (mbmi->mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
}
vp9_update_zbin_extra(cpi, x);
// SET VARIOUS PREDICTION FLAGS
// Did the chosen reference frame match its predicted value.
ref_pred_flag = ((mbmi->ref_frame == vp9_get_pred_ref(cm, xd)));
vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
}
if (mbmi->ref_frame == INTRA_FRAME) {
#ifdef ENC_DEBUG
if (enc_debug) {
printf("Mode %d skip %d tx_size %d\n", mbmi->mode, x->skip,
mbmi->txfm_size);
}
#endif
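// Encode the intra residual; the path taken depends on the chosen intra prediction mode.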
if (mbmi->mode == B_PRED) {
vp9_encode_intra16x16mbuv(x);
vp9_encode_intra4x4mby(x);
} else if (mbmi->mode == I8X8_PRED) {
vp9_encode_intra8x8mby(x);
vp9_encode_intra8x8mbuv(x);
} else {
vp9_encode_intra16x16mby(x);
vp9_encode_intra16x16mbuv(x);
}
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
int ref_fb_idx;
#ifdef ENC_DEBUG
if (enc_debug)
printf("Mode %d skip %d tx_size %d ref %d ref2 %d mv %d %d interp %d\n",
mbmi->mode, x->skip, mbmi->txfm_size,
mbmi->ref_frame, mbmi->second_ref_frame,
mbmi->mv[0].as_mv.row, mbmi->mv[0].as_mv.col,
mbmi->interp_filter);
#endif
assert(cm->frame_type != KEY_FRAME);
if (mbmi->ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (mbmi->ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (mbmi->second_ref_frame > 0) {
int second_ref_fb_idx;
if (mbmi->second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.lst_fb_idx;
else if (mbmi->second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.gld_fb_idx;
else
second_ref_fb_idx = cpi->common.alt_fb_idx;
xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
recon_yoffset;
xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
recon_uvoffset;
xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
recon_uvoffset;
}
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
mbmi->mb_skip_coeff = 0;
vp9_build_1st_inter16x16_predictors_mb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
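// For compound prediction, blend in the prediction from the second reference frame.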
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
vp9_build_2nd_inter16x16_predictors_mb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
}
#if CONFIG_COMP_INTERINTRA_PRED
else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
vp9_build_interintra_16x16_predictors_mb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
}
#endif
}
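// Tokenize the macroblock's coefficients unless the RD loop chose to skip it;
// skipped blocks only update the skip-flag counts and contexts.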
if (!x->skip) {
#ifdef ENC_DEBUG
if (enc_debug) {
int i, j;
printf("\n");
printf("qcoeff\n");
for (i = 0; i < 400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i % 16 == 15) printf("\n");
}
printf("\n");
printf("predictor\n");
for (i = 0; i < 384; i++) {
printf("%3d ", xd->predictor[i]);
if (i % 16 == 15) printf("\n");
}
printf("\n");
printf("src_diff\n");
for (i = 0; i < 384; i++) {
printf("%3d ", x->src_diff[i]);
if (i % 16 == 15) printf("\n");
}
printf("\n");
printf("diff\n");
for (i = 0; i < 384; i++) {
printf("%3d ", xd->block[0].diff[i]);
if (i % 16 == 15) printf("\n");
}
printf("\n");
printf("final y\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++)
printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
printf("\n");
}
printf("\n");
printf("final u\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
printf("%3d ", xd->dst.u_buffer[i * xd->dst.uv_stride + j]);
printf("\n");
}
printf("final v\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
printf("%3d ", xd->dst.v_buffer[i * xd->dst.uv_stride + j]);
printf("\n");
}
}
#endif
vp9_tokenize_mb(cpi, xd, t, !output_enabled);
} else {
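// The skip-flag context is the sum of the left and above macroblocks' skip flags (0, 1 or 2).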
int mb_skip_context =
cpi->common.mb_no_coeff_skip ?
(x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
(x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
0;
mbmi->mb_skip_coeff = 1;
if (cpi->common.mb_no_coeff_skip) {
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp9_reset_mb_tokens_context(xd);
} else {
vp9_stuff_mb(cpi, xd, t, !output_enabled);
if (output_enabled)
cpi->skip_false_count[mb_skip_context]++;
}
}
if (output_enabled) {
int segment_id = mbmi->segment_id;
if (cpi->common.txfm_mode == TX_MODE_SELECT &&
!((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
(vp9_segfeature_active(&x->e_mbd, segment_id, SEG_LVL_EOB) &&
vp9_get_segdata(&x->e_mbd, segment_id, SEG_LVL_EOB) == 0))) {
assert(mbmi->txfm_size <= TX_16X16);
if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV) {
cpi->txfm_count_16x16p[mbmi->txfm_size]++;
} else if (mbmi->mode == I8X8_PRED ||
(mbmi->mode == SPLITMV &&
mbmi->partitioning != PARTITIONING_4X4)) {
cpi->txfm_count_8x8p[mbmi->txfm_size]++;
}
} else if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
mbmi->txfm_size = TX_16X16;
} else if (mbmi->mode != B_PRED &&
!(mbmi->mode == SPLITMV &&
mbmi->partitioning == PARTITIONING_4X4) &&
cpi->common.txfm_mode >= ALLOW_8X8) {
mbmi->txfm_size = TX_8X8;
} else {
mbmi->txfm_size = TX_4X4;
}
}
}
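// Encode one 32x32 superblock: build the prediction, then code the residual either
// with a single 32x32 transform or as four separate 16x16 macroblocks.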
static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const uint8_t *src = x->src.y_buffer;
uint8_t *dst = xd->dst.y_buffer;
const uint8_t *usrc = x->src.u_buffer;
uint8_t *udst = xd->dst.u_buffer;
const uint8_t *vsrc = x->src.v_buffer;
uint8_t *vdst = xd->dst.v_buffer;
int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
unsigned char ref_pred_flag;
int n;
TOKENEXTRA *tp[4];
int skip[4];
MODE_INFO *mi = x->e_mbd.mode_info_context;
unsigned int segment_id = mi->mbmi.segment_id;
ENTROPY_CONTEXT_PLANES ta[4], tl[4];
const int mis = cm->mode_info_stride;
if (cm->frame_type == KEY_FRAME) {
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
adjust_act_zbin(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
} else {
vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, cm);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
}
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled) {
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
if (xd->mode_info_context->mbmi.mode == ZEROMV) {
if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else if (xd->mode_info_context->mbmi.mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
}
vp9_update_zbin_extra(cpi, x);
// SET VARIOUS PREDICTION FLAGS
// Did the chosen reference frame match its predicted value.
ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
vp9_get_pred_ref(cm, xd)));
vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
}
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
vp9_build_intra_predictors_sby_s(&x->e_mbd);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd);
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
int ref_fb_idx;
assert(cm->frame_type != KEY_FRAME);
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
int second_ref_fb_idx;
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.gld_fb_idx;
else
second_ref_fb_idx = cpi->common.alt_fb_idx;
xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
recon_yoffset;
xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
recon_uvoffset;
xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
recon_uvoffset;
}
vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
}
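// A 32x32 transform codes the whole superblock's residual at once; otherwise each
// of the four 16x16 macroblocks is transformed and tokenized separately.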
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
if (!x->skip) {
vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff, src, src_y_stride,
dst, dst_y_stride);
vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride);
vp9_transform_sby_32x32(x);
vp9_transform_sbuv_16x16(x);
vp9_quantize_sby_32x32(x);
vp9_quantize_sbuv_16x16(x);
// TODO(rbultje): trellis optimize
vp9_inverse_transform_sbuv_16x16(&x->e_mbd.sb_coeff_data);
vp9_inverse_transform_sby_32x32(&x->e_mbd.sb_coeff_data);
vp9_recon_sby_s_c(&x->e_mbd, dst);
vp9_recon_sbuv_s_c(&x->e_mbd, udst, vdst);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled);
} else {
int mb_skip_context =
cpi->common.mb_no_coeff_skip ?
(mi - 1)->mbmi.mb_skip_coeff +
(mi - mis)->mbmi.mb_skip_coeff :
0;
mi->mbmi.mb_skip_coeff = 1;
if (cm->mb_no_coeff_skip) {
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp9_fix_contexts_sb(xd);
} else {
vp9_stuff_sb(cpi, xd, t, !output_enabled);
if (output_enabled)
cpi->skip_false_count[mb_skip_context]++;
}
}
// copy skip flag on all mb_mode_info contexts in this SB
// if this was a skip at this txfm size
if (mb_col < cm->mb_cols - 1)
mi[1].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
if (mb_row < cm->mb_rows - 1) {
mi[mis].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
if (mb_col < cm->mb_cols - 1)
mi[mis + 1].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
}
skip[0] = skip[2] = skip[1] = skip[3] = mi->mbmi.mb_skip_coeff;
} else {
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
xd->left_context = cm->left_context + y_idx + (mb_row & 2);
xd->above_context = cm->above_context + mb_col + x_idx;
memcpy(&ta[n], xd->above_context, sizeof(ta[n]));
memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
tp[n] = *t;
xd->mode_info_context = mi + x_idx + y_idx * mis;
if (!x->skip) {
vp9_subtract_mby_s_c(x->src_diff,
src + x_idx * 16 + y_idx * 16 * src_y_stride,
src_y_stride,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
dst_y_stride);
vp9_subtract_mbuv_s_c(x->src_diff,
usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
src_uv_stride,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
vp9_fidct_mb(x);
vp9_recon_mby_s_c(&x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp9_recon_mbuv_s_c(&x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
vp9_tokenize_mb(cpi, &x->e_mbd, t, !output_enabled);
skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
} else {
int mb_skip_context = cpi->common.mb_no_coeff_skip ?
(x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
(x->e_mbd.mode_info_context - mis)->mbmi.mb_skip_coeff :
0;
xd->mode_info_context->mbmi.mb_skip_coeff = skip[n] = 1;
if (cpi->common.mb_no_coeff_skip) {
// TODO(rbultje) this should be done per-sb instead of per-mb?
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp9_reset_mb_tokens_context(xd);
} else {
vp9_stuff_mb(cpi, xd, t, !output_enabled);
// TODO(rbultje) this should be done per-sb instead of per-mb?
if (output_enabled)
cpi->skip_false_count[mb_skip_context]++;
}
}
}
xd->mode_info_context = mi;
update_sb_skip_coeff_state(cpi, ta, tl, tp, t, skip, output_enabled);
}
if (output_enabled) {
if (cm->txfm_mode == TX_MODE_SELECT &&
!((cm->mb_no_coeff_skip && skip[0] && skip[1] && skip[2] && skip[3]) ||
(vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
cpi->txfm_count_32x32p[mi->mbmi.txfm_size]++;
} else {
TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ?
TX_32X32 :
cm->txfm_mode;
mi->mbmi.txfm_size = sz;
if (mb_col < cm->mb_cols - 1)
mi[1].mbmi.txfm_size = sz;
if (mb_row < cm->mb_rows - 1) {
mi[mis].mbmi.txfm_size = sz;
if (mb_col < cm->mb_cols - 1)
mi[mis + 1].mbmi.txfm_size = sz;
}
}
}
}
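// Encode one 64x64 superblock; the flow mirrors encode_superblock32, coding either
// four 32x32 blocks or sixteen 16x16 macroblocks.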
static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const uint8_t *src = x->src.y_buffer;
uint8_t *dst = xd->dst.y_buffer;
const uint8_t *usrc = x->src.u_buffer;
uint8_t *udst = xd->dst.u_buffer;
const uint8_t *vsrc = x->src.v_buffer;
uint8_t *vdst = xd->dst.v_buffer;
int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
unsigned char ref_pred_flag;
int n;
TOKENEXTRA *tp[16];
int skip[16];
MODE_INFO *mi = x->e_mbd.mode_info_context;
unsigned int segment_id = mi->mbmi.segment_id;
ENTROPY_CONTEXT_PLANES ta[16], tl[16];
const int mis = cm->mode_info_stride;
if (cm->frame_type == KEY_FRAME) {
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
adjust_act_zbin(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
} else {
vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, cm);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
}
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled) {
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
if (xd->mode_info_context->mbmi.mode == ZEROMV) {
if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
cpi->zbin_mode_boost = 0;
} else {
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
}
}
vp9_update_zbin_extra(cpi, x);
// Did the chosen reference frame match its predicted value.
ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
vp9_get_pred_ref(cm, xd)));
vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
}
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
vp9_build_intra_predictors_sb64y_s(&x->e_mbd);
vp9_build_intra_predictors_sb64uv_s(&x->e_mbd);
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
int ref_fb_idx;
assert(cm->frame_type != KEY_FRAME);
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer =
cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer =
cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer =
cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
int second_ref_fb_idx;
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.gld_fb_idx;
else
second_ref_fb_idx = cpi->common.alt_fb_idx;
xd->second_pre.y_buffer =
cpi->common.yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset;
xd->second_pre.u_buffer =
cpi->common.yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset;
xd->second_pre.v_buffer =
cpi->common.yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset;
}
vp9_build_inter64x64_predictors_sb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
}
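// With a 32x32 transform the 64x64 superblock is coded as four 32x32 blocks;
// otherwise as sixteen 16x16 macroblocks.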
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
int n;
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
xd->mode_info_context = mi + x_idx * 2 + mis * y_idx * 2;
xd->left_context = cm->left_context + (y_idx << 1);
xd->above_context = cm->above_context + mb_col + (x_idx << 1);
memcpy(&ta[n * 2], xd->above_context, sizeof(*ta) * 2);
memcpy(&tl[n * 2], xd->left_context, sizeof(*tl) * 2);
tp[n] = *t;
xd->mode_info_context = mi + x_idx * 2 + y_idx * mis * 2;
if (!x->skip) {
vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff,
src + x_idx * 32 + y_idx * 32 * src_y_stride,
src_y_stride,
dst + x_idx * 32 + y_idx * 32 * dst_y_stride,
dst_y_stride);
vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
usrc + x_idx * 16 + y_idx * 16 * src_uv_stride,
vsrc + x_idx * 16 + y_idx * 16 * src_uv_stride,
src_uv_stride,
udst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
vdst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
dst_uv_stride);
vp9_transform_sby_32x32(x);
vp9_transform_sbuv_16x16(x);
vp9_quantize_sby_32x32(x);
vp9_quantize_sbuv_16x16(x);
// TODO(rbultje): trellis optimize
vp9_inverse_transform_sbuv_16x16(&x->e_mbd.sb_coeff_data);
vp9_inverse_transform_sby_32x32(&x->e_mbd.sb_coeff_data);
vp9_recon_sby_s_c(&x->e_mbd,
dst + 32 * x_idx + 32 * y_idx * dst_y_stride);
vp9_recon_sbuv_s_c(&x->e_mbd,
udst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
vdst + x_idx * 16 + y_idx * 16 * dst_uv_stride);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled);
} else {
int mb_skip_context = cpi->common.mb_no_coeff_skip ?
(mi - 1)->mbmi.mb_skip_coeff +
(mi - mis)->mbmi.mb_skip_coeff : 0;
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
if (cm->mb_no_coeff_skip) {
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp9_fix_contexts_sb(xd);
} else {
vp9_stuff_sb(cpi, xd, t, !output_enabled);
if (output_enabled)
cpi->skip_false_count[mb_skip_context]++;
}
}
// copy skip flag on all mb_mode_info contexts in this SB
// if this was a skip at this txfm size
if (mb_col + x_idx * 2 < cm->mb_cols - 1)
mi[mis * y_idx * 2 + x_idx * 2 + 1].mbmi.mb_skip_coeff =
mi[mis * y_idx * 2 + x_idx * 2].mbmi.mb_skip_coeff;
if (mb_row + y_idx * 2 < cm->mb_rows - 1) {
mi[mis * y_idx * 2 + x_idx * 2 + mis].mbmi.mb_skip_coeff =
mi[mis * y_idx * 2 + x_idx * 2].mbmi.mb_skip_coeff;
if (mb_col + x_idx * 2 < cm->mb_cols - 1)
mi[mis * y_idx * 2 + x_idx * 2 + mis + 1].mbmi.mb_skip_coeff =
mi[mis * y_idx * 2 + x_idx * 2].mbmi.mb_skip_coeff;
}
skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
}
} else {
for (n = 0; n < 16; n++) {
const int x_idx = n & 3, y_idx = n >> 2;
xd->left_context = cm->left_context + y_idx;
xd->above_context = cm->above_context + mb_col + x_idx;
memcpy(&ta[n], xd->above_context, sizeof(ta[n]));
memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
tp[n] = *t;
xd->mode_info_context = mi + x_idx + y_idx * mis;
if (!x->skip) {
vp9_subtract_mby_s_c(x->src_diff,
src + x_idx * 16 + y_idx * 16 * src_y_stride,
src_y_stride,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
dst_y_stride);
vp9_subtract_mbuv_s_c(x->src_diff,
usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
src_uv_stride,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
vp9_fidct_mb(x);
vp9_recon_mby_s_c(&x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp9_recon_mbuv_s_c(&x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
vp9_tokenize_mb(cpi, &x->e_mbd, t, !output_enabled);
skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
} else {
int mb_skip_context = cpi->common.mb_no_coeff_skip ?
(x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
(x->e_mbd.mode_info_context - mis)->mbmi.mb_skip_coeff : 0;
xd->mode_info_context->mbmi.mb_skip_coeff = skip[n] = 1;
if (cpi->common.mb_no_coeff_skip) {
// TODO(rbultje) this should be done per-sb instead of per-mb?
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp9_reset_mb_tokens_context(xd);
} else {
vp9_stuff_mb(cpi, xd, t, !output_enabled);
// TODO(rbultje) this should be done per-sb instead of per-mb?
if (output_enabled)
cpi->skip_false_count[mb_skip_context]++;
}
}
}
}
xd->mode_info_context = mi;
update_sb64_skip_coeff_state(cpi, ta, tl, tp, t, skip, output_enabled);
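// Record the chosen transform size for probability updates when TX_MODE_SELECT is active
// and the superblock is not entirely skipped; otherwise force the size implied by the
// frame-level txfm_mode.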
if (output_enabled) {
if (cm->txfm_mode == TX_MODE_SELECT &&
!((cm->mb_no_coeff_skip &&
((mi->mbmi.txfm_size == TX_32X32 &&
skip[0] && skip[1] && skip[2] && skip[3]) ||
(mi->mbmi.txfm_size != TX_32X32 &&
skip[0] && skip[1] && skip[2] && skip[3] &&
skip[4] && skip[5] && skip[6] && skip[7] &&
skip[8] && skip[9] && skip[10] && skip[11] &&
skip[12] && skip[13] && skip[14] && skip[15]))) ||
(vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
cpi->txfm_count_32x32p[mi->mbmi.txfm_size]++;
} else {
int x, y;
TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ?
TX_32X32 :
cm->txfm_mode;
for (y = 0; y < 4; y++) {
for (x = 0; x < 4; x++) {
if (mb_col + x < cm->mb_cols && mb_row + y < cm->mb_rows) {
mi[mis * y + x].mbmi.txfm_size = sz;
}
}
}