Commit 94266f4f authored by Jingning Han

Make loop filter support recursive transform block partitioning

This commit allows the loop filter to account for the recursive
transform block partition when selecting the filter and mask.

Change-Id: I62b6c2dcc0497cbe1f264b03c46163f55d2c9752
parent 6727943c
......@@ -1224,61 +1224,104 @@ void vp10_filter_block_plane_non420(VP10_COMMON *cm,
!blk_row : 1;
const int skip_this_r = skip_this && !block_edge_above;
#if CONFIG_VAR_TX
TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
? get_uv_tx_size(mbmi, plane) : mbmi->tx_size;
#else
const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
? get_uv_tx_size(&mi[0].mbmi, plane)
: mi[0].mbmi.tx_size;
? get_uv_tx_size(mbmi, plane)
: mbmi->tx_size;
#endif
const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
TX_SIZE tx_size_c = tx_size;
TX_SIZE tx_size_r = tx_size;
int tx_size_mask = 0;
// Filter level can vary per MI
if (!(lfl[(r << 3) + (c >> ss_x)] =
get_filter_level(&cm->lf_info, mbmi)))
continue;
if (tx_size == TX_32X32)
tx_size_mask = 3;
else if (tx_size == TX_16X16)
tx_size_mask = 1;
else
tx_size_mask = 0;
#if CONFIG_VAR_TX
if (is_inter_block(mbmi) && !mbmi->skip)
tx_size = (plane->plane_type == PLANE_TYPE_UV) ?
get_uv_tx_size_impl(mbmi->inter_tx_size[blk_row * 8 + blk_col],
sb_type, ss_x, ss_y) :
mbmi->inter_tx_size[blk_row * 8 + blk_col];
tx_size_r = VPXMIN(tx_size, cm->above_txfm_context[mi_col + c]);
tx_size_c = VPXMIN(tx_size, cm->left_txfm_context[(mi_row + r) & 0x07]);
cm->above_txfm_context[mi_col + c] = tx_size;
cm->left_txfm_context[(mi_row + r) & 0x07] = tx_size;
#endif
// Build masks based on the transform size of each block
if (tx_size == TX_32X32) {
if (!skip_this_c && ((c >> ss_x) & 3) == 0) {
// handle vertical mask
if (tx_size_c == TX_32X32) {
if (!skip_this_c && ((c >> ss_x) & tx_size_mask) == 0) {
if (!skip_border_4x4_c)
mask_16x16_c |= 1 << (c >> ss_x);
else
mask_8x8_c |= 1 << (c >> ss_x);
}
if (!skip_this_r && ((r >> ss_y) & 3) == 0) {
if (!skip_border_4x4_r)
mask_16x16[r] |= 1 << (c >> ss_x);
else
mask_8x8[r] |= 1 << (c >> ss_x);
}
} else if (tx_size == TX_16X16) {
if (!skip_this_c && ((c >> ss_x) & 1) == 0) {
} else if (tx_size_c == TX_16X16) {
if (!skip_this_c && ((c >> ss_x) & tx_size_mask) == 0) {
if (!skip_border_4x4_c)
mask_16x16_c |= 1 << (c >> ss_x);
else
mask_8x8_c |= 1 << (c >> ss_x);
}
if (!skip_this_r && ((r >> ss_y) & 1) == 0) {
if (!skip_border_4x4_r)
mask_16x16[r] |= 1 << (c >> ss_x);
else
mask_8x8[r] |= 1 << (c >> ss_x);
}
} else {
// force 8x8 filtering on 32x32 boundaries
if (!skip_this_c) {
if (tx_size == TX_8X8 || ((c >> ss_x) & 3) == 0)
if (!skip_this_c && ((c >> ss_x) & tx_size_mask) == 0) {
if (tx_size_c == TX_8X8 || ((c >> ss_x) & 3) == 0)
mask_8x8_c |= 1 << (c >> ss_x);
else
mask_4x4_c |= 1 << (c >> ss_x);
}
if (!skip_this_r) {
if (tx_size == TX_8X8 || ((r >> ss_y) & 3) == 0)
if (!skip_this && tx_size_c < TX_8X8 && !skip_border_4x4_c &&
((c >> ss_x) & tx_size_mask) == 0)
mask_4x4_int[r] |= 1 << (c >> ss_x);
}
// set horizontal mask
if (tx_size_r == TX_32X32) {
if (!skip_this_r && ((r >> ss_y) & tx_size_mask) == 0) {
if (!skip_border_4x4_r)
mask_16x16[r] |= 1 << (c >> ss_x);
else
mask_8x8[r] |= 1 << (c >> ss_x);
}
} else if (tx_size_r == TX_16X16) {
if (!skip_this_r && ((r >> ss_y) & tx_size_mask) == 0) {
if (!skip_border_4x4_r)
mask_16x16[r] |= 1 << (c >> ss_x);
else
mask_8x8[r] |= 1 << (c >> ss_x);
}
} else {
// force 8x8 filtering on 32x32 boundaries
if (!skip_this_r && ((r >> ss_y) & tx_size_mask) == 0) {
if (tx_size_r == TX_8X8 || ((r >> ss_y) & 3) == 0)
mask_8x8[r] |= 1 << (c >> ss_x);
else
mask_4x4[r] |= 1 << (c >> ss_x);
}
if (!skip_this && tx_size < TX_8X8 && !skip_border_4x4_c)
if (!skip_this && tx_size_r < TX_8X8 && !skip_border_4x4_c &&
((r >> ss_y) & tx_size_mask) == 0)
mask_4x4_int[r] |= 1 << (c >> ss_x);
}
}
......@@ -1592,9 +1635,14 @@ void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
path = LF_PATH_SLOW;
#endif
#if CONFIG_VAR_TX
memset(cm->above_txfm_context, TX_SIZES, cm->mi_cols);
#endif
for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) {
MODE_INFO **mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
#if CONFIG_VAR_TX
memset(cm->left_txfm_context, TX_SIZES, 8);
#endif
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
int plane;
......
......@@ -303,6 +303,7 @@ typedef struct VP10Common {
ENTROPY_CONTEXT *above_context;
#if CONFIG_VAR_TX
TXFM_CONTEXT *above_txfm_context;
TXFM_CONTEXT left_txfm_context[8];
#endif
int above_context_alloc_cols;
......
......@@ -514,7 +514,7 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
int dst_stride = xd->plane[plane].dst.stride;
unsigned int tmp_sse;
PREDICTION_MODE mode = (plane == 0) ?
get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
#if CONFIG_VP9_HIGHBITDEPTH
vp10_encode_block_intra(plane, block, blk_row, blk_col,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment