Commit 4529c68b authored by John Koleszar

Separate transform and quant from vp9_encode_sb

This allows removing a large number of transform-size-specific functions,
and supports 4:4:4/alpha by routing all code through the
subsampling-aware path.

Change-Id: Ieb085cebe9f37f24fc24de179898b22abfda08a4
parent 3f4e8063
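
The caller-facing shape of the change, taken directly from the encode_superblock hunk further down: a per-transform-size chain of transform, quantize, optimize, inverse-transform, and recon calls collapses into one subsampling-aware entry point.

    /* Before (removed below): one chain per transform size. */
    vp9_transform_sbuv_4x4(x, bsize);
    vp9_quantize_sbuv_4x4(x, bsize);
    if (x->optimize)
      vp9_optimize_sbuv(cm, x, bsize);
    vp9_inverse_transform_sbuv_4x4(xd, bsize);
    vp9_recon_sbuv(xd, bsize);

    /* After: a single entry point that handles any size and subsampling. */
    vp9_encode_sbuv(cm, x, bsize);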
@@ -19,153 +19,3 @@ void vp9_inverse_transform_b_4x4(MACROBLOCKD *xd, int eob,
  else
    xd->inv_txm4x4(dqcoeff, diff, pitch);
}

void vp9_inverse_transform_b_8x8(int16_t *input_dqcoeff, int16_t *output_coeff,
                                 int pitch) {
  vp9_short_idct8x8(input_dqcoeff, output_coeff, pitch);
}

void vp9_inverse_transform_b_16x16(int16_t *input_dqcoeff,
                                   int16_t *output_coeff, int pitch) {
  vp9_short_idct16x16(input_dqcoeff, output_coeff, pitch);
}

void vp9_inverse_transform_sby_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 3, bw = 1 << bwl;
  const int bh = 1 << (b_height_log2(bsize) - 3);
  const int stride = 32 << bwl;
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> bwl;
    const int offset = x_idx * 32 + y_idx * 32 * stride;

    vp9_short_idct32x32(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 1024),
                        xd->plane[0].diff + offset, stride * 2);
  }
}

void vp9_inverse_transform_sby_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 2, bw = 1 << bwl;
  const int bh = 1 << (b_height_log2(bsize) - 2);
  const int stride = 16 << bwl, bstride = 4 << bwl;
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> bwl;
    const TX_TYPE tx_type = get_tx_type_16x16(xd,
                                              (y_idx * bstride + x_idx) * 4);
    const int offset = x_idx * 16 + y_idx * 16 * stride;

    if (tx_type == DCT_DCT) {
      vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 256),
                                    xd->plane[0].diff + offset, stride * 2);
    } else {
      vp9_short_iht16x16(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 256),
                         xd->plane[0].diff + offset, stride, tx_type);
    }
  }
}

void vp9_inverse_transform_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 1, bw = 1 << bwl;
  const int bh = 1 << (b_height_log2(bsize) - 1);
  const int stride = 8 << bwl, bstride = 2 << bwl;
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> bwl;
    const TX_TYPE tx_type = get_tx_type_8x8(xd, (y_idx * bstride + x_idx) * 2);
    const int offset = x_idx * 8 + y_idx * 8 * stride;

    if (tx_type == DCT_DCT) {
      vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 64),
                                  xd->plane[0].diff + offset, stride * 2);
    } else {
      vp9_short_iht8x8(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 64),
                       xd->plane[0].diff + offset, stride, tx_type);
    }
  }
}

void vp9_inverse_transform_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize), bw = 1 << bwl;
  const int bh = 1 << b_height_log2(bsize);
  const int stride = 4 << bwl, bstride = 1 << bwl;
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> bwl;
    const TX_TYPE tx_type = get_tx_type_4x4(xd, y_idx * bstride + x_idx);
    const int offset = x_idx * 4 + y_idx * 4 * stride;

    if (tx_type == DCT_DCT) {
      vp9_inverse_transform_b_4x4(xd, xd->plane[0].eobs[n],
                                  BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 16),
                                  xd->plane[0].diff + offset, stride * 2);
    } else {
      vp9_short_iht4x4(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 16),
                       xd->plane[0].diff + offset, stride, tx_type);
    }
  }
}

void vp9_inverse_transform_sbuv_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  assert(bsize == BLOCK_SIZE_SB64X64);
  vp9_short_idct32x32(xd->plane[1].dqcoeff, xd->plane[1].diff, 64);
  vp9_short_idct32x32(xd->plane[2].dqcoeff, xd->plane[2].diff, 64);
}

void vp9_inverse_transform_sbuv_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 2, bhl = b_height_log2(bsize) - 2;
  const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
  const int stride = 16 << (bwl - 1);
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
    const int off = x_idx * 16 + y_idx * stride * 16;

    vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 256),
                                  xd->plane[1].diff + off, stride * 2);
    vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 256),
                                  xd->plane[2].diff + off, stride * 2);
  }
}

void vp9_inverse_transform_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 1, bhl = b_height_log2(bsize) - 1;
  const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
  const int stride = 8 << (bwl - 1);
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
    const int off = x_idx * 8 + y_idx * stride * 8;

    vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 64),
                                xd->plane[1].diff + off, stride * 2);
    vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 64),
                                xd->plane[2].diff + off, stride * 2);
  }
}

void vp9_inverse_transform_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
  const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
  const int stride = 4 << (bwl - 1);
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
    const int off = x_idx * 4 + y_idx * stride * 4;

    vp9_inverse_transform_b_4x4(xd, xd->plane[1].eobs[n],
                                BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 16),
                                xd->plane[1].diff + off, stride * 2);
    vp9_inverse_transform_b_4x4(xd, xd->plane[2].eobs[n],
                                BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 16),
                                xd->plane[2].diff + off, stride * 2);
  }
}
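
The per-size wrappers removed above all reduce to the raw idct calls they wrap. A hedged sketch of the size-generic dispatch the unified path can use instead (DCT-only; the vp9_short_iht* variants seen above would slot into the same switch). The helper inverse_transform_block_sketch is illustrative, not code from this commit.

static void inverse_transform_block_sketch(MACROBLOCKD *xd, int plane,
                                           int block, TX_SIZE tx_size,
                                           int16_t *dqcoeff, int16_t *diff,
                                           int stride) {
  /* One switch replaces four nearly identical per-size wrappers. */
  switch (tx_size) {
    case TX_32X32:
      vp9_short_idct32x32(dqcoeff, diff, stride * 2);
      break;
    case TX_16X16:
      vp9_short_idct16x16(dqcoeff, diff, stride * 2);
      break;
    case TX_8X8:
      vp9_short_idct8x8(dqcoeff, diff, stride * 2);
      break;
    default:  /* TX_4X4 keeps its eob-based shortcut */
      vp9_inverse_transform_b_4x4(xd, xd->plane[plane].eobs[block],
                                  dqcoeff, diff, stride * 2);
      break;
  }
}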
@@ -18,20 +18,4 @@
void vp9_inverse_transform_b_4x4(MACROBLOCKD *xd, int eob,
                                 int16_t *dqcoeff, int16_t *diff,
                                 int pitch);

void vp9_inverse_transform_b_8x8(int16_t *input_dqcoeff,
                                 int16_t *output_coeff, int pitch);

void vp9_inverse_transform_b_16x16(int16_t *input_dqcoeff,
                                   int16_t *output_coeff, int pitch);

void vp9_inverse_transform_sby_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);

void vp9_inverse_transform_sbuv_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);

#endif  // VP9_COMMON_VP9_INVTRANS_H_
@@ -164,12 +164,12 @@ struct macroblock {
  void (*fwd_txm16x16)(int16_t *input, int16_t *output, int pitch);
  void (*quantize_b_4x4)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
                         int y_blocks);
#if !CONFIG_SB8X8
  void (*quantize_b_4x4_pair)(MACROBLOCK *x, int b_idx1, int b_idx2,
                              int y_blocks);
  void (*quantize_b_16x16)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
                           int y_blocks);
  void (*quantize_b_8x8)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
                         int y_blocks);
#endif
};

#endif  // VP9_ENCODER_VP9_BLOCK_H_
@@ -2435,13 +2435,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
    vp9_encode_intra4x4mby(x, bsize);
    vp9_build_intra_predictors_sbuv_s(&x->e_mbd, bsize);
    vp9_subtract_sbuv(x, bsize);
    vp9_transform_sbuv_4x4(x, bsize);
    vp9_quantize_sbuv_4x4(x, bsize);
    if (x->optimize)
      vp9_optimize_sbuv(cm, x, bsize);
    vp9_inverse_transform_sbuv_4x4(xd, bsize);
    vp9_recon_sbuv(xd, bsize);
    vp9_encode_sbuv(cm, x, bsize);

    if (output_enabled)
      sum_intra_stats(cpi, x);
......
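
What "routing all code through the subsampling-aware path" means in practice, as a hedged sketch: block counts per plane are derived from that plane's own subsampling factors rather than hard-coded for 4:2:0, so 4:4:4 and alpha planes fall out of the same loop. The fields subsampling_x/subsampling_y are assumed to be the macroblockd_plane members of this era; the helper itself is illustrative, not from the commit.

static int plane_blocks_sketch(MACROBLOCKD *xd, int plane,
                               BLOCK_SIZE_TYPE bsize) {
  /* Width/height of this plane in 4x4 blocks. For luma the subsampling
   * terms are zero; for 4:2:0 chroma they are one; for a 4:4:4 or alpha
   * plane they are zero again, so no special-casing is needed. */
  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
  const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
  return 1 << (bwl + bhl);
}

With geometry derived this way, the fixed uoff / (uoff * 3) >> 1 chroma arithmetic seen in the removed quantize loops below becomes unnecessary.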
@@ -104,63 +104,16 @@ void vp9_encode_intra4x4mby(MACROBLOCK *mb, BLOCK_SIZE_TYPE bsize) {
void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x) {
  MACROBLOCKD *xd = &x->e_mbd;
  TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;

  vp9_build_intra_predictors_sby_s(xd, BLOCK_SIZE_MB16X16);
  vp9_subtract_sby(x, BLOCK_SIZE_MB16X16);

  switch (tx_size) {
    case TX_16X16:
      vp9_transform_sby_16x16(x, BLOCK_SIZE_MB16X16);
      vp9_quantize_sby_16x16(x, BLOCK_SIZE_MB16X16);
      if (x->optimize)
        vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
      vp9_inverse_transform_sby_16x16(xd, BLOCK_SIZE_MB16X16);
      break;
    case TX_8X8:
      vp9_transform_sby_8x8(x, BLOCK_SIZE_MB16X16);
      vp9_quantize_sby_8x8(x, BLOCK_SIZE_MB16X16);
      if (x->optimize)
        vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
      vp9_inverse_transform_sby_8x8(xd, BLOCK_SIZE_MB16X16);
      break;
    default:
      vp9_transform_sby_4x4(x, BLOCK_SIZE_MB16X16);
      vp9_quantize_sby_4x4(x, BLOCK_SIZE_MB16X16);
      if (x->optimize)
        vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
      vp9_inverse_transform_sby_4x4(xd, BLOCK_SIZE_MB16X16);
      break;
  }

  vp9_recon_sby(xd, BLOCK_SIZE_MB16X16);
  vp9_encode_sby(cm, x, BLOCK_SIZE_MB16X16);
}

void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
  MACROBLOCKD *xd = &x->e_mbd;
  TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;

  vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
  vp9_subtract_sbuv(x, BLOCK_SIZE_MB16X16);

  switch (tx_size) {
    case TX_4X4:
      vp9_transform_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
      vp9_quantize_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
      if (x->optimize)
        vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
      vp9_inverse_transform_sbuv_4x4(xd, BLOCK_SIZE_MB16X16);
      break;
    default:  // 16x16 or 8x8
      vp9_transform_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
      vp9_quantize_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
      if (x->optimize)
        vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
      vp9_inverse_transform_sbuv_8x8(xd, BLOCK_SIZE_MB16X16);
      break;
  }

  vp9_recon_sbuv(xd, BLOCK_SIZE_MB16X16);
  vp9_encode_sbuv(cm, x, BLOCK_SIZE_MB16X16);
}

#if !CONFIG_SB8X8
......
@@ -22,18 +22,6 @@ typedef struct {
  MV_REFERENCE_FRAME second_ref_frame;
} MODE_DEFINITION;

#if !CONFIG_SB8X8
#endif

void vp9_transform_sby_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);

struct optimize_ctx {
  ENTROPY_CONTEXT ta[MAX_MB_PLANE][16];
  ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
@@ -49,6 +37,14 @@ void vp9_optimize_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
                       BLOCK_SIZE_TYPE bsize);

void vp9_encode_sb(VP9_COMMON *const cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_encode_sby(VP9_COMMON *const cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_encode_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
                     BLOCK_SIZE_TYPE bsize);

void vp9_xform_quant_sby(VP9_COMMON *const cm, MACROBLOCK *x,
                         BLOCK_SIZE_TYPE bsize);
void vp9_xform_quant_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
                          BLOCK_SIZE_TYPE bsize);

void vp9_subtract_block(int rows, int cols,
                        int16_t *diff_ptr, int diff_stride,
......
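
A hedged usage sketch of the split the two new declarations above enable: transform plus quantization can now run on their own, for example when only coefficient costs are needed, while vp9_encode_sb* remains the full subtract/transform/quantize/reconstruct path. The surrounding helper is illustrative, not code from the commit.

static void rd_trial_sketch(VP9_COMMON *cm, MACROBLOCK *x,
                            BLOCK_SIZE_TYPE bsize) {
  /* Forward transform + quantize only; no inverse transform, no recon. */
  vp9_xform_quant_sby(cm, x, bsize);
  vp9_xform_quant_sbuv(cm, x, bsize);
  /* ...rate/distortion of the quantized coefficients could be measured
   * here before committing to a full vp9_encode_sb(cm, x, bsize)... */
}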
@@ -867,9 +867,10 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
  }

  cpi->mb.quantize_b_4x4 = vp9_regular_quantize_b_4x4;
#if !CONFIG_SB8X8
  cpi->mb.quantize_b_4x4_pair = vp9_regular_quantize_b_4x4_pair;
  cpi->mb.quantize_b_8x8 = vp9_regular_quantize_b_8x8;
  cpi->mb.quantize_b_16x16 = vp9_regular_quantize_b_16x16;
#endif

  vp9_init_quantizer(cpi);
......
@@ -133,6 +133,7 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
           pt_scan, 1);
}

#if !CONFIG_SB8X8
void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
                                int y_blocks) {
  MACROBLOCKD *const xd = &mb->e_mbd;
@@ -154,131 +155,6 @@ void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
           pt_scan, 1);
}

void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
                                  int y_blocks) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);
  const int *pt_scan = get_scan_16x16(tx_type);

  quantize(mb->plane[pb_idx.plane].zrun_zbin_boost,
           BLOCK_OFFSET(mb->plane[pb_idx.plane].coeff, pb_idx.block, 16),
           256, mb->skip_block,
           mb->plane[pb_idx.plane].zbin,
           mb->plane[pb_idx.plane].round,
           mb->plane[pb_idx.plane].quant,
           mb->plane[pb_idx.plane].quant_shift,
           BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff, pb_idx.block, 16),
           BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff, pb_idx.block, 16),
           xd->plane[pb_idx.plane].dequant,
           mb->plane[pb_idx.plane].zbin_extra,
           &xd->plane[pb_idx.plane].eobs[pb_idx.block],
           pt_scan, 1);
}

void vp9_regular_quantize_b_32x32(MACROBLOCK *mb, int b_idx, int y_blocks) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);

  quantize(mb->plane[pb_idx.plane].zrun_zbin_boost,
           BLOCK_OFFSET(mb->plane[pb_idx.plane].coeff, pb_idx.block, 16),
           1024, mb->skip_block,
           mb->plane[pb_idx.plane].zbin,
           mb->plane[pb_idx.plane].round,
           mb->plane[pb_idx.plane].quant,
           mb->plane[pb_idx.plane].quant_shift,
           BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff, pb_idx.block, 16),
           BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff, pb_idx.block, 16),
           xd->plane[pb_idx.plane].dequant,
           mb->plane[pb_idx.plane].zbin_extra,
           &xd->plane[pb_idx.plane].eobs[pb_idx.block],
           vp9_default_zig_zag1d_32x32, 2);
}

void vp9_quantize_sby_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
  const int bw = 1 << (b_width_log2(bsize) - 3);
  const int bh = 1 << (b_height_log2(bsize) - 3);
  int n;

  for (n = 0; n < bw * bh; n++)
    vp9_regular_quantize_b_32x32(x, n * 64, bw * bh * 64);
}

void vp9_quantize_sby_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 2, bw = 1 << bwl;
  const int bh = 1 << (b_height_log2(bsize) - 2);
  const int bstride = 16 << bwl;
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> bwl;
    TX_TYPE tx_type = get_tx_type_16x16(&x->e_mbd,
                                        4 * x_idx + y_idx * bstride);
    x->quantize_b_16x16(x, n * 16, tx_type, 16 * bw * bh);
  }
}

void vp9_quantize_sby_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 1, bw = 1 << bwl;
  const int bh = 1 << (b_height_log2(bsize) - 1);
  const int bstride = 4 << bwl;
  int n;

  for (n = 0; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> bwl;
    TX_TYPE tx_type = get_tx_type_8x8(&x->e_mbd,
                                      2 * x_idx + y_idx * bstride);
    x->quantize_b_8x8(x, n * 4, tx_type, 4 * bw * bh);
  }
}

void vp9_quantize_sby_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize), bw = 1 << bwl;
  const int bh = 1 << b_height_log2(bsize);
  MACROBLOCKD *const xd = &x->e_mbd;
  int n;

  for (n = 0; n < bw * bh; n++) {
    const TX_TYPE tx_type = get_tx_type_4x4(xd, n);
    x->quantize_b_4x4(x, n, tx_type, bw * bh);
  }
}

void vp9_quantize_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
  assert(bsize == BLOCK_SIZE_SB64X64);
  vp9_regular_quantize_b_32x32(x, 256, 256);
  vp9_regular_quantize_b_32x32(x, 320, 256);
}

void vp9_quantize_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 2;
  const int bhl = b_height_log2(bsize) - 2;
  const int uoff = 16 << (bhl + bwl);
  int i;

  for (i = uoff; i < ((uoff * 3) >> 1); i += 16)
    x->quantize_b_16x16(x, i, DCT_DCT, uoff);
}

void vp9_quantize_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize) - 1;
  const int bhl = b_height_log2(bsize) - 1;
  const int uoff = 4 << (bhl + bwl);
  int i;

  for (i = uoff; i < ((uoff * 3) >> 1); i += 4)
    x->quantize_b_8x8(x, i, DCT_DCT, uoff);
}

void vp9_quantize_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
  const int bwl = b_width_log2(bsize);
  const int bhl = b_height_log2(bsize);
  const int uoff = 1 << (bhl + bwl);
  int i;

  for (i = uoff; i < ((uoff * 3) >> 1); i++)
    x->quantize_b_4x4(x, i, DCT_DCT, uoff);
}
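
A worked example of the chroma block-index arithmetic used by the removed vp9_quantize_sbuv_* loops above, assuming the usual plane_block_idx layout (luma blocks first, then U, then V):

/* For a 64x64 superblock quantized with 4x4 transforms (4:2:0):
 *   uoff = 1 << (bwl + bhl) = 1 << 8 = 256   // number of luma 4x4 blocks
 *   U plane: b_idx 256..319, V plane: b_idx 320..383
 *   loop bound (uoff * 3) >> 1 = 384 stops after the last V block.
 * Under this layout plane_block_idx(256, 300) resolves to plane 1 (U),
 * block 44, and the 32x32 case above quantizes U at b_idx 256 and V at
 * b_idx 320 = 256 * 5 / 4, consistent with vp9_quantize_sbuv_32x32. */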
/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
 * these two C functions if corresponding optimized routine is not available.
 * NEON optimized version implements currently the fast quantization for pair
@@ -288,6 +164,7 @@ void vp9_regular_quantize_b_4x4_pair(MACROBLOCK *x, int b_idx1, int b_idx2,
  vp9_regular_quantize_b_4x4(x, b_idx1, DCT_DCT, y_blocks);
  vp9_regular_quantize_b_4x4(x, b_idx2, DCT_DCT, y_blocks);
}
#endif

static void invert_quant(int16_t *quant, uint8_t *shift, int d) {
  unsigned t;
......
@@ -31,20 +31,6 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
                                int y_blocks);
void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
                                int y_blocks);
void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
                                  int y_blocks);
void vp9_regular_quantize_b_32x32(MACROBLOCK *mb, int b_idx,
                                  int y_blocks);

void vp9_quantize_sby_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_quantize_sby_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_quantize_sby_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_quantize_sby_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_quantize_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_quantize_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_quantize_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_quantize_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);

struct VP9_COMP;

extern void vp9_set_quantizer(struct VP9_COMP *cpi, int Q);
......