Commit f88558fb authored by Ronald S. Bultje

Change encoder vp8_ and vp8cx_ public symbol prefixes to vp9_.

Change-Id: Ie2e3652591b010ded10c216501ce24fd95d0aec5
parent fe178850
@@ -11,14 +11,14 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-extern unsigned int vp8_sad16x16_sse3(
+extern unsigned int vp9_sad16x16_sse3(
 unsigned char *src_ptr,
 int src_stride,
 unsigned char *ref_ptr,
 int ref_stride,
 int max_err);
-extern void vp8_sad16x16x3_sse3(
+extern void vp9_sad16x16x3_sse3(
 unsigned char *src_ptr,
 int src_stride,
 unsigned char *ref_ptr,
@@ -43,14 +43,14 @@ extern void vp8_makemask_sse3(
 int ut,
 int vt);
-unsigned int vp8_sad16x16_unmasked_wmt(
+unsigned int vp9_sad16x16_unmasked_wmt(
 unsigned char *src_ptr,
 int src_stride,
 unsigned char *ref_ptr,
 int ref_stride,
 unsigned char *mask);
-unsigned int vp8_sad16x16_masked_wmt(
+unsigned int vp9_sad16x16_masked_wmt(
 unsigned char *src_ptr,
 int src_stride,
 unsigned char *ref_ptr,
@@ -503,7 +503,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
 vp8_growmaskmb_sse3(dym, dym2);
-e = vp8_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
+e = vp9_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
 if (e < beste) {
 bui = i;
@@ -529,7 +529,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
 for (i = -32; i < 32; i++) {
 unsigned char *dyz = i * dyp + dy;
 for (j = -32; j < 32; j++) {
-e = vp8_sad16x16_masked_wmt(y, yp, dyz + j, dyp, dym2);
+e = vp9_sad16x16_masked_wmt(y, yp, dyz + j, dyp, dym2);
 if (e < beste) {
 bmi = i;
 bmj = j;
@@ -581,7 +581,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
 vp8_growmaskmb_sse3(dym, dym2);
-obeste = vp8_sad16x16_masked_wmt(y, yp, dy + bmi * dyp + bmj, dyp, dym2);
+obeste = vp9_sad16x16_masked_wmt(y, yp, dy + bmi * dyp + bmj, dyp, dym2);
 beste = 0xffffffff;
@@ -589,7 +589,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
 for (i = -32; i < 32; i++) {
 unsigned char *dyz = i * dyp + dy;
 for (j = -32; j < 32; j++) {
-e = vp8_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
+e = vp9_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
 if (e < beste) {
 bui = i;
@@ -698,8 +698,8 @@ int mainz(int argc, char *argv[]) {
 vp8_growmaskmb_sse3(ym, ym3);
-a = vp8_sad16x16_masked_wmt(str, 16, sts, 16, ym3);
-b = vp8_sad16x16_unmasked_wmt(str, 16, sts, 16, ym3);
+a = vp9_sad16x16_masked_wmt(str, 16, sts, 16, ym3);
+b = vp9_sad16x16_unmasked_wmt(str, 16, sts, 16, ym3);
 vp8_masked_predictor_wmt(str, sts, 16, ym, 16, ym3);
@@ -738,7 +738,7 @@ int mainz(int argc, char *argv[]) {
 int bmi, bmj, bui, buj, bwm;
 unsigned char ym[256];
-if (vp8_sad16x16_sse3(ys + c, y_stride, yd + c, y_stride, 0xffff) == 0)
+if (vp9_sad16x16_sse3(ys + c, y_stride, yd + c, y_stride, 0xffff) == 0)
 bmi = bmj = bui = buj = bwm = 0;
 else {
 COLOR_SEG_ELEMENT cs[5];
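Editor's note: the hunks above touch an exhaustive ±32-pel search. For every candidate offset the code computes a 16x16 SAD (masked or unmasked) against the source block and keeps the best position. Below is a hedged plain-C sketch of that pattern; sad16x16 and full_search are hypothetical stand-ins for the renamed SSE3/WMT kernels, not code from the tree.

/* Illustrative only: plain-C stand-in for the renamed SIMD SAD kernels. */
#include <stdlib.h>

static unsigned int sad16x16(const unsigned char *src, int src_stride,
                             const unsigned char *ref, int ref_stride) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < 16; r++)
    for (c = 0; c < 16; c++)
      sad += abs(src[r * src_stride + c] - ref[r * ref_stride + c]);
  return sad;
}

/* Exhaustive +/-32 pel search, mirroring the loops in
   fast_masked_motion_search(): keep the offset (i, j) with minimum SAD. */
static unsigned int full_search(const unsigned char *y, int yp,
                                const unsigned char *dy, int dyp,
                                int *best_i, int *best_j) {
  unsigned int beste = 0xffffffff;
  int i, j;
  for (i = -32; i < 32; i++) {
    const unsigned char *dyz = i * dyp + dy;
    for (j = -32; j < 32; j++) {
      unsigned int e = sad16x16(y, yp, dyz + j, dyp);
      if (e < beste) {
        beste = e;
        *best_i = i;
        *best_j = j;
      }
    }
  }
  return beste;
}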
@@ -172,29 +172,29 @@ extern "C"
 } VP8_CONFIG;
-void vp8_initialize();
+void vp9_initialize();
-VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf);
-void vp8_remove_compressor(VP8_PTR *comp);
+VP8_PTR vp9_create_compressor(VP8_CONFIG *oxcf);
+void vp9_remove_compressor(VP8_PTR *comp);
 void vp8_init_config(VP8_PTR onyx, VP8_CONFIG *oxcf);
-void vp8_change_config(VP8_PTR onyx, VP8_CONFIG *oxcf);
+void vp9_change_config(VP8_PTR onyx, VP8_CONFIG *oxcf);
 // receive a frames worth of data caller can assume that a copy of this frame is made
 // and not just a copy of the pointer..
-int vp8_receive_raw_frame(VP8_PTR comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp);
-int vp8_get_compressed_data(VP8_PTR comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush);
-int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags);
-int vp8_use_as_reference(VP8_PTR comp, int ref_frame_flags);
-int vp8_update_reference(VP8_PTR comp, int ref_frame_flags);
-int vp8_get_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
-int vp8_set_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
-int vp8_update_entropy(VP8_PTR comp, int update);
-int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]);
-int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols);
-int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
-int vp8_get_quantizer(VP8_PTR c);
+int vp9_receive_raw_frame(VP8_PTR comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp);
+int vp9_get_compressed_data(VP8_PTR comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush);
+int vp9_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags);
+int vp9_use_as_reference(VP8_PTR comp, int ref_frame_flags);
+int vp9_update_reference(VP8_PTR comp, int ref_frame_flags);
+int vp9_get_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
+int vp9_set_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
+int vp9_update_entropy(VP8_PTR comp, int update);
+int vp9_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]);
+int vp9_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols);
+int vp9_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
+int vp9_get_quantizer(VP8_PTR c);
 #ifdef __cplusplus
 }
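Editor's note: the block above is the encoder's public interface after the rename. A minimal sketch of the call sequence these declarations imply (create, feed a raw frame, drain compressed data, destroy), assuming oxcf and raw are already populated and buf is large enough; the caller below is hypothetical and error checks are elided.

/* Sketch of the encoder API flow implied by the declarations above;
   illustrative only, not taken from the tree. */
void encode_one_frame(VP8_CONFIG *oxcf, YV12_BUFFER_CONFIG *raw,
                      unsigned char *buf, unsigned long *size) {
  VP8_PTR comp = vp9_create_compressor(oxcf);
  unsigned int flags = 0;
  int64_t pts = 0, pts_end = 0;

  /* Per the comment in the header, the encoder copies the frame data,
     not just the pointer. */
  vp9_receive_raw_frame(comp, flags, raw, 0 /* time_stamp */,
                        33000 /* end_time_stamp */);

  /* Drain the compressed payload for this frame into buf. */
  vp9_get_compressed_data(comp, &flags, size, buf, &pts, &pts_end,
                          0 /* flush */);

  vp9_remove_compressor(&comp);
}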
@@ -13,9 +13,9 @@
 #if HAVE_ARMV6
-void vp8_short_fdct8x4_armv6(short *input, short *output, int pitch) {
-vp8_short_fdct4x4_armv6(input, output, pitch);
-vp8_short_fdct4x4_armv6(input + 4, output + 16, pitch);
+void vp9_short_fdct8x4_armv6(short *input, short *output, int pitch) {
+vp9_short_fdct4x4_armv6(input, output, pitch);
+vp9_short_fdct4x4_armv6(input + 4, output + 16, pitch);
 }
 #endif /* HAVE_ARMV6 */
@@ -13,51 +13,51 @@
 #define DCT_ARM_H
 #if HAVE_ARMV6
-extern prototype_fdct(vp8_short_walsh4x4_armv6);
-extern prototype_fdct(vp8_short_fdct4x4_armv6);
-extern prototype_fdct(vp8_short_fdct8x4_armv6);
+extern prototype_fdct(vp9_short_walsh4x4_armv6);
+extern prototype_fdct(vp9_short_fdct4x4_armv6);
+extern prototype_fdct(vp9_short_fdct8x4_armv6);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_fdct_walsh_short4x4
-#define vp8_fdct_walsh_short4x4 vp8_short_walsh4x4_armv6
+#define vp8_fdct_walsh_short4x4 vp9_short_walsh4x4_armv6
 #undef vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_armv6
+#define vp8_fdct_short4x4 vp9_short_fdct4x4_armv6
 #undef vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_armv6
+#define vp8_fdct_short8x4 vp9_short_fdct8x4_armv6
 #undef vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_armv6
+#define vp8_fdct_fast4x4 vp9_short_fdct4x4_armv6
 #undef vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_armv6
+#define vp8_fdct_fast8x4 vp9_short_fdct8x4_armv6
 #endif
 #endif /* HAVE_ARMV6 */
 #if HAVE_ARMV7
-extern prototype_fdct(vp8_short_fdct4x4_neon);
-extern prototype_fdct(vp8_short_fdct8x4_neon);
+extern prototype_fdct(vp9_short_fdct4x4_neon);
+extern prototype_fdct(vp9_short_fdct8x4_neon);
 extern prototype_fdct(vp8_fast_fdct4x4_neon);
 extern prototype_fdct(vp8_fast_fdct8x4_neon);
-extern prototype_fdct(vp8_short_walsh4x4_neon);
+extern prototype_fdct(vp9_short_walsh4x4_neon);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_neon
+#define vp8_fdct_short4x4 vp9_short_fdct4x4_neon
 #undef vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_neon
+#define vp8_fdct_short8x4 vp9_short_fdct8x4_neon
 #undef vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_neon
+#define vp8_fdct_fast4x4 vp9_short_fdct4x4_neon
 #undef vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_neon
+#define vp8_fdct_fast8x4 vp9_short_fdct8x4_neon
 #undef vp8_fdct_walsh_short4x4
-#define vp8_fdct_walsh_short4x4 vp8_short_walsh4x4_neon
+#define vp8_fdct_walsh_short4x4 vp9_short_walsh4x4_neon
 #endif
 #endif
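Editor's note: the #undef/#define pairs above are libvpx's compile-time dispatch. When runtime CPU detection is compiled out, the generic vp8_fdct_* names are rebound directly to the best architecture-specific symbol, which is why this commit only changes the right-hand sides. A self-contained toy of the pattern, with invented names:

/* Hypothetical miniature of the #undef/#define rebinding used above. */
#include <stdio.h>

#define CONFIG_RUNTIME_CPU_DETECT 0

static void my_fdct_c(short *in, short *out, int pitch) {
  (void)in; (void)out; (void)pitch;
  puts("generic C transform");
}
static void my_fdct_neon(short *in, short *out, int pitch) {
  (void)in; (void)out; (void)pitch;
  puts("NEON transform");
}

/* The generic name starts out bound to the portable version... */
#define my_fdct my_fdct_c

/* ...and a port header rebinds it when runtime detection is disabled,
   so callers dispatch at compile time with no function-pointer cost. */
#if !CONFIG_RUNTIME_CPU_DETECT
#undef my_fdct
#define my_fdct my_fdct_neon
#endif

int main(void) {
  short in[16] = {0}, out[16];
  my_fdct(in, out, 4);  /* resolves to my_fdct_neon here */
  return 0;
}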
@@ -13,50 +13,50 @@
 #define ENCODEMB_ARM_H
 #if HAVE_ARMV6
-extern prototype_subb(vp8_subtract_b_armv6);
-extern prototype_submby(vp8_subtract_mby_armv6);
-extern prototype_submbuv(vp8_subtract_mbuv_armv6);
+extern prototype_subb(vp9_subtract_b_armv6);
+extern prototype_submby(vp9_subtract_mby_armv6);
+extern prototype_submbuv(vp9_subtract_mbuv_armv6);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_armv6
+#define vp8_encodemb_subb vp9_subtract_b_armv6
 #undef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_armv6
+#define vp8_encodemb_submby vp9_subtract_mby_armv6
 #undef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_armv6
+#define vp8_encodemb_submbuv vp9_subtract_mbuv_armv6
 #endif
 #endif /* HAVE_ARMV6 */
 #if HAVE_ARMV7
-// extern prototype_berr(vp8_block_error_c);
-// extern prototype_mberr(vp8_mbblock_error_c);
-// extern prototype_mbuverr(vp8_mbuverror_c);
+// extern prototype_berr(vp9_block_error_c);
+// extern prototype_mberr(vp9_mbblock_error_c);
+// extern prototype_mbuverr(vp9_mbuverror_c);
-extern prototype_subb(vp8_subtract_b_neon);
-extern prototype_submby(vp8_subtract_mby_neon);
-extern prototype_submbuv(vp8_subtract_mbuv_neon);
+extern prototype_subb(vp9_subtract_b_neon);
+extern prototype_submby(vp9_subtract_mby_neon);
+extern prototype_submbuv(vp9_subtract_mbuv_neon);
 // #undef vp8_encodemb_berr
-// #define vp8_encodemb_berr vp8_block_error_c
+// #define vp8_encodemb_berr vp9_block_error_c
 // #undef vp8_encodemb_mberr
-// #define vp8_encodemb_mberr vp8_mbblock_error_c
+// #define vp8_encodemb_mberr vp9_mbblock_error_c
 // #undef vp8_encodemb_mbuverr
-// #define vp8_encodemb_mbuverr vp8_mbuverror_c
+// #define vp8_encodemb_mbuverr vp9_mbuverror_c
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_neon
+#define vp8_encodemb_subb vp9_subtract_b_neon
 #undef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_neon
+#define vp8_encodemb_submby vp9_subtract_mby_neon
 #undef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_neon
+#define vp8_encodemb_submbuv vp9_subtract_mbuv_neon
 #endif
 #endif
@@ -17,7 +17,7 @@
 #if HAVE_ARMV6
-unsigned int vp8_sub_pixel_variance8x8_armv6
+unsigned int vp9_sub_pixel_variance8x8_armv6
 (
 const unsigned char *src_ptr,
 int src_pixels_per_line,
@@ -40,11 +40,11 @@ unsigned int vp8_sub_pixel_variance8x8_armv6
 vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
 8, 8, 8, VFilter);
-return vp8_variance8x8_armv6(second_pass, 8, dst_ptr,
+return vp9_variance8x8_armv6(second_pass, 8, dst_ptr,
 dst_pixels_per_line, sse);
 }
-unsigned int vp8_sub_pixel_variance16x16_armv6
+unsigned int vp9_sub_pixel_variance16x16_armv6
 (
 const unsigned char *src_ptr,
 int src_pixels_per_line,
@@ -60,13 +60,13 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
 unsigned int var;
 if (xoffset == HALFNDX && yoffset == 0) {
-var = vp8_variance_halfpixvar16x16_h_armv6(src_ptr, src_pixels_per_line,
+var = vp9_variance_halfpixvar16x16_h_armv6(src_ptr, src_pixels_per_line,
 dst_ptr, dst_pixels_per_line, sse);
 } else if (xoffset == 0 && yoffset == HALFNDX) {
-var = vp8_variance_halfpixvar16x16_v_armv6(src_ptr, src_pixels_per_line,
+var = vp9_variance_halfpixvar16x16_v_armv6(src_ptr, src_pixels_per_line,
 dst_ptr, dst_pixels_per_line, sse);
 } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
-var = vp8_variance_halfpixvar16x16_hv_armv6(src_ptr, src_pixels_per_line,
+var = vp9_variance_halfpixvar16x16_hv_armv6(src_ptr, src_pixels_per_line,
 dst_ptr, dst_pixels_per_line, sse);
 } else {
 HFilter = vp8_bilinear_filters[xoffset];
@@ -78,7 +78,7 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
 vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
 16, 16, 16, VFilter);
-var = vp8_variance16x16_armv6(second_pass, 16, dst_ptr,
+var = vp9_variance16x16_armv6(second_pass, 16, dst_ptr,
 dst_pixels_per_line, sse);
 }
 return var;
@@ -89,7 +89,7 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
 #if HAVE_ARMV7
-unsigned int vp8_sub_pixel_variance16x16_neon
+unsigned int vp9_sub_pixel_variance16x16_neon
 (
 const unsigned char *src_ptr,
 int src_pixels_per_line,
@@ -100,13 +100,13 @@ unsigned int vp8_sub_pixel_variance16x16_neon
 unsigned int *sse
 ) {
 if (xoffset == HALFNDX && yoffset == 0)
-return vp8_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+return vp9_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
 else if (xoffset == 0 && yoffset == HALFNDX)
-return vp8_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+return vp9_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
 else if (xoffset == HALFNDX && yoffset == HALFNDX)
-return vp8_variance_halfpixvar16x16_hv_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+return vp9_variance_halfpixvar16x16_hv_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
 else
-return vp8_sub_pixel_variance16x16_neon_func(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
+return vp9_sub_pixel_variance16x16_neon_func(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
 }
 #endif
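Editor's note: the sub-pixel variance routines above all share one shape: two bilinear filter passes (vp8_filter_block2d_bil_*), then a plain variance of the filtered block against the reference, with the three half-pel offsets short-circuited to dedicated kernels. As a reference point, a hedged sketch of that final variance step; variance8x8 here is a hypothetical scalar stand-in for vp9_variance8x8_armv6, not the assembly implementation.

/* Plain-C sketch of what the variance kernels return: SSE minus the
   squared-sum term. For an 8x8 block: result = sse - sum*sum/64. */
static unsigned int variance8x8(const unsigned char *a, int a_stride,
                                const unsigned char *b, int b_stride,
                                unsigned int *sse) {
  int sum = 0, r, c;
  unsigned int s = 0;
  for (r = 0; r < 8; r++)
    for (c = 0; c < 8; c++) {
      int d = a[r * a_stride + c] - b[r * b_stride + c];
      sum += d;
      s += (unsigned int)(d * d);
    }
  *sse = s;
  return s - (unsigned int)((sum * sum) >> 6);  /* >> 6 == / 64 pixels */
}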
@@ -14,44 +14,44 @@
 #if HAVE_ARMV6
-extern prototype_sad(vp8_sad16x16_armv6);
-extern prototype_variance(vp8_variance16x16_armv6);
-extern prototype_variance(vp8_variance8x8_armv6);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_armv6);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_armv6);
-extern prototype_variance(vp8_mse16x16_armv6);
+extern prototype_sad(vp9_sad16x16_armv6);
+extern prototype_variance(vp9_variance16x16_armv6);
+extern prototype_variance(vp9_variance8x8_armv6);
+extern prototype_subpixvariance(vp9_sub_pixel_variance16x16_armv6);
+extern prototype_subpixvariance(vp9_sub_pixel_variance8x8_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_h_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_v_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_hv_armv6);
+extern prototype_variance(vp9_mse16x16_armv6);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_armv6
+#define vp8_variance_sad16x16 vp9_sad16x16_armv6
 #undef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_armv6
+#define vp8_variance_subpixvar16x16 vp9_sub_pixel_variance16x16_armv6
 #undef vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_armv6
+#define vp8_variance_subpixvar8x8 vp9_sub_pixel_variance8x8_armv6
 #undef vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_armv6
+#define vp8_variance_var16x16 vp9_variance16x16_armv6
 #undef vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_armv6
+#define vp8_variance_mse16x16 vp9_mse16x16_armv6
 #undef vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_armv6
+#define vp8_variance_var8x8 vp9_variance8x8_armv6
-#undef vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_armv6
+#undef vp9_variance_halfpixvar16x16_h
+#define vp9_variance_halfpixvar16x16_h vp9_variance_halfpixvar16x16_h_armv6
-#undef vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_armv6
+#undef vp9_variance_halfpixvar16x16_v
+#define vp9_variance_halfpixvar16x16_v vp9_variance_halfpixvar16x16_v_armv6
-#undef vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_armv6
+#undef vp9_variance_halfpixvar16x16_hv
+#define vp9_variance_halfpixvar16x16_hv vp9_variance_halfpixvar16x16_hv_armv6
 #endif /* !CONFIG_RUNTIME_CPU_DETECT */
@@ -59,91 +59,91 @@ extern prototype_variance(vp8_mse16x16_armv6);
 #if HAVE_ARMV7
-extern prototype_sad(vp8_sad4x4_neon);
-extern prototype_sad(vp8_sad8x8_neon);
-extern prototype_sad(vp8_sad8x16_neon);
-extern prototype_sad(vp8_sad16x8_neon);
-extern prototype_sad(vp8_sad16x16_neon);
-// extern prototype_variance(vp8_variance4x4_c);
-extern prototype_variance(vp8_variance8x8_neon);
-extern prototype_variance(vp8_variance8x16_neon);
-extern prototype_variance(vp8_variance16x8_neon);
-extern prototype_variance(vp8_variance16x16_neon);
-// extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_c);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_neon);
-// extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_c);
-// extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_c);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon_func);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_neon);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_neon);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_neon);
-// extern prototype_getmbss(vp8_get_mb_ss_c);
-extern prototype_variance(vp8_mse16x16_neon);
+extern prototype_sad(vp9_sad4x4_neon);
+extern prototype_sad(vp9_sad8x8_neon);
+extern prototype_sad(vp9_sad8x16_neon);
+extern prototype_sad(vp9_sad16x8_neon);
+extern prototype_sad(vp9_sad16x16_neon);
+// extern prototype_variance(vp9_variance4x4_c);
+extern prototype_variance(vp9_variance8x8_neon);
+extern prototype_variance(vp9_variance8x16_neon);
+extern prototype_variance(vp9_variance16x8_neon);
+extern prototype_variance(vp9_variance16x16_neon);
+// extern prototype_subpixvariance(vp9_sub_pixel_variance4x4_c);
+extern prototype_subpixvariance(vp9_sub_pixel_variance8x8_neon);
+// extern prototype_subpixvariance(vp9_sub_pixel_variance8x16_c);
+// extern prototype_subpixvariance(vp9_sub_pixel_variance16x8_c);
+extern prototype_subpixvariance(vp9_sub_pixel_variance16x16_neon);
+extern prototype_subpixvariance(vp9_sub_pixel_variance16x16_neon_func);
+extern prototype_variance(vp9_variance_halfpixvar16x16_h_neon);
+extern prototype_variance(vp9_variance_halfpixvar16x16_v_neon);
+extern prototype_variance(vp9_variance_halfpixvar16x16_hv_neon);
+// extern prototype_getmbss(vp9_get_mb_ss_c);
+extern prototype_variance(vp9_mse16x16_neon);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_neon
+#define vp8_variance_sad4x4 vp9_sad4x4_neon
 #undef vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_neon
+#define vp8_variance_sad8x8 vp9_sad8x8_neon
 #undef vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_neon
+#define vp8_variance_sad8x16 vp9_sad8x16_neon
 #undef vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_neon
+#define vp8_variance_sad16x8 vp9_sad16x8_neon
 #undef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_neon
+#define vp8_variance_sad16x16 vp9_sad16x16_neon
 // #undef vp8_variance_var4x4
-// #define vp8_variance_var4x4 vp8_variance4x4_c
+// #define vp8_variance_var4x4 vp9_variance4x4_c
 #undef vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_neon
+#define vp8_variance_var8x8 vp9_variance8x8_neon
 #undef vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_neon
+#define vp8_variance_var8x16 vp9_variance8x16_neon
 #undef vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_neon
+#define vp8_variance_var16x8 vp9_variance16x8_neon
 #undef vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_neon
+#define vp8_variance_var16x16 vp9_variance16x16_neon
 // #undef vp8_variance_subpixvar4x4
-// #define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
+// #define vp8_variance_subpixvar4x4 vp9_sub_pixel_variance4x4_c
 #undef vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_neon
+#define vp8_variance_subpixvar8x8 vp9_sub_pixel_variance8x8_neon
 // #undef vp8_variance_subpixvar8x16
-// #define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
+// #define vp8_variance_subpixvar8x16 vp9_sub_pixel_variance8x16_c
 // #undef vp8_variance_subpixvar16x8
-// #define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
+// #define vp8_variance_subpixvar16x8 vp9_sub_pixel_variance16x8_c
 #undef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_neon
+#define vp8_variance_subpixvar16x16 vp9_sub_pixel_variance16x16_neon
-#undef vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_neon
+#undef vp9_variance_halfpixvar16x16_h
+#define vp9_variance_halfpixvar16x16_h vp9_variance_halfpixvar16x16_h_neon
-#undef vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_neon
+#undef vp9_variance_halfpixvar16x16_v
+#define vp9_variance_halfpixvar16x16_v vp9_variance_halfpixvar16x16_v_neon
-#undef vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_neon
+#undef vp9_variance_halfpixvar16x16_hv
+#define vp9_variance_halfpixvar16x16_hv vp9_variance_halfpixvar16x16_hv_neon
 // #undef vp8_variance_getmbss
-// #define vp8_variance_getmbss vp8_get_mb_ss_c
+// #define vp8_variance_getmbss vp9_get_mb_ss_c
 #undef vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_neon
+#define vp8_variance_mse16x16 vp9_mse16x16_neon
 #endif
@@ -83,7 +83,7 @@ static int update_bits[255];
 static void compute_update_table() {
 int i;
 for (i = 0; i < 255; i++)
-update_bits[i] = vp8_count_term_subexp(i, SUBEXP_PARAM, 255);
+update_bits[i] = vp9_count_term_subexp(i, SUBEXP_PARAM, 255);
 }
 static int split_index(int i, int n, int modulus) {
@@ -109,7 +109,7 @@ static int remap_prob(int v, int m) {
 static void write_prob_diff_update(vp8_writer *const bc,
 vp8_prob newp, vp8_prob oldp) {
 int delp = remap_prob(newp, oldp);
-vp8_encode_term_subexp(bc, delp, SUBEXP_PARAM, 255);
+vp9_encode_term_subexp(bc, delp, SUBEXP_PARAM, 255);
 }
 static int prob_diff_update_cost(vp8_prob newp, vp8_prob oldp) {
@@ -618,12 +618,12 @@ static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
 e.row = mv->row - ref->as_mv.row;
 e.col = mv->col - ref->as_mv.col;
-vp8_encode_nmv(bc, &e, &ref->as_mv, nmvc);
-vp8_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
+vp9_encode_nmv(bc, &e, &ref->as_mv, nmvc);
+vp9_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
 }
 #if CONFIG_NEW_MVREF
-static int vp8_cost_mv_ref_id(vp8_prob * ref_id_probs, int mv_ref_id) {
+static int vp9_cost_mv_ref_id(vp8_prob * ref_id_probs, int mv_ref_id) {
 int cost;
 // Encode the index for the MV reference.
@@ -698,8 +698,8 @@ static unsigned int pick_best_mv_ref(MACROBLOCK *x,
 MACROBLOCKD *xd = &x->e_mbd;
 int max_mv = MV_MAX;
-cost = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
-vp8_mv_bit_cost(&target_mv,
+cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
+vp9_mv_bit_cost(&target_mv,
 &mv_ref_list[0],
 XMVCOST, 96,
 xd->allow_high_precision_mv);
@@ -722,8 +722,8 @@ static unsigned int pick_best_mv_ref(MACROBLOCK *x,
 continue;
 }
-cost2 = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
-vp8_mv_bit_cost(&target_mv,
+cost2 = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
+vp9_mv_bit_cost(&target_mv,
 &mv_ref_list[i],
 XMVCOST, 96,
 xd->allow_high_precision_mv);
@@ -1820,13 +1820,13 @@ static void decide_kf_ymode_entropy(VP8_COMP *cpi) {
 int i, j;
 for (i = 0; i < 8; i++) {
-vp8_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
+vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
 cost = 0;
 for (j = 0; j < VP8_YMODES; j++) {
 cost += mode_cost[j] * cpi->ymode_count[j];
 }
 #if CONFIG_SUPERBLOCKS
-vp8_cost_tokens(mode_cost, cpi->common.sb_kf_ymode_prob[i],
+vp9_cost_tokens(mode_cost, cpi->common.sb_kf_ymode_prob[i],
 vp8_sb_ymode_tree);
 for (j = 0; j < VP8_I32X32_MODES; j++) {
 cost += mode_cost[j] * cpi->sb_ymode_count[j];
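Editor's note: decide_kf_ymode_entropy() above is rate estimation: vp9_cost_tokens() turns a probability table into per-symbol bit costs, and the loops accumulate cost x observed count to find the table index with the lowest expected bits. A toy version of that selection follows; the array shapes and names are invented for illustration.

/* Toy of the expected-bits minimization above.
   mode_cost[i][j] = bit cost of mode j under table i (as produced by
   vp9_cost_tokens); count[j] = how often mode j occurred this frame. */
static int pick_best_table(const int mode_cost[8][16], const int count[16],
                           int num_modes) {
  int i, j, best = 0;
  long long best_cost = -1;
  for (i = 0; i < 8; i++) {
    long long cost = 0;
    for (j = 0; j < num_modes; j++)
      cost += (long long)mode_cost[i][j] * count[j];
    if (best_cost < 0 || cost < best_cost) {
      best_cost = cost;
      best = i;  /* table i minimizes total expected bits */
    }
  }
  return best;
}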
@@ -1860,7 +1860,7 @@ static void segment_reference_frames(VP8_COMP *cpi) {
 }
 }
-void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size) {
+void vp9_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size) {
 int i, j;
 VP8_HEADER oh;
 VP8_COMMON *const pc = &cpi->common;
@@ -1883,7 +1883,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
 compute_update_table();
-// vp8_kf_default_bmode_probs() is called in vp8_setup_key_frame() once for each
+// vp8_kf_default_bmode_probs() is called in vp9_setup_key_frame() once for each
 // K frame before encode frame. pc->kf_bmode_prob doesn't get changed anywhere
 // else. No need to call it again here. --yw
 // vp8_kf_default_bmode_probs( pc->kf_bmode_prob);
@@ -1908,14 +1908,14 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
 extra_bytes_packed = 7;
 cx_data += extra_bytes_packed;
-vp8_start_encode(&header_bc, cx_data);
+vp9_start_encode(&header_bc, cx_data);
 // signal clr type
 vp8_write_bit(&header_bc, pc->clr_type);
 vp8_write_bit(&header_bc, pc->clamp_type);
 } else {
-vp8_start_encode(&header_bc, cx_data);
+vp9_start_encode(&header_bc, cx_data);
 }
 // Signal whether or not Segmentation is enabled
@@ -2272,10 +2272,10 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
 vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
 #endif
-vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
+vp9_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
 }
-vp8_stop_encode(&header_bc);
+vp9_stop_encode(&header_bc);
 oh.first_partition_length_in_bytes = header_bc.pos;
@@ -2292,7 +2292,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
 }
 *size = VP8_HEADER_SIZE + extra_bytes_packed + header_bc.pos;
-vp8_start_encode(&residual_bc, cx_data + header_bc.pos);
+vp9_start_encode(&residual_bc, cx_data + header_bc.pos);
 if (pc->frame_type == KEY_FRAME) {
 decide_kf_ymode_entropy(cpi);
@@ -2303,7 +2303,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
 }
-vp8_stop_encode(&residual_bc);
+vp9_stop_encode(&residual_bc);