libvpx commit f88558fb
Authored Oct 30, 2012 by Ronald S. Bultje
Parent: fe178850

Change encoder vp8_ and vp8cx_ public symbol prefixes to vp9_.

Change-Id: Ie2e3652591b010ded10c216501ce24fd95d0aec5
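For code built against the encoder, the change is purely mechanical: every public vp8_/vp8cx_ entry point keeps its signature and gains a vp9_ prefix, while type names (VP8_CONFIG, VP8_PTR, ...) are untouched in this commit. A minimal caller-side sketch, assuming a translation unit that includes vp8/common/onyx.h; the wrapper function itself is invented for illustration:

#include "vp8/common/onyx.h"

/* Sketch only: vp9_initialize() and vp9_create_compressor() are the renamed
   declarations from this commit's onyx.h; open_encoder() is hypothetical. */
static VP8_PTR open_encoder(VP8_CONFIG *oxcf) {
  vp9_initialize();                    /* previously vp8_initialize() */
  return vp9_create_compressor(oxcf);  /* previously vp8_create_compressor() */
}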
Changes: 86 files in this commit; showing 20 changed files with 661 additions and 661 deletions (+661 / -661).
vp8/common/maskingmv.c                   +11   -11
vp8/common/onyx.h                        +17   -17
vp8/common/rtcd_defs.sh                  +159  -159
vp8/encoder/arm/arm_csystemdependent.c   +75   -75
vp8/encoder/arm/dct_arm.c                +3    -3
vp8/encoder/arm/dct_arm.h                +16   -16
vp8/encoder/arm/encodemb_arm.h           +18   -18
vp8/encoder/arm/variance_arm.c           +12   -12
vp8/encoder/arm/variance_arm.h           +68   -68
vp8/encoder/bitstream.c                  +19   -19
vp8/encoder/block.h                      +4    -4
vp8/encoder/boolhuff.c                   +14   -14
vp8/encoder/boolhuff.h                   +7    -7
vp8/encoder/dct.c                        +17   -17
vp8/encoder/encodeframe.c                +81   -81
vp8/encoder/encodeintra.c                +47   -47
vp8/encoder/encodeintra.h                +8    -8
vp8/encoder/encodemb.c                   +58   -58
vp8/encoder/encodemb.h                   +18   -18
vp8/encoder/encodemv.c                   +9    -9
vp8/common/maskingmv.c

@@ -11,14 +11,14 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-extern unsigned int vp8_sad16x16_sse3(
+extern unsigned int vp9_sad16x16_sse3(
   unsigned char *src_ptr,
   int src_stride,
   unsigned char *ref_ptr,
   int ref_stride,
   int max_err);
-extern void vp8_sad16x16x3_sse3(
+extern void vp9_sad16x16x3_sse3(
   unsigned char *src_ptr,
   int src_stride,
   unsigned char *ref_ptr,
@@ -43,14 +43,14 @@ extern void vp8_makemask_sse3(
   int ut,
   int vt);
-unsigned int vp8_sad16x16_unmasked_wmt(
+unsigned int vp9_sad16x16_unmasked_wmt(
   unsigned char *src_ptr,
   int src_stride,
   unsigned char *ref_ptr,
   int ref_stride,
   unsigned char *mask);
-unsigned int vp8_sad16x16_masked_wmt(
+unsigned int vp9_sad16x16_masked_wmt(
   unsigned char *src_ptr,
   int src_stride,
   unsigned char *ref_ptr,
@@ -503,7 +503,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
       vp8_growmaskmb_sse3(dym, dym2);
-      e = vp8_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
+      e = vp9_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
       if (e < beste) {
         bui = i;
@@ -529,7 +529,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
   for (i = -32; i < 32; i++) {
     unsigned char *dyz = i * dyp + dy;
     for (j = -32; j < 32; j++) {
-      e = vp8_sad16x16_masked_wmt(y, yp, dyz + j, dyp, dym2);
+      e = vp9_sad16x16_masked_wmt(y, yp, dyz + j, dyp, dym2);
       if (e < beste) {
         bmi = i;
         bmj = j;
@@ -581,7 +581,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
   vp8_growmaskmb_sse3(dym, dym2);
-  obeste = vp8_sad16x16_masked_wmt(y, yp, dy + bmi * dyp + bmj, dyp, dym2);
+  obeste = vp9_sad16x16_masked_wmt(y, yp, dy + bmi * dyp + bmj, dyp, dym2);
   beste = 0xffffffff;
@@ -589,7 +589,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
   for (i = -32; i < 32; i++) {
     unsigned char *dyz = i * dyp + dy;
     for (j = -32; j < 32; j++) {
-      e = vp8_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
+      e = vp9_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
       if (e < beste) {
         bui = i;
@@ -698,8 +698,8 @@ int mainz(int argc, char *argv[]) {
     vp8_growmaskmb_sse3(ym, ym3);
-    a = vp8_sad16x16_masked_wmt(str, 16, sts, 16, ym3);
-    b = vp8_sad16x16_unmasked_wmt(str, 16, sts, 16, ym3);
+    a = vp9_sad16x16_masked_wmt(str, 16, sts, 16, ym3);
+    b = vp9_sad16x16_unmasked_wmt(str, 16, sts, 16, ym3);
     vp8_masked_predictor_wmt(str, sts, 16, ym, 16, ym3);
@@ -738,7 +738,7 @@ int mainz(int argc, char *argv[]) {
     int bmi, bmj, bui, buj, bwm;
     unsigned char ym[256];
-    if (vp8_sad16x16_sse3(ys + c, y_stride, yd + c, y_stride, 0xffff) == 0)
+    if (vp9_sad16x16_sse3(ys + c, y_stride, yd + c, y_stride, 0xffff) == 0)
       bmi = bmj = bui = buj = bwm = 0;
     else {
       COLOR_SEG_ELEMENT cs[5];
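The functions renamed above are SAD (sum of absolute differences) kernels used by the masked motion search. For reference, a plain-C sketch of what a 16x16 SAD computes; the SSE3/WMT versions in this file are SIMD equivalents, and the early exit on max_err is an assumption based on the parameter name:

#include <stdlib.h>

/* Plain-C sketch of a 16x16 sum of absolute differences. Illustrative only;
   the early exit on max_err is an assumption from the parameter name. */
static unsigned int sad16x16_sketch(const unsigned char *src_ptr, int src_stride,
                                    const unsigned char *ref_ptr, int ref_stride,
                                    unsigned int max_err) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < 16; r++) {
    for (c = 0; c < 16; c++)
      sad += (unsigned int)abs(src_ptr[c] - ref_ptr[c]);
    if (sad > max_err)   /* give up once the caller's error budget is exceeded */
      break;
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
  return sad;
}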
vp8/common/onyx.h

@@ -172,29 +172,29 @@ extern "C"
   } VP8_CONFIG;

-  void vp8_initialize();
+  void vp9_initialize();

-  VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf);
-  void vp8_remove_compressor(VP8_PTR *comp);
+  VP8_PTR vp9_create_compressor(VP8_CONFIG *oxcf);
+  void vp9_remove_compressor(VP8_PTR *comp);

   void vp8_init_config(VP8_PTR onyx, VP8_CONFIG *oxcf);
-  void vp8_change_config(VP8_PTR onyx, VP8_CONFIG *oxcf);
+  void vp9_change_config(VP8_PTR onyx, VP8_CONFIG *oxcf);

   // receive a frames worth of data caller can assume that a copy of this frame is made
   // and not just a copy of the pointer..
-  int vp8_receive_raw_frame(VP8_PTR comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp);
-  int vp8_get_compressed_data(VP8_PTR comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush);
-  int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags);
-  int vp8_use_as_reference(VP8_PTR comp, int ref_frame_flags);
-  int vp8_update_reference(VP8_PTR comp, int ref_frame_flags);
-  int vp8_get_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
-  int vp8_set_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
-  int vp8_update_entropy(VP8_PTR comp, int update);
-  int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]);
-  int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols);
-  int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
-  int vp8_get_quantizer(VP8_PTR c);
+  int vp9_receive_raw_frame(VP8_PTR comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp);
+  int vp9_get_compressed_data(VP8_PTR comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush);
+  int vp9_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags);
+  int vp9_use_as_reference(VP8_PTR comp, int ref_frame_flags);
+  int vp9_update_reference(VP8_PTR comp, int ref_frame_flags);
+  int vp9_get_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
+  int vp9_set_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
+  int vp9_update_entropy(VP8_PTR comp, int update);
+  int vp9_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]);
+  int vp9_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols);
+  int vp9_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
+  int vp9_get_quantizer(VP8_PTR c);

 #ifdef __cplusplus
 }
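Taken together, the renamed declarations above form the encoder's top-level lifecycle. A hedged usage sketch against these signatures; the call order, timestamp units, and return-value handling are assumptions drawn from the names, not code from this commit:

#include "vp8/common/onyx.h"

/* Hypothetical driver for the renamed entry points declared in onyx.h. */
static void encode_frame_sketch(VP8_CONFIG *oxcf, YV12_BUFFER_CONFIG *raw,
                                unsigned char *dest) {
  VP8_PTR comp;
  unsigned int frame_flags = 0;
  unsigned long size = 0;
  int64_t pts = 0, pts_end = 0;

  vp9_initialize();
  comp = vp9_create_compressor(oxcf);
  vp9_receive_raw_frame(comp, 0, raw, 0, 1);        /* hand a frame to the encoder */
  vp9_get_compressed_data(comp, &frame_flags, &size, dest,
                          &pts, &pts_end, 0);       /* retrieve the bitstream */
  /* `size` bytes of compressed data should now be in dest */
  vp9_remove_compressor(&comp);
}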
vp8/common/rtcd_defs.sh

[diff collapsed in the page capture; +159 / -159 per the file list above]
vp8/encoder/arm/arm_csystemdependent.c

@@ -14,8 +14,8 @@
 #include "vp8/encoder/variance.h"
 #include "vp8/encoder/onyx_int.h"

-extern void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
-extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
+extern void (*vp9_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
+extern void vp9_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
 extern void vpxyv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);

 void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
@@ -29,42 +29,42 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
 #if HAVE_ARMV6
   if (flags & HAS_MEDIA) {
-    cpi->rtcd.variance.sad16x16              = vp8_sad16x16_armv6;
-    /*cpi->rtcd.variance.sad16x8              = vp8_sad16x8_c;
-    cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c;
-    cpi->rtcd.variance.sad8x8                = vp8_sad8x8_c;
-    cpi->rtcd.variance.sad4x4                = vp8_sad4x4_c;*/
-    /*cpi->rtcd.variance.var4x4               = vp8_variance4x4_c;*/
-    cpi->rtcd.variance.var8x8                = vp8_variance8x8_armv6;
-    /*cpi->rtcd.variance.var8x16              = vp8_variance8x16_c;
-    cpi->rtcd.variance.var16x8               = vp8_variance16x8_c;*/
-    cpi->rtcd.variance.var16x16              = vp8_variance16x16_armv6;
-    /*cpi->rtcd.variance.subpixvar4x4         = vp8_sub_pixel_variance4x4_c;*/
-    cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_armv6;
-    /*cpi->rtcd.variance.subpixvar8x16        = vp8_sub_pixel_variance8x16_c;
-    cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;*/
-    cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_armv6;
-    cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_armv6;
-    cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_armv6;
-    cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_armv6;
-    cpi->rtcd.variance.mse16x16              = vp8_mse16x16_armv6;
-    /*cpi->rtcd.variance.getmbss              = vp8_get_mb_ss_c;*/
-    cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_armv6;
-    cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_armv6;
-    cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_armv6;
-    cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_armv6;
-    cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_armv6;
-    /*cpi->rtcd.encodemb.berr                 = vp8_block_error_c;
-    cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c;
-    cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c;*/
-    cpi->rtcd.encodemb.subb                  = vp8_subtract_b_armv6;
-    cpi->rtcd.encodemb.submby                = vp8_subtract_mby_armv6;
-    cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_armv6;
+    cpi->rtcd.variance.sad16x16              = vp9_sad16x16_armv6;
+    /*cpi->rtcd.variance.sad16x8              = vp9_sad16x8_c;
+    cpi->rtcd.variance.sad8x16               = vp9_sad8x16_c;
+    cpi->rtcd.variance.sad8x8                = vp9_sad8x8_c;
+    cpi->rtcd.variance.sad4x4                = vp9_sad4x4_c;*/
+    /*cpi->rtcd.variance.var4x4               = vp9_variance4x4_c;*/
+    cpi->rtcd.variance.var8x8                = vp9_variance8x8_armv6;
+    /*cpi->rtcd.variance.var8x16              = vp9_variance8x16_c;
+    cpi->rtcd.variance.var16x8               = vp9_variance16x8_c;*/
+    cpi->rtcd.variance.var16x16              = vp9_variance16x16_armv6;
+    /*cpi->rtcd.variance.subpixvar4x4         = vp9_sub_pixel_variance4x4_c;*/
+    cpi->rtcd.variance.subpixvar8x8          = vp9_sub_pixel_variance8x8_armv6;
+    /*cpi->rtcd.variance.subpixvar8x16        = vp9_sub_pixel_variance8x16_c;
+    cpi->rtcd.variance.subpixvar16x8         = vp9_sub_pixel_variance16x8_c;*/
+    cpi->rtcd.variance.subpixvar16x16        = vp9_sub_pixel_variance16x16_armv6;
+    cpi->rtcd.variance.halfpixvar16x16_h     = vp9_variance_halfpixvar16x16_h_armv6;
+    cpi->rtcd.variance.halfpixvar16x16_v     = vp9_variance_halfpixvar16x16_v_armv6;
+    cpi->rtcd.variance.halfpixvar16x16_hv    = vp9_variance_halfpixvar16x16_hv_armv6;
+    cpi->rtcd.variance.mse16x16              = vp9_mse16x16_armv6;
+    /*cpi->rtcd.variance.getmbss              = vp9_get_mb_ss_c;*/
+    cpi->rtcd.fdct.short4x4                  = vp9_short_fdct4x4_armv6;
+    cpi->rtcd.fdct.short8x4                  = vp9_short_fdct8x4_armv6;
+    cpi->rtcd.fdct.fast4x4                   = vp9_short_fdct4x4_armv6;
+    cpi->rtcd.fdct.fast8x4                   = vp9_short_fdct8x4_armv6;
+    cpi->rtcd.fdct.walsh_short4x4            = vp9_short_walsh4x4_armv6;
+    /*cpi->rtcd.encodemb.berr                 = vp9_block_error_c;
+    cpi->rtcd.encodemb.mberr                 = vp9_mbblock_error_c;
+    cpi->rtcd.encodemb.mbuverr               = vp9_mbuverror_c;*/
+    cpi->rtcd.encodemb.subb                  = vp9_subtract_b_armv6;
+    cpi->rtcd.encodemb.submby                = vp9_subtract_mby_armv6;
+    cpi->rtcd.encodemb.submbuv               = vp9_subtract_mbuv_armv6;

     /*cpi->rtcd.quantize.quantb               = vp8_regular_quantize_b;*/
     cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_armv6;
@@ -73,42 +73,42 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
 #if HAVE_ARMV7
   if (flags & HAS_NEON) {
-    cpi->rtcd.variance.sad16x16              = vp8_sad16x16_neon;
-    cpi->rtcd.variance.sad16x8               = vp8_sad16x8_neon;
-    cpi->rtcd.variance.sad8x16               = vp8_sad8x16_neon;
-    cpi->rtcd.variance.sad8x8                = vp8_sad8x8_neon;
-    cpi->rtcd.variance.sad4x4                = vp8_sad4x4_neon;
-    /*cpi->rtcd.variance.var4x4               = vp8_variance4x4_c;*/
-    cpi->rtcd.variance.var8x8                = vp8_variance8x8_neon;
-    cpi->rtcd.variance.var8x16               = vp8_variance8x16_neon;
-    cpi->rtcd.variance.var16x8               = vp8_variance16x8_neon;
-    cpi->rtcd.variance.var16x16              = vp8_variance16x16_neon;
-    /*cpi->rtcd.variance.subpixvar4x4         = vp8_sub_pixel_variance4x4_c;*/
-    cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_neon;
-    /*cpi->rtcd.variance.subpixvar8x16        = vp8_sub_pixel_variance8x16_c;
-    cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;*/
-    cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_neon;
-    cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_neon;
-    cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_neon;
-    cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_neon;
-    cpi->rtcd.variance.mse16x16              = vp8_mse16x16_neon;
-    /*cpi->rtcd.variance.getmbss              = vp8_get_mb_ss_c;*/
-    cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_neon;
-    cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_neon;
-    cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_neon;
-    cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_neon;
-    cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_neon;
-    /*cpi->rtcd.encodemb.berr                 = vp8_block_error_c;
-    cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c;
-    cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c;*/
-    cpi->rtcd.encodemb.subb                  = vp8_subtract_b_neon;
-    cpi->rtcd.encodemb.submby                = vp8_subtract_mby_neon;
-    cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_neon;
+    cpi->rtcd.variance.sad16x16              = vp9_sad16x16_neon;
+    cpi->rtcd.variance.sad16x8               = vp9_sad16x8_neon;
+    cpi->rtcd.variance.sad8x16               = vp9_sad8x16_neon;
+    cpi->rtcd.variance.sad8x8                = vp9_sad8x8_neon;
+    cpi->rtcd.variance.sad4x4                = vp9_sad4x4_neon;
+    /*cpi->rtcd.variance.var4x4               = vp9_variance4x4_c;*/
+    cpi->rtcd.variance.var8x8                = vp9_variance8x8_neon;
+    cpi->rtcd.variance.var8x16               = vp9_variance8x16_neon;
+    cpi->rtcd.variance.var16x8               = vp9_variance16x8_neon;
+    cpi->rtcd.variance.var16x16              = vp9_variance16x16_neon;
+    /*cpi->rtcd.variance.subpixvar4x4         = vp9_sub_pixel_variance4x4_c;*/
+    cpi->rtcd.variance.subpixvar8x8          = vp9_sub_pixel_variance8x8_neon;
+    /*cpi->rtcd.variance.subpixvar8x16        = vp9_sub_pixel_variance8x16_c;
+    cpi->rtcd.variance.subpixvar16x8         = vp9_sub_pixel_variance16x8_c;*/
+    cpi->rtcd.variance.subpixvar16x16        = vp9_sub_pixel_variance16x16_neon;
+    cpi->rtcd.variance.halfpixvar16x16_h     = vp9_variance_halfpixvar16x16_h_neon;
+    cpi->rtcd.variance.halfpixvar16x16_v     = vp9_variance_halfpixvar16x16_v_neon;
+    cpi->rtcd.variance.halfpixvar16x16_hv    = vp9_variance_halfpixvar16x16_hv_neon;
+    cpi->rtcd.variance.mse16x16              = vp9_mse16x16_neon;
+    /*cpi->rtcd.variance.getmbss              = vp9_get_mb_ss_c;*/
+    cpi->rtcd.fdct.short4x4                  = vp9_short_fdct4x4_neon;
+    cpi->rtcd.fdct.short8x4                  = vp9_short_fdct8x4_neon;
+    cpi->rtcd.fdct.fast4x4                   = vp9_short_fdct4x4_neon;
+    cpi->rtcd.fdct.fast8x4                   = vp9_short_fdct8x4_neon;
+    cpi->rtcd.fdct.walsh_short4x4            = vp9_short_walsh4x4_neon;
+    /*cpi->rtcd.encodemb.berr                 = vp9_block_error_c;
+    cpi->rtcd.encodemb.mberr                 = vp9_mbblock_error_c;
+    cpi->rtcd.encodemb.mbuverr               = vp9_mbuverror_c;*/
+    cpi->rtcd.encodemb.subb                  = vp9_subtract_b_neon;
+    cpi->rtcd.encodemb.submby                = vp9_subtract_mby_neon;
+    cpi->rtcd.encodemb.submbuv               = vp9_subtract_mbuv_neon;

     /*cpi->rtcd.quantize.quantb               = vp8_regular_quantize_b;
     cpi->rtcd.quantize.quantb_pair           = vp8_regular_quantize_b_pair;*/
@@ -122,7 +122,7 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
   if (flags & HAS_NEON)
 #endif
   {
-    vp8_yv12_copy_partial_frame_ptr = vpxyv12_copy_partial_frame_neon;
+    vp9_yv12_copy_partial_frame_ptr = vpxyv12_copy_partial_frame_neon;
   }
 #endif
 #endif
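The long assignment blocks above populate libvpx's run-time CPU detection (RTCD) tables: generic operation slots are filled with the best implementation the detected CPU supports. A condensed sketch of the pattern; the table type and parameter passing here are invented for illustration, while the HAS_MEDIA/HAS_NEON flags and HAVE_ARMV6/HAVE_ARMV7 guards are from the file above:

/* Condensed sketch of the RTCD pattern in vp8_arch_arm_encoder_init():
   pick a function pointer per operation based on CPU feature flags.
   The struct name and the idea of passing candidates in are illustrative. */
typedef unsigned int (*sad16x16_fn)(const unsigned char *src_ptr, int src_stride,
                                    const unsigned char *ref_ptr, int ref_stride,
                                    unsigned int max_sad);

struct variance_table { sad16x16_fn sad16x16; };  /* invented stand-in */

static void pick_sad16x16(struct variance_table *t, int flags,
                          sad16x16_fn c_version,
                          sad16x16_fn armv6_version,
                          sad16x16_fn neon_version) {
  t->sad16x16 = c_version;                         /* portable default */
#if HAVE_ARMV6
  if (flags & HAS_MEDIA) t->sad16x16 = armv6_version;
#endif
#if HAVE_ARMV7
  if (flags & HAS_NEON)  t->sad16x16 = neon_version;
#endif
}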
vp8/encoder/arm/dct_arm.c

@@ -13,9 +13,9 @@
 #if HAVE_ARMV6

-void vp8_short_fdct8x4_armv6(short *input, short *output, int pitch) {
-  vp8_short_fdct4x4_armv6(input,     output,      pitch);
-  vp8_short_fdct4x4_armv6(input + 4, output + 16, pitch);
+void vp9_short_fdct8x4_armv6(short *input, short *output, int pitch) {
+  vp9_short_fdct4x4_armv6(input,     output,      pitch);
+  vp9_short_fdct4x4_armv6(input + 4, output + 16, pitch);
 }

 #endif /* HAVE_ARMV6 */
vp8/encoder/arm/dct_arm.h

@@ -13,51 +13,51 @@
 #define DCT_ARM_H

 #if HAVE_ARMV6
-extern prototype_fdct(vp8_short_walsh4x4_armv6);
-extern prototype_fdct(vp8_short_fdct4x4_armv6);
-extern prototype_fdct(vp8_short_fdct8x4_armv6);
+extern prototype_fdct(vp9_short_walsh4x4_armv6);
+extern prototype_fdct(vp9_short_fdct4x4_armv6);
+extern prototype_fdct(vp9_short_fdct8x4_armv6);

 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef  vp8_fdct_walsh_short4x4
-#define vp8_fdct_walsh_short4x4 vp8_short_walsh4x4_armv6
+#define vp8_fdct_walsh_short4x4 vp9_short_walsh4x4_armv6

 #undef  vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_armv6
+#define vp8_fdct_short4x4 vp9_short_fdct4x4_armv6

 #undef  vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_armv6
+#define vp8_fdct_short8x4 vp9_short_fdct8x4_armv6

 #undef  vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_armv6
+#define vp8_fdct_fast4x4 vp9_short_fdct4x4_armv6

 #undef  vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_armv6
+#define vp8_fdct_fast8x4 vp9_short_fdct8x4_armv6
 #endif

 #endif /* HAVE_ARMV6 */

 #if HAVE_ARMV7
-extern prototype_fdct(vp8_short_fdct4x4_neon);
-extern prototype_fdct(vp8_short_fdct8x4_neon);
+extern prototype_fdct(vp9_short_fdct4x4_neon);
+extern prototype_fdct(vp9_short_fdct8x4_neon);
 extern prototype_fdct(vp8_fast_fdct4x4_neon);
 extern prototype_fdct(vp8_fast_fdct8x4_neon);
-extern prototype_fdct(vp8_short_walsh4x4_neon);
+extern prototype_fdct(vp9_short_walsh4x4_neon);

 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef  vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_neon
+#define vp8_fdct_short4x4 vp9_short_fdct4x4_neon

 #undef  vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_neon
+#define vp8_fdct_short8x4 vp9_short_fdct8x4_neon

 #undef  vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_neon
+#define vp8_fdct_fast4x4 vp9_short_fdct4x4_neon

 #undef  vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_neon
+#define vp8_fdct_fast8x4 vp9_short_fdct8x4_neon

 #undef  vp8_fdct_walsh_short4x4
-#define vp8_fdct_walsh_short4x4 vp8_short_walsh4x4_neon
+#define vp8_fdct_walsh_short4x4 vp9_short_walsh4x4_neon
 #endif

 #endif
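The #undef/#define pairs above implement compile-time binding: when CONFIG_RUNTIME_CPU_DETECT is off, the generic vp8_fdct_* names used by the encoder are rebound by the preprocessor straight to the ARM kernels, so calls compile to direct calls with no dispatch table. A toy version of the same idiom, with all names invented for illustration:

/* Toy version of the compile-time binding idiom above. */
void fdct4x4_c(short *input, short *output, int pitch);      /* generic C */
void fdct4x4_armv6(short *input, short *output, int pitch);  /* optimized */

#define my_fdct4x4 fdct4x4_c            /* default: portable C version */

#if !CONFIG_RUNTIME_CPU_DETECT && HAVE_ARMV6
#undef  my_fdct4x4
#define my_fdct4x4 fdct4x4_armv6        /* rebind: direct call, no table */
#endif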
vp8/encoder/arm/encodemb_arm.h

@@ -13,50 +13,50 @@
 #define ENCODEMB_ARM_H

 #if HAVE_ARMV6
-extern prototype_subb(vp8_subtract_b_armv6);
-extern prototype_submby(vp8_subtract_mby_armv6);
-extern prototype_submbuv(vp8_subtract_mbuv_armv6);
+extern prototype_subb(vp9_subtract_b_armv6);
+extern prototype_submby(vp9_subtract_mby_armv6);
+extern prototype_submbuv(vp9_subtract_mbuv_armv6);

 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef  vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_armv6
+#define vp8_encodemb_subb vp9_subtract_b_armv6

 #undef  vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_armv6
+#define vp8_encodemb_submby vp9_subtract_mby_armv6

 #undef  vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_armv6
+#define vp8_encodemb_submbuv vp9_subtract_mbuv_armv6
 #endif

 #endif /* HAVE_ARMV6 */

 #if HAVE_ARMV7
-// extern prototype_berr(vp8_block_error_c);
-// extern prototype_mberr(vp8_mbblock_error_c);
-// extern prototype_mbuverr(vp8_mbuverror_c);
+// extern prototype_berr(vp9_block_error_c);
+// extern prototype_mberr(vp9_mbblock_error_c);
+// extern prototype_mbuverr(vp9_mbuverror_c);

-extern prototype_subb(vp8_subtract_b_neon);
-extern prototype_submby(vp8_subtract_mby_neon);
-extern prototype_submbuv(vp8_subtract_mbuv_neon);
+extern prototype_subb(vp9_subtract_b_neon);
+extern prototype_submby(vp9_subtract_mby_neon);
+extern prototype_submbuv(vp9_subtract_mbuv_neon);

 // #undef  vp8_encodemb_berr
-// #define vp8_encodemb_berr vp8_block_error_c
+// #define vp8_encodemb_berr vp9_block_error_c

 // #undef  vp8_encodemb_mberr
-// #define vp8_encodemb_mberr vp8_mbblock_error_c
+// #define vp8_encodemb_mberr vp9_mbblock_error_c

 // #undef  vp8_encodemb_mbuverr
-// #define vp8_encodemb_mbuverr vp8_mbuverror_c
+// #define vp8_encodemb_mbuverr vp9_mbuverror_c

 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef  vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_neon
+#define vp8_encodemb_subb vp9_subtract_b_neon

 #undef  vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_neon
+#define vp8_encodemb_submby vp9_subtract_mby_neon

 #undef  vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_neon
+#define vp8_encodemb_submbuv vp9_subtract_mbuv_neon
 #endif

 #endif
vp8/encoder/arm/variance_arm.c

@@ -17,7 +17,7 @@
 #if HAVE_ARMV6

-unsigned int vp8_sub_pixel_variance8x8_armv6
+unsigned int vp9_sub_pixel_variance8x8_armv6
 (
   const unsigned char  *src_ptr,
   int  src_pixels_per_line,
@@ -40,11 +40,11 @@ unsigned int vp8_sub_pixel_variance8x8_armv6
   vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
                                            8, 8, 8, VFilter);

-  return vp8_variance8x8_armv6(second_pass, 8, dst_ptr,
+  return vp9_variance8x8_armv6(second_pass, 8, dst_ptr,
                                dst_pixels_per_line, sse);
 }

-unsigned int vp8_sub_pixel_variance16x16_armv6
+unsigned int vp9_sub_pixel_variance16x16_armv6
 (
   const unsigned char  *src_ptr,
   int  src_pixels_per_line,
@@ -60,13 +60,13 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
   unsigned int var;

   if (xoffset == HALFNDX && yoffset == 0) {
-    var = vp8_variance_halfpixvar16x16_h_armv6(src_ptr, src_pixels_per_line,
+    var = vp9_variance_halfpixvar16x16_h_armv6(src_ptr, src_pixels_per_line,
                                                dst_ptr, dst_pixels_per_line, sse);
   } else if (xoffset == 0 && yoffset == HALFNDX) {
-    var = vp8_variance_halfpixvar16x16_v_armv6(src_ptr, src_pixels_per_line,
+    var = vp9_variance_halfpixvar16x16_v_armv6(src_ptr, src_pixels_per_line,
                                                dst_ptr, dst_pixels_per_line, sse);
   } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
-    var = vp8_variance_halfpixvar16x16_hv_armv6(src_ptr, src_pixels_per_line,
+    var = vp9_variance_halfpixvar16x16_hv_armv6(src_ptr, src_pixels_per_line,
                                                 dst_ptr, dst_pixels_per_line, sse);
   } else {
     HFilter = vp8_bilinear_filters[xoffset];
@@ -78,7 +78,7 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
     vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
                                              16, 16, 16, VFilter);

-    var = vp8_variance16x16_armv6(second_pass, 16, dst_ptr,
+    var = vp9_variance16x16_armv6(second_pass, 16, dst_ptr,
                                   dst_pixels_per_line, sse);
   }
   return var;
@@ -89,7 +89,7 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
 #if HAVE_ARMV7

-unsigned int vp8_sub_pixel_variance16x16_neon
+unsigned int vp9_sub_pixel_variance16x16_neon
 (
   const unsigned char  *src_ptr,
   int  src_pixels_per_line,
@@ -100,13 +100,13 @@ unsigned int vp8_sub_pixel_variance16x16_neon
   unsigned int *sse
 ) {
   if (xoffset == HALFNDX && yoffset == 0)
-    return vp8_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+    return vp9_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   else if (xoffset == 0 && yoffset == HALFNDX)
-    return vp8_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+    return vp9_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   else if (xoffset == HALFNDX && yoffset == HALFNDX)
-    return vp8_variance_halfpixvar16x16_hv_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+    return vp9_variance_halfpixvar16x16_hv_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   else
-    return vp8_sub_pixel_variance16x16_neon_func(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
+    return vp9_sub_pixel_variance16x16_neon_func(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
 }
 #endif
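For orientation, the vp9_variance* kernels dispatched above compute the variance of the source/reference difference block and also return the raw sum of squared errors through *sse; variance is SSE minus the squared mean (here >> 8, since a 16x16 block has 256 samples). A plain-C sketch of that relationship, offered as an illustration rather than this commit's code:

#include <stdint.h>

/* Plain-C sketch of a 16x16 variance kernel: returns SSE minus squared mean
   (>> 8 == divide by 256 samples) and reports raw SSE through *sse. */
static unsigned int variance16x16_sketch(const unsigned char *src, int src_stride,
                                         const unsigned char *ref, int ref_stride,
                                         unsigned int *sse) {
  int sum = 0, r, c;
  unsigned int sse_acc = 0;
  for (r = 0; r < 16; r++, src += src_stride, ref += ref_stride) {
    for (c = 0; c < 16; c++) {
      const int diff = src[c] - ref[c];
      sum += diff;
      sse_acc += (unsigned int)(diff * diff);
    }
  }
  *sse = sse_acc;
  return sse_acc - (unsigned int)(((int64_t)sum * sum) >> 8);
}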
vp8/encoder/arm/variance_arm.h

@@ -14,44 +14,44 @@
 #if HAVE_ARMV6

-extern prototype_sad(vp8_sad16x16_armv6);
-extern prototype_variance(vp8_variance16x16_armv6);
-extern prototype_variance(vp8_variance8x8_armv6);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_armv6);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_armv6);
-extern prototype_variance(vp8_mse16x16_armv6);
+extern prototype_sad(vp9_sad16x16_armv6);
+extern prototype_variance(vp9_variance16x16_armv6);
+extern prototype_variance(vp9_variance8x8_armv6);
+extern prototype_subpixvariance(vp9_sub_pixel_variance16x16_armv6);
+extern prototype_subpixvariance(vp9_sub_pixel_variance8x8_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_h_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_v_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_hv_armv6);
+extern prototype_variance(vp9_mse16x16_armv6);

 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef  vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_armv6
+#define vp8_variance_sad16x16 vp9_sad16x16_armv6

 #undef  vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_armv6
+#define vp8_variance_subpixvar16x16 vp9_sub_pixel_variance16x16_armv6

 #undef  vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_armv6
+#define vp8_variance_subpixvar8x8 vp9_sub_pixel_variance8x8_armv6

 #undef  vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_armv6
+#define vp8_variance_var16x16 vp9_variance16x16_armv6

 #undef  vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_armv6
+#define vp8_variance_mse16x16 vp9_mse16x16_armv6

 #undef  vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_armv6
+#define vp8_variance_var8x8 vp9_variance8x8_armv6