public/external/libvpx · Commits

Commit 924d7451 authored Aug 22, 2013 by James Zern

    consistently name VP9_COMMON variables #1

    pc -> cm

    Change-Id: If3e83404f574316fdd3b9aace2487b64efdb66f3

parent e80bf802
Changes 5
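The change is mechanical: every local alias of the shared VP9_COMMON state is renamed from `pc` to `cm`, with no behavioral difference. A minimal sketch of the pattern, using a hypothetical helper that is not part of this commit (it only reuses types and fields that appear in the diff below):

```c
#include "vp9/common/vp9_onyxc_int.h"   /* VP9_COMMON */
#include "vp9/decoder/vp9_onyxd_int.h"  /* VP9D_COMP */

/* Hypothetical helper illustrating the rename pattern of this commit:
 *   before: VP9_COMMON *const pc = &pbi->common;
 *   after:  VP9_COMMON *const cm = &pbi->common;   (conventional alias)
 */
static int frame_is_key(VP9D_COMP *pbi) {
  VP9_COMMON *const cm = &pbi->common;  /* renamed from `pc` */
  return cm->frame_type == KEY_FRAME;
}
```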
vp9/common/vp9_entropy.c
...
...
@@ -436,11 +436,11 @@ const vp9_extra_bit vp9_extra_bits[12] = {
 #include "vp9/common/vp9_default_coef_probs.h"
 
-void vp9_default_coef_probs(VP9_COMMON *pc) {
-  vp9_copy(pc->fc.coef_probs[TX_4X4], default_coef_probs_4x4);
-  vp9_copy(pc->fc.coef_probs[TX_8X8], default_coef_probs_8x8);
-  vp9_copy(pc->fc.coef_probs[TX_16X16], default_coef_probs_16x16);
-  vp9_copy(pc->fc.coef_probs[TX_32X32], default_coef_probs_32x32);
+void vp9_default_coef_probs(VP9_COMMON *cm) {
+  vp9_copy(cm->fc.coef_probs[TX_4X4], default_coef_probs_4x4);
+  vp9_copy(cm->fc.coef_probs[TX_8X8], default_coef_probs_8x8);
+  vp9_copy(cm->fc.coef_probs[TX_16X16], default_coef_probs_16x16);
+  vp9_copy(cm->fc.coef_probs[TX_32X32], default_coef_probs_32x32);
 }
 
 // Neighborhood 5-tuples for various scans and blocksizes,
...
...
vp9/common/vp9_findnearmv.h
...
...
@@ -36,7 +36,7 @@ static void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
                xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
 }
 
-void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *pc,
+void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm,
                                    MACROBLOCKD *xd,
                                    int_mv *dst_nearest,
                                    int_mv *dst_near,
...
...
vp9/decoder/vp9_decodframe.c
...
...
@@ -255,13 +255,13 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
 static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
                             vp9_reader* r, BLOCK_SIZE bsize) {
-  VP9_COMMON *const pc = &pbi->common;
+  VP9_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
   const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
   PARTITION_TYPE partition = PARTITION_NONE;
   BLOCK_SIZE subsize;
 
-  if (mi_row >= pc->mi_rows || mi_col >= pc->mi_cols)
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
     return;
 
   if (bsize < BLOCK_8X8) {
...
...
@@ -269,21 +269,21 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
     return;
   } else {
     int pl;
-    const int idx = check_bsize_coverage(hbs, pc->mi_rows, pc->mi_cols,
+    const int idx = check_bsize_coverage(hbs, cm->mi_rows, cm->mi_cols,
                                          mi_row, mi_col);
-    set_partition_seg_context(pc, xd, mi_row, mi_col);
+    set_partition_seg_context(cm, xd, mi_row, mi_col);
     pl = partition_plane_context(xd, bsize);
 
     if (idx == 0)
       partition = treed_read(r, vp9_partition_tree,
-                             pc->fc.partition_prob[pc->frame_type][pl]);
+                             cm->fc.partition_prob[cm->frame_type][pl]);
     else if (idx > 0 &&
-             !vp9_read(r, pc->fc.partition_prob[pc->frame_type][pl][idx]))
+             !vp9_read(r, cm->fc.partition_prob[cm->frame_type][pl][idx]))
       partition = (idx == 1) ? PARTITION_HORZ : PARTITION_VERT;
     else
       partition = PARTITION_SPLIT;
 
-    pc->counts.partition[pl][partition]++;
+    cm->counts.partition[pl][partition]++;
   }
 
   subsize = get_subsize(bsize, partition);
...
...
@@ -296,13 +296,13 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
     case PARTITION_HORZ:
       decode_modes_b(pbi, mi_row, mi_col, r, subsize);
       *get_sb_index(xd, subsize) = 1;
-      if (mi_row + hbs < pc->mi_rows)
+      if (mi_row + hbs < cm->mi_rows)
         decode_modes_b(pbi, mi_row + hbs, mi_col, r, subsize);
       break;
     case PARTITION_VERT:
       decode_modes_b(pbi, mi_row, mi_col, r, subsize);
       *get_sb_index(xd, subsize) = 1;
-      if (mi_col + hbs < pc->mi_cols)
+      if (mi_col + hbs < cm->mi_cols)
         decode_modes_b(pbi, mi_row, mi_col + hbs, r, subsize);
       break;
     case PARTITION_SPLIT: {
...
...
@@ -320,7 +320,7 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
   // update partition context
   if (bsize >= BLOCK_8X8 &&
       (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) {
-    set_partition_seg_context(pc, xd, mi_row, mi_col);
+    set_partition_seg_context(cm, xd, mi_row, mi_col);
     update_partition_context(xd, subsize, bsize);
   }
 }
...
...
@@ -328,18 +328,18 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
 static void setup_token_decoder(VP9D_COMP *pbi,
                                 const uint8_t *data, size_t read_size,
                                 vp9_reader *r) {
-  VP9_COMMON *pc = &pbi->common;
+  VP9_COMMON *cm = &pbi->common;
   const uint8_t *data_end = pbi->source + pbi->source_sz;
 
   // Validate the calculated partition length. If the buffer
   // described by the partition can't be fully read, then restrict
   // it to the portion that can be (for EC mode) or throw an error.
   if (!read_is_valid(data, read_size, data_end))
-    vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt tile length");
 
   if (vp9_reader_init(r, data, read_size))
-    vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                        "Failed to allocate bool decoder %d", 1);
 }
...
...
@@ -571,28 +571,28 @@ static void setup_frame_size_with_refs(VP9D_COMP *pbi,
 static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
   const int num_threads = pbi->oxcf.max_threads;
-  VP9_COMMON *const pc = &pbi->common;
+  VP9_COMMON *const cm = &pbi->common;
   int mi_row, mi_col;
-  YV12_BUFFER_CONFIG *const fb = &pc->yv12_fb[pc->new_fb_idx];
+  YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[cm->new_fb_idx];
 
   if (pbi->do_loopfilter_inline) {
     if (num_threads > 1) {
       LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
       lf_data->frame_buffer = fb;
-      lf_data->cm = pc;
+      lf_data->cm = cm;
       lf_data->xd = pbi->mb;
       lf_data->stop = 0;
       lf_data->y_only = 0;
     }
-    vp9_loop_filter_frame_init(pc, pc->lf.filter_level);
+    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
   }
 
-  for (mi_row = pc->cur_tile_mi_row_start; mi_row < pc->cur_tile_mi_row_end;
+  for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end;
        mi_row += MI_BLOCK_SIZE) {
     // For a SB there are 2 left contexts, each pertaining to a MB row within
-    vp9_zero(pc->left_context);
-    vp9_zero(pc->left_seg_context);
-    for (mi_col = pc->cur_tile_mi_col_start; mi_col < pc->cur_tile_mi_col_end;
+    vp9_zero(cm->left_context);
+    vp9_zero(cm->left_seg_context);
+    for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
          mi_col += MI_BLOCK_SIZE)
       decode_modes_sb(pbi, mi_row, mi_col, r, BLOCK_64X64);
...
...
@@ -605,7 +605,7 @@ static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
         LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
         // decoding has completed: finish up the loop filter in this thread.
-        if (mi_row + MI_BLOCK_SIZE >= pc->cur_tile_mi_row_end) continue;
+        if (mi_row + MI_BLOCK_SIZE >= cm->cur_tile_mi_row_end) continue;
 
         vp9_worker_sync(&pbi->lf_worker);
         lf_data->start = lf_start;
...
...
@@ -613,7 +613,7 @@ static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
         pbi->lf_worker.hook = vp9_loop_filter_worker;
         vp9_worker_launch(&pbi->lf_worker);
       } else {
-        vp9_loop_filter_rows(fb, pc, &pbi->mb, lf_start, mi_row, 0);
+        vp9_loop_filter_rows(fb, cm, &pbi->mb, lf_start, mi_row, 0);
       }
     }
   }
...
...
@@ -628,8 +628,8 @@ static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
     } else {
       lf_start = mi_row - MI_BLOCK_SIZE;
     }
-    vp9_loop_filter_rows(fb, pc, &pbi->mb,
-                         lf_start, pc->mi_rows, 0);
+    vp9_loop_filter_rows(fb, cm, &pbi->mb,
+                         lf_start, cm->mi_rows, 0);
   }
 }
...
...
@@ -652,20 +652,20 @@ static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
 static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
   vp9_reader residual_bc;
-  VP9_COMMON *const pc = &pbi->common;
+  VP9_COMMON *const cm = &pbi->common;
   const uint8_t *const data_end = pbi->source + pbi->source_sz;
-  const int aligned_mi_cols = mi_cols_aligned_to_sb(pc->mi_cols);
-  const int tile_cols = 1 << pc->log2_tile_cols;
-  const int tile_rows = 1 << pc->log2_tile_rows;
+  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+  const int tile_cols = 1 << cm->log2_tile_cols;
+  const int tile_rows = 1 << cm->log2_tile_rows;
   int tile_row, tile_col;
 
   // Note: this memset assumes above_context[0], [1] and [2]
   // are allocated as part of the same buffer.
-  vpx_memset(pc->above_context[0], 0,
+  vpx_memset(cm->above_context[0], 0,
              sizeof(ENTROPY_CONTEXT) * MAX_MB_PLANE * (2 * aligned_mi_cols));
-  vpx_memset(pc->above_seg_context, 0,
+  vpx_memset(cm->above_seg_context, 0,
              sizeof(PARTITION_CONTEXT) * aligned_mi_cols);
 
   if (pbi->oxcf.inv_tile_order) {
...
...
@@ -690,9 +690,9 @@ static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
     }
 
     for (tile_row = 0; tile_row < tile_rows; tile_row++) {
-      vp9_get_tile_row_offsets(pc, tile_row);
+      vp9_get_tile_row_offsets(cm, tile_row);
       for (tile_col = tile_cols - 1; tile_col >= 0; tile_col--) {
-        vp9_get_tile_col_offsets(pc, tile_col);
+        vp9_get_tile_col_offsets(cm, tile_col);
         setup_token_decoder(pbi, data_ptr2[tile_row][tile_col],
                             data_end - data_ptr2[tile_row][tile_col],
                             &residual_bc);
...
...
@@ -706,16 +706,16 @@ static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
     int has_more;
 
     for (tile_row = 0; tile_row < tile_rows; tile_row++) {
-      vp9_get_tile_row_offsets(pc, tile_row);
+      vp9_get_tile_row_offsets(cm, tile_row);
       for (tile_col = 0; tile_col < tile_cols; tile_col++) {
         size_t size;
 
-        vp9_get_tile_col_offsets(pc, tile_col);
+        vp9_get_tile_col_offsets(cm, tile_col);
 
         has_more = tile_col < tile_cols - 1 || tile_row < tile_rows - 1;
         if (has_more) {
           if (!read_is_valid(data, 4, data_end))
-            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+            vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                                "Truncated packet or corrupt tile length");
 
           size = read_be32(data);
...
...
@@ -928,17 +928,17 @@ void vp9_init_dequantizer(VP9_COMMON *cm) {
 int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
   int i;
-  VP9_COMMON *const pc = &pbi->common;
+  VP9_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
 
   const uint8_t *data = pbi->source;
   const uint8_t *data_end = pbi->source + pbi->source_sz;
 
   struct vp9_read_bit_buffer rb = { data, data_end, 0,
-                                    pc, error_handler };
+                                    cm, error_handler };
   const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
-  const int keyframe = pc->frame_type == KEY_FRAME;
-  YV12_BUFFER_CONFIG *new_fb = &pc->yv12_fb[pc->new_fb_idx];
+  const int keyframe = cm->frame_type == KEY_FRAME;
+  YV12_BUFFER_CONFIG *new_fb = &cm->yv12_fb[cm->new_fb_idx];
 
   if (!first_partition_size) {
     // showing a frame directly
...
...
@@ -949,39 +949,39 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
   xd->corrupted = 0;
   new_fb->corrupted = 0;
   pbi->do_loopfilter_inline =
-      (pc->log2_tile_rows | pc->log2_tile_cols) == 0 && pc->lf.filter_level;
+      (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
 
   if (!pbi->decoded_key_frame && !keyframe)
     return -1;
 
   if (!read_is_valid(data, first_partition_size, data_end))
-    vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt header length");
 
-  xd->mode_info_context = pc->mi;
-  xd->prev_mode_info_context = pc->prev_mi;
-  xd->mode_info_stride = pc->mode_info_stride;
+  xd->mode_info_context = cm->mi;
+  xd->prev_mode_info_context = cm->prev_mi;
+  xd->mode_info_stride = cm->mode_info_stride;
 
-  init_dequantizer(pc, &pbi->mb);
+  init_dequantizer(cm, &pbi->mb);
 
-  pc->fc = pc->frame_contexts[pc->frame_context_idx];
+  cm->fc = cm->frame_contexts[cm->frame_context_idx];
 
-  vp9_zero(pc->counts);
+  vp9_zero(cm->counts);
 
   new_fb->corrupted |= read_compressed_header(pbi, data, first_partition_size);
 
-  setup_block_dptrs(xd, pc->subsampling_x, pc->subsampling_y);
+  setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y);
 
   // clear out the coeff buffer
   for (i = 0; i < MAX_MB_PLANE; ++i)
     vp9_zero(xd->plane[i].qcoeff);
 
-  set_prev_mi(pc);
+  set_prev_mi(cm);
 
   *p_data_end = decode_tiles(pbi, data + first_partition_size);
 
-  pc->last_width = pc->width;
-  pc->last_height = pc->height;
+  cm->last_width = cm->width;
+  cm->last_height = cm->height;
 
   new_fb->corrupted |= xd->corrupted;
...
...
@@ -989,21 +989,21 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
     if (keyframe && !new_fb->corrupted)
       pbi->decoded_key_frame = 1;
     else
-      vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                          "A stream must start with a complete key frame");
   }
 
-  if (!pc->error_resilient_mode && !pc->frame_parallel_decoding_mode) {
-    vp9_adapt_coef_probs(pc);
+  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
+    vp9_adapt_coef_probs(cm);
 
-    if (!keyframe && !pc->intra_only) {
-      vp9_adapt_mode_probs(pc);
-      vp9_adapt_mv_probs(pc, xd->allow_high_precision_mv);
+    if (!keyframe && !cm->intra_only) {
+      vp9_adapt_mode_probs(cm);
+      vp9_adapt_mv_probs(cm, xd->allow_high_precision_mv);
     }
   }
 
-  if (pc->refresh_frame_context)
-    pc->frame_contexts[pc->frame_context_idx] = pc->fc;
+  if (cm->refresh_frame_context)
+    cm->frame_contexts[cm->frame_context_idx] = cm->fc;
 
   return 0;
 }
vp9/decoder/vp9_decodframe.h
...
...
@@ -15,7 +15,7 @@
 struct VP9Common;
 struct VP9Decompressor;
 
-void vp9_init_dequantizer(struct VP9Common *pc);
+void vp9_init_dequantizer(struct VP9Common *cm);
 
 int vp9_decode_frame(struct VP9Decompressor *cpi, const uint8_t **p_data_end);
 
 #endif  // VP9_DECODER_VP9_DECODFRAME_H_
vp9/encoder/vp9_bitstream.c
...
...
@@ -237,7 +237,7 @@ static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
 static void update_switchable_interp_probs(VP9_COMP *const cpi,
                                            vp9_writer* const bc) {
-  VP9_COMMON *const pc = &cpi->common;
+  VP9_COMMON *const cm = &cpi->common;
   unsigned int branch_ct[SWITCHABLE_FILTERS + 1]
                         [SWITCHABLE_FILTERS - 1][2];
   vp9_prob new_prob[SWITCHABLE_FILTERS + 1][SWITCHABLE_FILTERS - 1];
...
...
@@ -246,21 +246,21 @@ static void update_switchable_interp_probs(VP9_COMP *const cpi,
     vp9_tree_probs_from_distribution(vp9_switchable_interp_tree,
                                      new_prob[j], branch_ct[j],
-                                     pc->counts.switchable_interp[j], 0);
+                                     cm->counts.switchable_interp[j], 0);
   }
 
   for (j = 0; j <= SWITCHABLE_FILTERS; ++j) {
     for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) {
-      vp9_cond_prob_diff_update(bc, &pc->fc.switchable_interp_prob[j][i],
+      vp9_cond_prob_diff_update(bc, &cm->fc.switchable_interp_prob[j][i],
                                 MODE_UPDATE_PROB, branch_ct[j][i]);
     }
   }
 
 #ifdef MODE_STATS
   if (!cpi->dummy_packing)
-    update_switchable_interp_stats(pc);
+    update_switchable_interp_stats(cm);
 #endif
 }
 
-static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) {
+static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer* const bc) {
   int i, j;
 
   for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
...
...
@@ -269,10 +269,10 @@ static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) {
     vp9_tree_probs_from_distribution(vp9_inter_mode_tree, new_prob, branch_ct,
-                                     pc->counts.inter_mode[i], NEARESTMV);
+                                     cm->counts.inter_mode[i], NEARESTMV);
 
     for (j = 0; j < INTER_MODES - 1; ++j)
-      vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j],
+      vp9_cond_prob_diff_update(bc, &cm->fc.inter_mode_probs[i][j],
                                 MODE_UPDATE_PROB, branch_ct[j]);
   }
 }
...
...
@@ -356,39 +356,39 @@ static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
 // This function encodes the reference frame
 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
-  VP9_COMMON *const pc = &cpi->common;
+  VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mi = &xd->mode_info_context->mbmi;
   const int segment_id = mi->segment_id;
-  int seg_ref_active = vp9_segfeature_active(&pc->seg, segment_id,
+  int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
                                              SEG_LVL_REF_FRAME);
   // If segment level coding of this signal is disabled...
   // or the segment allows multiple reference frame options
   if (!seg_ref_active) {
     // does the feature use compound prediction or not
     // (if not specified at the frame/segment level)
-    if (pc->comp_pred_mode == HYBRID_PREDICTION) {
+    if (cm->comp_pred_mode == HYBRID_PREDICTION) {
       vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
-                vp9_get_pred_prob_comp_inter_inter(pc, xd));
+                vp9_get_pred_prob_comp_inter_inter(cm, xd));
     } else {
       assert((mi->ref_frame[1] <= INTRA_FRAME) ==
-             (pc->comp_pred_mode == SINGLE_PREDICTION_ONLY));
+             (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY));
     }
 
     if (mi->ref_frame[1] > INTRA_FRAME) {
       vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
-                vp9_get_pred_prob_comp_ref_p(pc, xd));
+                vp9_get_pred_prob_comp_ref_p(cm, xd));
     } else {
       vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
-                vp9_get_pred_prob_single_ref_p1(pc, xd));
+                vp9_get_pred_prob_single_ref_p1(cm, xd));
       if (mi->ref_frame[0] != LAST_FRAME)
         vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
-                  vp9_get_pred_prob_single_ref_p2(pc, xd));
+                  vp9_get_pred_prob_single_ref_p2(cm, xd));
     }
   } else {
     assert(mi->ref_frame[1] <= INTRA_FRAME);
-    assert(vp9_get_segdata(&pc->seg, segment_id, SEG_LVL_REF_FRAME) ==
+    assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) ==
            mi->ref_frame[0]);
   }
...
...
@@ -397,11 +397,11 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
 }
 
 static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
-  VP9_COMMON *const pc = &cpi->common;
-  const nmv_context *nmvc = &pc->fc.nmvc;
+  VP9_COMMON *const cm = &cpi->common;
+  const nmv_context *nmvc = &cm->fc.nmvc;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
-  struct segmentation *seg = &pc->seg;
+  struct segmentation *seg = &cm->seg;
   MB_MODE_INFO *const mi = &m->mbmi;
   const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
   const MB_PREDICTION_MODE mode = mi->mode;
...
...
@@ -410,7 +410,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
   const BLOCK_SIZE bsize = mi->sb_type;
   const int allow_hp = xd->allow_high_precision_mv;
 
-  x->partition_info = x->pi + (m - pc->mi);
+  x->partition_info = x->pi + (m - cm->mi);
 
 #ifdef ENTROPY_STATS
   active_section = 9;
...
...
@@ -432,9 +432,9 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
   if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
     vp9_write(bc, rf != INTRA_FRAME,
-              vp9_get_pred_prob_intra_inter(pc, xd));
+              vp9_get_pred_prob_intra_inter(cm, xd));
 
-  if (bsize >= BLOCK_8X8 && pc->tx_mode == TX_MODE_SELECT &&
+  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
       !(rf != INTRA_FRAME &&
         (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
     write_selected_tx_size(cpi, mi->tx_size, bsize, bc);
...
...
@@ -446,7 +446,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
 #endif
 
     if (bsize >= BLOCK_8X8) {
-      write_intra_mode(bc, mode, pc->fc.y_mode_prob[size_group_lookup[bsize]]);
+      write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
     } else {
       int idx, idy;
       const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
...
...
@@ -454,11 +454,11 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
       for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
         for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
           const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
-          write_intra_mode(bc, bm, pc->fc.y_mode_prob[0]);
+          write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]);
         }
       }
     }
-    write_intra_mode(bc, mi->uv_mode, pc->fc.uv_mode_prob[mode]);
+    write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]);
   } else {
     vp9_prob *mv_ref_p;
     encode_ref_frame(cpi, bc);
...
...
@@ -472,18 +472,18 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
     if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
       if (bsize >= BLOCK_8X8) {
         write_sb_mv_ref(bc, mode, mv_ref_p);
-        ++pc->counts.inter_mode[mi->mode_context[rf]]
+        ++cm->counts.inter_mode[mi->mode_context[rf]]
                                [inter_mode_offset(mode)];
       }
     }
 
-    if (pc->mcomp_filter_type == SWITCHABLE) {
+    if (cm->mcomp_filter_type == SWITCHABLE) {
       const int ctx = vp9_get_pred_context_switchable_interp(xd);
       write_token(bc, vp9_switchable_interp_tree,
-                  pc->fc.switchable_interp_prob[ctx],
+                  cm->fc.switchable_interp_prob[ctx],
                   &vp9_switchable_interp_encodings[mi->interp_filter]);
     } else {
-      assert(mi->interp_filter == pc->mcomp_filter_type);
+      assert(mi->interp_filter == cm->mcomp_filter_type);
     }
 
     if (bsize < BLOCK_8X8) {
...
...
@@ -499,7 +499,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
           blockmode = x->partition_info->bmi[j].mode;
           blockmv = m->bmi[j].as_mv[0];
           write_sb_mv_ref(bc, blockmode, mv_ref_p);
-          ++pc->counts.inter_mode[mi->mode_context[rf]]
+          ++cm->counts.inter_mode[mi->mode_context[rf]]
                                  [inter_mode_offset(blockmode)];
 
           if (blockmode == NEWMV) {
...
@@ -1458,7 +1458,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
   vp9_compute_update_table();
 
 #ifdef ENTROPY_STATS
-  if (pc->frame_type == INTER_FRAME)
+  if (cm->frame_type == INTER_FRAME)
     active_section = 0;
   else
     active_section = 7;
...
...