libvpx / Commits / b4cdcd6e

Commit b4cdcd6e, authored 12 years ago by Ronald S. Bultje

Generalize coefficient tokenizing.

Change-Id: Ie33f7d0f90d0e5862be975d676a700d06f79eec6
Parent: cb9e6abf
Showing 1 changed file: vp8/encoder/tokenize.c (+115 additions, −375 deletions)
@@ -109,380 +109,113 @@ static void fill_value_tokens() {
   vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
 }
 
-static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
-                                      const BLOCKD *const b,
-                                      TOKENEXTRA **tp,
-                                      PLANE_TYPE type,
-                                      ENTROPY_CONTEXT *a,
-                                      ENTROPY_CONTEXT *l,
-                                      VP8_COMP *cpi,
-                                      int dry_run) {
-  int pt;                     /* near block/prev token context index */
-  int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
-  const int eob = b->eob;     /* one beyond last nonzero coeff */
-  TOKENEXTRA *t = *tp;        /* store tokens starting here */
-  const short *qcoeff_ptr = b->qcoeff;
-  TX_TYPE tx_type = get_tx_type(xd, b);
-  int seg_eob = 256;
-  int segment_id = xd->mode_info_context->mbmi.segment_id;
-
-  if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
-    seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
-
-  VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
-  do {
-    const int band = vp8_coef_bands_16x16[c];
-    int x;
-
-    if (c < eob) {
-      const int rc = vp8_default_zig_zag1d_16x16[c];
-      const int v = qcoeff_ptr[rc];
-
-      assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
-
-      t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
-      x        = vp8_dct_value_tokens_ptr[v].Token;
-    } else {
-      x = DCT_EOB_TOKEN;
-    }
-
-    t->Token = x;
-    if (tx_type != DCT_DCT)
-      t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
-    else
-      t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
-
-    t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
-                                   (band > 1 && type == PLANE_TYPE_Y_NO_DC));
-    assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-    if (!dry_run) {
-      if (tx_type != DCT_DCT)
-        ++cpi->hybrid_coef_counts_16x16[type][band][pt][x];
-      else
-        ++cpi->coef_counts_16x16[type][band][pt][x];
-    }
-    pt = vp8_prev_token_class[x];
-    ++t;
-  } while (c < eob && ++c < seg_eob);
-
-  *tp = t;
-  pt = (c != !type); /* 0 <-> all coeff data is zero */
-  *a = *l = pt;
-}
-
-static void tokenize2nd_order_b_8x8(MACROBLOCKD *xd,
-                                    const BLOCKD *const b,
-                                    TOKENEXTRA **tp,
-                                    ENTROPY_CONTEXT *a,
-                                    ENTROPY_CONTEXT *l,
-                                    VP8_COMP *cpi,
-                                    int dry_run) {
-  int pt;                     /* near block/prev token context index */
-  int c = 0;                  /* start at DC */
-  const int eob = b->eob;     /* one beyond last nonzero coeff */
-  TOKENEXTRA *t = *tp;        /* store tokens starting here */
-  const short *qcoeff_ptr = b->qcoeff;
-  int seg_eob = 4;
-  int segment_id = xd->mode_info_context->mbmi.segment_id;
-
-  if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
-    seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
-  }
-
-  VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
-  assert(eob <= 4);
-
-  do {
-    const int band = vp8_coef_bands[c];
-    int x;
-
-    if (c < eob) {
-      const int rc = vp8_default_zig_zag1d[c];
-      const int v = qcoeff_ptr[rc];
-
-      assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
-
-      t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
-      x        = vp8_dct_value_tokens_ptr[v].Token;
-    } else {
-      x = DCT_EOB_TOKEN;
-    }
-
-    t->Token = x;
-    t->context_tree = cpi->common.fc.coef_probs_8x8[PLANE_TYPE_Y2][band][pt];
-
-    t->skip_eob_node = ((pt == 0) && (band > 0));
-    assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
-    if (!dry_run)
-      ++cpi->coef_counts_8x8[PLANE_TYPE_Y2][band][pt][x];
-    pt = vp8_prev_token_class[x];
-    ++t;
-  } while (c < eob && ++c < seg_eob);
-
-  *tp = t;
-  pt = (c != 0); /* 0 <-> all coeff data is zero */
-  *a = *l = pt;
-}
-
-static void tokenize2nd_order_b_4x4(MACROBLOCKD *xd,
-                                    TOKENEXTRA **tp,
-                                    VP8_COMP *cpi,
-                                    int dry_run) {
-  int pt;                     /* near block/prev token context index */
-  int c = 0;                  /* start at DC */
-  TOKENEXTRA *t = *tp;        /* store tokens starting here */
-  const BLOCKD *b = xd->block + 24;
-  const short *qcoeff_ptr = b->qcoeff;
-  ENTROPY_CONTEXT *a;
-  ENTROPY_CONTEXT *l;
-  const int eob = b->eob;
-  int seg_eob = 16;
-  int segment_id = xd->mode_info_context->mbmi.segment_id;
-
-  if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
-    seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
-
-  a = (ENTROPY_CONTEXT *)xd->above_context + 8;
-  l = (ENTROPY_CONTEXT *)xd->left_context + 8;
-
-  VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
-  do {
-    const int band = vp8_coef_bands[c];
-    int token;
-
-    if (c < eob) {
-      const int rc = vp8_default_zig_zag1d[c];
-      const int v = qcoeff_ptr[rc];
-
-      t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
-      token    = vp8_dct_value_tokens_ptr[v].Token;
-    } else
-      token = DCT_EOB_TOKEN;
-
-    t->Token = token;
-    t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_Y2][band][pt];
-
-    t->skip_eob_node = ((pt == 0) && (band > 0));
-    assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
-    if (!dry_run)
-      ++cpi->coef_counts[PLANE_TYPE_Y2][band][pt][token];
-    pt = vp8_prev_token_class[token];
-    ++t;
-  } while (c < eob && ++c < seg_eob);
-
-  *tp = t;
-  pt = (c != 0); /* 0 <-> all coeff data is zero */
-  *a = *l = pt;
-}
-
-static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
-                                    const BLOCKD *const b,
-                                    TOKENEXTRA **tp,
-                                    PLANE_TYPE type,
-                                    ENTROPY_CONTEXT *a,
-                                    ENTROPY_CONTEXT *l,
-                                    VP8_COMP *cpi,
-                                    int dry_run) {
-  int pt;                     /* near block/prev token context index */
-  int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0; /* start at DC unless type 0 */
-  TOKENEXTRA *t = *tp;        /* store tokens starting here */
-  const short *qcoeff_ptr = b->qcoeff;
-  TX_TYPE tx_type = get_tx_type(xd, b);
-  const int eob = b->eob;
-  int seg_eob = 64;
-  int segment_id = xd->mode_info_context->mbmi.segment_id;
-
-  if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
-    seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
-
-  VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
-  do {
-    const int band = vp8_coef_bands_8x8[c];
-    int x;
-
-    if (c < eob) {
-      const int rc = vp8_default_zig_zag1d_8x8[c];
-      const int v = qcoeff_ptr[rc];
-
-      assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
-
-      t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
-      x        = vp8_dct_value_tokens_ptr[v].Token;
-    } else
-      x = DCT_EOB_TOKEN;
-
-    t->Token = x;
-    if (tx_type != DCT_DCT)
-      t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
-    else
-      t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
-
-    t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
-                                   (band > 1 && type == PLANE_TYPE_Y_NO_DC));
-    assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-    if (!dry_run) {
-      if (tx_type != DCT_DCT)
-        ++cpi->hybrid_coef_counts_8x8[type][band][pt][x];
-      else
-        ++cpi->coef_counts_8x8[type][band][pt][x];
-    }
-    pt = vp8_prev_token_class[x];
-    ++t;
-  } while (c < eob && ++c < seg_eob);
-
-  *tp = t;
-  pt = (c != !type); /* 0 <-> all coeff data is zero */
-  *a = *l = pt;
-}
-
-static void tokenize1st_order_chroma_4x4(MACROBLOCKD *xd,
-                                         TOKENEXTRA **tp,
-                                         VP8_COMP *cpi,
-                                         int dry_run) {
-  unsigned int block;
-  const BLOCKD *b = xd->block + 16;
-  int pt;                     /* near block/prev token context index */
-  TOKENEXTRA *t = *tp;        /* store tokens starting here */
-  ENTROPY_CONTEXT *a;
-  ENTROPY_CONTEXT *l;
-  int seg_eob = 16;
-  int segment_id = xd->mode_info_context->mbmi.segment_id;
-
-  if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
-    seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
-  }
-
-  /* Chroma */
-  for (block = 16; block < 24; block++, b++) {
-    const int eob = b->eob;
-    const int tmp1 = vp8_block2above[block];
-    const int tmp2 = vp8_block2left[block];
-    const int16_t *qcoeff_ptr = b->qcoeff;
-    int c = 0;
-
-    a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
-    l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
-
-    VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
-    do {
-      const int band = vp8_coef_bands[c];
-      int token;
-
-      if (c < eob) {
-        const int rc = vp8_default_zig_zag1d[c];
-        const int v = qcoeff_ptr[rc];
-
-        t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
-        token    = vp8_dct_value_tokens_ptr[v].Token;
-      } else
-        token = DCT_EOB_TOKEN;
-
-      t->Token = token;
-      t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_UV][band][pt];
-
-      t->skip_eob_node = ((pt == 0) && (band > 0));
-      assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
-      if (!dry_run)
-        ++cpi->coef_counts[PLANE_TYPE_UV][band][pt][token];
-      pt = vp8_prev_token_class[token];
-      ++t;
-    } while (c < eob && ++c < seg_eob);
-
-    *tp = t;
-    pt = (c != 0); /* 0 <-> all coeff data is zero */
-    *a = *l = pt;
-  }
-}
-
-static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
-                                    TOKENEXTRA **tp,
-                                    PLANE_TYPE type,
-                                    VP8_COMP *cpi,
-                                    int dry_run) {
-  unsigned int block;
-  const BLOCKD *b = xd->block;
-  int pt;                     /* near block/prev token context index */
-  TOKENEXTRA *t = *tp;        /* store tokens starting here */
-  ENTROPY_CONTEXT *a, *l;
-  int seg_eob = 16;
-  int segment_id = xd->mode_info_context->mbmi.segment_id;
-  int const *pt_scan = vp8_default_zig_zag1d;
-
-  if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
-    seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
-  }
-
-  /* Luma */
-  for (block = 0; block < 16; block++, b++) {
-    const int eob = b->eob;
-    const int16_t *qcoeff_ptr = b->qcoeff;
-    int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
-    TX_TYPE tx_type = get_tx_type(xd, &xd->block[block]);
-
-    switch (tx_type) {
-      case ADST_DCT:
-        pt_scan = vp8_row_scan;
-        break;
-      case DCT_ADST:
-        pt_scan = vp8_col_scan;
-        break;
-      default:
-        pt_scan = vp8_default_zig_zag1d;
-        break;
-    }
-
-    a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
-    l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
-    VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
-    assert(b->eob <= 16);
-
-    do {
-      const int band = vp8_coef_bands[c];
-      int token;
-
-      if (c < eob) {
-        const int rc = pt_scan[c];
-        const int v = qcoeff_ptr[rc];
-
-        t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
-        token    = vp8_dct_value_tokens_ptr[v].Token;
-      } else
-        token = DCT_EOB_TOKEN;
-
-      t->Token = token;
-      if (tx_type != DCT_DCT)
-        t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
-      else
-        t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
-
-      t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
-                                     (band > 1 && type == PLANE_TYPE_Y_NO_DC));
-      assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-      if (!dry_run) {
-        if (tx_type != DCT_DCT)
-          ++cpi->hybrid_coef_counts[type][band][pt][token];
-        else
-          ++cpi->coef_counts[type][band][pt][token];
-      }
-      pt = vp8_prev_token_class[token];
-      ++t;
-    } while (c < eob && ++c < seg_eob);
-
-    *tp = t;
-    pt = (c != !type); /* 0 <-> all coeff data is zero */
-    *a = *l = pt;
-  }
-
-  tokenize1st_order_chroma_4x4(xd, tp, cpi, dry_run);
-}
+static void tokenize_b(VP8_COMP *cpi,
+                       MACROBLOCKD *xd,
+                       const BLOCKD *const b,
+                       TOKENEXTRA **tp,
+                       PLANE_TYPE type,
+                       ENTROPY_CONTEXT *a,
+                       ENTROPY_CONTEXT *l,
+                       TX_SIZE tx_size,
+                       int dry_run) {
+  int pt;                     /* near block/prev token context index */
+  int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
+  const int eob = b->eob;     /* one beyond last nonzero coeff */
+  TOKENEXTRA *t = *tp;        /* store tokens starting here */
+  const short *qcoeff_ptr = b->qcoeff;
+  int seg_eob;
+  int segment_id = xd->mode_info_context->mbmi.segment_id;
+  const int *bands, *scan;
+  unsigned int (*counts)[COEF_BANDS][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
+  vp8_prob (*probs)[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+  const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+                          get_tx_type(xd, b) : DCT_DCT;
+
+  VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+  switch (tx_size) {
+    default:
+    case TX_4X4:
+      seg_eob = 16;
+      bands = vp8_coef_bands;
+      scan = vp8_default_zig_zag1d;
+      if (tx_type != DCT_DCT) {
+        counts = cpi->hybrid_coef_counts;
+        probs = cpi->common.fc.hybrid_coef_probs;
+        if (tx_type == ADST_DCT) {
+          scan = vp8_row_scan;
+        } else if (tx_type == DCT_ADST) {
+          scan = vp8_col_scan;
+        }
+      } else {
+        counts = cpi->coef_counts;
+        probs = cpi->common.fc.coef_probs;
+      }
+      break;
+    case TX_8X8:
+      if (type == PLANE_TYPE_Y2) {
+        seg_eob = 4;
+        bands = vp8_coef_bands;
+        scan = vp8_default_zig_zag1d;
+      } else {
+        seg_eob = 64;
+        bands = vp8_coef_bands_8x8;
+        scan = vp8_default_zig_zag1d_8x8;
+      }
+      if (tx_type != DCT_DCT) {
+        counts = cpi->hybrid_coef_counts_8x8;
+        probs = cpi->common.fc.hybrid_coef_probs_8x8;
+      } else {
+        counts = cpi->coef_counts_8x8;
+        probs = cpi->common.fc.coef_probs_8x8;
+      }
+      break;
+    case TX_16X16:
+      seg_eob = 256;
+      bands = vp8_coef_bands_16x16;
+      scan = vp8_default_zig_zag1d_16x16;
+      if (tx_type != DCT_DCT) {
+        counts = cpi->hybrid_coef_counts_16x16;
+        probs = cpi->common.fc.hybrid_coef_probs_16x16;
+      } else {
+        counts = cpi->coef_counts_16x16;
+        probs = cpi->common.fc.coef_probs_16x16;
+      }
+      break;
+  }
+
+  if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
+    seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
+
+  do {
+    const int band = bands[c];
+    int token;
+
+    if (c < eob) {
+      const int rc = scan[c];
+      const int v = qcoeff_ptr[rc];
+
+      assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE);
+
+      t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+      token    = vp8_dct_value_tokens_ptr[v].Token;
+    } else {
+      token = DCT_EOB_TOKEN;
+    }
+
+    t->Token = token;
+    t->context_tree = probs[type][band][pt];
+    t->skip_eob_node = (pt == 0) && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+                                     (band > 1 && type == PLANE_TYPE_Y_NO_DC));
+    assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
+    if (!dry_run) {
+      ++counts[type][band][pt][token];
+    }
+    pt = vp8_prev_token_class[token];
+    ++t;
+  } while (c < eob && ++c < seg_eob);
+
+  *tp = t;
+  *a = *l = (c != !type); /* 0 <-> all coeff data is zero */
+}
 
 int mby_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block) {
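The new tokenize_b above folds the per-transform-size tokenizers into one routine that picks its scan order, band table, coefficient limit and count/probability tables from the TX_SIZE and plane type. The sketch below illustrates that pattern in isolation; it is not libvpx code, and every name in it (my_tokenize, my_zigzag_4x4, MY_TX_*) is invented for the example.

/* Illustrative sketch only, not libvpx code.  It mirrors the shape of the
 * refactor: a single tokenizer receives the transform size and selects the
 * per-size parameters (scan order, coefficient limit) up front, where the
 * old code had one near-identical function per size. */
#include <stdio.h>

typedef enum { MY_TX_4X4, MY_TX_8X8, MY_TX_16X16 } my_tx_size;

/* Toy 4x4 zig-zag scan; the real encoder keeps one table per size. */
static const int my_zigzag_4x4[16] = {
  0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
};

/* Emit the scanned coefficients up to eob, then a -1 end-of-block marker,
 * standing in for the (token, extra-bits) pairs the real code produces. */
static int my_tokenize(const short *qcoeff, int eob, my_tx_size sz,
                       int *out, int max_out) {
  const int *scan;
  int limit, c, n = 0;

  switch (sz) {               /* per-size parameters picked once, up front */
    case MY_TX_4X4:
    default:
      limit = 16;
      scan = my_zigzag_4x4;
      break;
    /* MY_TX_8X8 / MY_TX_16X16 would install their own tables here. */
  }

  for (c = 0; c < eob && c < limit && n < max_out; ++c)
    out[n++] = qcoeff[scan[c]];
  if (n < max_out)
    out[n++] = -1;            /* stand-in for DCT_EOB_TOKEN */
  return n;
}

int main(void) {
  const short q[16] = { 9, 0, -3, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 };
  int out[17];
  int i, n = my_tokenize(q, 9, MY_TX_4X4, out, 17);

  for (i = 0; i < n; ++i)
    printf("%d ", out[i]);
  printf("\n");
  return 0;
}

In the real function the same up-front selection also covers the plain versus hybrid-transform count and probability tables, so the emission loop only has to be written once.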
@@ -566,6 +299,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
   int tx_size = xd->mode_info_context->mbmi.txfm_size;
   int mb_skip_context = get_pred_context(&cpi->common, xd, PRED_MBSKIP);
   TOKENEXTRA *t_backup = *t;
+  ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
+  ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
   // If the MB is going to be skipped because of a segment level flag
   // exclude this from the skip count stats used to calculate the
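The two pointers added here give vp8_tokenize_mb a single above/left entropy-context view that every tokenize_b call below reuses. For orientation, the combination those contexts feed into roughly amounts to counting how many of the two neighbours coded any coefficients; the toy program below shows that idea with an invented name (it is not the VP8_COMBINEENTROPYCONTEXTS macro itself, whose exact definition lives in the entropy headers).

/* Sketch only, invented name.  The context index pt used to pick a token
 * probability set is derived from whether the above and left neighbours
 * had any coefficients, giving a value in {0, 1, 2}. */
#include <stdio.h>

static int my_combine_contexts(int above_has_coeffs, int left_has_coeffs) {
  return (above_has_coeffs != 0) + (left_has_coeffs != 0);
}

int main(void) {
  printf("%d %d %d\n",
         my_combine_contexts(0, 0),   /* 0: both neighbours empty      */
         my_combine_contexts(1, 0),   /* 1: one neighbour has data     */
         my_combine_contexts(1, 1));  /* 2: both neighbours have data  */
  return 0;
}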
@@ -619,65 +354,70 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
   if (has_y2_block) {
     if (tx_size == TX_8X8) {
-      ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
-      ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
-      tokenize2nd_order_b_8x8(xd,
-                              xd->block + 24, t,
-                              A + vp8_block2above_8x8[24],
-                              L + vp8_block2left_8x8[24],
-                              cpi, dry_run);
-    } else
-      tokenize2nd_order_b_4x4(xd, t, cpi, dry_run);
+      tokenize_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2,
+                 A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24],
+                 TX_8X8, dry_run);
+    } else {
+      tokenize_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2,
+                 A + vp8_block2above[24], L + vp8_block2left[24],
+                 TX_4X4, dry_run);
+    }
 
     plane_type = PLANE_TYPE_Y_NO_DC;
   } else
     plane_type = PLANE_TYPE_Y_WITH_DC;
 
   if (tx_size == TX_16X16) {
-    ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
-    ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
-    tokenize1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
-                              A, L, cpi, dry_run);
+    tokenize_b(cpi, xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
+               A, L, TX_16X16, dry_run);
     A[1] = A[2] = A[3] = A[0];
     L[1] = L[2] = L[3] = L[0];
 
     for (b = 16; b < 24; b += 4) {
-      tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
-                              A + vp8_block2above_8x8[b],
-                              L + vp8_block2left_8x8[b], cpi, dry_run);
+      tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
+                 A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+                 TX_8X8, dry_run);
       A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
       L[vp8_block2left_8x8[b] + 1]  = L[vp8_block2left_8x8[b]];
     }
     vpx_memset(&A[8], 0, sizeof(A[8]));
     vpx_memset(&L[8], 0, sizeof(L[8]));
   } else if (tx_size == TX_8X8) {
-    ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
-    ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
     for (b = 0; b < 16; b += 4) {
-      tokenize1st_order_b_8x8(xd,
-                              xd->block + b, t, plane_type,
-                              A + vp8_block2above_8x8[b],
-                              L + vp8_block2left_8x8[b],
-                              cpi, dry_run);
+      tokenize_b(cpi, xd,
+                 xd->block + b, t, plane_type,
+                 A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+                 TX_8X8, dry_run);
       A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
       L[vp8_block2left_8x8[b] + 1]  = L[vp8_block2left_8x8[b]];
     }
     if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
         xd->mode_info_context->mbmi.mode == SPLITMV) {
-      tokenize1st_order_chroma_4x4(xd, t, cpi, dry_run);
+      for (b = 16; b < 24; b++) {
+        tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
+                   A + vp8_block2above[b], L + vp8_block2left[b],
+                   TX_4X4, dry_run);
+      }
     } else {
       for (b = 16; b < 24; b += 4) {
-        tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
-                                A + vp8_block2above_8x8[b],
-                                L + vp8_block2left_8x8[b], cpi, dry_run);
+        tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
+                   A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+                   TX_8X8, dry_run);
        A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
        L[vp8_block2left_8x8[b] + 1]  = L[vp8_block2left_8x8[b]];
      }
    }
  } else {
-    tokenize1st_order_b_4x4(xd, t, plane_type, cpi, dry_run);
+    for (b = 0; b < 16; b++) {
+      tokenize_b(cpi, xd, xd->block + b, t, plane_type,
+                 A + vp8_block2above[b], L + vp8_block2left[b],
+                 TX_4X4, dry_run);
+    }
+
+    for (b = 16; b < 24; b++) {
+      tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
+                 A + vp8_block2above[b], L + vp8_block2left[b],
+                 TX_4X4, dry_run);
+    }
   }
   if (dry_run)
     *t = t_backup;
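With tokenize_b taking the block pointer, the context slots and a TX_SIZE directly, the caller's job reduces to looping over blocks and handing each one the right neighbour contexts, as the hunk above does for the 4x4, 8x8 and 16x16 layouts. Below is a minimal sketch of that calling shape with invented names (my_tokenize_block, MyCtx) and a simplified 4x4 luma grid; it is not libvpx code.

/* Caller-side sketch only, not libvpx code.  The per-block-type wrappers
 * disappear and the macroblock walker just loops over blocks, handing each
 * one its above/left context slot and a transform size. */
#include <stdio.h>

typedef enum { MY_TX_4X4, MY_TX_8X8 } my_tx_size;
typedef struct { int above[4]; int left[4]; } MyCtx;

/* Dummy per-block tokenizer: reports which block it was given and marks its
 * contexts as "has coefficients" so later blocks see updated neighbours. */
static void my_tokenize_block(int block, int *a, int *l, my_tx_size sz) {
  printf("block %2d  tx=%s  ctx(a=%d, l=%d)\n",
         block, sz == MY_TX_8X8 ? "8x8" : "4x4", *a, *l);
  *a = *l = 1;
}

int main(void) {
  MyCtx ctx = { {0}, {0} };
  int b;

  /* 16 luma blocks in a 4x4 grid: the context slot is the block's column
   * (above) and row (left), the role vp8_block2above/vp8_block2left play. */
  for (b = 0; b < 16; b++)
    my_tokenize_block(b, &ctx.above[b & 3], &ctx.left[b >> 2], MY_TX_4X4);
  return 0;
}

The real loops additionally propagate the per-block context updates (the A[...] and L[...] assignments in the hunk) so that wider transforms mark every 4x4 context slot they cover.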