/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vpx_ports/config.h"
#include "encodemb.h"
#include "encodemv.h"
#include "vp8/common/extend.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
#include "vp8/common/findnearmv.h"
#include "vp8/common/reconintra.h"
/* With runtime CPU detection enabled, RTCD(x) resolves an entry in the
 * per-codec function pointer table at run time; otherwise the optimized
 * variants are bound at compile time and the table is not consulted.
 */
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x) &cpi->common.rtcd.x
#define IF_RTCD(x) (x)
#else
#define RTCD(x) NULL
#define IF_RTCD(x) NULL
#endif
#if CONFIG_SEGMENTATION
#define SEEK_SEGID 12
#define SEEK_SAMEID 4
#define SEEK_DIFFID 7
#endif
#ifdef ENC_DEBUG
int enc_debug=0;
int mb_row_debug, mb_col_debug;
#endif
extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
                                      MACROBLOCK *x,
                                      MB_ROW_COMP *mbr_ei,
                                      int mb_row,
                                      int count);
void vp8_build_block_offsets(MACROBLOCK *x);
void vp8_setup_block_ptrs(MACROBLOCK *x);
int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);
#ifdef MODE_STATS
unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int y_modes[5] = {0, 0, 0, 0, 0};
unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp8_activity_masking().
 */
#define VP8_ACTIVITY_AVG_MIN (64)
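
/* Illustrative sketch (not from the original file, kept under #if 0): how
 * the VP8_ACTIVITY_AVG_MIN floor would typically be applied when forming
 * the per-frame activity average. The names below are hypothetical.
 */
#if 0
static unsigned int clamp_activity_avg(int64_t activity_sum, int mb_count)
{
    /* Average per-MB activity over the frame, then clamp so a flat frame
     * cannot produce a zero average (infinite lambda) downstream.
     */
    unsigned int avg = (unsigned int)(activity_sum / mb_count);
    return (avg < VP8_ACTIVITY_AVG_MIN) ? VP8_ACTIVITY_AVG_MIN : avg;
}
#endif
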
/* This is used as a reference when computing the source variance for the
 * purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 * which will be faster.
 */
static const unsigned char VP8_VAR_OFFS[16] =
{
    128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
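
/* Illustrative sketch (not from the original file, kept under #if 0):
 * VP8_VAR_OFFS acts as an all-128 flat reference, so a regular 16x16
 * variance routine doubles as a cheap no-reference activity measure for
 * the source macroblock. The VARIANCE_INVOKE()/var16x16 usage mirrors the
 * encoder's variance dispatch pattern and is an assumption here.
 */
#if 0
static unsigned int source_activity(VP8_COMP *cpi, MACROBLOCK *x)
{
    unsigned int sse;
    /* Variance of the source MB against constant 128, reference stride 0. */
    return VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
               (x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
}
#endif
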
#if CONFIG_T8X8
// INTRA mode transform size
// When all three criteria are off, the default is 4x4.
//#define INTRA_VARIANCE_ENTROPY_CRITERIA
#define INTRA_WTD_SSE_ENTROPY_CRITERIA
//#define INTRA_TEST_8X8_ONLY

// INTER mode transform size
// When all three criteria are off, the default is 4x4.
//#define INTER_VARIANCE_ENTROPY_CRITERIA
#define INTER_WTD_SSE_ENTROPY_CRITERIA
//#define INTER_TEST_8X8_ONLY
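
/* Illustrative sketch (not from the original file, kept under #if 0): how a
 * criterion such as INTER_VARIANCE_ENTROPY_CRITERIA could gate the
 * transform size. The threshold and the comparison direction are
 * hypothetical; Compute_Variance_Entropy() is defined below.
 */
#if 0
static int use_8x8_transform(MACROBLOCK *x)
{
    const double ENTROPY_THRESH = 1.0; /* hypothetical threshold, in bits */
    /* Low entropy means the residual energy is concentrated in a few of
     * the four 8x8 sub-blocks; high entropy means it is spread evenly,
     * which plausibly favours the larger transform.
     */
    return Compute_Variance_Entropy(x) > ENTROPY_THRESH;
}
#endif
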
/* Returns the variance of a dimension x dimension block of residuals read
 * from b1 with row stride pitch. dimension must be <= 8 (ip is 8x8).
 */
double variance_Block(short *b1, int pitch, int dimension)
{
    short ip[8][8] = {{0}};
    short *b = b1;
    int i, j;
    double mean = 0.0, variance = 0.0;

    for (i = 0; i < dimension; i++)
    {
        for (j = 0; j < dimension; j++)
        {
            ip[i][j] = b[j];
            mean += ip[i][j];
        }
        b += pitch;
    }
    mean /= (dimension * dimension);

    for (i = 0; i < dimension; i++)
    {
        for (j = 0; j < dimension; j++)
        {
            variance += (ip[i][j] - mean) * (ip[i][j] - mean);
        }
    }
    variance /= (dimension * dimension);
    return variance;
}

/* Returns the mean of a dimension x dimension block of residuals read from
 * b with row stride pitch.
 */
double mean_Block(short *b, int pitch, int dimension)
{
    int i, j;
    double mean = 0;

    for (i = 0; i < dimension; i++)
    {
        for (j = 0; j < dimension; j++)
        {
            mean += b[j];
        }
        b += pitch;
    }
    mean /= (dimension * dimension);
    return mean;
}

/* Returns the sum of squared values (total residual energy) of a
 * dimension x dimension block read from b with row stride pitch.
 */
int SSE_Block(short *b, int pitch, int dimension)
{
    int i, j, sse_block = 0;

    for (i = 0; i < dimension; i++)
    {
        for (j = 0; j < dimension; j++)
        {
            sse_block += b[j] * b[j];
        }
        b += pitch;
    }
    return sse_block;
}
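
/* For the same block the three helpers above are linked by the identity
 * SSE / N = variance + mean^2, with N = dimension * dimension, since
 * sum(b^2) = sum((b - mean)^2) + N * mean^2. SSE_Block is therefore the
 * cheapest of the three when only total residual energy is needed.
 */
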
/* Treats the variances of the four 8x8 sub-blocks of the 16x16 luma
 * residual as a probability distribution and returns its entropy in bits.
 */
double Compute_Variance_Entropy(MACROBLOCK *x)
{
    double variance_8[4] = {0.0, 0.0, 0.0, 0.0}, sum_var = 0.0, all_entropy = 0.0;

    /* Blocks 0, 2, 8 and 10 are the top-left 4x4 blocks of the four 8x8
     * quadrants; src_diff is laid out with a pitch of 16.
     */
    variance_8[0] = variance_Block(x->block[0].src_diff, 16, 8);
    variance_8[1] = variance_Block(x->block[2].src_diff, 16, 8);
    variance_8[2] = variance_Block(x->block[8].src_diff, 16, 8);
    variance_8[3] = variance_Block(x->block[10].src_diff, 16, 8);
    sum_var = variance_8[0] + variance_8[1] + variance_8[2] + variance_8[3];

    if (sum_var)
    {
        int i;
        for (i = 0; i < 4; i++)
        {
            if (variance_8[i])
            {
                variance_8[i] /= sum_var;
                all_entropy -= variance_8[i] * log(variance_8[i]);
            }
        }
    }
    /* Convert from nats to bits. */
    return (all_entropy / log(2));
}
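
/* The normalized entropy above ranges from 0 (all residual energy in one
 * 8x8 sub-block) to log2(4) = 2 bits (energy split evenly across all four),
 * since the normalized variances form a 4-outcome probability distribution.
 */
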
/* Entropy criterion variant weighted by sub-block SSE; see the
 * WTD_SSE_ENTROPY_CRITERIA defines above.
 */
double Compute_Wtd_SSE_SubEntropy(MACROBLOCK *x)
{
    double variance_8[4] = {0.0, 0.0, 0.0, 0.0};