/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vpx_ports/config.h"
#include "encodemb.h"
#include "encodemv.h"
#include "vp8/common/extend.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
#include "vp8/common/findnearmv.h"
#include "vp8/common/reconintra.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x) &cpi->common.rtcd.x
#define IF_RTCD(x) (x)
#else
#define RTCD(x) NULL
#define IF_RTCD(x) NULL
#endif
#if CONFIG_SEGMENTATION
#define SEEK_SEGID 12
#define SEEK_SAMEID 4
#define SEEK_DIFFID 7
#endif
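/* Added note (inferred from usage in encode_mb_row() below, not from the
 * original sources): these are offsets into the segment_counts[] array --
 * entries [0..3] count raw segment ids, [SEEK_SAMEID..SEEK_SAMEID+2] and
 * [SEEK_DIFFID..SEEK_DIFFID+2] count same/different-id events per spatial
 * context, [10]/[11] hold the same/different totals, and [SEEK_SEGID..]
 * counts the coded segment ids.
 */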
#ifdef ENC_DEBUG
int enc_debug=0;
int mb_row_debug, mb_col_debug;
#endif
extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
MACROBLOCK *x,
MB_ROW_COMP *mbr_ei,
int mb_row,
int count);
void vp8_build_block_offsets(MACROBLOCK *x);
void vp8_setup_block_ptrs(MACROBLOCK *x);
int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
#ifdef MODE_STATS
unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int y_modes[5] = {0, 0, 0, 0, 0};
unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif
/* activity_avg must be positive, or flat regions could get a zero weight
* (infinite lambda), which confounds analysis.
* This also avoids the need for divide by zero checks in
* vp8_activity_masking().
*/
#define VP8_ACTIVITY_AVG_MIN (64)
/* This is used as a reference when computing the source variance for the
* purposes of activity masking.
* Eventually this should be replaced by custom no-reference routines,
* which will be faster.
*/
static const unsigned char VP8_VAR_OFFS[16]=
{
128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
};
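/* Added note: comparing the source against this flat 128 surface works
 * because variance is invariant under a constant offset -- Var(x - 128) ==
 * Var(x) -- so an ordinary two-input variance kernel doubles as a
 * no-reference source-variance routine.
 */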
#if CONFIG_T8X8
//INTRA mode transform size
//When all three criteria are off the default is 4x4
//#define INTRA_VARIANCE_ENTROPY_CRITERIA
#define INTRA_WTD_SSE_ENTROPY_CRITERIA
//#define INTRA_TEST_8X8_ONLY
//
//INTER mode transform size
//When all three criteria are off the default is 4x4
//#define INTER_VARIANCE_ENTROPY_CRITERIA
#define INTER_WTD_SSE_ENTROPY_CRITERIA
//#define INTER_TEST_8X8_ONLY
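// Added note: the helpers below operate on the top-left dimension x dimension
// corner of a residual block; pitch is the row stride of the src_diff buffer
// (16 for a macroblock).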
double variance_Block(short *b1, int pitch, int dimension)
{
short ip[8][8]={{0}};
short *b = b1;
int i, j = 0;
double mean = 0.0, variance = 0.0;
for (i = 0; i < dimension; i++)
{
for (j = 0; j < dimension; j++)
{
ip[i][j] = b[j];
mean += ip[i][j];
}
b += pitch;
}
mean /= (dimension*dimension);
for (i = 0; i < dimension; i++)
{
for (j = 0; j < dimension; j++)
{
variance += (ip[i][j]-mean)*(ip[i][j]-mean);
}
}
variance /= (dimension*dimension);
return variance;
}
double mean_Block(short *b, int pitch, int dimension)
{
short ip[8][8]={{0}};
int i, j = 0;
double mean = 0;
for (i = 0; i < dimension; i++)
{
for (j = 0; j < dimension; j++)
{
ip[i][j] = b[j];
mean += ip[i][j];
}
b += pitch;
}
mean /= (dimension*dimension);
return mean;
}
int SSE_Block(short *b, int pitch, int dimension)
{
int i, j, sse_block = 0;
for (i = 0; i < dimension; i++)
{
for (j = 0; j < dimension; j++)
{
sse_block += b[j]*b[j];
}
b += pitch;
}
return sse_block;
}
double Compute_Variance_Entropy(MACROBLOCK *x)
{
double variance_8[4] = {0.0, 0.0, 0.0, 0.0}, sum_var = 0.0, all_entropy = 0.0;
variance_8[0] = variance_Block(x->block[0].src_diff, 16, 8);
variance_8[1] = variance_Block(x->block[2].src_diff, 16, 8);
variance_8[2] = variance_Block(x->block[8].src_diff, 16, 8);
variance_8[3] = variance_Block(x->block[10].src_diff, 16, 8);
sum_var = variance_8[0] + variance_8[1] + variance_8[2] + variance_8[3];
if(sum_var)
{
int i;
for(i = 0; i <4; i++)
{
if(variance_8[i])
{
variance_8[i] /= sum_var;
all_entropy -= variance_8[i]*log(variance_8[i]);
}
}
}
return (all_entropy /log(2));
}
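/* Worked example (added note): if the four 8x8 variances are equal, each
 * normalized share is 1/4 and the entropy is log2(4) = 2 bits, comfortably
 * above the thresholds used below; if one 8x8 block holds all the energy
 * the entropy is 0, so concentrated residuals keep the 4x4 transform.
 */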
double Compute_Wtd_SSE_SubEntropy(MACROBLOCK *x)
{
double variance_8[4] = {0.0, 0.0, 0.0, 0.0};
double entropy_8[4] = {0.0, 0.0, 0.0, 0.0};
double sse_1, sse_2, sse_3, sse_4, sse_0;
int i;
for (i=0;i<3;i+=2)
{
sse_0 = SSE_Block(x->block[i].src_diff, 16, 8);
if(sse_0)
{
sse_1 = SSE_Block(x->block[i].src_diff, 16, 4)/sse_0;
sse_2 = SSE_Block(x->block[i+1].src_diff, 16, 4)/sse_0;
sse_3 = SSE_Block(x->block[i+4].src_diff, 16, 4)/sse_0;
sse_4 = SSE_Block(x->block[i+5].src_diff, 16, 4)/sse_0;
variance_8[i]= variance_Block(x->block[i].src_diff, 16, 8);
if(sse_1 && sse_2 && sse_3 && sse_4)
entropy_8[i]= (-sse_1*log(sse_1)
-sse_2*log(sse_2)
-sse_3*log(sse_3)
-sse_4*log(sse_4))/log(2);
}
}
for (i=8;i<11;i+=2)
{
        sse_0 = SSE_Block(x->block[i].src_diff, 16, 8);
        if(sse_0)
        {
sse_1 = SSE_Block(x->block[i].src_diff, 16, 4)/sse_0;
sse_2 = SSE_Block(x->block[i+1].src_diff, 16, 4)/sse_0;
sse_3 = SSE_Block(x->block[i+4].src_diff, 16, 4)/sse_0;
sse_4 = SSE_Block(x->block[i+5].src_diff, 16, 4)/sse_0;
variance_8[i-7]= variance_Block(x->block[i].src_diff, 16, 8);
if(sse_1 && sse_2 && sse_3 && sse_4)
entropy_8[i-7]= (-sse_1*log(sse_1)
-sse_2*log(sse_2)
-sse_3*log(sse_3)
-sse_4*log(sse_4))/log(2);
}
}
if(variance_8[0]+variance_8[1]+variance_8[2]+variance_8[3])
return (entropy_8[0]*variance_8[0]+
entropy_8[1]*variance_8[1]+
entropy_8[2]*variance_8[2]+
entropy_8[3]*variance_8[3])/
(variance_8[0]+
variance_8[1]+
variance_8[2]+
variance_8[3]);
else
return 0;
}
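/* Worked example (added note): within one 8x8 block, four 4x4 quadrants
 * each carrying 1/4 of the SSE give a sub-entropy of
 * 4 * (1/4) * log2(4) = 2 bits; the per-8x8 entropies are then averaged
 * with variance weights, so 8x8 wins only when the energetic blocks spread
 * their energy evenly across quadrants.
 */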
int vp8_8x8_selection_intra(MACROBLOCK *x)
{
#ifdef INTRA_VARIANCE_ENTROPY_CRITERIA
return (Compute_Variance_Entropy(x) > 1.2);
#elif defined(INTRA_WTD_SSE_ENTROPY_CRITERIA)
return (Compute_Wtd_SSE_SubEntropy(x) > 1.2);
#elif defined(INTRA_TEST_8X8_ONLY)
return 1;
#else
return 0; //when all criteria are off use the default 4x4 only
#endif
}
int vp8_8x8_selection_inter(MACROBLOCK *x)
{
#ifdef INTER_VARIANCE_ENTROPY_CRITERIA
return (Compute_Variance_Entropy(x) > 1.5);
#elif defined(INTER_WTD_SSE_ENTROPY_CRITERIA)
return (Compute_Wtd_SSE_SubEntropy(x) > 1.5);
#elif defined(INTER_TEST_8X8_ONLY)
return 1;
#else
return 0; //when all criteria are off use the default 4x4 only
#endif
}
#endif
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int act;
unsigned int sse;
/* TODO: This could also be done over smaller areas (8x8), but that would
* require extensive changes elsewhere, as lambda is assumed to be fixed
* over an entire MB in most of the code.
* Another option is to compute four 8x8 variances, and pick a single
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
act = act<<4;
/* If the region is flat, lower the activity some more. */
if (act < 8<<12)
act = act < 5<<12 ? act : 5<<12;
    return act;
}

// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure( VP8_COMP *cpi,
                                          MACROBLOCK *x, int use_dc_pred )
{
    return vp8_encode_intra(cpi, x, use_dc_pred);
}
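/* A rough sketch (added, not in the original) of the 8x8 option mentioned
 * in the TODO above: take four 8x8 variances and keep the smallest, so one
 * flat quadrant is enough to lower the lambda for the whole MB.  Disabled
 * with #if 0 since nothing calls it; the var8x8 hook and the <<4 scaling
 * are assumed to mirror the 16x16 version.
 */
#if 0
static unsigned int min_8x8_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
    unsigned int i, act, min_act = (unsigned int)-1;
    unsigned int sse;

    for (i = 0; i < 4; i++)
    {
        // top-left corner of each 8x8 quadrant
        unsigned char *src = x->src.y_buffer
                             + (i >> 1) * 8 * x->src.y_stride + (i & 1) * 8;

        act = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8)(src,
                  x->src.y_stride, VP8_VAR_OFFS, 0, &sse);

        if (act < min_act)
            min_act = act;
    }

    return min_act << 4;
}
#endif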
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
int mb_row, int mb_col)
{
unsigned int mb_activity;
    if ( ALT_ACT_MEASURE )
    {
        int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);

        // Or use an alternative.
        mb_activity = alt_activity_measure( cpi, x, use_dc_pred );
    }
    else
    {
        // Original activity measure from Tim T's code.
        mb_activity = tt_activity_measure( cpi, x );
    }
if ( mb_activity < VP8_ACTIVITY_AVG_MIN )
mb_activity = VP8_ACTIVITY_AVG_MIN;
return mb_activity;
}
// Calculate an "average" mb activity value for the frame
static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
{
#if ACT_MEDIAN
// Find median: Simple n^2 algorithm for experimentation
{
unsigned int median;
unsigned int i,j;
unsigned int * sortlist;
unsigned int tmp;
// Create a list to sort to
CHECK_MEM_ERROR(sortlist,
vpx_calloc(sizeof(unsigned int),
cpi->common.MBs));
// Copy map to sort list
vpx_memcpy( sortlist, cpi->mb_activity_map,
sizeof(unsigned int) * cpi->common.MBs );
// Ripple each value down to its correct position
for ( i = 1; i < cpi->common.MBs; i ++ )
{
for ( j = i; j > 0; j -- )
{
if ( sortlist[j] < sortlist[j-1] )
{
// Swap values
tmp = sortlist[j-1];
sortlist[j-1] = sortlist[j];
sortlist[j] = tmp;
}
else
break;
}
}
// Even number MBs so estimate median as mean of two either side.
median = ( 1 + sortlist[cpi->common.MBs >> 1] +
sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1;
cpi->activity_avg = median;
vpx_free(sortlist);
}
#else
// Simple mean for now
cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
// Experimental code: return fixed value normalized for several clips
    if ( ALT_ACT_MEASURE )
        cpi->activity_avg = 100000;
#endif
}
#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
{
VP8_COMMON *const cm = & cpi->common;
int mb_row, mb_col;
int64_t act;
int64_t a;
int64_t b;
#if OUTPUT_NORM_ACT_STATS
FILE *f = fopen("norm_act.stt", "a");
fprintf(f, "\n%12d\n", cpi->activity_avg );
#endif
// Reset pointers to start of activity map
x->mb_activity_ptr = cpi->mb_activity_map;
// Calculate normalized mb activity number.
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
// Read activity from the map
act = *(x->mb_activity_ptr);
// Calculate a normalized activity number
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
            if ( b >= a )
                *(x->activity_ptr) = (int)((b + (a>>1))/a) - 1;
            else
                *(x->activity_ptr) = 1 - (int)((a + (b>>1))/b);

#if OUTPUT_NORM_ACT_STATS
            fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
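            /* Worked example (added note): act == activity_avg makes a == b,
             * so either branch yields 0; act == 4*activity_avg gives
             * (17 + 4)/8 - 1 = +1, and act == activity_avg/4 gives
             * 1 - 5.25/2 = -1 after the integer divide, so the index steps
             * by one per roughly 4x change in activity.
             */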
// Increment activity map pointers
x->mb_activity_ptr++;
}
#if OUTPUT_NORM_ACT_STATS
fprintf(f, "\n");
#endif
}
#if OUTPUT_NORM_ACT_STATS
fclose(f);
#endif
}
#endif
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map( VP8_COMP *cpi )
{
    MACROBLOCK *const x = & cpi->mb;
    MACROBLOCKD *xd = &x->e_mbd;
    VP8_COMMON *const cm = & cpi->common;
#if ALT_ACT_MEASURE
YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
int recon_yoffset;
int recon_y_stride = new_yv12->y_stride;
#endif
    int mb_row, mb_col;
    unsigned int mb_activity;
    int64_t activity_sum = 0;
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
#if ALT_ACT_MEASURE
// reset above block coeffs
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
#if ALT_ACT_MEASURE
xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
#endif
//Copy current mb to a buffer
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );
// Keep frame sum
activity_sum += mb_activity;
// Store MB level activity details.
*x->mb_activity_ptr = mb_activity;
// Increment activity map pointer
x->mb_activity_ptr++;
// adjust to the next column of source macroblocks
x->src.y_buffer += 16;
}
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
#if ALT_ACT_MEASURE
//extend the recon for intra prediction
vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
#endif
}
// Calculate an "average" MB activity
calc_av_activity(cpi, activity_sum);
#if USE_ACT_INDEX
    // Calculate an activity index number for each mb
    calc_activity_index( cpi, x );
#endif
}
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
{
#if USE_ACT_INDEX
x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
    x->errorperbit += (x->errorperbit==0);
#else
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + (2*cpi->activity_avg);
b = (2*act) + cpi->activity_avg;
x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a);
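    /* Added note: the factor b/a = (2*act + avg)/(act + 2*avg) is 1 when
     * act == avg, tends to 2 as act >> avg and to 1/2 as act -> 0, so
     * activity masking never scales rdmult by more than 2x either way.
     */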
x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
    x->errorperbit += (x->errorperbit==0);
#endif

    // Activity based Zbin adjustment
    adjust_act_zbin(cpi, x);
}
static
void encode_mb_row(VP8_COMP *cpi,
VP8_COMMON *cm,
int mb_row,
MACROBLOCK *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
int *segment_counts,
int *totalrate)
{
int i;
int recon_yoffset, recon_uvoffset;
int mb_col;
int ref_fb_idx = cm->lst_fb_idx;
int dst_fb_idx = cm->new_fb_idx;
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
int map_index = (mb_row * cpi->common.mb_cols);
#if CONFIG_SEGMENTATION
    int sum;
#endif
#if CONFIG_MULTITHREAD
const int nsync = cpi->mt_sync_range;
const int rightmost_col = cm->mb_cols - 1;
volatile const int *last_row_current_mb_col;
if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
else
        last_row_current_mb_col = &rightmost_col;
#endif
xd->above_context = cm->above_context;
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8);
cpi->tplist[mb_row].start = *tp;
//printf("Main mb_row = %d\n", mb_row);
// Distance of Mb to the top & bottom edges, specified in 1/8th pel
// units as they are always compared to values that are in 1/8th pel units
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
// Set up limit values for vertical motion vector components
// to prevent them extending beyond the UMV borders
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
+ (VP8BORDERINPIXELS - 16);
// Set the mb activity pointer to the start of the row.
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
#ifdef ENC_DEBUG
//enc_debug = (cpi->count==29 && mb_row==5 && mb_col==0);
enc_debug = (cpi->count==4 && mb_row==17 && mb_col==13);
mb_col_debug=mb_col;
mb_row_debug=mb_row;
#endif
// Distance of Mb to the left & right edges, specified in
// 1/8th pel units as they are always compared to values
// that are in 1/8th pel units
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
// Set up limit values for horizontal motion vector components
// to prevent them extending beyond the UMV borders
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
+ (VP8BORDERINPIXELS - 16);
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
//Copy current mb to a buffer
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
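        /* Added note: with row-based multi-threading each row polls until
         * the row above has advanced at least mt_sync_range MBs past this
         * column, so the above context and reconstructed pixels this MB
         * depends on are already final. */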
#if CONFIG_MULTITHREAD
if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
{
if ((mb_col & (nsync - 1)) == 0)
{
while (mb_col > (*last_row_current_mb_col - nsync)
&& (*last_row_current_mb_col) != (cm->mb_cols - 1))
{
x86_pause_hint();
thread_sleep(0);
}
}
}
#endif
// Is segmentation enabled
        // MB level adjustment to quantizer
if (xd->segmentation_enabled)
{
// Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
#if CONFIG_SEGMENTATION
            // Reset segment_id to 0 or 1 so that the default transform mode is 4x4
            if (cpi->segmentation_map[map_index+mb_col] <= 3)
                xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col]&1;
#else
            if (cpi->segmentation_map[map_index+mb_col] <= 3)
                xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
#endif
            else
                xd->mode_info_context->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
}
else
xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
x->active_ptr = cpi->active_map + map_index + mb_col;
if (cm->frame_type == KEY_FRAME)
{
*totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
            //Note the encoder may have changed the segment_id

#ifdef MODE_STATS
            y_modes[xd->mode_info_context->mbmi.mode] ++;
#endif
}
else
{
*totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset);
            //Note the encoder may have changed the segment_id

#ifdef MODE_STATS
            inter_y_modes[xd->mode_info_context->mbmi.mode] ++;

            if (xd->mode_info_context->mbmi.mode == SPLITMV)
            {
                int b;

                for (b = 0; b < x->partition_info->count; b++)
                    inter_b_modes[x->partition_info->bmi[b].mode] ++;
            }
#endif
            // Count of last ref frame 0,0 usage
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
// Special case code for cyclic refresh
// If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
// during vp8cx_encode_inter_macroblock()) back into the global sgmentation map
if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
{
cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
                // If the block has been refreshed mark it as clean (the magnitude of the -ve influences
                // how long it will be before we consider another refresh).
                // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it
                // as a candidate for cleanup next time (marked 0); else mark it as dirty (1).
if (xd->mode_info_context->mbmi.segment_id)
cpi->cyclic_refresh_map[map_index+mb_col] = -1;
                else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
                {
                    if (cpi->cyclic_refresh_map[map_index+mb_col] == 1)
                        cpi->cyclic_refresh_map[map_index+mb_col] = 0;
                }
                else
                    cpi->cyclic_refresh_map[map_index+mb_col] = 1;
}
}
cpi->tplist[mb_row].stop = *tp;
        // Increment pointer into gf usage flags structure.
x->gf_active_ptr++;
// Increment the activity mask pointers.
x->mb_activity_ptr++;
#if CONFIG_SEGMENTATION
        if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
            xd->mode_info_context->mbmi.segment_id = 0;
        else
            xd->mode_info_context->mbmi.segment_id = 1;
#endif
/* save the block info */
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i] = xd->block[i].bmi;
// adjust to the next column of macroblocks
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;
recon_yoffset += 16;
recon_uvoffset += 8;
#if CONFIG_SEGMENTATION
        //cpi->segmentation_map[mb_row * cm->mb_cols + mb_col] = xd->mbmi.segment_id;
        if (cm->frame_type == KEY_FRAME)
        {
            segment_counts[xd->mode_info_context->mbmi.segment_id]++;
        }
        else
        {
            sum = 0;

            if (mb_col != 0)
                sum += (xd->mode_info_context-1)->mbmi.segment_flag;

            if (mb_row != 0)
                sum += (xd->mode_info_context-cm->mb_cols)->mbmi.segment_flag;

            if (xd->mode_info_context->mbmi.segment_id == cpi->segmentation_map[(mb_row*cm->mb_cols) + mb_col])
                xd->mode_info_context->mbmi.segment_flag = 0;
            else
                xd->mode_info_context->mbmi.segment_flag = 1;

            if (xd->mode_info_context->mbmi.segment_flag == 0)
            {
                segment_counts[SEEK_SAMEID + sum]++;
                segment_counts[10]++;
            }
            else
            {
                segment_counts[SEEK_DIFFID + sum]++;
                segment_counts[11]++;
                //calculate individual segment ids
                segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
            }
        }

        segment_counts[SEEK_SEGID + xd->mode_info_context->mbmi.segment_id] ++;
#else
        segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
#endif
#if CONFIG_MULTITHREAD
if (cpi->b_multi_threaded != 0)
{
cpi->mt_current_mb_col[mb_row] = mb_col;
}
#endif
}
//extend the recon for intra prediction
vp8_extend_mb_row(
&cm->yv12_fb[dst_fb_idx],
xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8,
xd->dst.v_buffer + 8);
// this is to account for the border
xd->mode_info_context++;
#if CONFIG_MULTITHREAD
if ((cpi->b_multi_threaded != 0) && (mb_row == cm->mb_rows - 1))
{
sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */
}
#endif
}
void init_encode_frame_mb_context(VP8_COMP *cpi)
{
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
// GF active flags data structure
x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
// Activity map pointer
x->mb_activity_ptr = cpi->mb_activity_map;
x->vector_range = 32;
x->act_zbin_adj = 0;
x->partition_info = x->pi;
xd->mode_info_context = cm->mi;
xd->mode_info_stride = cm->mode_info_stride;
xd->frame_type = cm->frame_type;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
    // Copy data over into macro block data structures.
x->src = * cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
// set up frame for intra coded blocks
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp8_build_block_offsets(x);
vp8_setup_block_dptrs(&x->e_mbd);
vp8_setup_block_ptrs(x);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
xd->left_context = &cm->left_context;
vp8_zero(cpi->count_mb_ref_frame_usage)
vp8_zero(cpi->ymode_count)
vp8_zero(cpi->uv_mode_count)
x->mvc = cm->fc.mvc;
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
xd->ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cpi->prob_intra_coded);
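    /* Added note: the reference frame is signalled as a small binary tree --
     * intra/inter flag, then LAST vs (GOLDEN/ALTREF), then GOLDEN vs ALTREF --
     * so each cost below sums the bit costs along its path.  The hard-wired
     * 255/128 probabilities in the LAST-only case make the golden/altref
     * branches as expensive as possible, since they should never be taken.
     */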
// Special case treatment when GF and ARF are not sensible options for reference
if (cpi->ref_frame_flags == VP8_LAST_FLAG)
{
xd->ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
+ vp8_cost_zero(255);
xd->ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
+ vp8_cost_one(255)
+ vp8_cost_zero(128);
xd->ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
+ vp8_cost_one(255)
+ vp8_cost_one(128);
}
else
{
xd->ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
+ vp8_cost_zero(cpi->prob_last_coded);
xd->ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
+ vp8_cost_one(cpi->prob_last_coded)
+ vp8_cost_zero(cpi->prob_gf_coded);
xd->ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
+ vp8_cost_one(cpi->prob_last_coded)
+ vp8_cost_one(cpi->prob_gf_coded);
    }
}
void vp8_encode_frame(VP8_COMP *cpi)
{
int mb_row;
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
TOKENEXTRA *tp = cpi->tok;
#if CONFIG_SEGMENTATION
int segment_counts[MAX_MB_SEGMENTS + SEEK_SEGID];
    int new_cost, original_cost;
#else
    int segment_counts[MAX_MB_SEGMENTS];
#endif
    int totalrate;

    totalrate = 0;
if (cpi->compressor_speed == 2)
{
if (cpi->oxcf.cpu_used < 0)
cpi->Speed = -(cpi->oxcf.cpu_used);
else
vp8_auto_select_speed(cpi);
}
// Functions setup for all frame types so we can use MC in AltRef
    if (cm->mcomp_filter_type == SIXTAP)
    {
        xd->subpixel_predict = SUBPIX_INVOKE(
            &cpi->common.rtcd.subpix, sixtap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap16x16);
}
else
{
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear16x16);
}
    // Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
vpx_memset(segment_counts, 0, sizeof(segment_counts));
cpi->prediction_error = 0;
cpi->intra_error = 0;
cpi->skip_true_count = 0;
cpi->skip_false_count = 0;
#if 0
// Experimental code
cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
#endif
xd->mode_info_context = cm->mi;
vp8_zero(cpi->MVcount);
vp8_zero(cpi->coef_counts);
vp8cx_frame_init_quantizer(cpi);
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp8cx_initialize_me_consts(cpi, cm->base_qindex);