/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>
#include <math.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"

#define GF_ZEROMV_ZBIN_BOOST 0
#define LF_ZEROMV_ZBIN_BOOST 0
#define MV_ZBIN_BOOST        0
#define SPLIT_MV_ZBIN_BOOST  0
#define INTRA_ZBIN_BOOST     0
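// Note: the zbin boost values above are all currently zero, so the boost
// mechanism is effectively disabled.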

static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) {
  switch (subsize) {
    case BLOCK_64X64:
    case BLOCK_64X32:
    case BLOCK_32X64:
    case BLOCK_32X32:
      return &x->sb_index;
    case BLOCK_32X16:
    case BLOCK_16X32:
    case BLOCK_16X16:
      return &x->mb_index;
    case BLOCK_16X8:
    case BLOCK_8X16:
    case BLOCK_8X8:
      return &x->b_index;
    case BLOCK_8X4:
    case BLOCK_4X8:
    case BLOCK_4X4:
      return &x->ab_index;
    default:
      assert(0);
      return NULL;
  }
}

static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize);

static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);

// activity_avg must be positive, or flat regions could get a zero weight
//  (infinite lambda), which confounds analysis.
// This also avoids the need for divide by zero checks in
//  vp9_activity_masking().
#define ACTIVITY_AVG_MIN 64
// Motion vector component magnitude threshold for defining fast motion.
#define FAST_MOTION_MV_THRESH 24
// This is used as a reference when computing the source variance for the
//  purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
//  which will be faster.
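// Comparing the source against this flat mid-gray block (with stride 0)
// makes the variance functions below return the plain variance of the
// source pixels themselves.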
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};

static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi,
                                              MACROBLOCK *x,
                                              BLOCK_SIZE bs) {
  unsigned int var, sse;
  var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                           VP9_VAR_OFFS, 0, &sse);
  // Normalize by the pixel count of the block to get a per-pixel variance.
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   MACROBLOCK *x,
                                                   int mi_row,
                                                   int mi_col,
                                                   BLOCK_SIZE bs) {
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
  int offset = (mi_row * MI_SIZE) * yv12->y_stride + (mi_col * MI_SIZE);
  unsigned int var, sse;
  var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
                           x->plane[0].src.stride,
                           yv12->y_buffer + offset,
                           yv12->y_stride,
                           &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi,
                                                   int mi_row,
                                                   int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
  if (var < 8)
    return BLOCK_64X64;
  else if (var < 128)
    return BLOCK_32X32;
  else if (var < 2048)
    return BLOCK_16X16;
  else
    return BLOCK_8X8;
}

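// Variance-based fixed partition choice for the non-RD (real-time) path;
// note the much tighter thresholds than in the RD path above.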
static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
                                                      int mi_row,
                                                      int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
  if (var < 4)
    return BLOCK_64X64;
  else if (var < 10)
    return BLOCK_32X32;
  else
    return BLOCK_16X16;
}

// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_modeinfo_offsets(VP9_COMMON *const cm,
                                        MACROBLOCKD *const xd,
                                        int mi_row,
                                        int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
}

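// Returns whether any 16x16 macroblock covered by the given block is marked
// active in the encoder's active map.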
static int is_block_in_mb_map(const VP9_COMP *cpi, int mi_row, int mi_col,
                              BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  const int mb_rows = cm->mb_rows;
  const int mb_cols = cm->mb_cols;
  const int mb_row = mi_row >> 1;
  const int mb_col = mi_col >> 1;
  const int mb_width = num_8x8_blocks_wide_lookup[bsize] >> 1;
  const int mb_height = num_8x8_blocks_high_lookup[bsize] >> 1;
  int r, c;
  if (bsize <= BLOCK_16X16) {
    return cpi->active_map[mb_row * mb_cols + mb_col];
  }
  for (r = 0; r < mb_height; ++r) {
    for (c = 0; c < mb_width; ++c) {
      int row = mb_row + r;
      int col = mb_col + c;
      if (row >= mb_rows || col >= mb_cols)
        continue;
      if (cpi->active_map[row * mb_cols + col])
        return 1;
    }
  }
  return 0;
}

static int check_active_map(const VP9_COMP *cpi, const MACROBLOCK *x,
                            int mi_row, int mi_col,
                            BLOCK_SIZE bsize) {
  if (cpi->active_map_enabled && !x->e_mbd.lossless) {
    return is_block_in_mb_map(cpi, mi_row, mi_col, bsize);
  } else {
    return 1;
  }
}

static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        int mi_row, int mi_col, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mb_row = mi_row >> 1;
  const int mb_col = mi_col >> 1;
  const int idx_map = mb_row * cm->mb_cols + mb_col;
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  // Activity map pointer
  x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
  x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);

  set_modeinfo_offsets(cm, xd, mi_row, mi_col);

  mbmi = &xd->mi[0]->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond this range do not produce a new or different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->RDDIV;
  x->rdmult = cpi->RDMULT;

  // Setup segment ID.
  if (seg->enabled) {
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}

static void duplicate_mode_info_in_sb(VP9_COMMON * const cm,
                                     MACROBLOCKD *const xd,
                                     int mi_row,
                                     int mi_col,
                                     BLOCK_SIZE bsize) {
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  int i, j;
  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) {
      if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
        xd->mi[j * xd->mi_stride + i] = xd->mi[0];
    }
}

static void set_block_size(VP9_COMP * const cpi,
                           const TileInfo *const tile,
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    MACROBLOCKD *const xd = &cpi->mb.e_mbd;
    set_modeinfo_offsets(&cpi->common, xd, mi_row, mi_col);
    xd->mi[0]->mbmi.sb_type = bsize;
    duplicate_mode_info_in_sb(&cpi->common, xd, mi_row, mi_col, bsize);
  }
}

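// The typedefs below form a variance quadtree over a 64x64 superblock: each
// level stores the variances of its possible partitions plus four children
// one level down, ending at 8x8 leaves. variance_node gives a level-agnostic
// view of a single node.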
typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

typedef struct {
  partition_variance part_variances;
  var split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

typedef enum {
  V16X16,
  V32X32,
  V64X64,
} TREE_LEVEL;

static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];
      break;
    }
    default: {
      assert(0);
    }
  }
}

// Set variance values given sum square error, sum error, count.
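// The stored variance is the population variance of the samples scaled by
// 256, i.e. 256 * (s2 - s * s / c) / c. For example, s2 = 1024, s = 128,
// c = 64 gives 256 * (1024 - 256) / 64 = 3072.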
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->count = c;
  if (c > 0)
    v->variance = (int)(256 *
                        (v->sum_square_error - v->sum_error * v->sum_error /
                         v->count) / v->count);
  else
    v->variance = 0;
}

void sum_2_variances(const var *a, const var *b, var *r) {
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->count + b->count, r);
}

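// split[0..3] are the four quadrants of the block in raster order
// (top-left, top-right, bottom-left, bottom-right), so pairing 0+1 and 2+3
// gives the top and bottom halves of a horizontal split, 0+2 and 1+3 the
// left and right halves of a vertical split, and summing the two vertical
// halves covers the whole block ("none").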
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}

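// Attempt to fix the partitioning at this level: returns 1 after setting
// block sizes if the unsplit, vertical, or horizontal arrangement has all
// of its variances below the threshold; returns 0 if the caller should
// split further.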
static int set_vt_partitioning(VP9_COMP *cpi,
                               void *data,
                               const TileInfo *const tile,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col,
                               int mi_size) {
  VP9_COMMON * const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  // TODO(debargha): Choose this more intelligently.
  const int64_t threshold_multiplier = 25;
  int64_t threshold = threshold_multiplier * cpi->common.base_qindex;
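  // The threshold scales linearly with the frame's base quantizer index, so
  // more variance is tolerated within a single block at higher Q.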
  assert(block_height == block_width);

  tree_to_node(data, bsize, &vt);

  // The unsplit ("none") partition is allowed only if more than half the
  // block, in both width and height, lies inside the visible image.
  if (mi_col + block_width / 2 < cm->mi_cols &&
      mi_row + block_height / 2 < cm->mi_rows &&
      vt.part_variances->none.variance < threshold) {
    set_block_size(cpi, tile, mi_row, mi_col, bsize);
    return 1;
  }

  // Vertical split is available on all but the bottom border.
  if (mi_row + block_height / 2 < cm->mi_rows &&
      vt.part_variances->vert[0].variance < threshold &&
      vt.part_variances->vert[1].variance < threshold) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
    set_block_size(cpi, tile, mi_row, mi_col, subsize);
    set_block_size(cpi, tile, mi_row, mi_col + block_width / 2, subsize);
    return 1;
  }

  // Horizontal split is available on all but the right border.
  if (mi_col + block_width / 2 < cm->mi_cols &&
      vt.part_variances->horz[0].variance < threshold &&
      vt.part_variances->horz[1].variance < threshold) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
    set_block_size(cpi, tile, mi_row, mi_col, subsize);
    set_block_size(cpi, tile, mi_row + block_height / 2, mi_col, subsize);
    return 1;
  }
  return 0;
}

// TODO(debargha): Fix this function and make it work as expected.
static void choose_partitioning(VP9_COMP *cpi,
                                const TileInfo *const tile,
                                int mi_row, int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK *x = &cpi->mb;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;

  int i, j, k;
  v64x64 vt;
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
  int_mv nearest_mv, near_mv;
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
  const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;

  vp9_zero(vt);
  set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  if (cm->frame_type != KEY_FRAME) {
    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf);

    xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
    xd->mi[0]->mbmi.sb_type = BLOCK_64X64;
    vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv,
                          xd->mi[0]->mbmi.ref_mvs[LAST_FRAME],
                          &nearest_mv, &near_mv);

    xd->mi[0]->mbmi.mv[0] = nearest_mv;
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
  }

  // Fill in the entire tree of 8x8 variances for splits.
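  // i indexes the four 32x32 quadrants, j the 16x16 blocks within each, and
  // k the 8x8 blocks; the low bits of each index recover the pixel offsets.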
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      v16x16 *vst = &vt.split[i].split[j];
      for (k = 0; k < 4; k++) {
        int x_idx = x16_idx + ((k & 1) << 3);
        int y_idx = y16_idx + ((k >> 1) << 3);
        unsigned int sse = 0;
        int sum = 0;
        if (x_idx < pixels_wide && y_idx < pixels_high)
          vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp,
                              d + y_idx * dp + x_idx, dp, &sse, &sum);
        fill_variance(sse, sum, 64, &vst->split[k].part_variances.none);
      }
    }
  }
  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
      fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
  }
  fill_variance_tree(&vt, BLOCK_64X64);

  // Now go through the entire structure, splitting every block size until
  // we get to one with a variance lower than our threshold, or we hit 8x8.
  if (!set_vt_partitioning(cpi, &vt, tile, BLOCK_64X64,
                           mi_row, mi_col, 8)) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      if (!set_vt_partitioning(cpi, &vt.split[i], tile, BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx), 4)) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          // NOTE: This is a temporary hack to disable 8x8 partitions,
          // since they work really badly - possibly due to a bug
#define DISABLE_8X8_VAR_BASED_PARTITION
#ifdef DISABLE_8X8_VAR_BASED_PARTITION
          if (mi_row + y32_idx + y16_idx + 1 < cm->mi_rows &&
              mi_col + x32_idx + x16_idx + 1 < cm->mi_cols) {
            set_block_size(cpi, tile,
                           (mi_row + y32_idx + y16_idx),
                           (mi_col + x32_idx + x16_idx),
                           BLOCK_16X16);
          } else {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              set_block_size(cpi, tile,
                             (mi_row + y32_idx + y16_idx + y8_idx),
                             (mi_col + x32_idx + x16_idx + x8_idx),
                             BLOCK_8X8);
            }
          }
#else
          if (!set_vt_partitioning(cpi, &vt.split[i].split[j], tile,
                                   BLOCK_16X16,
                                   (mi_row + y32_idx + y16_idx),
                                   (mi_col + x32_idx + x16_idx), 2)) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              set_block_size(cpi, tile,
                             (mi_row + y32_idx + y16_idx + y8_idx),
                             (mi_col + x32_idx + x16_idx + x8_idx),
                             BLOCK_8X8);
            }
          }
#endif
        }
      }
    }
  }
}

// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(MACROBLOCK *x) {
  unsigned int sse;
  // TODO: This could also be done over smaller areas (8x8), but that would
  // require extensive changes elsewhere, as lambda is assumed to be fixed
  // over an entire MB in most of the code.
  // Another option is to compute four 8x8 variances, and pick a single
  // lambda using a non-linear combination (e.g., the smallest, or second
  // smallest, etc.).
  const unsigned int act = vp9_variance16x16(x->plane[0].src.buf,
                                             x->plane[0].src.stride,
                                             VP9_VAR_OFFS, 0, &sse) << 4;
  // If the region is flat, lower the activity some more.
  return act < (8 << 12) ? MIN(act, 5 << 12) : act;
}

// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(MACROBLOCK *x, int use_dc_pred) {
  return vp9_encode_intra(x, use_dc_pred);
}

// Measure the activity of the current macroblock.
// What we measure here is TBD so abstracted to this function.
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) {
  unsigned int mb_activity;

  if (ALT_ACT_MEASURE) {
    // DC prediction is used only for blocks in the top row or left column,
    // but not for the top-left macroblock itself.
    const int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);

    // Or use an alternative.
    mb_activity = alt_activity_measure(x, use_dc_pred);
  } else {
    // Original activity measure from Tim T's code.
    mb_activity = tt_activity_measure(x);
  }

  return MAX(mb_activity, ACTIVITY_AVG_MIN);
}

// Calculate an "average" mb activity value for the frame
#define ACT_MEDIAN 0
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
John Koleszar's avatar
John Koleszar committed
  // Find median: Simple n^2 algorithm for experimentation
  {
    unsigned int median;
    unsigned int i, j;
    unsigned int *sortlist;
    unsigned int tmp;

    // Create a list to sort to
    CHECK_MEM_ERROR(&cpi->common, sortlist, vpx_calloc(sizeof(unsigned int),
                    cpi->common.MBs));
John Koleszar's avatar
John Koleszar committed

    // Copy map to sort list
    vpx_memcpy(sortlist, cpi->mb_activity_map,
Jim Bankoski's avatar
Jim Bankoski committed
        sizeof(unsigned int) * cpi->common.MBs);
John Koleszar's avatar
John Koleszar committed

    // Ripple each value down to its correct position
    for (i = 1; i < cpi->common.MBs; i ++) {
      for (j = i; j > 0; j --) {
        if (sortlist[j] < sortlist[j - 1]) {
          // Swap values
          tmp = sortlist[j - 1];
          sortlist[j - 1] = sortlist[j];
          sortlist[j] = tmp;
John Koleszar's avatar
John Koleszar committed
      }
    }
John Koleszar's avatar
John Koleszar committed
    // Even number MBs so estimate median as mean of two either side.
    median = (1 + sortlist[cpi->common.MBs >> 1] +
Jim Bankoski's avatar
Jim Bankoski committed
        sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;
John Koleszar's avatar
John Koleszar committed
    cpi->activity_avg = median;
John Koleszar's avatar
John Koleszar committed
    vpx_free(sortlist);
  }
John Koleszar's avatar
John Koleszar committed
  // Simple mean for now
Jim Bankoski's avatar
Jim Bankoski committed
  cpi->activity_avg = (unsigned int) (activity_sum / cpi->common.MBs);
#endif  // ACT_MEDIAN
  if (cpi->activity_avg < ACTIVITY_AVG_MIN)
    cpi->activity_avg = ACTIVITY_AVG_MIN;
John Koleszar's avatar
John Koleszar committed
  // Experimental code: return fixed value normalized for several clips
  if (ALT_ACT_MEASURE)
    cpi->activity_avg = 100000;
#define USE_ACT_INDEX   0
#define OUTPUT_NORM_ACT_STATS   0

#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_row, mb_col;

  int64_t act;
  int64_t a;
  int64_t b;

#if OUTPUT_NORM_ACT_STATS
  FILE *f = fopen("norm_act.stt", "a");
  fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif

  // Reset pointers to start of activity map
  x->mb_activity_ptr = cpi->mb_activity_map;

  // Calculate normalized mb activity number.
  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      // Read activity from the map
      act = *(x->mb_activity_ptr);

      // Calculate a normalized activity number
      a = act + 4 * cpi->activity_avg;
      b = 4 * act + cpi->activity_avg;

      if (b >= a)
        *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
      else
        *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);

#if OUTPUT_NORM_ACT_STATS
      fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif

      // Increment activity map pointers
      x->mb_activity_ptr++;
    }

#if OUTPUT_NORM_ACT_STATS
    fprintf(f, "\n");
#endif
  }

#if OUTPUT_NORM_ACT_STATS
  fclose(f);
#endif
}
#endif  // USE_ACT_INDEX

// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;

#if ALT_ACT_MEASURE
  YV12_BUFFER_CONFIG *new_yv12 = get_frame_new_buffer(cm);
  int recon_yoffset;
  int recon_y_stride = new_yv12->y_stride;
#endif

  int mb_row, mb_col;
  unsigned int mb_activity;
  int64_t activity_sum = 0;

  x->mb_activity_ptr = cpi->mb_activity_map;

  // for each macroblock row in image
  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
#if ALT_ACT_MEASURE
    // reset above block coeffs
    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);
#endif

    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
#if ALT_ACT_MEASURE
      xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
      xd->left_available = (mb_col != 0);
      recon_yoffset += 16;
#endif

      // measure activity
      mb_activity = mb_activity_measure(x, mb_row, mb_col);

      // Keep frame sum
      activity_sum += mb_activity;

      // Store MB level activity details.
      *x->mb_activity_ptr = mb_activity;

      // Increment activity map pointer
      x->mb_activity_ptr++;

      // adjust to the next column of source macroblocks
      x->plane[0].src.buf += 16;
    }

    // adjust to the next row of mbs
    x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
  }

  // Calculate an "average" MB activity
  calc_av_activity(cpi, activity_sum);

#if USE_ACT_INDEX
  // Calculate an activity index number of each mb
  calc_activity_index(cpi, x);
#endif
}
// Macroblock activity masking
static void activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#else
  const int64_t act = *(x->mb_activity_ptr);

  // Apply the masking to the RD multiplier.
  // The scaling factor (2 * act + avg) / (act + 2 * avg) ranges over
  // (1/2, 2): busy blocks get a larger rdmult (coarser coding), flat
  // blocks a smaller one.
  const int64_t a = act + (2 * cpi->activity_avg);
  const int64_t b = (2 * act) + cpi->activity_avg;

  x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#endif

  // Activity based Zbin adjustment
  adjust_act_zbin(cpi, x);
}

static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  MODE_INFO *mi_addr = xd->mi[0];
  const struct segmentation *const seg = &cm->seg;
  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->mbmi.sb_type == bsize);

  *mi_addr = *mi;

  // If segmentation in use
  if (seg->enabled && output_enabled) {
    // For in frame complexity AQ copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi_addr->mbmi.segment_id =
        vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else for cyclic refresh mode update the segment map, set the segment id
    // and then update the quantizer.
    else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
      vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi,
                                        mi_row, mi_col, bsize, 1);
      vp9_init_plane_quantizers(cpi, x);
    }
  }

  // Point the coefficient buffers at the context's private copies.
  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][1];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];
  }

  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  }

  // Restore the coding context of the MB to that that was in place
  // when the mode was picked for it
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
          && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi[x_idx + y * mis] = mi_addr;
      }

  if (cpi->oxcf.aq_mode)
    vp9_init_plane_quantizers(cpi, x);

  // FIXME(rbultje) I'm pretty sure this should go to the end of this block
  // (i.e. after the output_enabled)
  if (bsize < BLOCK_32X32) {
    if (bsize < BLOCK_16X16)
      ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
    ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
  }

  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }

  vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
             sizeof(uint8_t) * ctx->num_4x4_blk);

  if (!output_enabled)
    return;

  if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    for (i = 0; i < TX_MODES; i++)
      cpi->rd_tx_select_diff[i] += ctx->tx_rd_diff[i];
  }

#if CONFIG_INTERNAL_STATS
  if (frame_is_intra_only(cm)) {
    static const int kf_mode_index[] = {
      THR_DC        /*DC_PRED*/,
      THR_V_PRED    /*V_PRED*/,
      THR_H_PRED    /*H_PRED*/,
      THR_D45_PRED  /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED  /*D63_PRED*/,
      THR_TM        /*TM_PRED*/,
    };
    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
  } else {
    // Note how often each mode chosen as best
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif

  if (!frame_is_intra_only(cm)) {
    if (is_inter_block(mbmi)) {
      vp9_update_mv_count(cm, xd);

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = vp9_get_pred_context_switchable_interp(xd);
        ++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
      }
    }

    cpi->rd_comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    cpi->rd_comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    cpi->rd_comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      cpi->rd_filter_diff[i] += ctx->best_filter_diff[i];
  }
}

void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
                               src->alpha_buffer};
  const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
                          src->alpha_stride};
  int i;

  // Set current frame pointer.
  x->e_mbd.cur_buf = src;

  for (i = 0; i < MAX_MB_PLANE; i++)
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}

static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
                             int mi_row, int mi_col,
                             int *totalrate, int64_t *totaldist,
                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                             int64_t best_rd) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  int i, orig_rdmult;
  double rdmult_ratio;

  vp9_clear_system_state();
  rdmult_ratio = 1.0;  // avoid uninitialized warnings

  // Use the lower precision, but faster, 32x32 fdct for mode selection.
  x->use_lp32x32fdct = 1;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (x->ab_index != 0) {
      *totalrate = 0;
      *totaldist = 0;
      return;
    }
  }

  set_offsets(cpi, tile, mi_row, mi_col, bsize);