vp9_encodeframe.c
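
    // PARTITION_SPLIT: recursively evaluate the four square sub-blocks.
    // BLOCK_8X8 is the smallest splittable size, so its split is handled as
    // a single leaf evaluation rather than four recursive calls.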
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    if (bsize == BLOCK_8X8) {
      i = 4;
      if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
        pc_tree->leaf_split[0]->pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                       pc_tree->leaf_split[0], best_rd, 0);
      if (sum_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
        if (sum_rd < best_rd) {
          update_state(cpi, pc_tree->leaf_split[0], mi_row, mi_col, subsize, 0);
          encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize,
                            pc_tree->leaf_split[0]);
          update_partition_context(xd, mi_row, mi_col, subsize, bsize);
        }
      }
    } else {
      for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
        const int x_idx = (i & 1) * mi_step;
        const int y_idx = (i >> 1) * mi_step;

        if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
          continue;

        if (cpi->sf.adaptive_motion_search)
          load_pred_mv(x, ctx);

        rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
                          subsize, &this_rate, &this_dist, i != 3,
                          best_rd - sum_rd, pc_tree->split[i], i);

        if (this_rate == INT_MAX) {
          sum_rd = INT64_MAX;
        } else {
          sum_rate += this_rate;
          sum_dist += this_dist;
          sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
        }
      }
    }

    if (sum_rd < best_rd && i == 4) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += x->partition_cost[pl][PARTITION_SPLIT];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rate = sum_rate;
        best_dist = sum_dist;
        best_rd = sum_rd;
        pc_tree->partitioning = PARTITION_SPLIT;
      }
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->horizontal[0].pred_interp_filter =
          ctx->mic.mbmi.interp_filter;
    rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                     &pc_tree->horizontal[0], best_rd, 0);
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);

    if (sum_rd < best_rd && mi_row + mi_step < cm->mi_rows) {
      PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
      update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
      if (cpi->sf.adaptive_motion_search)
        load_pred_mv(x, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->horizontal[1].pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rate,
                       &this_dist, subsize, &pc_tree->horizontal[1],
                       best_rd - sum_rd, 1);
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }

    if (sum_rd < best_rd) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += x->partition_cost[pl][PARTITION_HORZ];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rd = sum_rd;
        best_rate = sum_rate;
        best_dist = sum_dist;
        pc_tree->partitioning = PARTITION_HORZ;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);

    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->vertical[0].pred_interp_filter =
          ctx->mic.mbmi.interp_filter;
    rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                     &pc_tree->vertical[0], best_rd, 0);
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
    if (sum_rd < best_rd && mi_col + mi_step < cm->mi_cols) {
      update_state(cpi, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize,
                        &pc_tree->vertical[0]);
      if (cpi->sf.adaptive_motion_search)
        load_pred_mv(x, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->vertical[1].pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rate,
                       &this_dist, subsize,
                       &pc_tree->vertical[1], best_rd - sum_rd,
                       1);
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += x->partition_cost[pl][PARTITION_VERT];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rate = sum_rate;
        best_dist = sum_dist;
        best_rd = sum_rd;
        pc_tree->partitioning = PARTITION_VERT;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // TODO(jbb): This code added so that we avoid static analysis
  // warning related to the fact that best_rd isn't used after this
  // point.  This code should be refactored so that the duplicate
  // checks occur in some sub function and thus are used...
  (void) best_rd;
  *rate = best_rate;
  *dist = best_dist;

  if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);

    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map)
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled,
                                    best_rate);
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              best_rate, best_dist);
    encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree);
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rate < INT_MAX);
    assert(best_dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}
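
// Encode one 64x64 superblock row: for each SB, choose the partitioning
// according to the active speed features (fixed, variance-based, copied
// from the previous frame, or a full RD partition search) and encode it.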
static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
                             int mi_row, TOKENEXTRA **tp) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  SPEED_FEATURES *const sf = &cpi->sf;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
  vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    int dummy_rate;
    int64_t dummy_dist;
    MACROBLOCK *x = &cpi->mb;
    int i;

    if (sf->adaptive_pred_interp_filter) {
      for (i = 0; i < 64; ++i)
        x->leaf_tree[i].pred_interp_filter = SWITCHABLE;

      for (i = 0; i < 64; ++i) {
        x->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
        x->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
        x->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
        x->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
      }
    }

    vp9_zero(cpi->mb.pred_mv);
    if ((sf->partition_search_type == SEARCH_PARTITION &&
         sf->use_lastframe_partitioning) ||
         sf->partition_search_type == FIXED_PARTITION ||
         sf->partition_search_type == VAR_BASED_PARTITION ||
         sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
      const int idx_str = cm->mi_stride * mi_row + mi_col;
      MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
      MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
      cpi->mb.source_variance = UINT_MAX;
      if (sf->partition_search_type == FIXED_PARTITION) {
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col,
                               sf->always_this_block_size);
        rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, x->pc_root, 0);
      } else if (sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
        BLOCK_SIZE bsize;
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
        set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize);
        rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, x->pc_root, 0);
      } else if (sf->partition_search_type == VAR_BASED_PARTITION) {
        choose_partitioning(cpi, tile, mi_row, mi_col);
        rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, x->pc_root, 0);
      } else {
        if ((cm->current_video_frame
            % sf->last_partitioning_redo_frequency) == 0
            || cm->prev_mi == 0
            || cm->show_frame == 0
            || cm->frame_type == KEY_FRAME
            || cpi->rc.is_src_frame_alt_ref
            || ((sf->use_lastframe_partitioning ==
                 LAST_FRAME_PARTITION_LOW_MOTION) &&
                 sb_has_motion(cm, prev_mi_8x8))) {
          // If required set upper and lower partition size limits
          if (sf->auto_min_max_partition_size) {
            set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
            rd_auto_partition_range(cpi, tile, mi_row, mi_col,
                                    &sf->min_partition_size,
                                    &sf->max_partition_size);
          }
          rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                            &dummy_rate, &dummy_dist, 1, INT64_MAX, x->pc_root,
                            0);
        } else {
          if (sf->constrain_copy_partition &&
              sb_has_motion(cm, prev_mi_8x8))
            constrain_copy_partitioning(cpi, tile, mi_8x8, prev_mi_8x8,
                                        mi_row, mi_col, BLOCK_16X16);
          else
            copy_partitioning(cm, mi_8x8, prev_mi_8x8);
          rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                           &dummy_rate, &dummy_dist, 1, x->pc_root, 0);
        }
      }
    } else {
      // If required set upper and lower partition size limits
      if (sf->auto_min_max_partition_size) {
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        rd_auto_partition_range(cpi, tile, mi_row, mi_col,
                                &sf->min_partition_size,
                                &sf->max_partition_size);
      }
      rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                        &dummy_rate, &dummy_dist, 1, INT64_MAX, x->pc_root, 0);
    }
  }
}
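
// Reset the per-frame macroblock context: source plane pointers, block
// plane geometry, and the above-row coding contexts.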
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);
  vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(xd->above_context[0], 0,
             sizeof(*xd->above_context[0]) *
             2 * aligned_mi_cols * MAX_MB_PLANE);
  vpx_memset(xd->above_seg_context, 0,
             sizeof(*xd->above_seg_context) * aligned_mi_cols);
}
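
// Lossless coding uses the 4x4 Walsh-Hadamard transform pair with loop
// filtering and trellis optimization disabled; otherwise the normal DCT
// transform path is restored.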
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
  if (lossless) {
    // printf("Switching to lossless\n");
    cpi->mb.fwd_txm4x4 = vp9_fwht4x4;
    cpi->mb.e_mbd.itxm_add = vp9_iwht4x4_add;
    cpi->mb.optimize = 0;
    cpi->common.lf.filter_level = 0;
    cpi->zbin_mode_boost_enabled = 0;
    cpi->common.tx_mode = ONLY_4X4;
  } else {
    // printf("Not lossless\n");
    cpi->mb.fwd_txm4x4 = vp9_fdct4x4;
    cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add;
  }
}
static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
        + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
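
// Clamp the transform size of every coded block in the frame to txfm_max.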
static void reset_skip_txfm_size(VP9_COMMON *cm, TX_SIZE txfm_max) {
  int mi_row, mi_col;
  const int mis = cm->mi_stride;
  MODE_INFO **mi_ptr = cm->mi_grid_visible;
  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
      if (mi_ptr[mi_col]->mbmi.tx_size > txfm_max)
        mi_ptr[mi_col]->mbmi.tx_size = txfm_max;
    }
  }
}

static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return INTRA_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return LAST_FRAME;
  else
    return GOLDEN_FRAME;
}
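
// Select the frame-level transform mode, either directly (lossless, first
// frame, speed-feature override) or from accumulated RD statistics.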
static TX_MODE select_tx_mode(const VP9_COMP *cpi) {
  if (cpi->oxcf.lossless) {
    return ONLY_4X4;
  } else if (cpi->common.current_video_frame == 0) {
    return TX_MODE_SELECT;
  } else {
    if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
      return ALLOW_32X32;
    } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
      const RD_OPT *const rd_opt = &cpi->rd;
      const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
      return rd_opt->tx_select_threshes[frame_type][ALLOW_32X32] >
                 rd_opt->tx_select_threshes[frame_type][TX_MODE_SELECT] ?
                     ALLOW_32X32 : TX_MODE_SELECT;
    } else {
      unsigned int total = 0;
      int i;
      for (i = 0; i < TX_SIZES; ++i)
        total += cpi->tx_stepdown_count[i];
      if (total) {
        const double fraction = (double)cpi->tx_stepdown_count[0] / total;
        return fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
      } else {
        return cpi->common.tx_mode;
      }
    }
  }
}

// Start RTC Exploration
typedef enum {
  BOTH_ZERO = 0,
  ZERO_PLUS_PREDICTED = 1,
  BOTH_PREDICTED = 2,
  NEW_PLUS_NON_INTRA = 3,
  BOTH_NEW = 4,
  INTRA_PLUS_NON_INTRA = 5,
  BOTH_INTRA = 6,
  INVALID_CASE = 9
} motion_vector_context;

static void set_mode_info(MB_MODE_INFO *mbmi, BLOCK_SIZE bsize,
                          MB_PREDICTION_MODE mode) {
  mbmi->mode = mode;
  mbmi->uv_mode = mode;
  mbmi->mv[0].as_int = 0;
  mbmi->mv[1].as_int = 0;
  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->tx_size = max_txsize_lookup[bsize];
  mbmi->sb_type = bsize;
  mbmi->segment_id = 0;
}
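
// Non-RD mode decision for a single block: run the fast inter-mode picker
// on inter frames, or fall back to DC intra prediction on intra-only frames.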
static void nonrd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
                                int mi_row, int mi_col,
                                int *rate, int64_t *dist,
                                BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  xd->mi[0]->mbmi.sb_type = bsize;
  if (!frame_is_intra_only(cm)) {
    vp9_pick_inter_mode(cpi, x, tile, mi_row, mi_col,
                        rate, dist, bsize);
  } else {
    MB_PREDICTION_MODE intramode = DC_PRED;
    set_mode_info(&xd->mi[0]->mbmi, bsize, intramode);
  }
  duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
}

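// Copy the mode info selected during the non-RD partition search from the
// PC_TREE back into the frame-level mode-info grid.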
static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
                              int mi_row, int mi_col,
                              BLOCK_SIZE bsize, BLOCK_SIZE subsize,
                              PC_TREE *pc_tree) {
  MACROBLOCKD *xd = &x->e_mbd;
  int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition = pc_tree->partitioning;
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  switch (partition) {
    case PARTITION_NONE:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->none.mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      break;
    case PARTITION_VERT:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->vertical[0].mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);

      if (mi_col + hbs < cm->mi_cols) {
        set_modeinfo_offsets(cm, xd, mi_row, mi_col + hbs);
        *(xd->mi[0]) = pc_tree->vertical[1].mic;
        duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, bsize);
      }
      break;
    case PARTITION_HORZ:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->horizontal[0].mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      if (mi_row + hbs < cm->mi_rows) {
        set_modeinfo_offsets(cm, xd, mi_row + hbs, mi_col);
        *(xd->mi[0]) = pc_tree->horizontal[1].mic;
        duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, bsize);
      }
      break;
    case PARTITION_SPLIT: {
      BLOCK_SIZE subsubsize = get_subsize(subsize, PARTITION_SPLIT);
      fill_mode_info_sb(cm, x, mi_row, mi_col, subsize,
                        subsubsize, pc_tree->split[0]);
      fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
                        subsubsize, pc_tree->split[1]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
                        subsubsize, pc_tree->split[2]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
                        subsubsize, pc_tree->split[3]);
      break;
    }
    default:
      break;
  }
}

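// Recursive partition search for the non-RD (rtc) path.  Mirrors
// rd_pick_partition but uses the estimated rate/distortion returned by
// nonrd_pick_sb_modes instead of full RD evaluation.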
static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
                                 TOKENEXTRA **tp, int mi_row,
                                 int mi_col, BLOCK_SIZE bsize, int *rate,
                                 int64_t *dist, int do_recon, int64_t best_rd,
                                 PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  int i;
  BLOCK_SIZE subsize = bsize;
  int this_rate, sum_rate = 0, best_rate = INT_MAX;
  int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
  int64_t sum_rd = 0;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + ms >= cm->mi_rows);
  const int force_vert_split = (mi_col + ms >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed = !force_vert_split && yss <= xss &&
                               bsize >= BLOCK_8X8;
  int partition_vert_allowed = !force_horz_split && xss <= yss &&
                               bsize >= BLOCK_8X8;
  (void) *tp_orig;

  assert(num_8x8_blocks_wide_lookup[bsize] ==
             num_8x8_blocks_high_lookup[bsize]);

  x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);

  // Determine partition types in search according to the speed features.
  // The threshold set here has to be of square block size.
  if (cpi->sf.auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= cpi->sf.max_partition_size &&
                               bsize >= cpi->sf.min_partition_size);
    partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize >  cpi->sf.min_partition_size) ||
                                force_horz_split);
    partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize >  cpi->sf.min_partition_size) ||
                                force_vert_split);
    do_split &= bsize > cpi->sf.min_partition_size;
  }
  if (cpi->sf.use_square_partition_only) {
    partition_horz_allowed &= force_horz_split;
    partition_vert_allowed &= force_vert_split;
  }

  if (!x->in_active_map && (partition_horz_allowed || partition_vert_allowed))
    do_split = 0;

  // PARTITION_NONE
  if (partition_none_allowed) {
    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, bsize);
    ctx->mic.mbmi = xd->mi[0]->mbmi;

    if (this_rate != INT_MAX) {
      int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      this_rate += x->partition_cost[pl][PARTITION_NONE];
      sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
      if (sum_rd < best_rd) {
        int64_t stop_thresh = 4096;
        int64_t stop_thresh_rd;

        best_rate = this_rate;
        best_dist = this_dist;
        best_rd = sum_rd;
        if (bsize >= BLOCK_8X8)
          pc_tree->partitioning = PARTITION_NONE;

        // Adjust threshold according to partition size.
        stop_thresh >>= 8 - (b_width_log2_lookup[bsize] +
            b_height_log2_lookup[bsize]);

        stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh);
        // If obtained distortion is very small, choose current partition
        // and stop splitting.
        if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) {
          do_split = 0;
          do_rect = 0;
        }
      }
    }
    if (!x->in_active_map) {
      do_split = 0;
      do_rect = 0;
    }
  }

  // store estimated motion vector

  // PARTITION_SPLIT
  sum_rd = 0;
  if (do_split) {
    int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
    sum_rate += x->partition_cost[pl][PARTITION_SPLIT];
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
      const int x_idx = (i & 1) * ms;
      const int y_idx = (i >> 1) * ms;

      if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
        continue;


      nonrd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
                           subsize, &this_rate, &this_dist, 0,
                           best_rd - sum_rd, pc_tree->split[i]);

      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }

    if (sum_rd < best_rd) {
      best_rate = sum_rate;
      best_dist = sum_dist;
      best_rd = sum_rd;
      pc_tree->partitioning = PARTITION_SPLIT;
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
  }

  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);

    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, subsize);

    pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;

    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);

    if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) {
      nonrd_pick_sb_modes(cpi, tile, mi_row + ms, mi_col,
                          &this_rate, &this_dist, subsize);

      pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;

      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rate += x->partition_cost[pl][PARTITION_HORZ];
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      best_rd = sum_rd;
      best_rate = sum_rate;
      best_dist = sum_dist;
      pc_tree->partitioning = PARTITION_HORZ;
    }
  }

  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);

    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);

    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, subsize);
    pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
    if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) {
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + ms,
                          &this_rate, &this_dist, subsize);
      pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rate += x->partition_cost[pl][PARTITION_VERT];
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      best_rate = sum_rate;
      best_dist = sum_dist;
      best_rd = sum_rd;
      pc_tree->partitioning = PARTITION_VERT;
    }
  }

  // TODO(JBB): The following line is here just to avoid a static warning
  // that occurs because at this point we never again reuse best_rd
  // despite setting it here.  The code should be refactored to avoid this.
  (void) best_rd;
  if (best_rate == INT_MAX)
    return;

  subsize = get_subsize(bsize, pc_tree->partitioning);
  fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, subsize,
                    pc_tree);

  if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);

    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled,
                                    best_rate);
    }

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              best_rate, best_dist);
    encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree);
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rate < INT_MAX);
    assert(best_dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}

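// Apply an externally supplied partitioning (read from the mode-info grid)
// with non-RD mode decisions, accumulating the estimated rate and distortion.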
static void nonrd_use_partition(VP9_COMP *cpi,
                                const TileInfo *const tile,
                                MODE_INFO **mi_8x8,
                                TOKENEXTRA **tp,
                                int mi_row, int mi_col,
                                BLOCK_SIZE bsize, int output_enabled,
                                int *totrate, int64_t *totdist,
                                PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  int rate = INT_MAX;
  int64_t dist = INT64_MAX;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  subsize = (bsize >= BLOCK_8X8) ? mi_8x8[0]->mbmi.sb_type : BLOCK_4X4;
  partition = partition_lookup[bsl][subsize];

  switch (partition) {
    case PARTITION_NONE:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize);
      pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
      break;
    case PARTITION_VERT:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize);
      pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
      if (mi_col + hbs < cm->mi_cols) {
        nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + hbs,
                            &rate, &dist, subsize);
        pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
        if (rate != INT_MAX && dist != INT64_MAX &&
            *totrate != INT_MAX && *totdist != INT64_MAX) {
          *totrate += rate;
          *totdist += dist;
        }
      }
      break;
    case PARTITION_HORZ:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize);
      pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
      if (mi_row + hbs < cm->mi_rows) {
        nonrd_pick_sb_modes(cpi, tile, mi_row + hbs, mi_col,
                            &rate, &dist, subsize);
        pc_tree->horizontal[1].mic.mbmi = mi_8x8[0]->mbmi;
        if (rate != INT_MAX && dist != INT64_MAX &&
            *totrate != INT_MAX && *totdist != INT64_MAX) {
          *totrate += rate;
          *totdist += dist;
        }
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col,
                          subsize, output_enabled, totrate, totdist,
                          pc_tree->split[0]);
      nonrd_use_partition(cpi, tile, mi_8x8 + hbs, tp,
                          mi_row, mi_col + hbs, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[1]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      nonrd_use_partition(cpi, tile, mi_8x8 + hbs * mis, tp,
                          mi_row + hbs, mi_col, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[2]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      nonrd_use_partition(cpi, tile, mi_8x8 + hbs * mis + hbs, tp,
                          mi_row + hbs, mi_col + hbs, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[3]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      break;
    default:
      assert(0 && "Invalid partition type.");
  }

  if (bsize == BLOCK_64X64 && output_enabled) {
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              *totrate, *totdist);
    encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, bsize, pc_tree);
  }
}

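// Encode one 64x64 superblock row on the non-RD (rtc) path, choosing the
// partitioning source according to sf->partition_search_type.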
static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
                                int mi_row, TOKENEXTRA **tp) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
  vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    MACROBLOCK *x = &cpi->mb;
    int dummy_rate = 0;
    int64_t dummy_dist = 0;
    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
    MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
    BLOCK_SIZE bsize;

    cpi->mb.source_variance = UINT_MAX;
    vp9_zero(cpi->mb.pred_mv);
    // Set the partition type of the 64X64 block
    switch (cpi->sf.partition_search_type) {
      case VAR_BASED_PARTITION:
        choose_partitioning(cpi, tile, mi_row, mi_col);
        nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, x->pc_root);
        break;
      case SOURCE_VAR_BASED_PARTITION:
        set_source_var_based_partition(cpi, tile, mi_8x8, mi_row, mi_col);
        nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, x->pc_root);
        break;
      case VAR_BASED_FIXED_PARTITION:
      case FIXED_PARTITION:
        bsize = cpi->sf.partition_search_type == FIXED_PARTITION ?
                cpi->sf.always_this_block_size :
                get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
        set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize);
        nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, x->pc_root);
        break;
      case REFERENCE_PARTITION:
        if (cpi->sf.partition_check ||
            !is_background(cpi, tile, mi_row, mi_col)) {
          nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                               &dummy_rate, &dummy_dist, 1, INT64_MAX,
                               x->pc_root);
        } else {
          copy_partitioning(cm, mi_8x8, prev_mi_8x8);
          nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col,
                              BLOCK_64X64, 1, &dummy_rate, &dummy_dist,
                              x->pc_root);
        }
        break;
      default:
        assert(0);
        break;
    }
  }
}

// end RTC play code
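
// Returns 1 when inter blocks heavily dominate (intra count less than a
// quarter of the inter count) on a displayed inter frame.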
static int get_skip_encode_frame(const VP9_COMMON *cm) {
  unsigned int intra_count = 0, inter_count = 0;
  int j;

  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
    intra_count += cm->counts.intra_inter[j][0];
    inter_count += cm->counts.intra_inter[j][1];
  }

  return (intra_count << 2) < inter_count &&
         cm->frame_type != KEY_FRAME &&
         cm->show_frame;
}

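// Per-frame setup before encoding: reset RD statistics and coefficient
// counts, pick the frame transform mode, configure lossless coding, and
// initialize the quantizer, RD/ME constants, and the MB context.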
static void encode_frame_internal(VP9_COMP *cpi) {
  SPEED_FEATURES *const sf = &cpi->sf;
  RD_OPT *const rd_opt = &cpi->rd;
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  xd->mi = cm->mi_grid_visible;
  xd->mi[0] = cm->mi;
  vp9_zero(cpi->coef_counts);
  vp9_zero(cpi->tx_stepdown_count);
  vp9_zero(rd_opt->comp_pred_diff);
  vp9_zero(rd_opt->filter_diff);
  vp9_zero(rd_opt->tx_select_diff);
  vp9_zero(rd_opt->tx_select_threshes);
  cm->tx_mode = select_tx_mode(cpi);
  cpi->mb.e_mbd.lossless = cm->base_qindex == 0 &&
                           cm->y_dc_delta_q == 0 &&
                           cm->uv_dc_delta_q == 0 &&
                           cm->uv_ac_delta_q == 0;
  switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi);
  vp9_initialize_me_consts(cpi, cm->base_qindex);
  init_encode_frame_mb_context(cpi);
  if (sf->use_nonrd_pick_mode) {
    // Initialize internal buffer pointers for rtc coding, where non-RD
    // mode decision is used and hence no buffer pointer swap needed.
    int i;
    struct macroblock_plane *const p = x->plane;
    struct macroblockd_plane *const pd = xd->plane;
    PICK_MODE_CONTEXT *ctx = &x->pc_root->none;

    for (i = 0; i < MAX_MB_PLANE; ++i) {
      p[i].coeff = ctx->coeff_pbuf[i][0];
      p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
      pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
      p[i].eobs = ctx->eobs_pbuf[i][0];
    }
    if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION &&
        cm->current_video_frame > 0) {
      int check_freq = sf->search_type_check_frequency;

      if ((cm->current_video_frame - 1) % check_freq == 0) {
        cpi->use_large_partition_rate = 0;
      }

      if ((cm->current_video_frame - 1) % check_freq == 1) {
        const int mbs_in_b32x32 = 1 << ((b_width_log2_lookup[BLOCK_32X32] -
                                  b_width_log2_lookup[BLOCK_16X16]) +
                                  (b_height_log2_lookup[BLOCK_32X32] -
                                  b_height_log2_lookup[BLOCK_16X16]));
        cpi->use_large_partition_rate = cpi->use_large_partition_rate * 100 *
                                        mbs_in_b32x32 / cm->MBs;
      }