  // store estimated motion vector
  if (cpi->sf.adaptive_motion_search)
    store_pred_mv(x, ctx);
  // PARTITION_SPLIT
  sum_rd = 0;
  // TODO(jingning): use the motion vectors given by the above search as
  // the starting point of motion search in the following partition type check.
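  // Evaluate the four-way split: an 8x8 block codes its four 4x4 sub-blocks
  // jointly through leaf_split[0], while larger blocks recurse into
  // rd_pick_partition for each quadrant, passing the remaining rd budget
  // (best_rd - sum_rd) so clearly losing splits terminate early.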
  if (do_split) {
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    if (bsize == BLOCK_8X8) {
      i = 4;
      if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
        pc_tree->leaf_split[0]->pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                       pc_tree->leaf_split[0], best_rd, 0);
      if (sum_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
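        // RDCOST folds rate and distortion into a single Lagrangian cost
        // using the block-level rdmult/rddiv scaling factors.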
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
        if (sum_rd < best_rd) {
          update_state(cpi, pc_tree->leaf_split[0], mi_row, mi_col, subsize, 0);
          encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize,
                            pc_tree->leaf_split[0]);
          update_partition_context(xd, mi_row, mi_col, subsize, bsize);
        }
      }
    } else {
      for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
        const int x_idx = (i & 1) * mi_step;
        const int y_idx = (i >> 1) * mi_step;

        if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
          continue;

        if (cpi->sf.adaptive_motion_search)
          load_pred_mv(x, ctx);

        rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
                          subsize, &this_rate, &this_dist, i != 3,
                          best_rd - sum_rd, pc_tree->split[i]);

        if (this_rate == INT_MAX) {
          sum_rd = INT64_MAX;
        } else {
          sum_rate += this_rate;
          sum_dist += this_dist;
          sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
        }
      }
    }

    if (sum_rd < best_rd && i == 4) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rate = sum_rate;
        best_dist = sum_dist;
        best_rd = sum_rd;
        pc_tree->partitioning = PARTITION_SPLIT;
      }
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // PARTITION_HORZ
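  // Code the top half at the current subsize; if that stays below the best
  // cost so far and the bottom half lies inside the frame, code it as well,
  // then add the cost of signalling PARTITION_HORZ before the comparison.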
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->horizontal[0].pred_interp_filter =
          ctx->mic.mbmi.interp_filter;
    rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                     &pc_tree->horizontal[0], best_rd, 0);
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);

    if (sum_rd < best_rd && mi_row + mi_step < cm->mi_rows) {
      PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
      update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
      if (cpi->sf.adaptive_motion_search)
        load_pred_mv(x, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->horizontal[1].pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rate,
                       &this_dist, subsize, &pc_tree->horizontal[1],
                       best_rd - sum_rd, 1);
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }

    if (sum_rd < best_rd) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += cpi->partition_cost[pl][PARTITION_HORZ];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rd = sum_rd;
        best_rate = sum_rate;
        best_dist = sum_dist;
        pc_tree->partitioning = PARTITION_HORZ;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  // PARTITION_VERT
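  // Mirror of the horizontal case: code the left half, then the right half
  // if it is inside the frame, and account for the PARTITION_VERT signalling
  // cost before comparing against the best result so far.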
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);

    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->vertical[0].pred_interp_filter =
          ctx->mic.mbmi.interp_filter;
    rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                     &pc_tree->vertical[0], best_rd, 0);
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
    if (sum_rd < best_rd && mi_col + mi_step < cm->mi_cols) {
      update_state(cpi, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize,
                        &pc_tree->vertical[0]);
      if (cpi->sf.adaptive_motion_search)
        load_pred_mv(x, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->vertical[1].pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rate,
                       &this_dist, subsize,
                       &pc_tree->vertical[1], best_rd - sum_rd, 1);
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += cpi->partition_cost[pl][PARTITION_VERT];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rate = sum_rate;
        best_dist = sum_dist;
        best_rd = sum_rd;
        pc_tree->partitioning = PARTITION_VERT;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // TODO(jbb): This code was added to avoid a static analysis warning about
  // best_rd not being used after this point.  It should be refactored so
  // that the duplicate checks occur in a sub-function and are actually used...
  (void) best_rd;
  *rate = best_rate;
  *dist = best_dist;

  if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);
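    // Output is only enabled at the top of the recursion, once the full
    // 64x64 partitioning is final; lower levels encode with output disabled.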

    // Check the projected output rate for this SB against its target
    // and if necessary apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map)
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled,
                                    best_rate);
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              best_rate, best_dist);
    encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree);
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rate < INT_MAX);
    assert(best_dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}

static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
                             int mi_row, TOKENEXTRA **tp) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  SPEED_FEATURES *const sf = &cpi->sf;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
  vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    int dummy_rate;
    int64_t dummy_dist;
    int i;
    if (sf->adaptive_pred_interp_filter) {
      for (i = 0; i < 64; ++i)
        cpi->leaf_tree[i].pred_interp_filter = SWITCHABLE;

      for (i = 0; i < 64; ++i) {
        cpi->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
        cpi->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
        cpi->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
        cpi->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
      }
    }

    vp9_zero(cpi->mb.pred_mv);
    if ((sf->partition_search_type == SEARCH_PARTITION &&
         sf->use_lastframe_partitioning) ||
         sf->partition_search_type == FIXED_PARTITION ||
         sf->partition_search_type == VAR_BASED_PARTITION ||
         sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
      const int idx_str = cm->mi_stride * mi_row + mi_col;
      MODE_INFO **mi = cm->mi_grid_visible + idx_str;
      MODE_INFO **prev_mi = cm->prev_mi_grid_visible + idx_str;
      cpi->mb.source_variance = UINT_MAX;
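      // Choose how the 64x64 partitioning is obtained: fixed, variance
      // based, reused from the previous frame, or a fresh rd search,
      // depending on the active speed features.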
      if (sf->partition_search_type == FIXED_PARTITION) {
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col,
                               sf->always_this_block_size);
        rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, cpi->pc_root);
      } else if (cpi->skippable_frame ||
                 sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
        BLOCK_SIZE bsize;
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
        set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
        rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, cpi->pc_root);
      } else if (sf->partition_search_type == VAR_BASED_PARTITION) {
        choose_partitioning(cpi, tile, mi_row, mi_col);
        rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, cpi->pc_root);
      } else {
        GF_GROUP * gf_grp = &cpi->twopass.gf_group;
        int last_was_mid_sequence_overlay = 0;
        if ((cpi->pass == 2) && (gf_grp->index)) {
          if (gf_grp->update_type[gf_grp->index - 1] == OVERLAY_UPDATE)
            last_was_mid_sequence_overlay = 1;
        }
        if ((cm->current_video_frame
            % sf->last_partitioning_redo_frequency) == 0
            || last_was_mid_sequence_overlay
            || cm->prev_mi == 0
            || cm->show_frame == 0
            || cm->frame_type == KEY_FRAME
            || cpi->rc.is_src_frame_alt_ref
            || ((sf->use_lastframe_partitioning ==
                 LAST_FRAME_PARTITION_LOW_MOTION) &&
                 sb_has_motion(cm, prev_mi, sf->lf_motion_threshold))) {
          // If required set upper and lower partition size limits
          if (sf->auto_min_max_partition_size) {
            set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
            rd_auto_partition_range(cpi, tile, mi_row, mi_col,
                                    &sf->min_partition_size,
                                    &sf->max_partition_size);
          }
          rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                            &dummy_rate, &dummy_dist, 1, INT64_MAX,
                            cpi->pc_root);
        } else {
          if (sf->constrain_copy_partition &&
              sb_has_motion(cm, prev_mi, sf->lf_motion_threshold))
            constrain_copy_partitioning(cpi, tile, mi, prev_mi,
                                        mi_row, mi_col, BLOCK_16X16);
          else
            copy_partitioning(cm, mi, prev_mi);
          rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                           &dummy_rate, &dummy_dist, 1, cpi->pc_root);
        }
      }
    } else {
      // If required set upper and lower partition size limits
      if (sf->auto_min_max_partition_size) {
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        rd_auto_partition_range(cpi, tile, mi_row, mi_col,
                                &sf->min_partition_size,
                                &sf->max_partition_size);
      }
      rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                        &dummy_rate, &dummy_dist, 1, INT64_MAX, cpi->pc_root);
    }
  }
}

static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);
  vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(xd->above_context[0], 0,
             sizeof(*xd->above_context[0]) *
             2 * aligned_mi_cols * MAX_MB_PLANE);
  vpx_memset(xd->above_seg_context, 0,
             sizeof(*xd->above_seg_context) * aligned_mi_cols);
}

static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
        + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}

static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
  int mi_row, mi_col;
  const int mis = cm->mi_stride;
  MODE_INFO **mi_ptr = cm->mi_grid_visible;
  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
      if (mi_ptr[mi_col]->mbmi.tx_size > max_tx_size)
        mi_ptr[mi_col]->mbmi.tx_size = max_tx_size;
    }
  }
}

static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return INTRA_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return LAST_FRAME;
  else
    return GOLDEN_FRAME;
}

static TX_MODE select_tx_mode(const VP9_COMP *cpi) {
  if (cpi->mb.e_mbd.lossless) {
    return ONLY_4X4;
  } else if (cpi->common.current_video_frame == 0) {
    return TX_MODE_SELECT;
  } else {
    if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
      return ALLOW_32X32;
    } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
      const RD_OPT *const rd_opt = &cpi->rd;
      const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
      return rd_opt->tx_select_threshes[frame_type][ALLOW_32X32] >
                 rd_opt->tx_select_threshes[frame_type][TX_MODE_SELECT] ?
                     ALLOW_32X32 : TX_MODE_SELECT;
    } else if (cpi->sf.tx_size_search_method == USE_TX_8X8) {
    } else {
      unsigned int total = 0;
      int i;
      for (i = 0; i < TX_SIZES; ++i)
        total += cpi->tx_stepdown_count[i];
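      // If over 90% of the counted blocks fall in bin 0 (no transform-size
      // step-down), restrict the search to ALLOW_32X32; otherwise keep
      // per-block selection with TX_MODE_SELECT.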
      if (total) {
        const double fraction = (double)cpi->tx_stepdown_count[0] / total;
        return fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
      } else {
        return cpi->common.tx_mode;
      }
    }
  }
}

static void nonrd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
                                int mi_row, int mi_col,
                                int *rate, int64_t *dist,
                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  mbmi = &xd->mi[0]->mbmi;
  mbmi->sb_type = bsize;
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
    if (mbmi->segment_id && x->in_static_area)
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);

  if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
    set_mode_info_seg_skip(x, cm->tx_mode, rate, dist, bsize);
  else
    vp9_pick_inter_mode(cpi, x, tile, mi_row, mi_col, rate, dist, bsize, ctx);
  duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
}

static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
                              int mi_row, int mi_col,
                              BLOCK_SIZE bsize, BLOCK_SIZE subsize,
                              PC_TREE *pc_tree) {
  MACROBLOCKD *xd = &x->e_mbd;
  int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition = pc_tree->partitioning;
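  // Write the modes chosen during the partition search back into the frame
  // level mi grid, recursing for split partitions; hbs is half the block
  // size in mode-info (8-pixel) units.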
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  switch (partition) {
    case PARTITION_NONE:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->none.mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      break;
    case PARTITION_VERT:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->vertical[0].mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);

      if (mi_col + hbs < cm->mi_cols) {
        set_modeinfo_offsets(cm, xd, mi_row, mi_col + hbs);
        *(xd->mi[0]) = pc_tree->vertical[1].mic;
        duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, bsize);
      }
      break;
    case PARTITION_HORZ:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->horizontal[0].mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      if (mi_row + hbs < cm->mi_rows) {
        set_modeinfo_offsets(cm, xd, mi_row + hbs, mi_col);
        *(xd->mi[0]) = pc_tree->horizontal[1].mic;
        duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, bsize);
      }
      break;
    case PARTITION_SPLIT: {
      BLOCK_SIZE subsubsize = get_subsize(subsize, PARTITION_SPLIT);
      fill_mode_info_sb(cm, x, mi_row, mi_col, subsize,
                        subsubsize, pc_tree->split[0]);
      fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
                        subsubsize, pc_tree->split[1]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
                        subsubsize, pc_tree->split[2]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
                        subsubsize, pc_tree->split[3]);
    default:
      break;
  }
}

static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
                                 TOKENEXTRA **tp, int mi_row,
                                 int mi_col, BLOCK_SIZE bsize, int *rate,
                                 int64_t *dist, int do_recon, int64_t best_rd,
                                 PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  BLOCK_SIZE subsize = bsize;
  int this_rate, sum_rate = 0, best_rate = INT_MAX;
  int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
  int64_t sum_rd = 0;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + ms >= cm->mi_rows);
  const int force_vert_split = (mi_col + ms >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed = !force_vert_split && yss <= xss &&
                               bsize >= BLOCK_8X8;
  int partition_vert_allowed = !force_horz_split && xss <= yss &&
                               bsize >= BLOCK_8X8;
  (void) *tp_orig;

  assert(num_8x8_blocks_wide_lookup[bsize] ==
             num_8x8_blocks_high_lookup[bsize]);

  // Determine partition types in search according to the speed features.
  // The threshold set here has to be of square block size.
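  // Clamp the allowed partition types to the speed-feature min/max sizes;
  // splits forced at frame edges remain allowed, and square-only mode drops
  // rectangular partitions except where an edge forces them.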
  if (cpi->sf.auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= cpi->sf.max_partition_size &&
                               bsize >= cpi->sf.min_partition_size);
    partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize >  cpi->sf.min_partition_size) ||
                                force_horz_split);
    partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize >  cpi->sf.min_partition_size) ||
                                force_vert_split);
    do_split &= bsize > cpi->sf.min_partition_size;
  }
  if (cpi->sf.use_square_partition_only) {
    partition_horz_allowed &= force_horz_split;
    partition_vert_allowed &= force_vert_split;
  }

  // PARTITION_NONE
  if (partition_none_allowed) {
    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, bsize, ctx);
    ctx->mic.mbmi = xd->mi[0]->mbmi;
    ctx->skip_txfm = x->skip_txfm;

    if (this_rate != INT_MAX) {
      int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      this_rate += cpi->partition_cost[pl][PARTITION_NONE];
      sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
      if (sum_rd < best_rd) {
        int64_t stop_thresh = 4096;
        int64_t stop_thresh_rd;

        best_rate = this_rate;
        best_dist = this_dist;
        best_rd = sum_rd;
        if (bsize >= BLOCK_8X8)
          pc_tree->partitioning = PARTITION_NONE;

        // Adjust threshold according to partition size.
        stop_thresh >>= 8 - (b_width_log2(bsize) +
            b_height_log2(bsize));

        stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh);
        // If obtained distortion is very small, choose current partition
        // and stop splitting.
        if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) {
          do_split = 0;
          do_rect = 0;
        }
      }
    }
  }

  // store estimated motion vector

  // PARTITION_SPLIT
  sum_rd = 0;
  if (do_split) {
    int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
    sum_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
      const int x_idx = (i & 1) * ms;
      const int y_idx = (i >> 1) * ms;

      if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
        continue;
      nonrd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
                           subsize, &this_rate, &this_dist, 0,
                           best_rd - sum_rd, pc_tree->split[i]);

      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }

    if (sum_rd < best_rd) {
      best_rate = sum_rate;
      best_dist = sum_dist;
      best_rd = sum_rd;
      pc_tree->partitioning = PARTITION_SPLIT;
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
  }

  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);

    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, subsize,
                        &pc_tree->horizontal[0]);
    pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
    pc_tree->horizontal[0].skip_txfm = x->skip_txfm;
    pc_tree->horizontal[0].skip = x->skip;

    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);

    if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) {
      nonrd_pick_sb_modes(cpi, tile, mi_row + ms, mi_col,
                          &this_rate, &this_dist, subsize,
                          &pc_tree->horizontal[1]);
      pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->horizontal[1].skip_txfm = x->skip_txfm;
      pc_tree->horizontal[1].skip = x->skip;

      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rate += cpi->partition_cost[pl][PARTITION_HORZ];
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      best_rd = sum_rd;
      best_rate = sum_rate;
      best_dist = sum_dist;
      pc_tree->partitioning = PARTITION_HORZ;
    }
  }

  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);

    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);

    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, subsize,
                        &pc_tree->vertical[0]);
    pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
    pc_tree->vertical[0].skip_txfm = x->skip_txfm;
    pc_tree->vertical[0].skip = x->skip;
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
    if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) {
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + ms,
                          &this_rate, &this_dist, subsize,
                          &pc_tree->vertical[1]);
      pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->vertical[1].skip_txfm = x->skip_txfm;
      pc_tree->vertical[1].skip = x->skip;
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rate += cpi->partition_cost[pl][PARTITION_VERT];
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      best_rate = sum_rate;
      best_dist = sum_dist;
      best_rd = sum_rd;
      pc_tree->partitioning = PARTITION_VERT;
    }
  }

  // TODO(JBB): The following line is here just to avoid a static warning
  // that occurs because at this point we never again reuse best_rd
  // despite setting it here.  The code should be refactored to avoid this.
  (void) best_rd;
  if (best_rate == INT_MAX)
    return;

  subsize = get_subsize(bsize, pc_tree->partitioning);
  fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, subsize,
                    pc_tree);

  if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);

    // Check the projected output rate for this SB against its target
    // and if necessary apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled,
                                    best_rate);
    }

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              best_rate, best_dist);
    encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree);
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rate < INT_MAX);
    assert(best_dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}

static void nonrd_use_partition(VP9_COMP *cpi,
                                const TileInfo *const tile,
                                MODE_INFO **mi,
                                TOKENEXTRA **tp,
                                int mi_row, int mi_col,
                                BLOCK_SIZE bsize, int output_enabled,
                                int *totrate, int64_t *totdist,
                                PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  int rate = INT_MAX;
  int64_t dist = INT64_MAX;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;
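  // The partitioning to use is read back from the mi grid: the sb_type
  // stored at the top-left mode info selects the partition type at this
  // level via partition_lookup.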

  subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
  partition = partition_lookup[bsl][subsize];

  switch (partition) {
    case PARTITION_NONE:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
                          subsize, &pc_tree->none);
      pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->none.skip_txfm = x->skip_txfm;
      pc_tree->none.skip = x->skip;
      break;
    case PARTITION_VERT:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
                          subsize, &pc_tree->vertical[0]);
      pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->vertical[0].skip_txfm = x->skip_txfm;
      pc_tree->vertical[0].skip = x->skip;
      if (mi_col + hbs < cm->mi_cols) {
        nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + hbs,
                            &rate, &dist, subsize, &pc_tree->vertical[1]);
        pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->vertical[1].skip_txfm = x->skip_txfm;
        pc_tree->vertical[1].skip = x->skip;
        if (rate != INT_MAX && dist != INT64_MAX &&
            *totrate != INT_MAX && *totdist != INT64_MAX) {
          *totrate += rate;
          *totdist += dist;
        }
      }
      break;
    case PARTITION_HORZ:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
                          subsize, &pc_tree->horizontal[0]);
      pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->horizontal[0].skip_txfm = x->skip_txfm;
      pc_tree->horizontal[0].skip = x->skip;
      if (mi_row + hbs < cm->mi_rows) {
        nonrd_pick_sb_modes(cpi, tile, mi_row + hbs, mi_col,
                            &rate, &dist, subsize, &pc_tree->horizontal[1]);
        pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->horizontal[1].skip_txfm = x->skip_txfm;
        pc_tree->horizontal[1].skip = x->skip;
        if (rate != INT_MAX && dist != INT64_MAX &&
            *totrate != INT_MAX && *totdist != INT64_MAX) {
          *totrate += rate;
          *totdist += dist;
        }
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
                          subsize, output_enabled, totrate, totdist,
                          pc_tree->split[0]);
      nonrd_use_partition(cpi, tile, mi + hbs, tp,
                          mi_row, mi_col + hbs, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[1]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      nonrd_use_partition(cpi, tile, mi + hbs * mis, tp,
                          mi_row + hbs, mi_col, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[2]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      nonrd_use_partition(cpi, tile, mi + hbs * mis + hbs, tp,
                          mi_row + hbs, mi_col + hbs, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[3]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      break;
    default:
      assert("Invalid partition type.");
  }

  if (bsize == BLOCK_64X64 && output_enabled) {
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              *totrate, *totdist);
    encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, bsize, pc_tree);
  }
}

static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
                                int mi_row, TOKENEXTRA **tp) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCK *x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
  vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    MACROBLOCK *x = &cpi->mb;
    int dummy_rate = 0;
    int64_t dummy_dist = 0;
    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;
    MODE_INFO **prev_mi = cm->prev_mi_grid_visible + idx_str;
    BLOCK_SIZE bsize;
    x->in_static_area = 0;
    x->source_variance = UINT_MAX;
    vp9_zero(x->pred_mv);
    // Set the partition type of the 64X64 block
    switch (cpi->sf.partition_search_type) {
      case VAR_BASED_PARTITION:
        choose_partitioning(cpi, tile, mi_row, mi_col);
        nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, cpi->pc_root);
        break;
      case SOURCE_VAR_BASED_PARTITION:
        set_source_var_based_partition(cpi, tile, mi, mi_row, mi_col);
        nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, cpi->pc_root);
        break;
      case VAR_BASED_FIXED_PARTITION:
      case FIXED_PARTITION:
        bsize = cpi->sf.partition_search_type == FIXED_PARTITION ?
                cpi->sf.always_this_block_size :
                get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
        set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
        nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, cpi->pc_root);
        break;
      case REFERENCE_PARTITION:
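        // Re-run the partition search only when the speed features request a
        // check or the superblock is not classified as static background;
        // otherwise reuse the previous frame's partitioning.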
        if (cpi->sf.partition_check ||
            !is_background(cpi, tile, mi_row, mi_col)) {
          set_modeinfo_offsets(cm, xd, mi_row, mi_col);
          auto_partition_range(cpi, tile, mi_row, mi_col,
                               &cpi->sf.min_partition_size,
                               &cpi->sf.max_partition_size);
          nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                               &dummy_rate, &dummy_dist, 1, INT64_MAX,
                               cpi->pc_root);
        } else {
          copy_partitioning(cm, mi, prev_mi);
          nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
                              BLOCK_64X64, 1, &dummy_rate, &dummy_dist,
                              cpi->pc_root);
        }
        break;
      default:
        assert(0);
        break;
    }
  }
}
// end RTC play code
static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
  SPEED_FEATURES *const sf = &cpi->sf;
  VP9_COMMON *const cm = &cpi->common;

  const uint8_t *src = cpi->Source->y_buffer;
  const uint8_t *last_src = cpi->Last_Source->y_buffer;
  const int src_stride = cpi->Source->y_stride;
  const int last_stride = cpi->Last_Source->y_stride;

  // Pick cutoff threshold
  const int cutoff = (MIN(cm->width, cm->height) >= 720) ?
      (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
      (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
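  // The cutoff is a fraction of the frame's macroblock count, with a
  // different percentage for frames of 720p and above.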
  DECLARE_ALIGNED_ARRAY(16, int, hist, VAR_HIST_BINS);
  diff *var16 = cpi->source_diff_var;

  int sum = 0;
  int i, j;

  vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));

  for (i = 0; i < cm->mb_rows; i++) {
    for (j = 0; j < cm->mb_cols; j++) {
      vp9_get16x16var(src, src_stride, last_src, last_stride,
                      &var16->sse, &var16->sum);
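      // var = sse - sum^2 / 256, i.e. the (unnormalized) variance of the
      // 16x16 difference between the source and the last source frame.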

      var16->var = var16->sse -
          (((uint32_t)var16->sum * var16->sum) >> 8);

      if (var16->var >= VAR_HIST_MAX_BG_VAR)
        hist[VAR_HIST_BINS - 1]++;
      else
        hist[var16->var / VAR_HIST_FACTOR]++;

      src += 16;
      last_src += 16;
      var16++;
    }

    src = src - cm->mb_cols * 16 + 16 * src_stride;
    last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
  }

  cpi->source_var_thresh = 0;

  if (hist[VAR_HIST_BINS - 1] < cutoff) {
    for (i = 0; i < VAR_HIST_BINS - 1; i++) {
      sum += hist[i];

      if (sum > cutoff) {
        cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
        return 0;
      }
    }
  }
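  // No suitable threshold was found (too much of the frame is high
  // variance); leave source_var_thresh at 0 and report the re-check
  // interval to the caller.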

  return sf->search_type_check_frequency;
}

static void source_var_based_partition_search_method(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;

  if (cm->frame_type == KEY_FRAME) {
    // For key frame, use SEARCH_PARTITION.
    sf->partition_search_type = SEARCH_PARTITION;
  } else if (cm->intra_only) {
    sf->partition_search_type = FIXED_PARTITION;
  } else {
    if (cm->last_width != cm->width || cm->last_height != cm->height) {
      if (cpi->source_diff_var)
        vpx_free(cpi->source_diff_var);

      CHECK_MEM_ERROR(cm, cpi->source_diff_var,
                      vpx_calloc(cm->MBs, sizeof(diff)));
    }

    if (!cpi->frames_till_next_var_check)