    return MIN(bsize, BLOCK_8X8);
  } else {
    for (; bsize > 0; bsize -= 3) {
      *bh = num_8x8_blocks_high_lookup[bsize];
      *bw = num_8x8_blocks_wide_lookup[bsize];
      if ((*bh <= rows_left) && (*bw <= cols_left)) {
        break;
      }
    }
  }
  return bsize;
}

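// Fill the mode-info grid of a partially visible SB64: each 8x8 entry gets
// the largest partition size, at most bsize, that still fits within the
// remaining rows and columns of the image.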
static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
    int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
    BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
  int bh = bh_in;
  int r, c;
  for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
    int bw = bw_in;
    for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
      const int index = r * mis + c;
      mi_8x8[index] = mi + index;
      mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
          row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
    }
  }
}

// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                                   MODE_INFO **mi_8x8, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  int block_row, block_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
  int bh = num_8x8_blocks_high_lookup[bsize];
  int bw = num_8x8_blocks_wide_lookup[bsize];

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // Apply the requested partition size to the SB64 if it is all "in image"
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        int index = block_row * mis + block_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = bsize;
      }
    }
  } else {
    // Else this is a partial SB64.
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
        col8x8_remaining, bsize, mi_8x8);
  }
}

static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8,
  MODE_INFO **prev_mi_8x8) {
  const int mis = cm->mi_stride;
  int block_row, block_col;

  for (block_row = 0; block_row < 8; ++block_row) {
    for (block_col = 0; block_col < 8; ++block_col) {
      MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col];
      const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;

      if (prev_mi) {
        const ptrdiff_t offset = prev_mi - cm->prev_mi;
        mi_8x8[block_row * mis + block_col] = cm->mi + offset;
        mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;
      }
    }
  }
}

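// Copy the previous frame's partitioning into this SB64, but only where the
// previous block size is no larger than bsize; larger blocks are replaced by
// a fixed partition of size bsize. Partially visible SB64s simply copy the
// previous partitioning.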
static void constrain_copy_partitioning(VP9_COMP *const cpi,
                                        const TileInfo *const tile,
                                        MODE_INFO **mi_8x8,
                                        MODE_INFO **prev_mi_8x8,
                                        int mi_row, int mi_col,
                                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  MODE_INFO *const mi_upper_left = cm->mi + mi_row * mis + mi_col;
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  int block_row, block_col;

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // If the SB64 is entirely "in image".
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        const int index = block_row * mis + block_col;
        MODE_INFO *prev_mi = prev_mi_8x8[index];
        const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
        // Use previous partition if block size is not larger than bsize.
        if (prev_mi && sb_type <= bsize) {
          int block_row2, block_col2;
          for (block_row2 = 0; block_row2 < bh; ++block_row2) {
            for (block_col2 = 0; block_col2 < bw; ++block_col2) {
              const int index2 = (block_row + block_row2) * mis +
                  block_col + block_col2;
              prev_mi = prev_mi_8x8[index2];
              if (prev_mi) {
                const ptrdiff_t offset = prev_mi - cm->prev_mi;
                mi_8x8[index2] = cm->mi + offset;
                mi_8x8[index2]->mbmi.sb_type = prev_mi->mbmi.sb_type;
              }
            }
          }
        } else {
          // Otherwise, use fixed partition of size bsize.
          mi_8x8[index] = mi_upper_left + index;
          mi_8x8[index]->mbmi.sb_type = bsize;
        }
      }
    }
  } else {
    // Else this is a partial SB64, copy previous partition.
    copy_partitioning(cm, mi_8x8, prev_mi_8x8);
  }
}

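// (row, col) offsets, in 8x8 mode-info units, of the four 16x16 blocks inside
// each of the four 32x32 quadrants of a 64x64 superblock.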
const struct {
  int row;
  int col;
} coord_lookup[16] = {
    // 32x32 index = 0
    {0, 0}, {0, 2}, {2, 0}, {2, 2},
    // 32x32 index = 1
    {0, 4}, {0, 6}, {2, 4}, {2, 6},
    // 32x32 index = 2
    {4, 0}, {4, 2}, {6, 0}, {6, 2},
    // 32x32 index = 3
    {4, 4}, {4, 6}, {6, 4}, {6, 6},
};

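// Choose a partitioning for the SB64 from source-frame difference variance:
// start from 16x16 blocks and merge up to 32x32 (and then 64x64) wherever the
// variance of every sub-block stays below the source variance threshold.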
static void set_source_var_based_partition(VP9_COMP *cpi,
                                           const TileInfo *const tile,
                                           MODE_INFO **mi_8x8,
                                           int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;

  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // In-image SB64
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    int i, j;
    int index;
    diff d32[4];
    const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
    int is_larger_better = 0;
    int use32x32 = 0;
    unsigned int thr = cpi->source_var_thresh;

    vpx_memset(d32, 0, 4 * sizeof(diff));

    for (i = 0; i < 4; i++) {
      diff *d16[4];

      for (j = 0; j < 4; j++) {
        int b_mi_row = coord_lookup[i * 4 + j].row;
        int b_mi_col = coord_lookup[i * 4 + j].col;
        int boffset = b_mi_row / 2 * cm->mb_cols +
                      b_mi_col / 2;
        d16[j] = cpi->source_diff_var + offset + boffset;

        index = b_mi_row * mis + b_mi_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = BLOCK_16X16;

        // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
        // size to further improve quality.
      }

      is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
          (d16[2]->var < thr) && (d16[3]->var < thr);

      // Use 32x32 partition
      if (is_larger_better) {
        use32x32 += 1;
        for (j = 0; j < 4; j++) {
          d32[i].sse += d16[j]->sse;
          d32[i].sum += d16[j]->sum;
        }

        d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);

        index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = BLOCK_32X32;
      }
    }

    if (use32x32 == 4) {
      thr <<= 1;
      is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
          (d32[2].var < thr) && (d32[3].var < thr);

      // Use 64x64 partition
      if (is_larger_better) {
        mi_8x8[0] = mi_upper_left;
        mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
      }
    }
  } else {   // partial in-image SB64
    int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
    int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
        row8x8_remaining, col8x8_remaining, BLOCK_16X16, mi_8x8);
  }
}

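// Classify the SB64 as static background by comparing the SAD between the
// current source and the last source frame against a size-dependent
// threshold; the result is also cached in x->in_static_area.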
static int is_background(VP9_COMP *cpi, const TileInfo *const tile,
                         int mi_row, int mi_col) {
  MACROBLOCK *x = &cpi->mb;
  uint8_t *src, *pre;
  int src_stride, pre_stride;

  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;

  int this_sad = 0;
  int threshold = 0;

  // This assumes the input source frames are of the same dimension.
  src_stride = cpi->Source->y_stride;
  src = cpi->Source->y_buffer + (mi_row * MI_SIZE) * src_stride +
            (mi_col * MI_SIZE);
  pre_stride = cpi->Last_Source->y_stride;
  pre = cpi->Last_Source->y_buffer + (mi_row * MI_SIZE) * pre_stride +
          (mi_col * MI_SIZE);

  if (row8x8_remaining >= MI_BLOCK_SIZE &&
      col8x8_remaining >= MI_BLOCK_SIZE) {
    this_sad = cpi->fn_ptr[BLOCK_64X64].sdf(src, src_stride,
                                            pre, pre_stride);
    threshold = (1 << 12);
  } else {
    int r, c;
    for (r = 0; r < row8x8_remaining; r += 2)
      for (c = 0; c < col8x8_remaining; c += 2)
        this_sad += cpi->fn_ptr[BLOCK_16X16].sdf(src, src_stride,
                                                 pre, pre_stride);
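    // Scale the threshold to the visible area: 64 per 8x8 block, matching the
    // (1 << 12) used above for a full 64x64 block.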
    threshold = (row8x8_remaining * col8x8_remaining) << 6;
  }

  x->in_static_area = (this_sad < 2 * threshold);
  return x->in_static_area;
}

static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8,
                         const int motion_thresh) {
  const int mis = cm->mi_stride;
  int block_row, block_col;

  if (cm->prev_mi) {
    for (block_row = 0; block_row < 8; ++block_row) {
      for (block_col = 0; block_col < 8; ++block_col) {
        const MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col];
        if (prev_mi) {
          if (abs(prev_mi->mbmi.mv[0].as_mv.row) > motion_thresh ||
              abs(prev_mi->mbmi.mv[0].as_mv.col) > motion_thresh)
            return 1;
        }
      }
    }
  }
  return 0;
}

static void update_state_rt(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
                            int mi_row, int mi_col, int bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const struct segmentation *const seg = &cm->seg;
  *(xd->mi[0]) = ctx->mic;
  // For in-frame adaptive Q, check for resetting the segment_id and updating
  // the cyclic refresh map.
  if ((cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) && seg->enabled) {
    vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi,
                                      mi_row, mi_col, bsize, 1);
    vp9_init_plane_quantizers(cpi, x);
  }

  if (is_inter_block(mbmi)) {
    vp9_update_mv_count(cm, xd);

    if (cm->interp_filter == SWITCHABLE) {
      const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
      ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter];
    }
  }

  x->skip = ctx->skip;
  x->skip_txfm = mbmi->segment_id ? 0 : ctx->skip_txfm;
}

static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile,
                        TOKENEXTRA **tp, int mi_row, int mi_col,
                        int output_enabled, BLOCK_SIZE bsize,
                        PICK_MODE_CONTEXT *ctx) {
  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  update_state_rt(cpi, ctx, mi_row, mi_col, bsize);
#if CONFIG_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0 && output_enabled) {
    vp9_denoiser_denoise(&cpi->denoiser, &cpi->mb, mi_row, mi_col,
                         MAX(BLOCK_8X8, bsize), ctx);
  }
#endif

  encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize, ctx);
  update_stats(cpi);

  (*tp)->token = EOSB_TOKEN;
  (*tp)++;
}

static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile,
                         TOKENEXTRA **tp, int mi_row, int mi_col,
                         int output_enabled, BLOCK_SIZE bsize,
                         PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize >= BLOCK_8X8) {
    const int idx_str = xd->mi_stride * mi_row + mi_col;
    MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str;
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = mi_8x8[0]->mbmi.sb_type;
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    cm->counts.partition[ctx][partition]++;
  switch (partition) {
    case PARTITION_NONE:
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->none);
      break;
    case PARTITION_VERT:
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
                    subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->horizontal[0]);
      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
                    subsize, &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                   pc_tree->split[0]);
      encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
                   subsize, pc_tree->split[1]);
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
                   subsize, pc_tree->split[2]);
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                   subsize, pc_tree->split[3]);
      break;
    default:
      assert("Invalid partition type.");
Jim Bankoski's avatar
Jim Bankoski committed
  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void rd_use_partition(VP9_COMP *cpi,
                             const TileInfo *const tile,
                             MODE_INFO **mi_8x8,
                             TOKENEXTRA **tp, int mi_row, int mi_col,
                             BLOCK_SIZE bsize, int *rate, int64_t *dist,
                             int do_recon, PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mis = cm->mi_stride;
  const int bsl = b_width_log2(bsize);
  const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
  const int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE subsize;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int last_part_rate = INT_MAX;
  int64_t last_part_dist = INT64_MAX;
  int64_t last_part_rd = INT64_MAX;
  int none_rate = INT_MAX;
  int64_t none_dist = INT64_MAX;
  int64_t none_rd = INT64_MAX;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT64_MAX;
  int64_t chosen_rd = INT64_MAX;
  BLOCK_SIZE sub_subsize = BLOCK_4X4;
  int splits_below = 0;
  BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
  int do_partition_search = 1;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  assert(num_4x4_blocks_wide_lookup[bsize] ==
         num_4x4_blocks_high_lookup[bsize]);

  partition = partition_lookup[bsl][bs_type];
  subsize = get_subsize(bsize, partition);

  pc_tree->partitioning = partition;
  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  if (bsize == BLOCK_16X16) {
    set_offsets(cpi, tile, mi_row, mi_col, bsize);
    x->mb_energy = vp9_block_energy(cpi, x, bsize);
  }

  if (do_partition_search &&
      cpi->sf.partition_search_type == SEARCH_PARTITION &&
      cpi->sf.adjust_partitioning_from_last_frame) {
    // Check if any of the sub blocks are further split.
    if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
      sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
      splits_below = 1;
      for (i = 0; i < 4; i++) {
        int jj = i >> 1, ii = i & 0x01;
        MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss];
        if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
          splits_below = 0;
        }
      }
    }

    // If the partition is not PARTITION_NONE, also try PARTITION_NONE unless
    // each of the 4 splits is itself split even further.
    if (partition != PARTITION_NONE && !splits_below &&
        mi_row + (mi_step >> 1) < cm->mi_rows &&
        mi_col + (mi_step >> 1) < cm->mi_cols) {
      pc_tree->partitioning = PARTITION_NONE;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize,
                       ctx, INT64_MAX, 0);
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);

      if (none_rate < INT_MAX) {
        none_rate += cpi->partition_cost[pl][PARTITION_NONE];
        none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist);
      }

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      mi_8x8[0]->mbmi.sb_type = bs_type;
      pc_tree->partitioning = partition;
    }
  }

  switch (partition) {
    case PARTITION_NONE:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, bsize, ctx, INT64_MAX, 0);
      break;
    case PARTITION_HORZ:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize, &pc_tree->horizontal[0],
                       INT64_MAX, 0);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
        int rt = 0;
        int64_t dt = 0;
        PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
        update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
        rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &rt, &dt,
                         subsize, &pc_tree->horizontal[1], INT64_MAX, 1);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_VERT:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize, &pc_tree->vertical[0],
                       INT64_MAX, 0);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
        int rt = 0;
        int64_t dt = 0;
        PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
        update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
        rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &rt, &dt,
                         subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
                         INT64_MAX, 1);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_SPLIT:
      if (bsize == BLOCK_8X8) {
        rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                         &last_part_dist, subsize, pc_tree->leaf_split[0],
                         INT64_MAX, 0);
        break;
      }
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
        int x_idx = (i & 1) * (mi_step >> 1);
        int y_idx = (i >> 1) * (mi_step >> 1);
        int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;

        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;

        rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
                         mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
                         i != 3, pc_tree->split[i]);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    default:
      assert(0);
  }
  pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  if (last_part_rate < INT_MAX) {
    last_part_rate += cpi->partition_cost[pl][partition];
    last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist);
  }
  if (do_partition_search
      && cpi->sf.adjust_partitioning_from_last_frame
      && cpi->sf.partition_search_type == SEARCH_PARTITION
      && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
      && (mi_row + mi_step < cm->mi_rows ||
          mi_row + (mi_step >> 1) == cm->mi_rows)
      && (mi_col + mi_step < cm->mi_cols ||
          mi_col + (mi_step >> 1) == cm->mi_cols)) {
    BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
    chosen_rate = 0;
    chosen_dist = 0;
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
    pc_tree->partitioning = PARTITION_SPLIT;

    // Split partition.
    for (i = 0; i < 4; i++) {
      int x_idx = (i & 1) * (mi_step >> 1);
      int y_idx = (i >> 1) * (mi_step >> 1);
      int rt = 0;
      int64_t dt = 0;
      ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
      PARTITION_CONTEXT sl[8], sa[8];

      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
        continue;

      save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      pc_tree->split[i]->partitioning = PARTITION_NONE;
      rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
                       split_subsize, &pc_tree->split[i]->none,
                       INT64_MAX, i);

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

      if (rt == INT_MAX || dt == INT64_MAX) {
        chosen_rate = INT_MAX;
        chosen_dist = INT64_MAX;
        break;
      }
      chosen_rate += rt;
      chosen_dist += dt;

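      // Re-encode the first three quadrants so their reconstructed pixels and
      // contexts are available to the remaining quadrants; the last quadrant
      // is never needed as context within this block.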
      if (i != 3)
        encode_sb(cpi, tile, tp,  mi_row + y_idx, mi_col + x_idx, 0,
                  split_subsize, pc_tree->split[i]);
      pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
                                   split_subsize);
      chosen_rate += cpi->partition_cost[pl][PARTITION_NONE];
    }

    pl = partition_plane_context(xd, mi_row, mi_col, bsize);
    if (chosen_rate < INT_MAX) {
      chosen_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist);
    }
  }

  // If last_part is better set the partitioning to that.
  if (last_part_rd < chosen_rd) {
    mi_8x8[0]->mbmi.sb_type = bsize;
    if (bsize >= BLOCK_8X8)
      pc_tree->partitioning = partition;
    chosen_rate = last_part_rate;
    chosen_dist = last_part_dist;
    chosen_rd = last_part_rd;
  }

  // If none was better set the partitioning to that.
  if (none_rd < chosen_rd) {
    if (bsize >= BLOCK_8X8)
      pc_tree->partitioning = PARTITION_NONE;
    chosen_rate = none_rate;
    chosen_dist = none_dist;
  }

  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  // We must have chosen a partitioning and encoding or we'll fail later on.
  // No other opportunities for success.
  if (bsize == BLOCK_64X64)
    assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX);
  if (do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);

    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col,
                                    output_enabled, chosen_rate);
    }

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              chosen_rate, chosen_dist);
    encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize,
              pc_tree);
  }

  *rate = chosen_rate;
  *dist = chosen_dist;
}

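// Look-up tables used by rd_auto_partition_range(): given the smallest or
// largest partition size observed in neighboring blocks, relax it roughly one
// square size down or up to widen the allowed search range.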
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
  BLOCK_4X4,   BLOCK_4X4,   BLOCK_4X4,
  BLOCK_4X4,   BLOCK_4X4,   BLOCK_4X4,
  BLOCK_8X8,   BLOCK_8X8,   BLOCK_8X8,
  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
  BLOCK_16X16
};

static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
  BLOCK_8X8,   BLOCK_16X16, BLOCK_16X16,
  BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
  BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
  BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
  BLOCK_64X64
};

// Look at all the mode_info entries for blocks that are part of this
// partition and find the min and max values for sb_type.
// At the moment this is designed to work on a 64x64 SB but could be
// adjusted to use a size parameter.
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8,
                                        BLOCK_SIZE * min_block_size,
                                        BLOCK_SIZE * max_block_size ) {
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  int sb_width_in_blocks = MI_BLOCK_SIZE;
  int sb_height_in_blocks  = MI_BLOCK_SIZE;
  int i, j;
  int index = 0;

  // Check the sb_type for each block that belongs to this region.
  for (i = 0; i < sb_height_in_blocks; ++i) {
    for (j = 0; j < sb_width_in_blocks; ++j) {
      MODE_INFO * mi = mi_8x8[index+j];
      BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
      *min_block_size = MIN(*min_block_size, sb_type);
      *max_block_size = MAX(*max_block_size, sb_type);
    }
    index += xd->mi_stride;
  }
}

// Next square block size less than or equal to the current block size.
static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
  BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
  BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
  BLOCK_64X64
};

// Look at neighboring blocks and set a min and max partition size based on
// what they chose.
static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
                                    int mi_row, int mi_col,
                                    BLOCK_SIZE *min_block_size,
                                    BLOCK_SIZE *max_block_size) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO **mi = xd->mi;
  const int left_in_image = xd->left_available && mi[-1];
  const int above_in_image = xd->up_available && mi[-xd->mi_stride];
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  int bh, bw;
  BLOCK_SIZE min_size = BLOCK_4X4;
  BLOCK_SIZE max_size = BLOCK_64X64;
  // Trap case where we do not have a prediction.
  if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
    // Default "min to max" and "max to min"
    min_size = BLOCK_64X64;
    max_size = BLOCK_4X4;

    // NOTE: each call to get_sb_partition_size_range() uses the previous
    // passed in values for min and max as a starting point.
    // Find the min and max partition used in previous frame at this location
    if (cm->frame_type != KEY_FRAME) {
      MODE_INFO **const prev_mi =
          &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
      get_sb_partition_size_range(cpi, prev_mi, &min_size, &max_size);
    }
    // Find the min and max partition sizes used in the left SB64
    if (left_in_image) {
      MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
      get_sb_partition_size_range(cpi, left_sb64_mi, &min_size, &max_size);
    }
    // Find the min and max partition sizes used in the above SB64.
    if (above_in_image) {
      MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
      get_sb_partition_size_range(cpi, above_sb64_mi, &min_size, &max_size);
    }
    // adjust observed min and max
    if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
      min_size = min_partition_size[min_size];
      max_size = max_partition_size[max_size];
    }
  }

  // Check border cases where max and min from neighbors may not be legal.
  max_size = find_partition_size(max_size,
                                 row8x8_remaining, col8x8_remaining,
                                 &bh, &bw);
  min_size = MIN(min_size, max_size);

  // When use_square_partition_only is true, make sure at least one square
  // partition is allowed by selecting the next smaller square size as
  // *min_block_size.
  if (cpi->sf.use_square_partition_only &&
      next_square_size[max_size] < min_size) {
    min_size = next_square_size[max_size];
  }

  *min_block_size = min_size;
  *max_block_size = max_size;
}

static void auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
                                 int mi_row, int mi_col,
                                 BLOCK_SIZE *min_block_size,
                                 BLOCK_SIZE *max_block_size) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO **mi_8x8 = xd->mi;
  const int left_in_image = xd->left_available && mi_8x8[-1];
  const int above_in_image = xd->up_available &&
                             mi_8x8[-xd->mi_stride];
  int row8x8_remaining = tile->mi_row_end - mi_row;
  int col8x8_remaining = tile->mi_col_end - mi_col;
  int bh, bw;
  BLOCK_SIZE min_size = BLOCK_32X32;
  BLOCK_SIZE max_size = BLOCK_8X8;
  int bsl = mi_width_log2(BLOCK_64X64);
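  // Alternate, in a checkerboard pattern across SB64s, between estimating the
  // partition range from neighboring blocks and a fixed 8x8..32x32 range.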
  const int search_range_ctrl = (((mi_row + mi_col) >> bsl) +
                                     get_chessboard_index(cm)) % 2;
  // Trap case where we do not have a prediction.
  if (search_range_ctrl &&
      (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
    int block;
    MODE_INFO **mi;
    BLOCK_SIZE sb_type;

    // Find the min and max partition sizes used in the left SB64.
    if (left_in_image) {
      MODE_INFO *cur_mi;
      mi = &mi_8x8[-1];
      for (block = 0; block < MI_BLOCK_SIZE; ++block) {
        cur_mi = mi[block * xd->mi_stride];
        sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
        min_size = MIN(min_size, sb_type);
        max_size = MAX(max_size, sb_type);
      }
    }
    // Find the min and max partition sizes used in the above SB64.
    if (above_in_image) {
      mi = &mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE];
      for (block = 0; block < MI_BLOCK_SIZE; ++block) {
        sb_type = mi[block] ? mi[block]->mbmi.sb_type : 0;
        min_size = MIN(min_size, sb_type);
        max_size = MAX(max_size, sb_type);
      }
    }

    min_size = min_partition_size[min_size];
    max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
                                   &bh, &bw);
    min_size = MIN(min_size, max_size);
    min_size = MAX(min_size, BLOCK_8X8);
    max_size = MIN(max_size, BLOCK_32X32);
  } else {
    min_size = BLOCK_8X8;
    max_size = BLOCK_32X32;
  }

  *min_block_size = min_size;
  *max_block_size = max_size;
}

static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
  vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
}

static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
  vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
}

// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
                              TOKENEXTRA **tp, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, int *rate,
                              int64_t *dist, int do_recon, int64_t best_rd,
                              PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  int i, pl;
  int this_rate, sum_rate = 0, best_rate = INT_MAX;
  int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
  int64_t sum_rd = 0;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
  const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;
  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed = !force_vert_split && yss <= xss &&
                               bsize >= BLOCK_8X8;
  int partition_vert_allowed = !force_horz_split && xss <= yss &&
                               bsize >= BLOCK_8X8;
  (void) *tp_orig;

  assert(num_8x8_blocks_wide_lookup[bsize] ==
             num_8x8_blocks_high_lookup[bsize]);
  if (bsize == BLOCK_16X16) {
    set_offsets(cpi, tile, mi_row, mi_col, bsize);
    x->mb_energy = vp9_block_energy(cpi, x, bsize);
  }
  // Determine which partition types to search according to the speed features.
  // The thresholds set here must be square block sizes.
  if (cpi->sf.auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= cpi->sf.max_partition_size &&
                               bsize >= cpi->sf.min_partition_size);
    partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize >  cpi->sf.min_partition_size) ||
                                force_horz_split);
    partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize >  cpi->sf.min_partition_size) ||
                                force_vert_split);
    do_split &= bsize > cpi->sf.min_partition_size;
  }
  if (cpi->sf.use_square_partition_only) {
    partition_horz_allowed &= force_horz_split;
    partition_vert_allowed &= force_vert_split;
  }

  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  if (cpi->sf.disable_split_var_thresh && partition_none_allowed) {
    unsigned int source_variancey;
    vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
    source_variancey = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
    if (source_variancey < cpi->sf.disable_split_var_thresh) {
      do_split = 0;
      if (source_variancey < cpi->sf.disable_split_var_thresh / 2)
        do_rect = 0;
    }
  }

  // PARTITION_NONE
  if (partition_none_allowed) {
    rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize,
                     ctx, best_rd, 0);
    if (this_rate != INT_MAX) {
      if (bsize >= BLOCK_8X8) {
        pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rate += cpi->partition_cost[pl][PARTITION_NONE];
      }
      sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
      if (sum_rd < best_rd) {
        int64_t stop_thresh = 4096;
        int64_t stop_thresh_rd;
        best_rate = this_rate;
        best_dist = this_dist;
        best_rd = sum_rd;
        if (bsize >= BLOCK_8X8)
          pc_tree->partitioning = PARTITION_NONE;

        // Adjust threshold according to partition size.
        stop_thresh >>= 8 - (b_width_log2(bsize) +
            b_height_log2(bsize));
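        // stop_thresh is now proportional to the block's pixel area: 4096 for
        // a 64x64 block, divided by four for each halving of both dimensions.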
        stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh);
        // If obtained distortion is very small, choose current partition
        // and stop splitting.
        if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) {
          do_split = 0;
          do_rect = 0;
        }
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }