
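// Checks whether the variance-tree node for this block is uniform enough to
// stop splitting: if the whole-block variance is below the quantizer-scaled
// threshold (and the block fits within the visible frame), a NONE partitioning
// is set; otherwise the two vertical or horizontal halves are tried. Returns 1
// if a partitioning was assigned, 0 if the caller should split further.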
static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
                               BLOCK_SIZE_TYPE block_size, int mi_row,
                               int mi_col, int mi_size) {
  VP9_COMMON * const cm = &cpi->common;
  vt_node vt;
  const int mis = cm->mode_info_stride;
  int64_t threshold = 50 * cpi->common.base_qindex;

  tree_to_node(data, block_size, &vt);

  // split none is available only if we have more than half a block size
  // in width and height inside the visible image
  if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows
      && vt.vt->none.variance < threshold) {
    set_block_size(cm, m, block_size, mis, mi_row, mi_col);
    return 1;
  }

  // vertical split is available on all but the bottom border
  if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
      && vt.vt->vert[1].variance < threshold) {
    set_block_size(cm, m, get_subsize(block_size, PARTITION_VERT), mis, mi_row,
                   mi_col);
    return 1;
  }

  // horizontal split is available on all but the right border
  if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
      && vt.vt->horz[1].variance < threshold) {
    set_block_size(cm, m, get_subsize(block_size, PARTITION_HORZ), mis, mi_row,
                   mi_col);
    return 1;
  }

  return 0;
}
#endif

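// Builds a variance tree over the 64x64 superblock at (mi_row, mi_col): 8x8
// sum/SSE values are computed on the difference between the source and either
// an inter prediction (LAST_FRAME) or, on key frames, an all-zero block, then
// aggregated up to 16x16, 32x32 and 64x64. The tree is walked top-down with
// set_vt_partitioning() to assign block sizes.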
static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
                                int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK *x = &cpi->mb;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  const int mis = cm->mode_info_stride;
  // TODO(JBB): More experimentation or testing of this threshold;
  int64_t threshold = 4;
  int i, j, k;
  v64x64 vt;
  unsigned char * s;
  int sp;
  const unsigned char * d;
  int dp;
  int pixels_wide = 64, pixels_high = 64;

  vpx_memset(&vt, 0, sizeof(vt));

  set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);

  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  // TODO(JBB): Clearly the higher the quantizer the fewer partitions we want
  // but this needs more experimentation.
  threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;

  d = vp9_64x64_zeros;
  dp = 64;
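
  // For inter frames, predict the whole 64x64 block from the LAST_FRAME
  // reference and measure variance against that prediction; key frames fall
  // back to the all-zero reference set up above.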
  if (cm->frame_type != KEY_FRAME) {
    int_mv nearest_mv, near_mv;
    YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[0];
    YV12_BUFFER_CONFIG *second_ref_fb = NULL;

    setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
                     &xd->scale_factor[0], &xd->scale_factor_uv[0]);
    setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
                     &xd->scale_factor[1], &xd->scale_factor_uv[1]);
    xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
    xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
    vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
                          &nearest_mv, &near_mv);
    xd->mode_info_context->mbmi.mv[0] = nearest_mv;
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_SIZE_SB64X64);
    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;

  }
  // Fill in the entire tree of 8x8 variances for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      v16x16 *vst = &vt.split[i].split[j];
      for (k = 0; k < 4; k++) {
        int x_idx = x16_idx + ((k & 1) << 3);
        int y_idx = y16_idx + ((k >> 1) << 3);
        unsigned int sse = 0;
        int sum = 0;
        if (x_idx < pixels_wide && y_idx < pixels_high)
          vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp,
                              d + y_idx * dp + x_idx, dp, &sse, &sum);
        fill_variance(&vst->split[k].vt.none, sse, sum, 64);
      }
    }
  }
  // Fill the rest of the variance tree by summing the split partition
  // values.
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
      fill_variance_tree(&vt.split[i].split[j], BLOCK_SIZE_MB16X16);
    }
    fill_variance_tree(&vt.split[i], BLOCK_SIZE_SB32X32);
  }
  fill_variance_tree(&vt, BLOCK_SIZE_SB64X64);
  // Now go through the entire structure, splitting every block size until we
  // get to one that has a variance lower than our threshold, or we hit 8x8.
  if (!set_vt_partitioning(cpi, &vt, m, BLOCK_SIZE_SB64X64, mi_row, mi_col,
                           4)) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_SIZE_SB32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx), 2)) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
                                   BLOCK_SIZE_MB16X16,
                                   (mi_row + y32_idx + y16_idx),
                                   (mi_col + x32_idx + x16_idx), 1)) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              set_block_size(cm, m, BLOCK_SIZE_SB8X8, mis,
                             (mi_row + y32_idx + y16_idx + y8_idx),
                             (mi_col + x32_idx + x16_idx + x8_idx));
            }
          }
        }
      }
    }
  }
}
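
// Computes the rate/distortion of the partitioning already stored in the mode
// info (e.g. copied from the previous frame, set by the variance code, or
// forced to a fixed size), optionally compares it against PARTITION_NONE and a
// one-level split, and keeps whichever choice has the lowest RD cost.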
static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
                             int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize,
                             int *rate, int64_t *dist, int do_recon) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  const int mis = cm->mode_info_stride;
  int bwl = b_width_log2(m->mbmi.sb_type);
  int bhl = b_height_log2(m->mbmi.sb_type);
  int bsl = b_width_log2(bsize);
  int bs = (1 << bsl);
  int bh = (1 << bhl);
  int ms = bs / 2;
  int mh = bh / 2;
  int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE_TYPE subsize;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int last_part_rate = INT_MAX;
  int64_t last_part_dist = INT_MAX;
  int split_rate = INT_MAX;
  int64_t split_dist = INT_MAX;
  int none_rate = INT_MAX;
  int64_t none_dist = INT_MAX;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT_MAX;
  BLOCK_SIZE_TYPE sub_subsize = BLOCK_SIZE_AB4X4;
  int splits_below = 0;
  BLOCK_SIZE_TYPE bs_type = m->mbmi.sb_type;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  // parse the partition type
  if ((bwl == bsl) && (bhl == bsl))
    partition = PARTITION_NONE;
  else if ((bwl == bsl) && (bhl < bsl))
    partition = PARTITION_HORZ;
  else if ((bwl < bsl) && (bhl == bsl))
    partition = PARTITION_VERT;
  else if ((bwl < bsl) && (bhl < bsl))
    partition = PARTITION_SPLIT;
  else
    assert(0);

  subsize = get_subsize(bsize, partition);

  if (bsize < BLOCK_SIZE_SB8X8) {
    if (xd->ab_index != 0) {
      *rate = 0;
      *dist = 0;
      return;
    }
  } else {
    *(get_sb_partitioning(x, bsize)) = subsize;
  }
  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  if (cpi->sf.adjust_partitioning_from_last_frame) {
    // Check if any of the sub blocks are further split.
    if (partition == PARTITION_SPLIT && subsize > BLOCK_SIZE_SB8X8) {
      sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
      splits_below = 1;
      for (i = 0; i < 4; i++) {
        int jj = i >> 1, ii = i & 0x01;
        if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize)  {
          splits_below = 0;
        }
      }
    }

    // If the partition is not PARTITION_NONE, also try PARTITION_NONE unless
    // each of the 4 splits is split even further.
    if (partition != PARTITION_NONE && !splits_below &&
        mi_row + (ms >> 1) < cm->mi_rows &&
        mi_col + (ms >> 1) < cm->mi_cols) {
      *(get_sb_partitioning(x, bsize)) = bsize;
      pick_sb_modes(cpi, mi_row, mi_col, &none_rate, &none_dist, bsize,
                    get_block_context(x, bsize), INT64_MAX);

      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      none_rate += x->partition_cost[pl][PARTITION_NONE];

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      m->mbmi.sb_type = bs_type;
      *(get_sb_partitioning(x, bsize)) = subsize;
    }
  }

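  // Rate the partition type that was read from the mode info.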
  switch (partition) {
    case PARTITION_NONE:
      pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    bsize, get_block_context(x, bsize), INT64_MAX);
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      last_part_rate += x->partition_cost[pl][PARTITION_NONE];
      break;
    case PARTITION_HORZ:
      *(get_sb_index(xd, subsize)) = 0;
      pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    subsize, get_block_context(x, subsize), INT64_MAX);
      if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
        int rt = 0;
        int64_t dt = 0;
        update_state(cpi, get_block_context(x, subsize), subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
        *(get_sb_index(xd, subsize)) = 1;
        pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, &rt, &dt, subsize,
                      get_block_context(x, subsize), INT64_MAX);
        last_part_rate += rt;
        last_part_dist += dt;
      }
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      last_part_rate += x->partition_cost[pl][PARTITION_HORZ];
      break;
    case PARTITION_VERT:
      *(get_sb_index(xd, subsize)) = 0;
      pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    subsize, get_block_context(x, subsize), INT64_MAX);
      if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
        int rt = 0;
        int64_t dt = 0;
        update_state(cpi, get_block_context(x, subsize), subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
        *(get_sb_index(xd, subsize)) = 1;
        pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), &rt, &dt, subsize,
                      get_block_context(x, subsize), INT64_MAX);
        last_part_rate += rt;
        last_part_dist += dt;
      }
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      last_part_rate += x->partition_cost[pl][PARTITION_VERT];
      break;
    case PARTITION_SPLIT:
      // Split partition.
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
        int x_idx = (i & 1) * (ms >> 1);
        int y_idx = (i >> 1) * (ms >> 1);
        int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;

        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;

        *(get_sb_index(xd, subsize)) = i;

        rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
                         mi_col + x_idx, subsize, &rt, &dt, i != 3);
        last_part_rate += rt;
        last_part_dist += dt;
      }
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      last_part_rate += x->partition_cost[pl][PARTITION_SPLIT];
      break;
    default:
      assert(0);
  }
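  // When adapting from the last frame's partitioning, also rate a one-level
  // split of this block so the inherited partitioning can be refined.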
  if (cpi->sf.adjust_partitioning_from_last_frame
      && partition != PARTITION_SPLIT && bsize > BLOCK_SIZE_SB8X8
      && (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
      && (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
    BLOCK_SIZE_TYPE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
    split_rate = 0;
    split_dist = 0;
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

    // Split partition.
    for (i = 0; i < 4; i++) {
      int x_idx = (i & 1) * (bs >> 2);
      int y_idx = (i >> 1) * (bs >> 2);
      int rt = 0;
      int64_t dt = 0;
      ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
      PARTITION_CONTEXT sl[8], sa[8];

      if ((mi_row + y_idx >= cm->mi_rows)
          || (mi_col + x_idx >= cm->mi_cols))
        continue;

      *(get_sb_index(xd, split_subsize)) = i;
      *(get_sb_partitioning(x, bsize)) = split_subsize;
      *(get_sb_partitioning(x, split_subsize)) = split_subsize;

      save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

      pick_sb_modes(cpi, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
                    split_subsize, get_block_context(x, split_subsize),
                    INT64_MAX);

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

      if (rt < INT_MAX && dt < INT_MAX && i != 3)
        encode_sb(cpi, tp,  mi_row + y_idx, mi_col + x_idx, 0,
                  split_subsize);

      split_rate += rt;
      split_dist += dt;
      set_partition_seg_context(cm, xd, mi_row + y_idx, mi_col + x_idx);
      pl = partition_plane_context(xd, bsize);
      split_rate += x->partition_cost[pl][PARTITION_NONE];
    }
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, bsize);
    split_rate += x->partition_cost[pl][PARTITION_SPLIT];

    chosen_rate = split_rate;
    chosen_dist = split_dist;
  }

  // If last_part is better set the partitioning to that...
  if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
      < RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
    m->mbmi.sb_type = bsize;
    if (bsize >= BLOCK_SIZE_SB8X8)
      *(get_sb_partitioning(x, bsize)) = subsize;
    chosen_rate = last_part_rate;
    chosen_dist = last_part_dist;
  }
  // If none was better set the partitioning to that...
  if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
      > RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
    if (bsize >= BLOCK_SIZE_SB8X8)
      *(get_sb_partitioning(x, bsize)) = bsize;
    chosen_rate = none_rate;
    chosen_dist = none_dist;
  }

  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  // We must have chosen a partitioning and encoding or we'll fail later on.
  // No other opportunities for success.
  assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);

  if (do_recon)
    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
  *rate = chosen_rate;
  *dist = chosen_dist;
}

// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
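// Recursively searches PARTITION_NONE, HORZ, VERT and SPLIT for the block at
// (mi_row, mi_col), pruning with best_rd, and returns the best rate/distortion
// found, with the chosen partitioning stored in the macroblock context.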
static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
                              int mi_col, BLOCK_SIZE_TYPE bsize, int *rate,
                              int64_t *dist, int do_recon, int64_t best_rd) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD * const xd = &x->e_mbd;
  int bsl = b_width_log2(bsize), bs = 1 << bsl;
  int ms = bs / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  TOKENEXTRA *tp_orig = *tp;
  int i, pl;
  BLOCK_SIZE_TYPE subsize;
  int srate = INT_MAX;
  int64_t sdist = INT_MAX;
  (void) *tp_orig;

  if (bsize < BLOCK_SIZE_SB8X8)
    if (xd->ab_index != 0) {
      *rate = 0;
      *dist = 0;
      return;
    }
  assert(mi_height_log2(bsize) == mi_width_log2(bsize));

  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  // PARTITION_SPLIT
  if (!cpi->sf.use_partitions_greater_than
      || (cpi->sf.use_partitions_greater_than
          && bsize > cpi->sf.greater_than_block_size)) {
    if (bsize > BLOCK_SIZE_SB8X8) {
      int r4 = 0;
      int64_t d4 = 0, sum_rd = 0;
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
        int x_idx = (i & 1) * (ms >> 1);
        int y_idx = (i >> 1) * (ms >> 1);
        int r = 0;
        int64_t d = 0;
        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;
        *(get_sb_index(xd, subsize)) = i;
        rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize, &r,
                          &d, i != 3, best_rd - sum_rd);
        if (r == INT_MAX) {
          r4 = INT_MAX;
          sum_rd = INT64_MAX;
        } else {
          r4 += r;
          d4 += d;
          sum_rd = RDCOST(x->rdmult, x->rddiv, r4, d4);
        }
      }
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      if (r4 != INT_MAX && i == 4) {
        r4 += x->partition_cost[pl][PARTITION_SPLIT];
        *(get_sb_partitioning(x, bsize)) = subsize;
        assert(r4 >= 0);
        assert(d4 >= 0);
        srate = r4;
        sdist = d4;
        best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r4, d4));
      }
      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
    }
  }

  x->fast_ms = 0;
  x->pred_mv.as_int = 0;
  x->subblock_ref = 0;

  // Use 4 subblocks' motion estimation results to speed up current
  // partition's checking.
  if (cpi->sf.using_small_partition_info) {
    // Only use 8x8 result for non HD videos.
    // int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0;
    int use_8x8 = 1;

    if (cm->frame_type && !cpi->is_src_frame_alt_ref &&
        ((use_8x8 && bsize == BLOCK_SIZE_MB16X16) ||
        bsize == BLOCK_SIZE_SB32X32 || bsize == BLOCK_SIZE_SB64X64)) {
      int ref0 = 0, ref1 = 0, ref2 = 0, ref3 = 0;

      if (bsize == BLOCK_SIZE_MB16X16) {
        ref0 = x->sb8x8_context[xd->sb_index][xd->mb_index][0].mic.mbmi.
            ref_frame[0];
        ref1 = x->sb8x8_context[xd->sb_index][xd->mb_index][1].mic.mbmi.
            ref_frame[0];
        ref2 = x->sb8x8_context[xd->sb_index][xd->mb_index][2].mic.mbmi.
            ref_frame[0];
        ref3 = x->sb8x8_context[xd->sb_index][xd->mb_index][3].mic.mbmi.
            ref_frame[0];
      } else if (bsize == BLOCK_SIZE_SB32X32) {
        ref0 = x->mb_context[xd->sb_index][0].mic.mbmi.ref_frame[0];
        ref1 = x->mb_context[xd->sb_index][1].mic.mbmi.ref_frame[0];
        ref2 = x->mb_context[xd->sb_index][2].mic.mbmi.ref_frame[0];
        ref3 = x->mb_context[xd->sb_index][3].mic.mbmi.ref_frame[0];
      } else if (bsize == BLOCK_SIZE_SB64X64) {
        ref0 = x->sb32_context[0].mic.mbmi.ref_frame[0];
        ref1 = x->sb32_context[1].mic.mbmi.ref_frame[0];
        ref2 = x->sb32_context[2].mic.mbmi.ref_frame[0];
        ref3 = x->sb32_context[3].mic.mbmi.ref_frame[0];
      }

      // Currently, only consider 4 inter ref frames.
      if (ref0 && ref1 && ref2 && ref3) {
        int16_t mvr0 = 0, mvc0 = 0, mvr1 = 0, mvc1 = 0, mvr2 = 0, mvc2 = 0,
            mvr3 = 0, mvc3 = 0;
        int d01, d23, d02, d13;  // motion vector distance between 2 blocks

        // Get each subblock's motion vectors.
        if (bsize == BLOCK_SIZE_MB16X16) {
          mvr0 = x->sb8x8_context[xd->sb_index][xd->mb_index][0].mic.mbmi.mv[0].
              as_mv.row;
          mvc0 = x->sb8x8_context[xd->sb_index][xd->mb_index][0].mic.mbmi.mv[0].
              as_mv.col;
          mvr1 = x->sb8x8_context[xd->sb_index][xd->mb_index][1].mic.mbmi.mv[0].
              as_mv.row;
          mvc1 = x->sb8x8_context[xd->sb_index][xd->mb_index][1].mic.mbmi.mv[0].
              as_mv.col;
          mvr2 = x->sb8x8_context[xd->sb_index][xd->mb_index][2].mic.mbmi.mv[0].
              as_mv.row;
          mvc2 = x->sb8x8_context[xd->sb_index][xd->mb_index][2].mic.mbmi.mv[0].
              as_mv.col;
          mvr3 = x->sb8x8_context[xd->sb_index][xd->mb_index][3].mic.mbmi.mv[0].
              as_mv.row;
          mvc3 = x->sb8x8_context[xd->sb_index][xd->mb_index][3].mic.mbmi.mv[0].
              as_mv.col;
        } else if (bsize == BLOCK_SIZE_SB32X32) {
          mvr0 = x->mb_context[xd->sb_index][0].mic.mbmi.mv[0].as_mv.row;
          mvc0 = x->mb_context[xd->sb_index][0].mic.mbmi.mv[0].as_mv.col;
          mvr1 = x->mb_context[xd->sb_index][1].mic.mbmi.mv[0].as_mv.row;
          mvc1 = x->mb_context[xd->sb_index][1].mic.mbmi.mv[0].as_mv.col;
          mvr2 = x->mb_context[xd->sb_index][2].mic.mbmi.mv[0].as_mv.row;
          mvc2 = x->mb_context[xd->sb_index][2].mic.mbmi.mv[0].as_mv.col;
          mvr3 = x->mb_context[xd->sb_index][3].mic.mbmi.mv[0].as_mv.row;
          mvc3 = x->mb_context[xd->sb_index][3].mic.mbmi.mv[0].as_mv.col;
        } else if (bsize == BLOCK_SIZE_SB64X64) {
          mvr0 = x->sb32_context[0].mic.mbmi.mv[0].as_mv.row;
          mvc0 = x->sb32_context[0].mic.mbmi.mv[0].as_mv.col;
          mvr1 = x->sb32_context[1].mic.mbmi.mv[0].as_mv.row;
          mvc1 = x->sb32_context[1].mic.mbmi.mv[0].as_mv.col;
          mvr2 = x->sb32_context[2].mic.mbmi.mv[0].as_mv.row;
          mvc2 = x->sb32_context[2].mic.mbmi.mv[0].as_mv.col;
          mvr3 = x->sb32_context[3].mic.mbmi.mv[0].as_mv.row;
          mvc3 = x->sb32_context[3].mic.mbmi.mv[0].as_mv.col;
        }

        // Adjust sign if ref is alt_ref
        if (cm->ref_frame_sign_bias[ref0]) {
          mvr0 *= -1;
          mvc0 *= -1;
        }

        if (cm->ref_frame_sign_bias[ref1]) {
          mvr1 *= -1;
          mvc1 *= -1;
        }

        if (cm->ref_frame_sign_bias[ref2]) {
          mvr2 *= -1;
          mvc2 *= -1;
        }

        if (cm->ref_frame_sign_bias[ref3]) {
          mvr3 *= -1;
          mvc3 *= -1;
        }

        // Calculate mv distances.
        d01 = MAX(abs(mvr0 - mvr1), abs(mvc0 - mvc1));
        d23 = MAX(abs(mvr2 - mvr3), abs(mvc2 - mvc3));
        d02 = MAX(abs(mvr0 - mvr2), abs(mvc0 - mvc2));
        d13 = MAX(abs(mvr1 - mvr3), abs(mvc1 - mvc3));

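        // If the four subblock MVs are close to one another (within 24
        // eighth-pel units, roughly 3 pels), allow a faster motion search
        // around their average for this partition.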
        if (d01 < 24 && d23 < 24 && d02 < 24 && d13 < 24) {
          // Set fast motion search level.
          x->fast_ms = 1;

          // Calculate prediction MV
          x->pred_mv.as_mv.row = (mvr0 + mvr1 + mvr2 + mvr3) >> 2;
          x->pred_mv.as_mv.col = (mvc0 + mvc1 + mvc2 + mvc3) >> 2;

          if (ref0 == ref1 && ref1 == ref2 && ref2 == ref3 &&
              d01 < 2 && d23 < 2 && d02 < 2 && d13 < 2) {
            // Set fast motion search level.
            x->fast_ms = 2;

            if (!d01 && !d23 && !d02 && !d13) {
              x->fast_ms = 3;
              x->subblock_ref = ref0;
            }
          }
        }
      }
    }
  }

  if (!cpi->sf.use_partitions_less_than
      || (cpi->sf.use_partitions_less_than
          && bsize <= cpi->sf.less_than_block_size)) {
    int larger_is_better = 0;
    // PARTITION_NONE
    if ((mi_row + (ms >> 1) < cm->mi_rows) &&
        (mi_col + (ms >> 1) < cm->mi_cols)) {
      int r;
      int64_t d;
      pick_sb_modes(cpi, mi_row, mi_col, &r, &d, bsize,
                    get_block_context(x, bsize), best_rd);
      if (r != INT_MAX && bsize >= BLOCK_SIZE_SB8X8) {
        set_partition_seg_context(cm, xd, mi_row, mi_col);
        pl = partition_plane_context(xd, bsize);
        r += x->partition_cost[pl][PARTITION_NONE];
      }

      if (r != INT_MAX &&
          (bsize == BLOCK_SIZE_SB8X8 ||
           RDCOST(x->rdmult, x->rddiv, r, d) <
               RDCOST(x->rdmult, x->rddiv, srate, sdist))) {
        best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r, d));
        srate = r;
        sdist = d;
        larger_is_better = 1;
        if (bsize >= BLOCK_SIZE_SB8X8)
          *(get_sb_partitioning(x, bsize)) = bsize;
      }
    }

    if (bsize == BLOCK_SIZE_SB8X8) {
      int r4 = 0;
      int64_t d4 = 0, sum_rd = 0;
      subsize = get_subsize(bsize, PARTITION_SPLIT);

      for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
        int x_idx = (i & 1) * (ms >> 1);
        int y_idx = (i >> 1) * (ms >> 1);
        int r = 0;
        int64_t d = 0;

        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;

        *(get_sb_index(xd, subsize)) = i;
        rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize, &r,
                          &d, i != 3, best_rd - sum_rd);

        if (r == INT_MAX) {
          r4 = INT_MAX;
          sum_rd = INT64_MAX;
        } else {
          r4 += r;
          d4 += d;
          sum_rd = RDCOST(x->rdmult, x->rddiv, r4, d4);
        }
      }
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      if (r4 != INT_MAX && i == 4) {
        r4 += x->partition_cost[pl][PARTITION_SPLIT];
        if (RDCOST(x->rdmult, x->rddiv, r4, d4) <
            RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
          srate = r4;
          sdist = d4;
          *(get_sb_partitioning(x, bsize)) = subsize;
          best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r4, d4));
        }
      }
      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
    }

    if (!cpi->sf.use_square_partition_only &&
        (!cpi->sf.less_rectangular_check || !larger_is_better)) {
      // PARTITION_HORZ
      if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
        int r2, r = 0;
        int64_t d2, d = 0, h_rd;
        subsize = get_subsize(bsize, PARTITION_HORZ);
        *(get_sb_index(xd, subsize)) = 0;
        pick_sb_modes(cpi, mi_row, mi_col, &r2, &d2, subsize,
                      get_block_context(x, subsize), best_rd);
        h_rd = RDCOST(x->rdmult, x->rddiv, r2, d2);
        if (r2 != INT_MAX && h_rd < best_rd &&
            mi_row + (ms >> 1) < cm->mi_rows) {
          update_state(cpi, get_block_context(x, subsize), subsize, 0);
          encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);

          *(get_sb_index(xd, subsize)) = 1;
          pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, &r, &d, subsize,
                        get_block_context(x, subsize), best_rd - h_rd);
          if (r == INT_MAX) {
            r2 = INT_MAX;
          } else {
            r2 += r;
            d2 += d;
          }
        }
        set_partition_seg_context(cm, xd, mi_row, mi_col);
        pl = partition_plane_context(xd, bsize);
        if (r2 < INT_MAX)
          r2 += x->partition_cost[pl][PARTITION_HORZ];
        if (r2 != INT_MAX && RDCOST(x->rdmult, x->rddiv, r2, d2)
            < RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
          best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r2, d2));
          srate = r2;
          sdist = d2;
          *(get_sb_partitioning(x, bsize)) = subsize;
        }
        restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      }

      // PARTITION_VERT
      if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
        int r2;
        int64_t d2, v_rd;
        subsize = get_subsize(bsize, PARTITION_VERT);
        *(get_sb_index(xd, subsize)) = 0;
        pick_sb_modes(cpi, mi_row, mi_col, &r2, &d2, subsize,
                      get_block_context(x, subsize), best_rd);
        v_rd = RDCOST(x->rdmult, x->rddiv, r2, d2);
        if (r2 != INT_MAX && v_rd < best_rd &&
            mi_col + (ms >> 1) < cm->mi_cols) {
          int r = 0;
          int64_t d = 0;
          update_state(cpi, get_block_context(x, subsize), subsize, 0);
          encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);

          *(get_sb_index(xd, subsize)) = 1;
          pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), &r, &d, subsize,
                        get_block_context(x, subsize), best_rd - v_rd);
          if (r == INT_MAX) {
            r2 = INT_MAX;
          } else {
            r2 += r;
            d2 += d;
          }
        }
        set_partition_seg_context(cm, xd, mi_row, mi_col);
        pl = partition_plane_context(xd, bsize);
        if (r2 < INT_MAX)
          r2 += x->partition_cost[pl][PARTITION_VERT];
        if (r2 != INT_MAX &&
            RDCOST(x->rdmult, x->rddiv, r2, d2)
            < RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
          srate = r2;
          sdist = d2;
          *(get_sb_partitioning(x, bsize)) = subsize;
        }
        restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      }
    }
  }
  *rate = srate;
  *dist = sdist;

  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  if (srate < INT_MAX && sdist < INT_MAX && do_recon)
    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
  if (bsize == BLOCK_SIZE_SB64X64) {
    assert(tp_orig < *tp);
    assert(srate < INT_MAX);
    assert(sdist < INT_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}

// Examines the 64x64 block and chooses the best reference frame.
static void rd_pick_reference_frame(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
                                    int mi_col, int *rate, int64_t *dist) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD * const xd = &x->e_mbd;
  int bsl = b_width_log2(BLOCK_SIZE_SB64X64), bs = 1 << bsl;
  int ms = bs / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int pl;
  int r;
  int64_t d;

  save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);

  // Default is no mask (all reference frames allowed).
  cpi->ref_frame_mask = 0;

  // Do RD search for 64x64.
  if ((mi_row + (ms >> 1) < cm->mi_rows) &&
      (mi_col + (ms >> 1) < cm->mi_cols)) {
    cpi->set_ref_frame_mask = 1;
    pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_SIZE_SB64X64,
                  get_block_context(x, BLOCK_SIZE_SB64X64), INT64_MAX);
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, BLOCK_SIZE_SB64X64);
    r += x->partition_cost[pl][PARTITION_NONE];

    *(get_sb_partitioning(x, BLOCK_SIZE_SB64X64)) = BLOCK_SIZE_SB64X64;
    cpi->set_ref_frame_mask = 0;
  }

  *rate = r;
  *dist = d;
  // RDCOST(x->rdmult, x->rddiv, r, d)

  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);

  /*if (srate < INT_MAX && sdist < INT_MAX)
    encode_sb(cpi, tp, mi_row, mi_col, 1, BLOCK_SIZE_SB64X64);

  if (bsize == BLOCK_SIZE_SB64X64) {
    assert(tp_orig < *tp);
    assert(srate < INT_MAX);
    assert(sdist < INT_MAX);
  } else {
    assert(tp_orig == *tp);
  }
  */
}

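// Encodes one row of 64x64 superblocks within the current tile, selecting per
// superblock how the partitioning is determined by the active speed features.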
static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
                          int *totalrate) {
  VP9_COMMON * const cm = &cpi->common;
  int mi_col;
  // Initialize the left context for the new SB row
  vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
  vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));

  // Code each SB in the row
  for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    int dummy_rate;
    int64_t dummy_dist;

    // Initialize a mask of modes that we will not consider;
    // cpi->unused_mode_skip_mask = 0x0000000AAE17F800 (test no golden)
    if (cpi->common.frame_type == KEY_FRAME)
      cpi->unused_mode_skip_mask = 0;
    else
      cpi->unused_mode_skip_mask = 0xFFFFFFFFFFFFFE00;

    if (cpi->sf.reference_masking) {
      rd_pick_reference_frame(cpi, tp, mi_row, mi_col,
                              &dummy_rate, &dummy_dist);
    }

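    // Decide how to obtain the SB partitioning: one fixed size for every
    // block, variance based, reused/adjusted from the last frame, or a full
    // RD partition search.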
    if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
        cpi->sf.use_one_partition_size_always ) {
      const int idx_str = cm->mode_info_stride * mi_row + mi_col;
      MODE_INFO *m = cm->mi + idx_str;
      MODE_INFO *p = cm->prev_mi + idx_str;

      if (cpi->sf.use_one_partition_size_always) {
        set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
        set_partitioning(cpi, m, cpi->sf.always_this_block_size);
        rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                         &dummy_rate, &dummy_dist, 1);
      } else if (cpi->sf.partition_by_variance) {
        choose_partitioning(cpi, cm->mi, mi_row, mi_col);
        rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                         &dummy_rate, &dummy_dist, 1);
      } else {
        if ((cpi->common.current_video_frame
            % cpi->sf.last_partitioning_redo_frequency) == 0
            || cm->prev_mi == 0
            || cpi->common.show_frame == 0
            || cpi->common.frame_type == KEY_FRAME
            || cpi->is_src_frame_alt_ref) {
          rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                            &dummy_rate, &dummy_dist, 1, INT64_MAX);
        } else {
          copy_partitioning(cpi, m, p);
          rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                           &dummy_rate, &dummy_dist, 1);
        }
      }
    } else {
      rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                        &dummy_rate, &dummy_dist, 1, INT64_MAX);
    }
  }
}

static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  x->act_zbin_adj = 0;
  cpi->seg0_idx = 0;
  xd->mode_info_stride = cm->mode_info_stride;
  xd->frame_type = cm->frame_type;
  // reset intra mode contexts
  if (cm->frame_type == KEY_FRAME)
    vp9_init_mbmode_probs(cm);
  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);

  // TODO(jkoleszar): are these initializations required?
  setup_pre_planes(xd, 0, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]],
                   0, 0, NULL, NULL);
  setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
  setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
  xd->mode_info_context->mbmi.mode = DC_PRED;
  xd->mode_info_context->mbmi.uv_mode = DC_PRED;
  vp9_zero(cpi->y_mode_count)
  vp9_zero(cpi->y_uv_mode_count)
  vp9_zero(cm->fc.inter_mode_counts)
  vp9_zero(cpi->partition_count);
  vp9_zero(cpi->intra_inter_count);
  vp9_zero(cpi->comp_inter_count);
  vp9_zero(cpi->single_ref_count);
  vp9_zero(cpi->comp_ref_count);
  vp9_zero(cm->fc.mbskip_count);
  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(cm->above_context[0], 0,
             sizeof(ENTROPY_CONTEXT) * 2 * MAX_MB_PLANE * aligned_mi_cols);
  vpx_memset(cm->above_seg_context, 0,
             sizeof(PARTITION_CONTEXT) * aligned_mi_cols);
}

static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
  if (lossless) {
    // printf("Switching to lossless\n");
    cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
    cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
    cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
    cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
    cpi->mb.optimize = 0;
    cpi->common.filter_level = 0;
    cpi->zbin_mode_boost_enabled = 0;
    cpi->common.txfm_mode = ONLY_4X4;
  } else {
    // printf("Not lossless\n");
    cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
    cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
    cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
    cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
  }
}
static void switch_txfm_mode(VP9_COMP *cpi) {
  if (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
      cpi->common.txfm_mode >= ALLOW_32X32)
    cpi->common.txfm_mode = ALLOW_32X32;
}

static void encode_frame_internal(VP9_COMP *cpi) {
  MACROBLOCK * const x = &cpi->mb;
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCKD * const xd = &x->e_mbd;
  int totalrate;
//  fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
//           cpi->common.current_video_frame, cpi->common.show_frame,
//           cm->frame_type);
// debug output
#if DBG_PRNT_SEGMAP
  {
    FILE *statsfile;
    statsfile = fopen("segmap2.stt", "a");
    fprintf(statsfile, "\n");
    fclose(statsfile);
  }
#endif

  totalrate = 0;

  // Reset frame count of inter 0,0 motion vector usage.
  cpi->inter_zz_count = 0;
  vp9_zero(cm->fc.switchable_interp_count);
  vp9_zero(cpi->txfm_stepdown_count);