vp9_bitstream.c 43.2 KB
Newer Older
John Koleszar's avatar
John Koleszar committed
1
/*
2
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
John Koleszar's avatar
John Koleszar committed
3
 *
4
 *  Use of this source code is governed by a BSD-style license
5 6
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
7
 *  in the file PATENTS.  All contributing project authors may
8
 *  be found in the AUTHORS file in the root of the source tree.
John Koleszar's avatar
John Koleszar committed
9 10
 */

11 12 13
#include <assert.h>
#include <stdio.h>
#include <limits.h>
John Koleszar's avatar
John Koleszar committed
14

15 16
#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
17
#include "vpx_ports/mem_ops.h"
18

19
#include "vp9/common/vp9_entropy.h"
20
#include "vp9/common/vp9_entropymode.h"
21
#include "vp9/common/vp9_entropymv.h"
22
#include "vp9/common/vp9_mvref_common.h"
23 24 25 26
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"
27

28
#include "vp9/encoder/vp9_cost.h"
29
#include "vp9/encoder/vp9_bitstream.h"
30 31
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_mcomp.h"
32
#include "vp9/encoder/vp9_segmentation.h"
33
#include "vp9/encoder/vp9_subexp.h"
34
#include "vp9/encoder/vp9_tokenize.h"
35 36
#include "vp9/encoder/vp9_write_bit_buffer.h"

37 38 39 40 41 42 43 44 45
// Pre-built Huffman-style codes for the fixed mode/partition trees.
// Each vp9_token pair is {value, length}: the code's bit pattern and the
// number of bits to emit.  Entries are indexed by the mode/partition enum
// (inter modes via INTER_OFFSET), matching the corresponding vp9_*_tree.
static const struct vp9_token intra_mode_encodings[INTRA_MODES] = {
  {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
  {62, 6}, {2, 2}};
static const struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
  {{0, 1}, {2, 2}, {3, 2}};
static const struct vp9_token partition_encodings[PARTITION_TYPES] =
  {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
static const struct vp9_token inter_mode_encodings[INTER_MODES] =
  {{2, 2}, {6, 3}, {0, 1}, {7, 3}};
46

47
static void write_intra_mode(vp9_writer *w, PREDICTION_MODE mode,
48
                             const vp9_prob *probs) {
49
  vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
50 51
}

52
static void write_inter_mode(vp9_writer *w, PREDICTION_MODE mode,
53 54
                             const vp9_prob *probs) {
  assert(is_inter_mode(mode));
55 56
  vp9_write_token(w, vp9_inter_mode_tree, probs,
                  &inter_mode_encodings[INTER_OFFSET(mode)]);
57 58
}

59 60
// Writes `data` as a raw literal using just enough bits to represent any
// value in [0, max].
static void encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                                int data, int max) {
  const int bits = get_unsigned_bits(max);
  vp9_wb_write_literal(wb, data, bits);
}

64 65 66 67 68 69
// Conditionally signals updates for the n-1 internal-node probabilities of a
// tree, deriving per-branch counts from the observed symbol counts first.
static void prob_diff_update(const vp9_tree_index *tree,
                             vp9_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vp9_writer *w) {
  unsigned int branch_ct[32][2];
  int node;

  // The scratch table above caps the tree size.
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (node = 0; node < n - 1; ++node) {
    vp9_cond_prob_diff_update(w, &probs[node], branch_ct[node]);
  }
}

79
static void write_selected_tx_size(const VP9_COMMON *cm,
80 81 82
                                   const MACROBLOCKD *xd, vp9_writer *w) {
  TX_SIZE tx_size = xd->mi[0].src_mi->mbmi.tx_size;
  BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
83 84
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
85
                                                 &cm->fc->tx_probs);
86
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
87
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
88
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
89
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
90 91 92 93
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}

94 95 96
// Codes the skip flag for a block and returns it.  When the segment forces
// skip (SEG_LVL_SKIP) nothing is written and 1 is returned implicitly.
static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, vp9_writer *w) {
  int skip;

  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP))
    return 1;

  skip = mi->mbmi.skip;
  vp9_write(w, skip, vp9_get_skip_prob(cm, xd));
  return skip;
}

105 106
// Conditionally signals an update for each skip-context probability based on
// the counts accumulated while encoding the frame.
static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w,
                              FRAME_COUNTS *counts) {
  int ctx;

  for (ctx = 0; ctx < SKIP_CONTEXTS; ++ctx) {
    vp9_cond_prob_diff_update(w, &cm->fc->skip_probs[ctx], counts->skip[ctx]);
  }
}

113 114
// Conditionally signals probability updates for the switchable interpolation
// filter tree, one set per filter context.
static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w,
                                           FRAME_COUNTS *counts) {
  int ctx;

  for (ctx = 0; ctx < SWITCHABLE_FILTER_CONTEXTS; ++ctx) {
    prob_diff_update(vp9_switchable_interp_tree,
                     cm->fc->switchable_interp_prob[ctx],
                     counts->switchable_interp[ctx], SWITCHABLE_FILTERS, w);
  }
}

122
// Packs one block's coefficient tokens into the bitstream, consuming the
// token list at *tp up to (and including) the terminating EOSB_TOKEN or
// until `stop`.  On return *tp points just past the consumed tokens.
// `bit_depth` selects the extra-bit probability tables in high-bitdepth
// builds; it is unused otherwise.
static void pack_mb_tokens(vp9_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *const stop,
                           vpx_bit_depth_t bit_depth) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    int i = 0;                  // current node index into vp9_coef_tree
    int v = a->value;           // code bits for this token
    int n = a->len;             // number of code bits
#if CONFIG_VP9_HIGHBITDEPTH
    // Higher bit depths use extra-bit tables tuned for the wider ranges.
    const vp9_extra_bit *b;
    if (bit_depth == VPX_BITS_12)
      b = &vp9_extra_bits_high12[t];
    else if (bit_depth == VPX_BITS_10)
      b = &vp9_extra_bits_high10[t];
    else
      b = &vp9_extra_bits[t];
#else
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    (void) bit_depth;
#endif  // CONFIG_VP9_HIGHBITDEPTH

    /* skip one or two nodes */
    // skip_eob_node > 0 means the EOB (and possibly ZERO) decision is
    // implied by context, so start deeper in the tree with fewer bits.
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains.  It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes.  The first treed write takes care of the
    // unconstrained nodes.  The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      // Unconstrained prefix with per-context probabilities, then the
      // constrained suffix using the Pareto-derived table for this context.
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
      vp9_write_tree(w, vp9_coef_con_tree,
                     vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                     v, n - len, 0);
    } else {
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
    }

    // Tokens with a base value carry extra magnitude bits plus a sign bit.
    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;         // magnitude residual (sign is the low bit of e)
        int n = l;              /* number of bits in v, assumed nonzero */
        int i = 0;

        // Walk the extra-bit tree emitting residual bits MSB-first.
        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);  // sign bit
    }
    ++p;
  }

  // Step past the EOSB token if that is what ended the loop.
  // NOTE(review): when the loop exits with p == stop this reads p->token at
  // `stop` — presumably the caller guarantees that slot is readable; verify.
  *tp = p + (p->token == EOSB_TOKEN);
}

195
// Codes the block's segment id with the segment tree, but only when
// segmentation is enabled and the map is being refreshed this frame.
static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (!seg->enabled || !seg->update_map)
    return;
  vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}

201
// This function encodes the reference frame selection for the current block:
// nothing when the segment pins the reference, otherwise an optional
// compound/single choice followed by the reference frame bits.
static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                             vp9_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    // Segment dictates the reference: nothing is written, but the encoder's
    // choice must agree with the segment data.
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
               vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vp9_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
    } else {
      // Frame-level mode already fixes compound vs single; just sanity-check.
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      // Compound: one bit selects which non-ALTREF frame pairs with ALTREF.
      vp9_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      // Single reference: first bit LAST vs not; second bit GOLDEN vs ALTREF.
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}
John Koleszar's avatar
John Koleszar committed
236

237 238
// Writes the full mode-info syntax for one block on a non-key frame:
// segment id, skip flag, intra/inter flag, transform size, then either the
// intra modes or the inter modes, reference frames, filter and motion
// vectors.  The write order here is normative bitstream syntax.
static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
                                vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc->nmvc;
  const MACROBLOCK *const x = &cpi->td.mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  // Segment id: with temporal updates a prediction flag is sent first and
  // the explicit id only when prediction fails.
  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cm, xd, segment_id, mi, w);

  // Intra/inter flag, unless the segment already fixes the reference frame.
  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));

  // Per-block transform size, except for skipped inter blocks (their size
  // is implied) and blocks smaller than 8x8.
  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter &&
        (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
    write_selected_tx_size(cm, xd, w);
  }

  if (!is_inter) {
    // Intra: one luma mode for >=8x8, otherwise one per 4x4 sub-block.
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
    const vp9_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);

    // If segment skip is not enabled code the mode.
    if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
      }
    }

    // Interpolation filter: coded only when the frame level says SWITCHABLE.
    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc->switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mbmi->interp_filter]);
      ++cpi->interp_filter_selected[0][mbmi->interp_filter];
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      // Sub-8x8: a mode (and MV for NEWMV) per 4x4 sub-block.
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          if (b_mode == NEWMV) {
            // One MV per reference (two when compound).
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                        &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                        allow_hp);
      }
    }
  }
}
340

341
static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
hkuang's avatar
hkuang committed
342
                              MODE_INFO *mi_8x8, vp9_writer *w) {
343
  const struct segmentation *const seg = &cm->seg;
hkuang's avatar
hkuang committed
344
  const MODE_INFO *const mi = mi_8x8;
345 346
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
347 348
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
349

350
  if (seg->update_map)
351
    write_segment_id(w, seg, mbmi->segment_id);
352

353
  write_skip(cm, xd, mbmi->segment_id, mi, w);
354

355
  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
356
    write_selected_tx_size(cm, xd, w);
357

358 359
  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
360
  } else {
361 362
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
363
    int idx, idy;
364 365 366 367 368 369

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
370 371
      }
    }
372 373
  }

374
  write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
375 376
}

James Zern's avatar
James Zern committed
377
static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
378 379
                          vp9_writer *w, TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
380
                          int mi_row, int mi_col) {
381
  const VP9_COMMON *const cm = &cpi->common;
382
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
383
  MODE_INFO *m;
Dmitry Kovalev's avatar
Dmitry Kovalev committed
384

hkuang's avatar
hkuang committed
385 386
  xd->mi = cm->mi + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi;
387

James Zern's avatar
James Zern committed
388
  set_mi_row_col(xd, tile,
Dmitry Kovalev's avatar
Dmitry Kovalev committed
389
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
James Zern's avatar
James Zern committed
390 391
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
392
  if (frame_is_intra_only(cm)) {
393
    write_mb_modes_kf(cm, xd, xd->mi, w);
394
  } else {
395
    pack_inter_mode_mvs(cpi, m, w);
396 397 398
  }

  assert(*tok < tok_end);
399
  pack_mb_tokens(w, tok, tok_end, cm->bit_depth);
400 401
}

402 403
static void write_partition(const VP9_COMMON *const cm,
                            const MACROBLOCKD *const xd,
404
                            int hbs, int mi_row, int mi_col,
405
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
406
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
407 408 409
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;
410 411

  if (has_rows && has_cols) {
412
    vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
413
  } else if (!has_rows && has_cols) {
414 415
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
416
  } else if (has_rows && !has_cols) {
417 418
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
419
  } else {
420
    assert(p == PARTITION_SPLIT);
421 422 423
  }
}

424
// Recursively writes a superblock: the partition choice for this level, then
// either the leaf block(s) or four recursive quadrants, finally updating the
// partition context used by later blocks.
static void write_modes_sb(VP9_COMP *cpi,
                           const TileInfo *const tile, vp9_writer *w,
                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) / 4;  // half-block step in mi units
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const MODE_INFO *m = NULL;

  // Quadrants entirely outside the frame are skipped (and never coded).
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  m = cm->mi[mi_row * cm->mi_stride + mi_col].src_mi;

  // The partition is implied by the size stored in the top-left mode info.
  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    // Sub-8x8 partitions are handled inside a single 8x8 block write.
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        // The second half may fall outside the frame.
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        // Recurse into the four quadrants in raster order.
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

482
static void write_modes(VP9_COMP *cpi,
483 484
                        const TileInfo *const tile, vp9_writer *w,
                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
485
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
486
  int mi_row, mi_col;
487

James Zern's avatar
James Zern committed
488
  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
489
       mi_row += MI_BLOCK_SIZE) {
490
    vp9_zero(xd->left_seg_context);
James Zern's avatar
James Zern committed
491
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
492
         mi_col += MI_BLOCK_SIZE)
493 494
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
John Koleszar's avatar
John Koleszar committed
495
  }
John Koleszar's avatar
John Koleszar committed
496
}
497

498
// Converts the frame's coefficient token counts for one transform size into
// per-node branch counts and candidate model probabilities, fixing up the
// EOB branch with the separately tracked eob_branch counters.
static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
                                    vp9_coeff_stats *coef_branch_ct,
                                    vp9_coeff_probs_model *coef_probs) {
  vp9_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          // Derive branch counts for the full coefficient tree.
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          // The "not EOB" count is the EOB-branch visits minus the EOB hits.
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          // Only the unconstrained (model) nodes get explicit probabilities;
          // the rest are derived from the Pareto model at decode time.
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}

525
// Signals coefficient probability updates for one transform size, using the
// strategy selected by speed features: TWO_LOOP first measures whether any
// update pays for itself before writing; ONE_LOOP_REDUCED writes in a single
// pass, back-filling the "no update" bits once the first update is found.
static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp9_coeff_stats *frame_branch_ct,
                                     vp9_coeff_probs_model *new_coef_probs) {
  vp9_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  int stepsize = cpi->sf.coeff_prob_appx_step;

  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};  // update[0]: skipped nodes, update[1]: updated
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                // The pivot node is searched through the full Pareto model;
                // other nodes use the plain binary savings search.
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                // Each node costs one "update?" flag either way; count the
                // net bit savings of sending the update.
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      // Second pass: redo the search and actually write the updates.
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case ONE_LOOP_REDUCED: {
      int updates = 0;
      int noupdates_before_first = 0;

      // Skip updates entirely for large transforms when the search is
      // restricted to 8x8 — those probabilities will not be used.
      if (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8) {
        vp9_write_bit(bc, 0);
        return;
      }

      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;

                if (t == PIVOT_NODE) {
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                } else {
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                }

                if (s > 0 && newp != *oldp)
                  u = 1;
                updates += u;
                // Until the first update is found nothing is written; just
                // remember how many "no update" flags are owed.
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  // Back-fill the deferred "no update" flags.
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }

    default:
      assert(0);
  }
}
John Koleszar's avatar
John Koleszar committed
676

677
// Builds coefficient statistics for every transform size, then signals
// probability updates for the sizes the current tx_mode can actually use.
static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE sz;
  vp9_coeff_stats branch_ct[TX_SIZES][PLANE_TYPES];
  vp9_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];

  // Statistics are gathered for all sizes so the counters stay consistent.
  for (sz = TX_4X4; sz <= TX_32X32; ++sz) {
    build_tree_distribution(cpi, sz, branch_ct[sz], coef_probs[sz]);
  }

  // Only sizes usable under the selected tx_mode are coded in the stream.
  for (sz = TX_4X4; sz <= max_tx_size; ++sz) {
    update_coef_probs_common(w, cpi, sz, branch_ct[sz], coef_probs[sz]);
  }
}
692

693
static void encode_loopfilter(struct loopfilter *lf,
694
                              struct vp9_write_bit_buffer *wb) {
695 696
  int i;

697
  // Encode the loop filter level and type
698 699
  vp9_wb_write_literal(wb, lf->filter_level, 6);
  vp9_wb_write_literal(wb, lf->sharpness_level, 3);
700

701 702
  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
703
  vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);
704

705 706 707
  if (lf->mode_ref_delta_enabled) {
    vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
708
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
709
        const int delta = lf->ref_deltas[i];
710 711 712
        const int changed = delta != lf->last_ref_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
713
          lf->last_ref_deltas[i] = delta;
714 715
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
716 717 718 719
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
720
        const int delta = lf->mode_deltas[i];
721 722 723
        const int changed = delta != lf->last_mode_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
724
          lf->last_mode_deltas[i] = delta;
725 726
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
727 728 729 730 731 732
        }
      }
    }
  }
}

733
// Writes one optional delta-Q value: a presence bit, then (only when the
// delta is non-zero) a 4-bit magnitude followed by a sign bit.
static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  const int present = (delta_q != 0);

  vp9_wb_write_bit(wb, present);
  if (present) {
    // Magnitude first, then the sign.
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  }
}
// Writes the quantization parameters to the uncompressed frame header:
// the base Q index, followed by the three optional delta-Q values
// (luma DC, chroma DC, chroma AC), each coded via write_delta_q().
static void encode_quantization(const VP9_COMMON *const cm,
                                struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}
// Writes the segmentation syntax to the uncompressed frame header: the
// enabled flag, the segmentation map probabilities (spatial and, optionally,
// temporal), and the per-segment feature data.  Calls
// vp9_choose_segmap_coding_method() first, which decides between temporal
// and spatial map coding and fills in seg->tree_probs / seg->pred_probs.
static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  const struct segmentation *seg = &cm->seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cm, xd);
    // Write out probabilities used to decode unpredicted macro-block
    // segments.  A probability equal to MAX_PROB is the default and is
    // signalled with a single zero bit instead of 8 literal bits.
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      // Temporal prediction probabilities, coded with the same
      // update-bit scheme as the tree probabilities above.
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    // For each (segment, feature) pair: an active bit, then the feature
    // data bounded by the per-feature maximum; signed features carry an
    // extra sign bit after the magnitude.
    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = vp9_segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = vp9_get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}
// Writes the frame's transform mode and, when the mode is TX_MODE_SELECT,
// the conditional diff-updates of the per-context transform-size
// probabilities (8x8, 16x16, 32x32) derived from the frame counts.
static void encode_txfm_probs(VP9_COMMON *cm, vp9_writer *w,
                              FRAME_COUNTS *counts) {
  // Mode
  // 2-bit literal clamped to ALLOW_32X32; one extra bit distinguishes
  // ALLOW_32X32 from TX_MODE_SELECT.
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    // For each block-size class, convert the tx-size counts into binary
    // branch counts and conditionally diff-update each tree probability.
    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}
// Writes the frame-level interpolation filter choice: a single bit for
// SWITCHABLE (per-block signalling), otherwise a 2-bit literal code.
static void write_interp_filter(INTERP_FILTER filter,
                                struct vp9_write_bit_buffer *wb) {
  // Maps INTERP_FILTER enum values to the 2-bit bitstream codes.
  // static const: the table is built once, not re-initialized on the
  // stack at every call as the previous non-static local was.
  static const int filter_to_literal[] = { 1, 0, 2, 3 };

  vp9_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vp9_wb_write_literal(wb, filter_to_literal[filter], 2);
}
// If the frame was coded with SWITCHABLE interpolation filters but only a
// single filter was ever selected, demote the choice to a fixed
// frame-level filter so per-block signalling can be skipped.
static void fix_interp_filter(VP9_COMMON *cm, FRAME_COUNTS *counts) {
  int usage[SWITCHABLE_FILTERS];
  int filter, ctx, num_used = 0;

  if (cm->interp_filter != SWITCHABLE)
    return;

  // Tally how often each switchable filter was chosen, summed over all
  // switchable-filter contexts.
  for (filter = 0; filter < SWITCHABLE_FILTERS; ++filter) {
    usage[filter] = 0;
    for (ctx = 0; ctx < SWITCHABLE_FILTER_CONTEXTS; ++ctx)
      usage[filter] += counts->switchable_interp[ctx][filter];
    num_used += (usage[filter] > 0);
  }

  // Exactly one filter in use: pin it at the frame level.
  if (num_used == 1) {
    for (filter = 0; filter < SWITCHABLE_FILTERS; ++filter) {
      if (usage[filter]) {
        cm->interp_filter = filter;
        break;
      }
    }
  }
}
// Writes the tile configuration to the uncompressed frame header.
// Columns: the increment over the minimum log2 column count is coded in
// unary (a run of 1-bits, terminated by a 0-bit unless already at the
// maximum).  Rows: log2_tile_rows (0..2) is coded in up to two bits.
static void write_tile_info(const VP9_COMMON *const cm,
                            struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vp9_wb_write_bit(wb, 1);

  // Terminating zero bit, omitted when the count is already maximal.
  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
// Returns the bitmask of reference frame buffer slots that should be
// refreshed with the current coded frame (bit i set => slot i refreshed).
static int get_refresh_mask(VP9_COMP *cpi) {
  if (vp9_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot and,
    // if we're updating the GF with the current decoded frame, we save it
    // instead to the ARF slot.
    // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    // In two-pass mode with multiple ARFs allowed, the GF group decides
    // which ARF slot the current update targets.
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}
// Packs the compressed data of every tile into data_ptr and returns the
// total number of bytes written.  Every tile except the last is prefixed
// with its size as a 4-byte big-endian value so the decoder can locate
// tile boundaries.
static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;
  int tile_row, tile_col;
  TOKENEXTRA *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  // Reset the above-row segmentation context before coding any tiles.
  vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) *
             mi_cols_aligned_to_sb(cm->mi_cols));

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      int tile_idx = tile_row * tile_cols + tile_col;
      TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];

      tok_end = cpi->tile_tok[tile_row][tile_col] +
          cpi->tok_count[tile_row][tile_col];

      // Leave 4 bytes in front of every tile but the last for its size
      // prefix, which is filled in after the tile has been encoded.
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
                  &residual_bc, &tok, tok_end);
      // All tokens generated for this tile must have been consumed.
      assert(tok == tok_end);
      vp9_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}
static void write_display_size(const VP9_COMMON *cm,
                               struct vp9_write_bit_buffer *wb) {
971 972
  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
973 974
  vp9_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
975 976
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
977 978 979
  }
}

980
static void write_frame_size(const VP9_COMMON *cm,
981 982 983 984
                             struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->width - 1