// rav1e/context/partition_unit.rs

1// Copyright (c) 2017-2022, The rav1e contributors. All rights reserved
2//
3// This source code is subject to the terms of the BSD 2 Clause License and
4// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
5// was not distributed with this source code in the LICENSE file, you can
6// obtain it at www.aomedia.org/license/software. If the Alliance for Open
7// Media Patent License 1.0 was not distributed with this source code in the
8// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
9
10use super::*;
11
// Generates a 4 bit field in which each bit set to 1 represents
// a blocksize partition  1111 means we split 64x64, 32x32, 16x16
// and 8x8.  1000 means we just split the 64x64 to 32x32
//
// Indexed by BlockSize; entry 0 is the value stored in the above
// (column) partition context, entry 1 the value stored in the left
// (row) partition context (see update_partition_context below).
pub static partition_context_lookup: [[u8; 2]; BlockSize::BLOCK_SIZES_ALL] = [
  [31, 31], // 4X4   - {0b11111, 0b11111}
  [31, 30], // 4X8   - {0b11111, 0b11110}
  [30, 31], // 8X4   - {0b11110, 0b11111}
  [30, 30], // 8X8   - {0b11110, 0b11110}
  [30, 28], // 8X16  - {0b11110, 0b11100}
  [28, 30], // 16X8  - {0b11100, 0b11110}
  [28, 28], // 16X16 - {0b11100, 0b11100}
  [28, 24], // 16X32 - {0b11100, 0b11000}
  [24, 28], // 32X16 - {0b11000, 0b11100}
  [24, 24], // 32X32 - {0b11000, 0b11000}
  [24, 16], // 32X64 - {0b11000, 0b10000}
  [16, 24], // 64X32 - {0b10000, 0b11000}
  [16, 16], // 64X64 - {0b10000, 0b10000}
  [16, 0],  // 64X128- {0b10000, 0b00000}
  [0, 16],  // 128X64- {0b00000, 0b10000}
  [0, 0],   // 128X128-{0b00000, 0b00000}
  [31, 28], // 4X16  - {0b11111, 0b11100}
  [28, 31], // 16X4  - {0b11100, 0b11111}
  [30, 24], // 8X32  - {0b11110, 0b11000}
  [24, 30], // 32X8  - {0b11000, 0b11110}
  [28, 16], // 16X64 - {0b11100, 0b10000}
  [16, 28], // 64X16 - {0b10000, 0b11100}
];
39
/// Number of valid joint-sign symbols for the two CfL alpha signs:
/// 3 * 3 sign pairs minus the excluded zero/zero pair
/// (see `CFLParams::joint_sign`).
pub const CFL_JOINT_SIGNS: usize = 8;
/// Number of cdf contexts for coding a CfL alpha magnitude:
/// 2 non-zero signs for this plane * 3 signs for the other plane
/// (see `CFLParams::context`).
pub const CFL_ALPHA_CONTEXTS: usize = 6;
/// Number of CfL alpha magnitude symbols (scale 1..=16 coded as 0..=15).
pub const CFL_ALPHABET_SIZE: usize = 16;

/// Number of partition contexts per block-size level.
pub const PARTITION_PLOFFSET: usize = 4;
/// Number of square block-size levels that carry partition contexts.
pub const PARTITION_BLOCK_SIZES: usize = 4 + 1;
const PARTITION_CONTEXTS_PRIMARY: usize =
  PARTITION_BLOCK_SIZES * PARTITION_PLOFFSET;
pub const PARTITION_CONTEXTS: usize = PARTITION_CONTEXTS_PRIMARY;
/// Number of basic partition types (NONE/HORZ/VERT/SPLIT).
pub const PARTITION_TYPES: usize = 4;
/// Number of extended partition types (adds HORZ_A/B, VERT_A/B, HORZ_4,
/// VERT_4 — TODO confirm exact set against `PartitionType`).
pub const EXT_PARTITION_TYPES: usize = 10;

/// Skip-flag contexts: number of skipped neighbours, 0..=2
/// (see `BlockContext::skip_context`).
pub const SKIP_CONTEXTS: usize = 3;
pub const SKIP_MODE_CONTEXTS: usize = 3;

// partition contexts are at 8x8 granularity, as it is not possible to
// split 4x4 blocks any further than that
pub const PARTITION_CONTEXT_GRANULARITY: usize = 8;
pub const PARTITION_CONTEXT_MAX_WIDTH: usize =
  MAX_TILE_WIDTH / PARTITION_CONTEXT_GRANULARITY;
60
/// Sign of a chroma-from-luma alpha coefficient.
///
/// The discriminant values are significant: they are used directly as
/// indices into `cfl_sign_value` and in the joint-sign/context arithmetic
/// of `CFLParams`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CFLSign {
  CFL_SIGN_ZERO = 0,
  CFL_SIGN_NEG = 1,
  CFL_SIGN_POS = 2,
}
67
68impl CFLSign {
69  pub const fn from_alpha(a: i16) -> CFLSign {
70    [CFL_SIGN_NEG, CFL_SIGN_ZERO, CFL_SIGN_POS][(a.signum() + 1) as usize]
71  }
72}
73
use crate::context::CFLSign::*;

/// Number of distinct CfL signs (zero, negative, positive).
const CFL_SIGNS: usize = 3;
/// Multiplier applied to a scale for each `CFLSign` discriminant when
/// reconstructing a signed alpha (see `CFLParams::alpha`).
static cfl_sign_value: [i16; CFL_SIGNS] = [0, -1, 1];
78
/// Chroma-from-luma prediction parameters: a sign and a magnitude
/// (`scale`) per chroma plane.  Index 0 and 1 select the two chroma
/// planes (presumably U then V — confirm against callers).
#[derive(Copy, Clone, Debug)]
pub struct CFLParams {
  pub sign: [CFLSign; 2],
  pub scale: [u8; 2],
}

impl Default for CFLParams {
  #[inline]
  fn default() -> Self {
    // Not both signs are zero, so the default satisfies the
    // `joint_sign` precondition.
    Self { sign: [CFL_SIGN_NEG, CFL_SIGN_ZERO], scale: [1, 0] }
  }
}
91
impl CFLParams {
  /// Returns the joint-sign symbol (`0..CFL_JOINT_SIGNS`) coding both
  /// plane signs together; the all-zero combination is excluded, hence
  /// the `- 1`.
  ///
  /// # Panics
  ///
  /// - If both signs are zero
  #[inline]
  pub fn joint_sign(self) -> u32 {
    assert!(self.sign[0] != CFL_SIGN_ZERO || self.sign[1] != CFL_SIGN_ZERO);
    (self.sign[0] as u32) * (CFL_SIGNS as u32) + (self.sign[1] as u32) - 1
  }
  /// Returns the alpha cdf context (`0..CFL_ALPHA_CONTEXTS`) for plane
  /// `uv`, combining this plane's (non-zero) sign with the other
  /// plane's sign.
  ///
  /// # Panics
  ///
  /// - If the sign at index `uv` is zero
  #[inline]
  pub fn context(self, uv: usize) -> usize {
    assert!(self.sign[uv] != CFL_SIGN_ZERO);
    (self.sign[uv] as usize - 1) * CFL_SIGNS + (self.sign[1 - uv] as usize)
  }
  /// Returns the alpha magnitude symbol (`scale - 1`) for plane `uv`.
  ///
  /// # Panics
  ///
  /// - If the sign at index `uv` is zero
  /// - If the scale at index `uv` is zero
  #[inline]
  pub fn index(self, uv: usize) -> u32 {
    assert!(self.sign[uv] != CFL_SIGN_ZERO && self.scale[uv] != 0);
    (self.scale[uv] - 1) as u32
  }
  /// Reconstructs the signed alpha value for plane `uv` from its sign
  /// and scale.
  #[inline]
  pub fn alpha(self, uv: usize) -> i16 {
    cfl_sign_value[self.sign[uv] as usize] * (self.scale[uv] as i16)
  }
  /// Builds parameters from signed per-plane alpha values, splitting
  /// each into a sign and an absolute-value scale.
  #[inline]
  pub const fn from_alpha(u: i16, v: i16) -> CFLParams {
    CFLParams {
      sign: [CFLSign::from_alpha(u), CFLSign::from_alpha(v)],
      scale: [u.unsigned_abs() as u8, v.unsigned_abs() as u8],
    }
  }
}
129
impl ContextWriter<'_> {
  // Collapses an extended-partition cdf into a binary cdf: out[0] ends up
  // holding the summed probability of every partition type listed below
  // (HORZ, SPLIT, HORZ_A, HORZ_B, VERT_A, HORZ_4), out[1] terminates the
  // cdf.  Used by write_partition when only a vertical split choice
  // remains to be coded.
  fn partition_gather_horz_alike(
    out: &mut [u16; 2], cdf_in: &[u16], _bsize: BlockSize,
  ) {
    // Start from full probability scale and subtract each element's mass.
    out[0] = 32768;
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_SPLIT as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ_A as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ_B as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT_A as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ_4 as usize,
    );
    // Invert the subtraction so out[0] is the summed mass itself.
    out[0] = 32768 - out[0];
    out[1] = 0;
  }

  // Mirror of partition_gather_horz_alike for the vertical-alike set
  // (VERT, SPLIT, HORZ_A, VERT_A, VERT_B, VERT_4).  Used when only a
  // horizontal split choice remains to be coded.
  fn partition_gather_vert_alike(
    out: &mut [u16; 2], cdf_in: &[u16], _bsize: BlockSize,
  ) {
    out[0] = 32768;
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_SPLIT as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ_A as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT_A as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT_B as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT_4 as usize,
    );
    out[0] = 32768 - out[0];
    out[1] = 0;
  }

  /// Writes the skip flag for the block at `bo`, selecting the cdf by the
  /// neighbour-derived skip context.
  #[inline]
  pub fn write_skip<W: Writer>(
    &mut self, w: &mut W, bo: TileBlockOffset, skip: bool,
  ) {
    let ctx = self.bc.skip_context(bo);
    let cdf = &self.fc.skip_cdfs[ctx];
    symbol_with_update!(self, w, skip as u32, cdf);
  }

  /// Returns `(predicted_segment_id, cdf_index)` for the block at `bo`,
  /// computed from the above-left, above and left neighbour segment ids
  /// (treated as -1 where the neighbour is out of bounds).  The predicted
  /// id is clamped to `last_active_segid`.
  pub fn get_segment_pred(
    &self, bo: TileBlockOffset, last_active_segid: u8,
  ) -> (u8, u8) {
    let mut prev_ul = -1;
    let mut prev_u = -1;
    let mut prev_l = -1;
    if bo.0.x > 0 && bo.0.y > 0 {
      prev_ul = self.bc.blocks.above_left_of(bo).segmentation_idx as i8;
    }
    if bo.0.y > 0 {
      prev_u = self.bc.blocks.above_of(bo).segmentation_idx as i8;
    }
    if bo.0.x > 0 {
      prev_l = self.bc.blocks.left_of(bo).segmentation_idx as i8;
    }

    /* Pick CDF index based on number of matching/out-of-bounds segment IDs. */
    let cdf_index: u8;
    if prev_ul < 0 || prev_u < 0 || prev_l < 0 {
      /* Edge case */
      cdf_index = 0;
    } else if (prev_ul == prev_u) && (prev_ul == prev_l) {
      cdf_index = 2;
    } else if (prev_ul == prev_u) || (prev_ul == prev_l) || (prev_u == prev_l)
    {
      cdf_index = 1;
    } else {
      cdf_index = 0;
    }

    /* If 2 or more are identical returns that as predictor, otherwise prev_l. */
    let r: i8;
    if prev_u == -1 {
      /* edge case */
      r = if prev_l == -1 { 0 } else { prev_l };
    } else if prev_l == -1 {
      /* edge case */
      r = prev_u;
    } else {
      r = if prev_ul == prev_u { prev_u } else { prev_l };
    }

    ((r as u8).min(last_active_segid), cdf_index)
  }

  /// Writes the CfL alpha parameters: first the joint-sign symbol, then,
  /// for each plane whose sign is non-zero, the alpha magnitude symbol
  /// under that plane's sign-derived context.
  pub fn write_cfl_alphas<W: Writer>(&mut self, w: &mut W, cfl: CFLParams) {
    symbol_with_update!(self, w, cfl.joint_sign(), &self.fc.cfl_sign_cdf);
    for uv in 0..2 {
      if cfl.sign[uv] != CFL_SIGN_ZERO {
        symbol_with_update!(
          self,
          w,
          cfl.index(uv),
          &self.fc.cfl_alpha_cdf[cfl.context(uv)]
        );
      }
    }
  }

  /// Writes the partition symbol for a square block at `bo`.  When part
  /// of the block lies outside the frame, only the implied binary
  /// split/no-split choice is coded via the gathered binary cdfs.
  ///
  /// # Panics
  ///
  /// - If called with a `bsize` smaller than 8x8 (the assert below
  ///   requires `bsize >= BLOCK_8X8`)
  /// - If called with a `PartitionType` incompatible with the current block.
  pub fn write_partition(
    &mut self, w: &mut impl Writer, bo: TileBlockOffset, p: PartitionType,
    bsize: BlockSize,
  ) {
    debug_assert!(bsize.is_sqr());
    assert!(bsize >= BlockSize::BLOCK_8X8);
    let hbs = bsize.width_mi() / 2;
    // Whether the right/bottom halves of this block are inside the tile.
    let has_cols = (bo.0.x + hbs) < self.bc.blocks.cols();
    let has_rows = (bo.0.y + hbs) < self.bc.blocks.rows();
    let ctx = self.bc.partition_plane_context(bo, bsize);
    assert!(ctx < PARTITION_CONTEXTS);

    if !has_rows && !has_cols {
      // Fully implied split; nothing to code.
      return;
    }

    if has_rows && has_cols {
      // Whole block visible: code the full partition symbol.
      // ctx < PARTITION_TYPES corresponds to the 8x8 level (bsl == 0 in
      // partition_plane_context); the largest level uses the w128 cdfs.
      if ctx < PARTITION_TYPES {
        let cdf = &self.fc.partition_w8_cdf[ctx];
        symbol_with_update!(self, w, p as u32, cdf);
      } else if ctx < 4 * PARTITION_TYPES {
        let cdf = &self.fc.partition_cdf[ctx - PARTITION_TYPES];
        symbol_with_update!(self, w, p as u32, cdf);
      } else {
        let cdf = &self.fc.partition_w128_cdf[ctx - 4 * PARTITION_TYPES];
        symbol_with_update!(self, w, p as u32, cdf);
      }
    } else if !has_rows && has_cols {
      // Bottom half is outside the frame: only SPLIT vs HORZ is possible,
      // coded as a binary symbol from the vertical-alike gathered cdf.
      assert!(
        p == PartitionType::PARTITION_SPLIT
          || p == PartitionType::PARTITION_HORZ
      );
      assert!(bsize > BlockSize::BLOCK_8X8);
      let mut cdf = [0u16; 2];
      if ctx < PARTITION_TYPES {
        let partition_cdf = &self.fc.partition_w8_cdf[ctx];
        ContextWriter::partition_gather_vert_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      } else if ctx < 4 * PARTITION_TYPES {
        let partition_cdf = &self.fc.partition_cdf[ctx - PARTITION_TYPES];
        ContextWriter::partition_gather_vert_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      } else {
        let partition_cdf =
          &self.fc.partition_w128_cdf[ctx - 4 * PARTITION_TYPES];
        ContextWriter::partition_gather_vert_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      }
      // Gathered cdfs are written without adaptation.
      w.symbol((p == PartitionType::PARTITION_SPLIT) as u32, &cdf);
    } else {
      // Right half is outside the frame: only SPLIT vs VERT is possible,
      // coded as a binary symbol from the horizontal-alike gathered cdf.
      assert!(
        p == PartitionType::PARTITION_SPLIT
          || p == PartitionType::PARTITION_VERT
      );
      assert!(bsize > BlockSize::BLOCK_8X8);
      let mut cdf = [0u16; 2];
      if ctx < PARTITION_TYPES {
        let partition_cdf = &self.fc.partition_w8_cdf[ctx];
        ContextWriter::partition_gather_horz_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      } else if ctx < 4 * PARTITION_TYPES {
        let partition_cdf = &self.fc.partition_cdf[ctx - PARTITION_TYPES];
        ContextWriter::partition_gather_horz_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      } else {
        let partition_cdf =
          &self.fc.partition_w128_cdf[ctx - 4 * PARTITION_TYPES];
        ContextWriter::partition_gather_horz_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      }
      w.symbol((p == PartitionType::PARTITION_SPLIT) as u32, &cdf);
    }
  }

  // Maps x (in 0..max) to a non-negative code by interleaving values
  // around the reference r: values near r receive small codes, so a good
  // predictor yields cheap symbols.  r == 0 and r >= max-1 degenerate to
  // identity / reflection.  Panics if x >= max.
  fn neg_interleave(x: i32, r: i32, max: i32) -> i32 {
    assert!(x < max);
    if r == 0 {
      return x;
    } else if r >= (max - 1) {
      return -x + max - 1;
    }
    let diff = x - r;
    if 2 * r < max {
      // r is in the lower half: interleave within radius r, pass the
      // rest through unchanged.
      if diff.abs() <= r {
        if diff > 0 {
          return (diff << 1) - 1;
        } else {
          return (-diff) << 1;
        }
      }
      x
    } else {
      // r is in the upper half: interleave within radius max - r,
      // reflect the rest.
      if diff.abs() < (max - r) {
        if diff > 0 {
          return (diff << 1) - 1;
        } else {
          return (-diff) << 1;
        }
      }
      (max - x) - 1
    }
  }

  /// Writes the segmentation id for the block at `bo`.  Skipped blocks
  /// just inherit the predicted id; otherwise the id is coded relative to
  /// the prediction via `neg_interleave`.
  pub fn write_segmentation<W: Writer>(
    &mut self, w: &mut W, bo: TileBlockOffset, bsize: BlockSize, skip: bool,
    last_active_segid: u8,
  ) {
    let (pred, cdf_index) = self.get_segment_pred(bo, last_active_segid);
    if skip {
      self.bc.blocks.set_segmentation_idx(bo, bsize, pred);
      return;
    }
    let seg_idx = self.bc.blocks[bo].segmentation_idx;
    let coded_id = Self::neg_interleave(
      seg_idx as i32,
      pred as i32,
      (last_active_segid + 1) as i32,
    );
    symbol_with_update!(
      self,
      w,
      coded_id as u32,
      &self.fc.spatial_segmentation_cdfs[cdf_index as usize]
    );
  }
}
411
impl BlockContext<'_> {
  /// Returns the partition cdf context for a square block at `bo`,
  /// combining the stored above/left partition context bits for this
  /// block-size level with the level offset.
  ///
  /// # Panics
  ///
  /// - If called with a non-square `bsize`
  pub fn partition_plane_context(
    &self, bo: TileBlockOffset, bsize: BlockSize,
  ) -> usize {
    // TODO: this should be way simpler without sub8x8
    // Contexts are stored at 8x8 (2-mi) granularity, hence the >> 1.
    let above_ctx = self.above_partition_context[bo.0.x >> 1];
    let left_ctx = self.left_partition_context[bo.y_in_sb() >> 1];
    // bsl: block-size level relative to 8x8 (0 for 8x8, 1 for 16x16, ...).
    let bsl = bsize.width_log2() - BLOCK_8X8.width_log2();
    // Each stored context byte has one bit per level; see
    // partition_context_lookup.
    let above = (above_ctx >> bsl) & 1;
    let left = (left_ctx >> bsl) & 1;

    assert!(bsize.is_sqr());

    (left * 2 + above) as usize + bsl * PARTITION_PLOFFSET
  }

  /// Clears the above/left coefficient contexts covered by the block at
  /// `bo`, for every coded plane (luma only for Cs400, and chroma only
  /// where sub-8x8 blocks actually carry chroma).
  ///
  /// # Panics
  ///
  /// - If the block size is invalid for subsampling
  pub fn reset_skip_context(
    &mut self, bo: TileBlockOffset, bsize: BlockSize, xdec: usize,
    ydec: usize, cs: ChromaSampling,
  ) {
    let num_planes = if cs == ChromaSampling::Cs400 { 1 } else { 3 };
    // Sub-8x8 blocks only include chroma planes when this block is the
    // one carrying the chroma information.
    let nplanes = if bsize >= BLOCK_8X8 {
      num_planes
    } else {
      1 + (num_planes - 1) * has_chroma(bo, bsize, xdec, ydec, cs) as usize
    };

    for plane in 0..nplanes {
      // Luma (plane 0) is never subsampled.
      let xdec2 = if plane == 0 { 0 } else { xdec };
      let ydec2 = if plane == 0 { 0 } else { ydec };

      let plane_bsize = if plane == 0 {
        bsize
      } else {
        bsize.subsampled_size(xdec2, ydec2).unwrap()
      };
      let bw = plane_bsize.width_mi();
      let bh = plane_bsize.height_mi();

      for above in
        &mut self.above_coeff_context[plane][(bo.0.x >> xdec2)..][..bw]
      {
        *above = 0;
      }

      let bo_y = bo.y_in_sb();
      for left in &mut self.left_coeff_context[plane][(bo_y >> ydec2)..][..bh]
      {
        *left = 0;
      }
    }
  }

  /// Returns the skip-flag context (0..=2): the number of the above and
  /// left neighbours that are themselves skipped.
  pub fn skip_context(&self, bo: TileBlockOffset) -> usize {
    let above_skip = bo.0.y > 0 && self.blocks.above_of(bo).skip;
    let left_skip = bo.0.x > 0 && self.blocks.left_of(bo).skip;
    above_skip as usize + left_skip as usize
  }

  /// Updates the above/left partition contexts covered by a `bsize`
  /// block at `bo` after it has been partitioned into `subsize` blocks.
  ///
  /// # Panics
  ///
  /// - If called with a non-square `bsize`
  pub fn update_partition_context(
    &mut self, bo: TileBlockOffset, subsize: BlockSize, bsize: BlockSize,
  ) {
    assert!(bsize.is_sqr());

    let bw = bsize.width_mi();
    let bh = bsize.height_mi();

    // Contexts are stored at 8x8 (2-mi) granularity, hence the >> 1.
    let above_ctx =
      &mut self.above_partition_context[bo.0.x >> 1..(bo.0.x + bw) >> 1];
    let left_ctx = &mut self.left_partition_context
      [bo.y_in_sb() >> 1..(bo.y_in_sb() + bh) >> 1];

    // update the partition context at the end notes. set partition bits
    // of block sizes larger than the current one to be one, and partition
    // bits of smaller block sizes to be zero.
    for above in &mut above_ctx[..bw >> 1] {
      *above = partition_context_lookup[subsize as usize][0];
    }

    for left in &mut left_ctx[..bh >> 1] {
      *left = partition_context_lookup[subsize as usize][1];
    }
  }
}
505
#[cfg(test)]
mod test {
  #[test]
  fn cdf_map() {
    use super::*;

    // A partition cdf taken from a freshly built context must be
    // locatable through the field map built from that same context.
    let cdf = CDFContext::new(8);
    let cdf_map = FieldMap { map: cdf.build_map() };
    let f = &cdf.partition_cdf[2];
    cdf_map.lookup(f.as_ptr() as usize);
  }

  use super::CFLSign;
  use super::CFLSign::*;

  // All 8 valid sign pairs, ordered by their joint-sign symbol
  // (the zero/zero pair is invalid and therefore absent).
  static cfl_alpha_signs: [[CFLSign; 2]; 8] = [
    [CFL_SIGN_ZERO, CFL_SIGN_NEG],
    [CFL_SIGN_ZERO, CFL_SIGN_POS],
    [CFL_SIGN_NEG, CFL_SIGN_ZERO],
    [CFL_SIGN_NEG, CFL_SIGN_NEG],
    [CFL_SIGN_NEG, CFL_SIGN_POS],
    [CFL_SIGN_POS, CFL_SIGN_ZERO],
    [CFL_SIGN_POS, CFL_SIGN_NEG],
    [CFL_SIGN_POS, CFL_SIGN_POS],
  ];

  // Expected alpha context per plane (row) and joint-sign symbol (column);
  // entries where the plane's sign is zero are unused.
  static cfl_context: [[usize; 8]; 2] =
    [[0, 0, 0, 1, 2, 3, 4, 5], [0, 3, 0, 1, 4, 0, 2, 5]];

  #[test]
  fn cfl_joint_sign() {
    use super::*;

    // joint_sign() and context() must agree with the tables above for
    // every valid sign combination.
    let mut cfl = CFLParams::default();
    for (joint_sign, &signs) in cfl_alpha_signs.iter().enumerate() {
      cfl.sign = signs;
      assert!(cfl.joint_sign() as usize == joint_sign);
      for uv in 0..2 {
        if signs[uv] != CFL_SIGN_ZERO {
          assert!(cfl.context(uv) == cfl_context[uv][joint_sign]);
        }
      }
    }
  }
}
550}