rav1e/context/partition_unit.rs

// Copyright (c) 2017-2022, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.

use super::cdf_context::ContextWriter;
use super::*;

// Generates a 5-bit field in which each bit set to 1 represents a blocksize
// partition: 0b11111 means we split the 128x128, 64x64, 32x32, 16x16 and 8x8
// levels, while 0b10000 means we only split the 128x128 to 64x64.
pub static partition_context_lookup: [[u8; 2]; BlockSize::BLOCK_SIZES_ALL] = [
  [31, 31], // 4X4   - {0b11111, 0b11111}
  [31, 30], // 4X8   - {0b11111, 0b11110}
  [30, 31], // 8X4   - {0b11110, 0b11111}
  [30, 30], // 8X8   - {0b11110, 0b11110}
  [30, 28], // 8X16  - {0b11110, 0b11100}
  [28, 30], // 16X8  - {0b11100, 0b11110}
  [28, 28], // 16X16 - {0b11100, 0b11100}
  [28, 24], // 16X32 - {0b11100, 0b11000}
  [24, 28], // 32X16 - {0b11000, 0b11100}
  [24, 24], // 32X32 - {0b11000, 0b11000}
  [24, 16], // 32X64 - {0b11000, 0b10000}
  [16, 24], // 64X32 - {0b10000, 0b11000}
  [16, 16], // 64X64 - {0b10000, 0b10000}
  [16, 0],  // 64X128- {0b10000, 0b00000}
  [0, 16],  // 128X64- {0b00000, 0b10000}
  [0, 0],   // 128X128-{0b00000, 0b00000}
  [31, 28], // 4X16  - {0b11111, 0b11100}
  [28, 31], // 16X4  - {0b11100, 0b11111}
  [30, 24], // 8X32  - {0b11110, 0b11000}
  [24, 30], // 32X8  - {0b11000, 0b11110}
  [28, 16], // 16X64 - {0b11100, 0b10000}
  [16, 28], // 64X16 - {0b10000, 0b11100}
];
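// Illustrative note (added): each entry is read one bit per partition level.
// E.g. BLOCK_32X32 stores [24, 24] = [0b11000, 0b11000]: only the 128x128 and
// 64x64 levels were split to reach a 32x32 leaf. partition_plane_context()
// later tests bit `bsl` of these fields to learn whether the above neighbour
// is narrower, or the left neighbour shorter, than the size being coded.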

pub const CFL_JOINT_SIGNS: usize = 8;
pub const CFL_ALPHA_CONTEXTS: usize = 6;
pub const CFL_ALPHABET_SIZE: usize = 16;

pub const PARTITION_PLOFFSET: usize = 4;
pub const PARTITION_BLOCK_SIZES: usize = 4 + 1;
const PARTITION_CONTEXTS_PRIMARY: usize =
  PARTITION_BLOCK_SIZES * PARTITION_PLOFFSET;
pub const PARTITION_CONTEXTS: usize = PARTITION_CONTEXTS_PRIMARY;
pub const PARTITION_TYPES: usize = 4;
pub const EXT_PARTITION_TYPES: usize = 10;

pub const SKIP_CONTEXTS: usize = 3;
pub const SKIP_MODE_CONTEXTS: usize = 3;

// Partition contexts are tracked at 8x8 granularity, since 4x4 blocks cannot
// be split any further.
pub const PARTITION_CONTEXT_GRANULARITY: usize = 8;
pub const PARTITION_CONTEXT_MAX_WIDTH: usize =
  MAX_TILE_WIDTH / PARTITION_CONTEXT_GRANULARITY;
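// Note (added, assuming MAX_TILE_WIDTH is AV1's 4096-sample tile width limit):
// the constant above works out to 512 above-context entries per tile row.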

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CFLSign {
  CFL_SIGN_ZERO = 0,
  CFL_SIGN_NEG = 1,
  CFL_SIGN_POS = 2,
}

impl CFLSign {
  pub const fn from_alpha(a: i16) -> CFLSign {
    [CFL_SIGN_NEG, CFL_SIGN_ZERO, CFL_SIGN_POS][(a.signum() + 1) as usize]
  }
}

use crate::context::CFLSign::*;

const CFL_SIGNS: usize = 3;
static cfl_sign_value: [i16; CFL_SIGNS] = [0, -1, 1];

#[derive(Copy, Clone, Debug)]
pub struct CFLParams {
  pub sign: [CFLSign; 2],
  pub scale: [u8; 2],
}

impl Default for CFLParams {
  #[inline]
  fn default() -> Self {
    Self { sign: [CFL_SIGN_NEG, CFL_SIGN_ZERO], scale: [1, 0] }
  }
}

impl CFLParams {
  /// # Panics
  ///
  /// - If both signs are zero
  #[inline]
  pub fn joint_sign(self) -> u32 {
    assert!(self.sign[0] != CFL_SIGN_ZERO || self.sign[1] != CFL_SIGN_ZERO);
    (self.sign[0] as u32) * (CFL_SIGNS as u32) + (self.sign[1] as u32) - 1
  }
  /// # Panics
  ///
  /// - If the sign at index `uv` is zero
  #[inline]
  pub fn context(self, uv: usize) -> usize {
    assert!(self.sign[uv] != CFL_SIGN_ZERO);
    (self.sign[uv] as usize - 1) * CFL_SIGNS + (self.sign[1 - uv] as usize)
  }
  /// # Panics
  ///
  /// - If the sign or the scale at index `uv` is zero
  #[inline]
  pub fn index(self, uv: usize) -> u32 {
    assert!(self.sign[uv] != CFL_SIGN_ZERO && self.scale[uv] != 0);
    (self.scale[uv] - 1) as u32
  }
  #[inline]
  pub fn alpha(self, uv: usize) -> i16 {
    cfl_sign_value[self.sign[uv] as usize] * (self.scale[uv] as i16)
  }
  #[inline]
  pub const fn from_alpha(u: i16, v: i16) -> CFLParams {
    CFLParams {
      sign: [CFLSign::from_alpha(u), CFLSign::from_alpha(v)],
      scale: [u.unsigned_abs() as u8, v.unsigned_abs() as u8],
    }
  }
}
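// Illustrative note (added): CFLParams::from_alpha(-3, 2) yields
// sign = [CFL_SIGN_NEG, CFL_SIGN_POS] and scale = [3, 2], so
// joint_sign() = 1 * 3 + 2 - 1 = 4, context(0) = 2, context(1) = 4,
// index(0) = 2, index(1) = 1, and alpha() recovers (-3, 2).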

#[cfg(test)]
mod test {
  #[test]
  fn cdf_map() {
    use super::*;

    let cdf = CDFContext::new(8);
    let cdf_map = FieldMap { map: cdf.build_map() };
    let f = &cdf.partition_cdf[2];
    cdf_map.lookup(f.as_ptr() as usize);
  }

  use super::CFLSign;
  use super::CFLSign::*;

  static cfl_alpha_signs: [[CFLSign; 2]; 8] = [
    [CFL_SIGN_ZERO, CFL_SIGN_NEG],
    [CFL_SIGN_ZERO, CFL_SIGN_POS],
    [CFL_SIGN_NEG, CFL_SIGN_ZERO],
    [CFL_SIGN_NEG, CFL_SIGN_NEG],
    [CFL_SIGN_NEG, CFL_SIGN_POS],
    [CFL_SIGN_POS, CFL_SIGN_ZERO],
    [CFL_SIGN_POS, CFL_SIGN_NEG],
    [CFL_SIGN_POS, CFL_SIGN_POS],
  ];

  static cfl_context: [[usize; 8]; 2] =
    [[0, 0, 0, 1, 2, 3, 4, 5], [0, 3, 0, 1, 4, 0, 2, 5]];

  #[test]
  fn cfl_joint_sign() {
    use super::*;

    let mut cfl = CFLParams::default();
    for (joint_sign, &signs) in cfl_alpha_signs.iter().enumerate() {
      cfl.sign = signs;
      assert!(cfl.joint_sign() as usize == joint_sign);
      for uv in 0..2 {
        if signs[uv] != CFL_SIGN_ZERO {
          assert!(cfl.context(uv) == cfl_context[uv][joint_sign]);
        }
      }
    }
  }
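
  // Added illustrative sketch (not part of the original test suite): checks
  // that CFLParams::from_alpha round-trips through alpha() for the small
  // alpha magnitudes used by CfL.
  #[test]
  fn cfl_from_alpha_roundtrip() {
    use super::CFLParams;

    for u in -16i16..=16 {
      for v in -16i16..=16 {
        let cfl = CFLParams::from_alpha(u, v);
        assert_eq!(cfl.alpha(0), u);
        assert_eq!(cfl.alpha(1), v);
      }
    }
  }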
}

impl<'a> ContextWriter<'a> {
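  // Note (added): the two gather helpers below mirror libaom's
  // partition_gather_{horz,vert}_alike(): they fold the multi-symbol
  // partition CDF into a binary CDF for the frame-edge cases in
  // write_partition(), where only PARTITION_SPLIT vs PARTITION_HORZ
  // (or PARTITION_VERT) can occur.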
  fn partition_gather_horz_alike(
    out: &mut [u16; 2], cdf_in: &[u16], _bsize: BlockSize,
  ) {
    out[0] = 32768;
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_SPLIT as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ_A as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ_B as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT_A as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ_4 as usize,
    );
    out[0] = 32768 - out[0];
    out[1] = 0;
  }

  fn partition_gather_vert_alike(
    out: &mut [u16; 2], cdf_in: &[u16], _bsize: BlockSize,
  ) {
    out[0] = 32768;
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_SPLIT as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_HORZ_A as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT_A as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT_B as usize,
    );
    out[0] -= ContextWriter::cdf_element_prob(
      cdf_in,
      PartitionType::PARTITION_VERT_4 as usize,
    );
    out[0] = 32768 - out[0];
    out[1] = 0;
  }

  #[inline]
  pub fn write_skip<W: Writer>(
    &mut self, w: &mut W, bo: TileBlockOffset, skip: bool,
  ) {
    let ctx = self.bc.skip_context(bo);
    let cdf = &self.fc.skip_cdfs[ctx];
    symbol_with_update!(self, w, skip as u32, cdf);
  }

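  /// Returns the spatially predicted segment id for `bo` together with the
  /// index of the CDF used to code the actual segment id.
  ///
  /// Illustrative example (added): if the above and left neighbours both
  /// carry segment id 2 while the above-left neighbour carries 1, the
  /// predictor is 2 and the CDF index is 1.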
  pub fn get_segment_pred(
    &self, bo: TileBlockOffset, last_active_segid: u8,
  ) -> (u8, u8) {
    let mut prev_ul = -1;
    let mut prev_u = -1;
    let mut prev_l = -1;
    if bo.0.x > 0 && bo.0.y > 0 {
      prev_ul = self.bc.blocks.above_left_of(bo).segmentation_idx as i8;
    }
    if bo.0.y > 0 {
      prev_u = self.bc.blocks.above_of(bo).segmentation_idx as i8;
    }
    if bo.0.x > 0 {
      prev_l = self.bc.blocks.left_of(bo).segmentation_idx as i8;
    }

    /* Pick CDF index based on number of matching/out-of-bounds segment IDs. */
    let cdf_index: u8;
    if prev_ul < 0 || prev_u < 0 || prev_l < 0 {
      /* Edge case */
      cdf_index = 0;
    } else if (prev_ul == prev_u) && (prev_ul == prev_l) {
      cdf_index = 2;
    } else if (prev_ul == prev_u) || (prev_ul == prev_l) || (prev_u == prev_l)
    {
      cdf_index = 1;
    } else {
      cdf_index = 0;
    }

    /* If two or more neighbours share a segment id, that id is the
       predictor; otherwise prev_l. */
    let r: i8;
    if prev_u == -1 {
      /* edge case */
      r = if prev_l == -1 { 0 } else { prev_l };
    } else if prev_l == -1 {
      /* edge case */
      r = prev_u;
    } else {
      r = if prev_ul == prev_u { prev_u } else { prev_l };
    }

    ((r as u8).min(last_active_segid), cdf_index)
  }

  pub fn write_cfl_alphas<W: Writer>(&mut self, w: &mut W, cfl: CFLParams) {
    symbol_with_update!(self, w, cfl.joint_sign(), &self.fc.cfl_sign_cdf);
    for uv in 0..2 {
      if cfl.sign[uv] != CFL_SIGN_ZERO {
        symbol_with_update!(
          self,
          w,
          cfl.index(uv),
          &self.fc.cfl_alpha_cdf[cfl.context(uv)]
        );
      }
    }
  }

  /// # Panics
  ///
  /// - If called with a `bsize` smaller than `BlockSize::BLOCK_8X8`
  /// - If called with a `PartitionType` incompatible with the current block.
  pub fn write_partition(
    &mut self, w: &mut impl Writer, bo: TileBlockOffset, p: PartitionType,
    bsize: BlockSize,
  ) {
    debug_assert!(bsize.is_sqr());
    assert!(bsize >= BlockSize::BLOCK_8X8);
    let hbs = bsize.width_mi() / 2;
    let has_cols = (bo.0.x + hbs) < self.bc.blocks.cols();
    let has_rows = (bo.0.y + hbs) < self.bc.blocks.rows();
    let ctx = self.bc.partition_plane_context(bo, bsize);
    assert!(ctx < PARTITION_CONTEXTS);

    if !has_rows && !has_cols {
      return;
    }

    if has_rows && has_cols {
      if ctx < PARTITION_TYPES {
        let cdf = &self.fc.partition_w8_cdf[ctx];
        symbol_with_update!(self, w, p as u32, cdf);
      } else if ctx < 4 * PARTITION_TYPES {
        let cdf = &self.fc.partition_cdf[ctx - PARTITION_TYPES];
        symbol_with_update!(self, w, p as u32, cdf);
      } else {
        let cdf = &self.fc.partition_w128_cdf[ctx - 4 * PARTITION_TYPES];
        symbol_with_update!(self, w, p as u32, cdf);
      }
    } else if !has_rows && has_cols {
      assert!(
        p == PartitionType::PARTITION_SPLIT
          || p == PartitionType::PARTITION_HORZ
      );
      assert!(bsize > BlockSize::BLOCK_8X8);
      let mut cdf = [0u16; 2];
      if ctx < PARTITION_TYPES {
        let partition_cdf = &self.fc.partition_w8_cdf[ctx];
        ContextWriter::partition_gather_vert_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      } else if ctx < 4 * PARTITION_TYPES {
        let partition_cdf = &self.fc.partition_cdf[ctx - PARTITION_TYPES];
        ContextWriter::partition_gather_vert_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      } else {
        let partition_cdf =
          &self.fc.partition_w128_cdf[ctx - 4 * PARTITION_TYPES];
        ContextWriter::partition_gather_vert_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      }
      w.symbol((p == PartitionType::PARTITION_SPLIT) as u32, &cdf);
    } else {
      assert!(
        p == PartitionType::PARTITION_SPLIT
          || p == PartitionType::PARTITION_VERT
      );
      assert!(bsize > BlockSize::BLOCK_8X8);
      let mut cdf = [0u16; 2];
      if ctx < PARTITION_TYPES {
        let partition_cdf = &self.fc.partition_w8_cdf[ctx];
        ContextWriter::partition_gather_horz_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      } else if ctx < 4 * PARTITION_TYPES {
        let partition_cdf = &self.fc.partition_cdf[ctx - PARTITION_TYPES];
        ContextWriter::partition_gather_horz_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      } else {
        let partition_cdf =
          &self.fc.partition_w128_cdf[ctx - 4 * PARTITION_TYPES];
        ContextWriter::partition_gather_horz_alike(
          &mut cdf,
          partition_cdf,
          bsize,
        );
      }
      w.symbol((p == PartitionType::PARTITION_SPLIT) as u32, &cdf);
    }
  }

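  /// Maps `x` (in `0..max`) to a code in `0..max` such that values close to
  /// the predictor `r` get the smallest codes (away from the edges:
  /// `r` -> 0, `r + 1` -> 1, `r - 1` -> 2, ...), so segment ids near the
  /// spatial predictor tend to code cheaply.
  ///
  /// Illustrative example (added): with `max` = 8 and `r` = 5, the inputs
  /// 5, 6, 4, 7, 3 map to 0, 1, 2, 3, 4 respectively.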
  fn neg_interleave(x: i32, r: i32, max: i32) -> i32 {
    assert!(x < max);
    if r == 0 {
      return x;
    } else if r >= (max - 1) {
      return -x + max - 1;
    }
    let diff = x - r;
    if 2 * r < max {
      if diff.abs() <= r {
        if diff > 0 {
          return (diff << 1) - 1;
        } else {
          return (-diff) << 1;
        }
      }
      x
    } else {
      if diff.abs() < (max - r) {
        if diff > 0 {
          return (diff << 1) - 1;
        } else {
          return (-diff) << 1;
        }
      }
      (max - x) - 1
    }
  }

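  /// Writes the block's segment id: spatially predicted via
  /// `get_segment_pred` and coded as a `neg_interleave`d offset from that
  /// predictor. For skip blocks the predictor is stored directly and nothing
  /// is written.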
  pub fn write_segmentation<W: Writer>(
    &mut self, w: &mut W, bo: TileBlockOffset, bsize: BlockSize, skip: bool,
    last_active_segid: u8,
  ) {
    let (pred, cdf_index) = self.get_segment_pred(bo, last_active_segid);
    if skip {
      self.bc.blocks.set_segmentation_idx(bo, bsize, pred);
      return;
    }
    let seg_idx = self.bc.blocks[bo].segmentation_idx;
    let coded_id = Self::neg_interleave(
      seg_idx as i32,
      pred as i32,
      (last_active_segid + 1) as i32,
    );
    symbol_with_update!(
      self,
      w,
      coded_id as u32,
      &self.fc.spatial_segmentation_cdfs[cdf_index as usize]
    );
  }
}

impl<'a> BlockContext<'a> {
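  /// Derives the partition context for a square block of size `bsize` at
  /// `bo` from the above/left bitfields kept at 8x8 granularity (see
  /// `partition_context_lookup`).
  ///
  /// Illustrative example (added): coding a 32x32 (`bsl` = 2) whose above
  /// neighbour was a 16x16 (bitfield 0b11100, bit 2 set) and whose left
  /// neighbour was a 32x32 (bitfield 0b11000, bit 2 clear) gives
  /// `ctx = (0 * 2 + 1) + 2 * PARTITION_PLOFFSET = 9`.
  ///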
  /// # Panics
  ///
  /// - If called with a non-square `bsize`
  pub fn partition_plane_context(
    &self, bo: TileBlockOffset, bsize: BlockSize,
  ) -> usize {
    // TODO: this should be way simpler without sub8x8
    let above_ctx = self.above_partition_context[bo.0.x >> 1];
    let left_ctx = self.left_partition_context[bo.y_in_sb() >> 1];
    let bsl = bsize.width_log2() - BLOCK_8X8.width_log2();
    let above = (above_ctx >> bsl) & 1;
    let left = (left_ctx >> bsl) & 1;

    assert!(bsize.is_sqr());

    (left * 2 + above) as usize + bsl * PARTITION_PLOFFSET
  }

  /// # Panics
  ///
  /// - If the block size is invalid for subsampling
  pub fn reset_skip_context(
    &mut self, bo: TileBlockOffset, bsize: BlockSize, xdec: usize,
    ydec: usize, cs: ChromaSampling,
  ) {
    let num_planes = if cs == ChromaSampling::Cs400 { 1 } else { 3 };
    let nplanes = if bsize >= BLOCK_8X8 {
      num_planes
    } else {
      1 + (num_planes - 1) * has_chroma(bo, bsize, xdec, ydec, cs) as usize
    };

    for plane in 0..nplanes {
      let xdec2 = if plane == 0 { 0 } else { xdec };
      let ydec2 = if plane == 0 { 0 } else { ydec };

      let plane_bsize = if plane == 0 {
        bsize
      } else {
        bsize.subsampled_size(xdec2, ydec2).unwrap()
      };
      let bw = plane_bsize.width_mi();
      let bh = plane_bsize.height_mi();

      for above in
        &mut self.above_coeff_context[plane][(bo.0.x >> xdec2)..][..bw]
      {
        *above = 0;
      }

      let bo_y = bo.y_in_sb();
      for left in &mut self.left_coeff_context[plane][(bo_y >> ydec2)..][..bh]
      {
        *left = 0;
      }
    }
  }

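  /// Skip-flag context: 0, 1 or 2, counting how many of the above and left
  /// neighbours (when they exist) were coded as skip.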
  pub fn skip_context(&self, bo: TileBlockOffset) -> usize {
    let above_skip = bo.0.y > 0 && self.blocks.above_of(bo).skip;
    let left_skip = bo.0.x > 0 && self.blocks.left_of(bo).skip;
    above_skip as usize + left_skip as usize
  }

  /// # Panics
  ///
  /// - If called with a non-square `bsize`
  pub fn update_partition_context(
    &mut self, bo: TileBlockOffset, subsize: BlockSize, bsize: BlockSize,
  ) {
    assert!(bsize.is_sqr());

    let bw = bsize.width_mi();
    let bh = bsize.height_mi();

    let above_ctx =
      &mut self.above_partition_context[bo.0.x >> 1..(bo.0.x + bw) >> 1];
    let left_ctx = &mut self.left_partition_context
      [bo.y_in_sb() >> 1..(bo.y_in_sb() + bh) >> 1];

    // Update the partition context at the end nodes: set the partition bits
    // of block sizes larger than the current one to one, and the partition
    // bits of smaller block sizes to zero.
    for above in &mut above_ctx[..bw >> 1] {
      *above = partition_context_lookup[subsize as usize][0];
    }

    for left in &mut left_ctx[..bh >> 1] {
      *left = partition_context_lookup[subsize as usize][1];
    }
  }
551}