1#![forbid(unsafe_code)]
2use crate::error::{LimitError, LimitErrorKind};
3use crate::math::multiply_accumulate;
4use crate::ImageError;
5
6use num_traits::{AsPrimitive, MulAdd};
7use std::mem::size_of;
8use std::ops::{Add, Mul};
9
/// Overflow-checked multiplication that reports failure as an [`ImageError`]
/// (dimension limit error) instead of panicking or wrapping.
pub(crate) trait SafeMul<S> {
    fn safe_mul(&self, rhs: S) -> Result<S, ImageError>;
}
13
/// Overflow-checked addition that reports failure as an [`ImageError`]
/// (dimension limit error) instead of panicking or wrapping.
pub(crate) trait SafeAdd<S> {
    fn safe_add(&self, rhs: S) -> Result<S, ImageError>;
}
17
18impl SafeMul<usize> for usize {
19 #[inline]
20 fn safe_mul(&self, rhs: usize) -> Result<usize, ImageError> {
21 if let Some(product) = self.checked_mul(rhs) {
22 Ok(product)
23 } else {
24 Err(ImageError::Limits(LimitError::from_kind(
25 LimitErrorKind::DimensionError,
26 )))
27 }
28 }
29}
30
31impl SafeAdd<usize> for usize {
32 #[inline]
33 fn safe_add(&self, rhs: usize) -> Result<usize, ImageError> {
34 if let Some(product) = self.checked_add(rhs) {
35 Ok(product)
36 } else {
37 Err(ImageError::Limits(LimitError::from_kind(
38 LimitErrorKind::DimensionError,
39 )))
40 }
41 }
42}
43
/// Dimensions of a filter kernel, in taps. Row-pass callers set only
/// `width`; column-pass callers set only `height`.
#[derive(Debug, Clone, Copy)]
struct KernelShape {
    width: usize,
    height: usize,
}
49
/// Image dimensions in pixels (not samples; channel count is passed
/// separately as the `N` const generic).
#[derive(Debug, Clone, Copy)]
pub(crate) struct FilterImageSize {
    pub(crate) width: usize,
    pub(crate) height: usize,
}
55
/// Copies row `source_y` of `image` into `row_buffer`, surrounding it with
/// `pad_w` replicated (clamp-to-edge) pixels on each side so the horizontal
/// pass can read all `kernel_size.width` taps without bounds handling.
///
/// `N` is the channel count per pixel. `row_buffer` must be exactly
/// `width * N + 2 * pad_w * N` samples long, where
/// `pad_w = (kernel_size.width / 2).max(1)` — asserted below.
fn make_arena_row<T, const N: usize>(
    image: &[T],
    row_buffer: &mut [T],
    source_y: usize,
    image_size: FilterImageSize,
    kernel_size: KernelShape,
) -> Result<(), ImageError>
where
    T: Default + Copy + Send + Sync + 'static,
    f64: AsPrimitive<T>,
{
    assert_eq!(image.len(), N * image_size.width * image_size.height);

    // At least one pixel of padding even for a 1-tap kernel.
    let pad_w = (kernel_size.width / 2).max(1);

    let arena_width = image_size
        .width
        .safe_mul(N)?
        .safe_add(pad_w.safe_mul(2 * N)?)?;

    let source_offset = source_y * image_size.width * N;
    assert_eq!(row_buffer.len(), arena_width);

    // Central section of the padded row: the source row copied verbatim.
    let row_dst = &mut row_buffer[pad_w * N..(pad_w * N + image_size.width * N)];

    let source_row = &image[source_offset..(source_offset + image_size.width * N)];

    for (dst, src) in row_dst.iter_mut().zip(source_row.iter()) {
        *dst = *src;
    }

    // Left padding. Since x < pad_w, `saturating_sub` always yields 0, so
    // every left pad pixel replicates the leftmost source pixel.
    for (x, dst) in (0..pad_w).zip(row_buffer.as_chunks_mut::<N>().0.iter_mut()) {
        let old_x = x.saturating_sub(pad_w).min(image_size.width - 1);
        let old_px = old_x * N;
        let src_iter = &source_row[old_px..(old_px + N)];
        for (dst, src) in dst.iter_mut().zip(src_iter.iter()) {
            *dst = *src;
        }
    }

    // Right padding. Since x >= width, `min(width - 1)` always selects the
    // rightmost source pixel. The reversed chunk iterator fills from the
    // buffer's tail; pairing order is immaterial because every right pad
    // pixel receives the same edge value.
    for (x, dst) in (image_size.width..(image_size.width + pad_w))
        .zip(row_buffer.as_chunks_mut::<N>().0.iter_mut().rev())
    {
        let old_x = x.min(image_size.width - 1);
        let old_px = old_x * N;
        let src_iter = &source_row[old_px..(old_px + N)];
        for (dst, src) in dst.iter_mut().zip(src_iter.iter()) {
            *dst = *src;
        }
    }
    Ok(())
}
112
/// Replicated edge rows used to pad an image vertically for the column pass.
#[derive(Clone)]
struct ArenaColumns<T>
where
    T: Copy,
{
    // Copies of the first image row, `pad_h` rows' worth.
    top_pad: Vec<T>,
    // Copies of the last image row, `pad_h` rows' worth.
    bottom_pad: Vec<T>,
}
121
/// Builds `pad_h = kernel_height / 2` rows of vertical padding above and
/// below the image for the column pass, using clamp-to-edge replication.
///
/// The clamped indices below always resolve to row 0 (top: `ky < pad_h`, so
/// `saturating_sub` yields 0) or row `height - 1` (bottom: `ky + height`
/// clamped by `min`), so every top pad row is a copy of the first image row
/// and every bottom pad row a copy of the last.
fn make_columns_arenas<T, const N: usize>(
    image: &[T],
    image_size: FilterImageSize,
    kernel_size: KernelShape,
) -> ArenaColumns<T>
where
    T: Default + Copy + Send + Sync + 'static,
    f64: AsPrimitive<T>,
{
    assert_eq!(image.len(), N * image_size.width * image_size.height);
    let pad_h = kernel_size.height / 2;

    let mut top_pad = vec![T::default(); pad_h * image_size.width * N];
    let mut bottom_pad = vec![T::default(); pad_h * image_size.width * N];

    // Pad row stride equals the image row stride, so `y * top_pad_stride`
    // below also indexes rows of `image`.
    let top_pad_stride = image_size.width * N;

    for (ky, dst) in (0..pad_h).zip(top_pad.chunks_exact_mut(top_pad_stride)) {
        for (kx, dst) in (0..image_size.width).zip(dst.as_chunks_mut::<N>().0.iter_mut()) {
            // Always 0: ky < pad_h.
            let y = ky.saturating_sub(pad_h).min(image_size.height - 1);
            let v_src = y * top_pad_stride + kx * N;

            let src_iter = &image[v_src..(v_src + N)];
            for (dst, src) in dst.iter_mut().zip(src_iter.iter()) {
                *dst = *src;
            }
        }
    }

    let bottom_iter_dst = bottom_pad.chunks_exact_mut(top_pad_stride);

    for (ky, dst) in (0..pad_h).zip(bottom_iter_dst) {
        for (kx, dst) in (0..image_size.width).zip(dst.as_chunks_mut::<N>().0.iter_mut()) {
            // Always height - 1: ky + height >= height.
            let y = (ky + image_size.height).min(image_size.height - 1);
            let v_src = y * top_pad_stride + kx * N;
            let src_iter = &image[v_src..(v_src + N)];
            for (dst, src) in dst.iter_mut().zip(src_iter.iter()) {
                *dst = *src;
            }
        }
    }

    ArenaColumns {
        top_pad,
        bottom_pad,
    }
}
173
/// Converts an accumulator value (`Self`) into the pixel storage type `S`,
/// applying whatever rounding/saturation the pairing requires.
trait ToStorage<S> {
    fn to_(self) -> S;
}

/// Number of fractional bits for the Q0.15 fixed-point kernel arithmetic
/// used by the integer pixel paths.
const Q0_15: i32 = 15;
179
180impl ToStorage<u8> for u32 {
181 #[inline(always)]
182 fn to_(self) -> u8 {
183 ((self + (1 << (Q0_15 - 1))) >> Q0_15).min(255) as u8
184 }
185}
186
187impl ToStorage<u16> for u32 {
188 #[inline(always)]
189 fn to_(self) -> u16 {
190 ((self + (1 << (Q0_15 - 1))) >> Q0_15).min(u16::MAX as u32) as u16
191 }
192}
193
194impl ToStorage<u16> for f32 {
195 #[inline(always)]
196 fn to_(self) -> u16 {
197 self.round().min(u16::MAX as f32) as u16
198 }
199}
200
/// Float accumulators already are the storage type; identity conversion.
impl ToStorage<f32> for f32 {
    #[inline(always)]
    fn to_(self) -> f32 {
        self
    }
}
207
/// Vertical convolution of one output row with a symmetric, odd-length kernel.
///
/// `arena_src` holds `kernel.len()` source rows, one per tap (tap `i` reads
/// row `i`). Symmetry is exploited by summing each mirrored tap pair
/// `(i, len - 1 - i)` before a single multiply — `acc += (fw + bw) * k[i]` —
/// plus the centre tap handled separately with `coeff = kernel[half_len]`.
///
/// Work is phased to help auto-vectorization: 32-lane chunks (only when the
/// storage type is 1 byte), then 16-lane, 4-lane, and a scalar tail. `cx`
/// carries the sample position forward across phases. `n` is the channel
/// count and only affects the total row length (`width * n`); channels are
/// interleaved and processed as flat lanes.
fn filter_symmetric_column<T, F>(
    arena_src: &[&[T]],
    dst_row: &mut [T],
    image_size: FilterImageSize,
    kernel: &[F],
    n: usize,
) where
    T: Copy + AsPrimitive<F>,
    F: ToStorage<T>
        + Mul<F, Output = F>
        + MulAdd<F, Output = F>
        + Add<F, Output = F>
        + Default
        + Copy
        + 'static,
{
    let dst_stride = image_size.width * n;

    let length = kernel.len();
    let half_len = length / 2;

    let mut cx = 0usize;

    // Centre tap coefficient; has no mirror partner.
    let coeff = kernel[half_len];

    let mut dst_rem = dst_row;

    // Wide 32-lane phase, worthwhile only for 1-byte storage (u8).
    if size_of::<T>() == 1 {
        for chunk in dst_rem.as_chunks_mut::<32>().0.iter_mut() {
            let mut store0: [F; 16] = [F::default(); 16];
            let mut store1: [F; 16] = [F::default(); 16];

            // Initialize accumulators with the centre-tap contribution.
            let v_src0 = &arena_src[half_len][cx..(cx + 16)];
            let v_src1 = &arena_src[half_len][(cx + 16)..(cx + 32)];

            for (dst, src) in store0.iter_mut().zip(v_src0) {
                *dst = src.as_().mul(coeff);
            }
            for (dst, src) in store1.iter_mut().zip(v_src1) {
                *dst = src.as_().mul(coeff);
            }

            // Mirrored tap pairs: one multiply per pair (shadowed `coeff`).
            for (i, &coeff) in kernel.iter().take(half_len).enumerate() {
                let other_side = length - i - 1;
                let fw_src = arena_src[i];
                let rw_src = arena_src[other_side];
                let fw0 = &fw_src[cx..(cx + 16)];
                let bw0 = &rw_src[cx..(cx + 16)];
                let fw1 = &fw_src[(cx + 16)..(cx + 32)];
                let bw1 = &rw_src[(cx + 16)..(cx + 32)];

                for ((dst, fw), bw) in store0.iter_mut().zip(fw0).zip(bw0) {
                    *dst = multiply_accumulate(*dst, fw.as_().add(bw.as_()), coeff);
                }

                for ((dst, fw), bw) in store1.iter_mut().zip(fw1).zip(bw1) {
                    *dst = multiply_accumulate(*dst, fw.as_().add(bw.as_()), coeff);
                }
            }

            // Convert accumulators back to storage and write out.
            let (shaped_dst0, shaped_dst1) = chunk.split_at_mut(16);

            for (src, dst) in store0.iter().zip(shaped_dst0.iter_mut()) {
                *dst = src.to_();
            }

            for (src, dst) in store1.iter().zip(shaped_dst1.iter_mut()) {
                *dst = src.to_();
            }

            cx += 32;
        }

        dst_rem = dst_rem.as_chunks_mut::<32>().1;
    }

    // 16-lane phase over whatever the 32-lane phase left behind.
    for chunk in dst_rem.as_chunks_mut::<16>().0.iter_mut() {
        let mut store: [F; 16] = [F::default(); 16];

        let v_src = &arena_src[half_len][cx..(cx + 16)];

        for (dst, src) in store.iter_mut().zip(v_src) {
            *dst = src.as_().mul(coeff);
        }

        for (i, &coeff) in kernel.iter().take(half_len).enumerate() {
            let other_side = length - i - 1;
            let fw = &arena_src[i][cx..(cx + 16)];
            let bw = &arena_src[other_side][cx..(cx + 16)];

            for ((dst, fw), bw) in store.iter_mut().zip(fw).zip(bw) {
                *dst = multiply_accumulate(*dst, fw.as_().add(bw.as_()), coeff);
            }
        }

        for (src, dst) in store.iter().zip(chunk.iter_mut()) {
            *dst = src.to_();
        }

        cx += 16;
    }

    dst_rem = dst_rem.as_chunks_mut::<16>().1;

    // 4-lane phase with manually unrolled accumulators.
    for chunk in dst_rem.as_chunks_mut::<4>().0.iter_mut() {
        let v_src = &arena_src[half_len][cx..(cx + 4)];

        let mut k0 = v_src[0].as_().mul(coeff);
        let mut k1 = v_src[1].as_().mul(coeff);
        let mut k2 = v_src[2].as_().mul(coeff);
        let mut k3 = v_src[3].as_().mul(coeff);

        for (i, &coeff) in kernel.iter().take(half_len).enumerate() {
            let other_side = length - i - 1;
            let fw = &arena_src[i][cx..(cx + 4)];
            let bw = &arena_src[other_side][cx..(cx + 4)];
            k0 = multiply_accumulate(k0, fw[0].as_().add(bw[0].as_()), coeff);
            k1 = multiply_accumulate(k1, fw[1].as_().add(bw[1].as_()), coeff);
            k2 = multiply_accumulate(k2, fw[2].as_().add(bw[2].as_()), coeff);
            k3 = multiply_accumulate(k3, fw[3].as_().add(bw[3].as_()), coeff);
        }

        chunk[0] = k0.to_();
        chunk[1] = k1.to_();
        chunk[2] = k2.to_();
        chunk[3] = k3.to_();
        cx += 4;
    }

    dst_rem = dst_rem.as_chunks_mut::<4>().1;

    // Scalar tail: remaining samples up to dst_stride.
    for (chunk, x) in dst_rem.iter_mut().zip(cx..dst_stride) {
        let v_src = &arena_src[half_len][x..(x + 1)];

        let mut k0 = v_src[0].as_().mul(coeff);

        for (i, &coeff) in kernel.iter().take(half_len).enumerate() {
            let other_side = length - i - 1;
            let fw = &arena_src[i][x..(x + 1)];
            let bw = &arena_src[other_side][x..(x + 1)];
            k0 = multiply_accumulate(k0, fw[0].as_().add(bw[0].as_()), coeff);
        }

        *chunk = k0.to_();
    }
}
359
/// Horizontal convolution of one padded row with a symmetric, odd-length
/// kernel; `N` is the channel count per pixel.
///
/// `arena` is the padded row produced by `make_arena_row`, so for output
/// sample `x` tap `i` reads `arena[x + i * N]` (same channel, `i` pixels
/// over). Kernel symmetry is exploited by summing each mirrored tap pair
/// `(i, len - 1 - i)` before a single multiply; the centre tap `hc` is
/// applied separately. Outputs are produced four samples at a time, with a
/// scalar tail for the remainder.
fn filter_symmetric_row<T, F, const N: usize>(arena: &[T], dst_row: &mut [T], scanned_kernel: &[F])
where
    T: Copy + AsPrimitive<F> + Default,
    F: ToStorage<T>
        + Mul<Output = F>
        + MulAdd<F, Output = F>
        + Default
        + Add<F, Output = F>
        + Copy
        + 'static,
    i32: AsPrimitive<F>,
{
    let src = arena;

    let length = scanned_kernel.len();
    let half_len = length / 2;

    // Centre tap coefficient; has no mirror partner.
    let hc = scanned_kernel[half_len];

    let (dst_row_chunks, remainder) = dst_row.as_chunks_mut::<4>();

    for (x, dst) in dst_row_chunks.iter_mut().enumerate() {
        let v_cx = x * 4;
        // Shadow `src` with the window starting at this output position.
        let src = &src[v_cx..];

        // Centre-tap contribution for the four lanes.
        let chunk = &src[half_len * N..half_len * N + 4];

        let mut k0 = chunk[0].as_() * hc;
        let mut k1 = chunk[1].as_() * hc;
        let mut k2 = chunk[2].as_() * hc;
        let mut k3 = chunk[3].as_() * hc;

        // Mirrored tap pairs (shadowed `coeff`).
        for (i, &coeff) in scanned_kernel.iter().take(half_len).enumerate() {
            let other_side = length - i - 1;
            let fw = &src[(i * N)..(i * N) + 4];
            let bw = &src[(other_side * N)..(other_side * N) + 4];
            k0 = multiply_accumulate(k0, fw[0].as_() + bw[0].as_(), coeff);
            k1 = multiply_accumulate(k1, fw[1].as_() + bw[1].as_(), coeff);
            k2 = multiply_accumulate(k2, fw[2].as_() + bw[2].as_(), coeff);
            k3 = multiply_accumulate(k3, fw[3].as_() + bw[3].as_(), coeff);
        }

        dst[0] = k0.to_();
        dst[1] = k1.to_();
        dst[2] = k2.to_();
        dst[3] = k3.to_();
    }

    // Scalar tail: samples not covered by the 4-lane chunks above.
    let dzx = dst_row_chunks.len() * 4;

    for (x, dst) in remainder.iter_mut().enumerate() {
        let v_cx = x + dzx;
        let src = &src[v_cx..];

        let mut k0 = src[half_len * N].as_() * hc;

        for (i, &coeff) in scanned_kernel.iter().take(half_len).enumerate() {
            let other_side = length - i - 1;
            let fw = &src[(i * N)..(i * N) + 1];
            let bw = &src[(other_side * N)..(other_side * N) + 1];
            k0 = multiply_accumulate(k0, fw[0].as_() + bw[0].as_(), coeff);
        }

        *dst = k0.to_();
    }
}
433
/// Converts a floating-point kernel coefficient of type `F` into the
/// accumulator representation `I` used when filtering pixels of type `Self`
/// (e.g. Q0.15 fixed point for integer pixels, identity for floats).
trait KernelTransformer<F, I> {
    fn transform(input: F) -> I;
}
437
438impl KernelTransformer<f32, u32> for u8 {
439 fn transform(input: f32) -> u32 {
440 const SCALE: f32 = (1 << Q0_15) as f32;
441 (input * SCALE).min(((1u32 << Q0_15) - 1) as f32).max(0.) as u32
442 }
443}
444
445impl KernelTransformer<f32, u32> for u16 {
446 fn transform(input: f32) -> u32 {
447 const SCALE: f32 = (1 << Q0_15) as f32;
448 (input * SCALE).min(((1u32 << Q0_15) - 1) as f32).max(0.) as u32
449 }
450}
451
/// f32 pixels filter in f32 directly; coefficients pass through unchanged.
impl KernelTransformer<f32, f32> for f32 {
    fn transform(input: f32) -> f32 {
        input
    }
}
457
/// u16 pixels may also be filtered with a float accumulator; coefficients
/// pass through unchanged.
impl KernelTransformer<f32, f32> for u16 {
    fn transform(input: f32) -> f32 {
        input
    }
}
463
464fn prepare_symmetric_kernel<I: Copy + PartialEq + 'static>(kernel: &[I]) -> Vec<I>
466where
467 i32: AsPrimitive<I>,
468{
469 let zeros: I = 0i32.as_();
470 let mut new_kernel = kernel.to_vec();
471 while new_kernel.len() > 2
472 && (new_kernel.last().unwrap().eq(&zeros) && new_kernel.first().unwrap().eq(&zeros))
473 {
474 new_kernel.remove(0);
475 new_kernel.remove(new_kernel.len() - 1);
476 }
477
478 new_kernel
479}
480
481const RING_QUEUE_CIRCULAR_CUTOFF: usize = 55;
482
/// Separable 2D convolution that streams rows through a circular buffer.
///
/// The horizontal pass runs one image row at a time into `buffer`, a ring
/// holding the most recent `column_kernel.len()` filtered rows. Once
/// `half_kernel` rows of lookahead exist, each iteration also runs the
/// vertical pass over a window of ring rows and emits one destination row.
/// Vertical clamp-to-edge is achieved by pre-filling the ring with copies of
/// the first filtered row and, past the image bottom, re-filtering the last
/// image row. This avoids allocating a full intermediate image.
///
/// # Errors
/// Propagates `DimensionError` limit errors from overflowing size math.
fn filter_2d_separable_ring_queue<T, I, const N: usize>(
    image: &[T],
    destination: &mut [T],
    image_size: FilterImageSize,
    row_kernel: &[I],
    column_kernel: &[I],
) -> Result<(), ImageError>
where
    T: Copy + Default + Send + Sync + AsPrimitive<I>,
    I: ToStorage<T>
        + Mul<I, Output = I>
        + Add<I, Output = I>
        + MulAdd<I, Output = I>
        + Send
        + Sync
        + PartialEq
        + Default
        + 'static
        + Copy,
    i32: AsPrimitive<I>,
    f64: AsPrimitive<T>,
{
    let pad_w = (row_kernel.len() / 2).max(1);

    // Scratch row for the horizontal pass, with left/right edge padding.
    let arena_width = image_size
        .width
        .safe_mul(N)?
        .safe_add(pad_w.safe_mul(2 * N)?)?;
    let mut row_buffer = vec![T::default(); arena_width];

    let full_width = image_size.width * N;

    // Ring buffer: `column_kernel.len()` horizontally-filtered rows.
    let mut buffer = vec![T::default(); (image_size.width * N).safe_mul(column_kernel.len())?];

    let column_kernel_len = column_kernel.len();

    let half_kernel = column_kernel_len / 2;

    // Prime the ring: filter image row 0 into slot 0...
    make_arena_row::<T, N>(
        image,
        &mut row_buffer,
        0,
        image_size,
        KernelShape {
            width: row_kernel.len(),
            height: 0,
        },
    )?;

    filter_symmetric_row::<T, I, N>(&row_buffer, &mut buffer[..full_width], row_kernel);

    // ...then replicate it into the next `half_kernel` slots (top edge clamp).
    let (src_row, rest) = buffer.split_at_mut(full_width);
    for dst in rest.chunks_exact_mut(full_width).take(half_kernel) {
        for (dst, src) in dst.iter_mut().zip(src_row.iter()) {
            *dst = *src;
        }
    }

    // Ring slot that the next filtered row will be written into.
    let mut start_ky = column_kernel_len / 2 + 1;
    start_ky %= column_kernel_len;

    // Iterate past the bottom by `half_kernel` rows so the final output rows
    // still see a full kernel window.
    for y in 1..image_size.height + half_kernel {
        // Past the image bottom, keep re-filtering the last row (edge clamp).
        // NOTE(review): the `if` is redundant — both arms reduce to
        // `y.min(image_size.height - 1)`.
        let new_y = if y < image_size.height {
            y
        } else {
            y.min(image_size.height - 1)
        };

        make_arena_row::<T, N>(
            image,
            &mut row_buffer,
            new_y,
            image_size,
            KernelShape {
                width: row_kernel.len(),
                height: 0,
            },
        )?;

        filter_symmetric_row::<T, I, N>(
            &row_buffer,
            &mut buffer[start_ky * full_width..(start_ky + 1) * full_width],
            row_kernel,
        );

        // Once enough lookahead has accumulated, emit destination row
        // `y - half_kernel` from the current kernel window.
        if y >= half_kernel {
            // `vec![image; ...]` is only a placeholder slice value; every
            // entry is overwritten in the loop below.
            let mut brows = vec![image; column_kernel_len];

            // Gather the window rows from the ring in kernel-tap order.
            for (i, brow) in brows.iter_mut().enumerate() {
                let ky = (i + start_ky + 1) % column_kernel_len;
                *brow = &buffer[ky * full_width..(ky + 1) * full_width];
            }

            let dy = y - half_kernel;

            let dst = &mut destination[dy * full_width..(dy + 1) * full_width];

            filter_symmetric_column::<T, I>(&brows, dst, image_size, column_kernel, N);
        }

        // Advance the ring head.
        start_ky += 1;
        start_ky %= column_kernel_len;
    }

    Ok(())
}
646
/// Full separable 2D filter driver.
///
/// Validates buffer sizes, converts both kernels into the accumulator
/// representation via `KernelTransformer`, trims zero edge taps, and then
/// either streams rows through the ring-queue path (short column kernels)
/// or materializes the horizontally-filtered image and runs the column pass
/// against replicated edge rows from `make_columns_arenas`.
///
/// # Errors
/// Returns a `DimensionError` limit error when `image` / `destination`
/// lengths do not match `width * height * N`, or when size math overflows.
///
/// # Panics
/// Panics if either kernel length is even.
fn filter_2d_separable<T, F, I, const N: usize>(
    image: &[T],
    destination: &mut [T],
    image_size: FilterImageSize,
    row_kernel: &[F],
    column_kernel: &[F],
) -> Result<(), ImageError>
where
    T: Copy + AsPrimitive<F> + Default + Send + Sync + KernelTransformer<F, I> + AsPrimitive<I>,
    F: Default + 'static + Copy,
    I: ToStorage<T>
        + Mul<I, Output = I>
        + Add<I, Output = I>
        + MulAdd<I, Output = I>
        + Send
        + Sync
        + PartialEq
        + Default
        + 'static
        + Copy,
    i32: AsPrimitive<F> + AsPrimitive<I>,
    f64: AsPrimitive<T>,
{
    if image.len() != image_size.width.safe_mul(image_size.height)?.safe_mul(N)? {
        return Err(ImageError::Limits(LimitError::from_kind(
            LimitErrorKind::DimensionError,
        )));
    }
    if destination.len() != image.len() {
        return Err(ImageError::Limits(LimitError::from_kind(
            LimitErrorKind::DimensionError,
        )));
    }

    assert_ne!(row_kernel.len() & 1, 0, "Row kernel length must be odd");
    assert_ne!(
        column_kernel.len() & 1,
        0,
        "Column kernel length must be odd"
    );

    // Convert kernels to the accumulator domain (e.g. Q0.15 for integers).
    let mut scanned_row_kernel = row_kernel
        .iter()
        .map(|&x| T::transform(x))
        .collect::<Vec<I>>();
    let mut scanned_column_kernel = column_kernel
        .iter()
        .map(|&x| T::transform(x))
        .collect::<Vec<I>>();

    scanned_row_kernel = prepare_symmetric_kernel(&scanned_row_kernel);
    scanned_column_kernel = prepare_symmetric_kernel(&scanned_column_kernel);

    // NOTE(review): with the odd-length assertions above,
    // `prepare_symmetric_kernel` never trims a non-empty kernel to empty, so
    // this pass-through branch appears defensive.
    if scanned_row_kernel.is_empty() && scanned_column_kernel.is_empty() {
        destination.copy_from_slice(image);
        return Ok(());
    }

    // NOTE(review): the cutoff tests the *untrimmed* column kernel length
    // while the trimmed kernels are the ones actually executed — confirm
    // that is intentional.
    if column_kernel.len() < RING_QUEUE_CIRCULAR_CUTOFF {
        return filter_2d_separable_ring_queue::<T, I, N>(
            image,
            destination,
            image_size,
            &scanned_row_kernel,
            &scanned_column_kernel,
        );
    }

    let pad_w = (scanned_row_kernel.len() / 2).max(1);

    let arena_width = image_size.width * N + pad_w * 2 * N;
    let mut row_buffer = vec![T::default(); arena_width];

    // Horizontal pass result for the whole image.
    let mut transient_image = vec![T::default(); image_size.width * image_size.height * N];

    for (y, dst) in transient_image
        .chunks_exact_mut(image_size.width * N)
        .enumerate()
    {
        make_arena_row::<T, N>(
            image,
            &mut row_buffer,
            y,
            image_size,
            KernelShape {
                width: scanned_row_kernel.len(),
                height: 0,
            },
        )?;

        filter_symmetric_row::<T, I, N>(&row_buffer, dst, &scanned_row_kernel);
    }

    let column_kernel_shape = KernelShape {
        width: 0,
        height: scanned_column_kernel.len(),
    };

    // Replicated top/bottom edge rows for the vertical pass.
    let column_arena_k =
        make_columns_arenas::<T, N>(transient_image.as_slice(), image_size, column_kernel_shape);

    let top_pad = column_arena_k.top_pad.as_slice();
    let bottom_pad = column_arena_k.bottom_pad.as_slice();

    let pad_h = column_kernel_shape.height / 2;

    let transient_image_slice = transient_image.as_slice();

    let src_stride = image_size.width * N;

    for (y, dst) in destination
        .chunks_exact_mut(image_size.width * N)
        .enumerate()
    {
        // Placeholder slices, all overwritten in the loop below.
        let mut brows: Vec<&[T]> = vec![&transient_image_slice[0..]; column_kernel_shape.height];

        // Select the source row for each kernel tap, substituting pad rows
        // beyond the top/bottom edges.
        // NOTE(review): the pad-row index is derived from `k` alone rather
        // than the clamped source row; this is harmless because every
        // top_pad row is a copy of image row 0 and every bottom_pad row a
        // copy of the last row (see make_columns_arenas).
        for (k, row) in (0..column_kernel_shape.height).zip(brows.iter_mut()) {
            if (y as i64 - pad_h as i64 + k as i64) < 0 {
                *row = &top_pad[(pad_h - k - 1) * src_stride..];
            } else if (y as i64 - pad_h as i64 + k as i64) as usize >= image_size.height {
                *row = &bottom_pad[(k - pad_h - 1) * src_stride..];
            } else {
                let fy = (y as i64 + k as i64 - pad_h as i64) as usize;
                let start_offset = src_stride * fy;
                *row = &transient_image_slice[start_offset..(start_offset + src_stride)];
            }
        }

        let brows_slice = brows.as_slice();

        filter_symmetric_column::<T, I>(brows_slice, dst, image_size, &scanned_column_kernel, N);
    }

    Ok(())
}
809
/// Separable filter over an 8-bit single-channel (plane) image; kernel
/// coefficients are quantized to Q0.15 fixed point. Kernels must be
/// odd-length; buffers must be `width * height` samples.
pub(crate) fn filter_2d_sep_plane(
    image: &[u8],
    destination: &mut [u8],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<u8, f32, u32, 1>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
825
/// Separable filter over an 8-bit two-channel (LA) image; kernel
/// coefficients are quantized to Q0.15 fixed point. Kernels must be
/// odd-length; buffers must be `width * height * 2` samples.
pub(crate) fn filter_2d_sep_la(
    image: &[u8],
    destination: &mut [u8],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<u8, f32, u32, 2>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
841
/// Separable filter over an 8-bit RGB image; kernel coefficients are
/// quantized to Q0.15 fixed point. Kernels must be odd-length; buffers must
/// be `width * height * 3` samples.
pub(crate) fn filter_2d_sep_rgb(
    image: &[u8],
    destination: &mut [u8],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<u8, f32, u32, 3>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
857
/// Separable filter over an 8-bit RGBA image; kernel coefficients are
/// quantized to Q0.15 fixed point. Kernels must be odd-length; buffers must
/// be `width * height * 4` samples.
pub(crate) fn filter_2d_sep_rgba(
    image: &[u8],
    destination: &mut [u8],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<u8, f32, u32, 4>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
873
/// Separable filter over an f32 two-channel (LA) image; coefficients are
/// used directly in f32 arithmetic. Kernels must be odd-length; buffers must
/// be `width * height * 2` samples.
pub(crate) fn filter_2d_sep_la_f32(
    image: &[f32],
    destination: &mut [f32],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<f32, f32, f32, 2>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
889
/// Separable filter over an f32 single-channel (plane) image; coefficients
/// are used directly in f32 arithmetic. Kernels must be odd-length; buffers
/// must be `width * height` samples.
pub(crate) fn filter_2d_sep_plane_f32(
    image: &[f32],
    destination: &mut [f32],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<f32, f32, f32, 1>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
905
/// Separable filter over an f32 RGB image; coefficients are used directly
/// in f32 arithmetic. Kernels must be odd-length; buffers must be
/// `width * height * 3` samples.
pub(crate) fn filter_2d_sep_rgb_f32(
    image: &[f32],
    destination: &mut [f32],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<f32, f32, f32, 3>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
921
/// Separable filter over an f32 RGBA image; coefficients are used directly
/// in f32 arithmetic. Kernels must be odd-length; buffers must be
/// `width * height * 4` samples.
pub(crate) fn filter_2d_sep_rgba_f32(
    image: &[f32],
    destination: &mut [f32],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<f32, f32, f32, 4>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
937
/// Separable filter over a 16-bit RGB image; kernel coefficients are
/// quantized to Q0.15 fixed point with a u32 accumulator. Kernels must be
/// odd-length; buffers must be `width * height * 3` samples.
pub(crate) fn filter_2d_sep_rgb_u16(
    image: &[u16],
    destination: &mut [u16],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<u16, f32, u32, 3>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
953
/// Separable filter over a 16-bit RGBA image; kernel coefficients are
/// quantized to Q0.15 fixed point with a u32 accumulator. Kernels must be
/// odd-length; buffers must be `width * height * 4` samples.
pub(crate) fn filter_2d_sep_rgba_u16(
    image: &[u16],
    destination: &mut [u16],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<u16, f32, u32, 4>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
969
/// Separable filter over a 16-bit two-channel (LA) image; kernel
/// coefficients are quantized to Q0.15 fixed point with a u32 accumulator.
/// Kernels must be odd-length; buffers must be `width * height * 2` samples.
pub(crate) fn filter_2d_sep_la_u16(
    image: &[u16],
    destination: &mut [u16],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<u16, f32, u32, 2>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}
985
/// Separable filter over a 16-bit single-channel (plane) image; kernel
/// coefficients are quantized to Q0.15 fixed point with a u32 accumulator.
/// Kernels must be odd-length; buffers must be `width * height` samples.
pub(crate) fn filter_2d_sep_plane_u16(
    image: &[u16],
    destination: &mut [u16],
    image_size: FilterImageSize,
    row_kernel: &[f32],
    column_kernel: &[f32],
) -> Result<(), ImageError> {
    filter_2d_separable::<u16, f32, u32, 1>(
        image,
        destination,
        image_size,
        row_kernel,
        column_kernel,
    )
}