1use crate::math::{FusedMultiplyAdd, FusedMultiplyNegAdd};
30use crate::mlaf::{mlaf, neg_mlaf};
31use crate::safe_math::{SafeAdd, SafeMul};
32use crate::{CmsError, MalformedSize, Vector3f, Vector4f};
33use std::ops::{Add, Mul, Sub};
34
/// Scalar fused multiply-add: forwards to [`mlaf`], i.e. `self + b * c`
/// (fused where the target supports FMA — see `crate::mlaf`).
impl FusedMultiplyAdd<f32> for f32 {
    #[inline(always)]
    fn mla(&self, b: f32, c: f32) -> f32 {
        mlaf(*self, b, c)
    }
}
41
/// Scalar fused negated multiply-add: forwards to [`neg_mlaf`].
/// NOTE(review): assumed to compute `self - b * c` (consistent with its use
/// in `lerp` below) — confirm against `crate::mlaf`.
impl FusedMultiplyNegAdd<f32> for f32 {
    #[inline(always)]
    fn neg_mla(&self, b: f32, c: f32) -> f32 {
        neg_mlaf(*self, b, c)
    }
}
48
/// Linear interpolation `a * (1 - t) + b * t`, evaluated with fused
/// multiply-adds as `(a - a*t) + b*t`: `neg_mla` produces `a - a*t`,
/// then `mla` adds `b*t`. The FMA formulation is kept deliberately —
/// do not algebraically "simplify" it, as rounding would change.
#[inline(always)]
pub(crate) fn lerp<
    T: Mul<Output = T>
        + Sub<Output = T>
        + Add<Output = T>
        + From<f32>
        + Copy
        + FusedMultiplyAdd<T>
        + FusedMultiplyNegAdd<T>,
>(
    a: T,
    b: T,
    t: T,
) -> T {
    a.neg_mla(a, t).mla(b, t)
}
65
/// View over a flattened 4-D colour lookup table (CLUT).
///
/// The grid is stored row-major as `[x][y][z][w]` with `channels` `f32`
/// values per node; the w axis is contiguous (implicit stride 1).
pub struct Hypercube<'a> {
    // Flattened grid data.
    array: &'a [f32],
    // Node stride (in nodes, not f32 elements) along the x axis.
    x_stride: u32,
    // Node stride along the y axis.
    y_stride: u32,
    // Node stride along the z axis.
    z_stride: u32,
    // Grid points per dimension, ordered [x, y, z, w].
    grid_size: [u8; 4],
}
76
/// Fetches one CLUT node at integer lattice coordinates `(x, y, z, w)`.
trait Fetcher4<T> {
    fn fetch(&self, x: i32, y: i32, z: i32, w: i32) -> T;
}
80
81impl Hypercube<'_> {
82 pub fn new(
83 array: &[f32],
84 grid_size: usize,
85 channels: usize,
86 ) -> Result<Hypercube<'_>, CmsError> {
87 if array.is_empty() || grid_size == 0 {
88 return Ok(Hypercube {
89 array,
90 x_stride: 0,
91 y_stride: 0,
92 z_stride: 0,
93 grid_size: [0, 0, 0, 0],
94 });
95 }
96 let z_stride = grid_size as u32;
97 let y_stride = z_stride * z_stride;
98 let x_stride = z_stride * z_stride * z_stride;
99
100 let last_index = (grid_size - 1)
101 .safe_mul(x_stride as usize)?
102 .safe_add((grid_size - 1).safe_mul(y_stride as usize)?)?
103 .safe_add((grid_size - 1).safe_mul(z_stride as usize)?)?
104 .safe_add(grid_size - 1)?
105 .safe_mul(channels)?;
106
107 if last_index >= array.len() {
108 return Err(CmsError::MalformedClut(MalformedSize {
109 size: array.len(),
110 expected: last_index,
111 }));
112 }
113
114 Ok(Hypercube {
115 array,
116 x_stride,
117 y_stride,
118 z_stride,
119 grid_size: [
120 grid_size as u8,
121 grid_size as u8,
122 grid_size as u8,
123 grid_size as u8,
124 ],
125 })
126 }
127
128 pub fn new_hypercube(
129 array: &[f32],
130 grid_size: [u8; 4],
131 channels: usize,
132 ) -> Result<Hypercube<'_>, CmsError> {
133 if array.is_empty()
134 || grid_size[0] == 0
135 || grid_size[1] == 0
136 || grid_size[2] == 0
137 || grid_size[3] == 0
138 {
139 return Ok(Hypercube {
140 array,
141 x_stride: 0,
142 y_stride: 0,
143 z_stride: 0,
144 grid_size,
145 });
146 }
147 let z_stride = grid_size[2] as u32;
148 let y_stride = z_stride * grid_size[1] as u32;
149 let x_stride = y_stride * grid_size[0] as u32;
150 let last_index = (grid_size[0] as usize - 1)
151 .safe_mul(x_stride as usize)?
152 .safe_add((grid_size[1] as usize - 1).safe_mul(y_stride as usize)?)?
153 .safe_add((grid_size[2] as usize - 1).safe_mul(z_stride as usize)?)?
154 .safe_add(grid_size[3] as usize - 1)?
155 .safe_mul(channels)?;
156
157 if last_index >= array.len() {
158 return Err(CmsError::MalformedClut(MalformedSize {
159 size: array.len(),
160 expected: last_index,
161 }));
162 }
163
164 Ok(Hypercube {
165 array,
166 x_stride,
167 y_stride,
168 z_stride,
169 grid_size,
170 })
171 }
172}
173
/// [`Fetcher4`] backend reading 3-channel nodes out of a flat CLUT slice.
struct Fetch4Vec3<'a> {
    // Flattened grid data, 3 f32 values per node.
    array: &'a [f32],
    // Node strides mirrored from the owning Hypercube (w stride is 1).
    x_stride: u32,
    y_stride: u32,
    z_stride: u32,
}
180
/// [`Fetcher4`] backend reading 4-channel nodes out of a flat CLUT slice.
struct Fetch4Vec4<'a> {
    // Flattened grid data, 4 f32 values per node.
    array: &'a [f32],
    // Node strides mirrored from the owning Hypercube (w stride is 1).
    x_stride: u32,
    y_stride: u32,
    z_stride: u32,
}
187
188impl Fetcher4<Vector3f> for Fetch4Vec3<'_> {
189 #[inline(always)]
190 fn fetch(&self, x: i32, y: i32, z: i32, w: i32) -> Vector3f {
191 let start = (x as u32 * self.x_stride
192 + y as u32 * self.y_stride
193 + z as u32 * self.z_stride
194 + w as u32) as usize
195 * 3;
196 let k = &self.array[start..start + 3];
197 Vector3f {
198 v: [k[0], k[1], k[2]],
199 }
200 }
201}
202
203impl Fetcher4<Vector4f> for Fetch4Vec4<'_> {
204 #[inline(always)]
205 fn fetch(&self, x: i32, y: i32, z: i32, w: i32) -> Vector4f {
206 let start = (x as u32 * self.x_stride
207 + y as u32 * self.y_stride
208 + z as u32 * self.z_stride
209 + w as u32) as usize
210 * 4;
211 let k = &self.array[start..start + 4];
212 Vector4f {
213 v: [k[0], k[1], k[2], k[3]],
214 }
215 }
216}
217
218impl Hypercube<'_> {
    /// Quadrilinear (4-D) interpolation at normalized coordinates
    /// `lin_x..lin_w`: linear interpolation applied successively along
    /// x, y, z and w between the 16 surrounding grid nodes.
    ///
    /// Negative inputs are clamped to 0; indices are clamped to the last
    /// grid point, so inputs above 1.0 read the grid edge.
    #[inline(always)]
    fn quadlinear<
        T: From<f32>
            + Add<T, Output = T>
            + Mul<T, Output = T>
            + FusedMultiplyAdd<T>
            + Sub<T, Output = T>
            + Copy
            + FusedMultiplyNegAdd<T>,
    >(
        &self,
        lin_x: f32,
        lin_y: f32,
        lin_z: f32,
        lin_w: f32,
        r: impl Fetcher4<T>,
    ) -> T {
        let lin_x = lin_x.max(0.0);
        let lin_y = lin_y.max(0.0);
        let lin_z = lin_z.max(0.0);
        let lin_w = lin_w.max(0.0);

        // Scale from normalized [0, 1] input to grid-cell coordinates.
        let scale_x = (self.grid_size[0] as i32 - 1) as f32;
        let scale_y = (self.grid_size[1] as i32 - 1) as f32;
        let scale_z = (self.grid_size[2] as i32 - 1) as f32;
        let scale_w = (self.grid_size[3] as i32 - 1) as f32;

        // Lower cell corner (floor), clamped to the last grid point.
        let x = (lin_x * scale_x).floor().min(scale_x) as i32;
        let y = (lin_y * scale_y).floor().min(scale_y) as i32;
        let z = (lin_z * scale_z).floor().min(scale_z) as i32;
        let w = (lin_w * scale_w).floor().min(scale_w) as i32;

        // Upper cell corner (ceil), clamped likewise.
        let x_n = (lin_x * scale_x).ceil().min(scale_x) as i32;
        let y_n = (lin_y * scale_y).ceil().min(scale_y) as i32;
        let z_n = (lin_z * scale_z).ceil().min(scale_z) as i32;
        let w_n = (lin_w * scale_w).ceil().min(scale_w) as i32;

        // Fractional position inside the cell along each axis.
        let x_d = T::from(lin_x * scale_x - x as f32);
        let y_d = T::from(lin_y * scale_y - y as f32);
        let z_d = T::from(lin_z * scale_z - z as f32);
        let w_d = T::from(lin_w * scale_w - w as f32);

        // Trilinear interpolation inside the w slab.
        let r_x1 = lerp(r.fetch(x, y, z, w), r.fetch(x_n, y, z, w), x_d);
        let r_x2 = lerp(r.fetch(x, y_n, z, w), r.fetch(x_n, y_n, z, w), x_d);
        let r_y1 = lerp(r_x1, r_x2, y_d);
        let r_x3 = lerp(r.fetch(x, y, z_n, w), r.fetch(x_n, y, z_n, w), x_d);
        let r_x4 = lerp(r.fetch(x, y_n, z_n, w), r.fetch(x_n, y_n, z_n, w), x_d);
        let r_y2 = lerp(r_x3, r_x4, y_d);
        let r_z1 = lerp(r_y1, r_y2, z_d);

        // Same trilinear interpolation inside the w_n slab.
        let r_x1 = lerp(r.fetch(x, y, z, w_n), r.fetch(x_n, y, z, w_n), x_d);
        let r_x2 = lerp(r.fetch(x, y_n, z, w_n), r.fetch(x_n, y_n, z, w_n), x_d);
        let r_y1 = lerp(r_x1, r_x2, y_d);
        let r_x3 = lerp(r.fetch(x, y, z_n, w_n), r.fetch(x_n, y, z_n, w_n), x_d);
        let r_x4 = lerp(r.fetch(x, y_n, z_n, w_n), r.fetch(x_n, y_n, z_n, w_n), x_d);
        let r_y2 = lerp(r_x3, r_x4, y_d);
        let r_z2 = lerp(r_y1, r_y2, z_d);
        // Final interpolation between the two slabs along w.
        lerp(r_z1, r_z2, w_d)
    }
278
279 #[inline]
280 pub fn quadlinear_vec3(&self, lin_x: f32, lin_y: f32, lin_z: f32, lin_w: f32) -> Vector3f {
281 self.quadlinear(
282 lin_x,
283 lin_y,
284 lin_z,
285 lin_w,
286 Fetch4Vec3 {
287 array: self.array,
288 x_stride: self.x_stride,
289 y_stride: self.y_stride,
290 z_stride: self.z_stride,
291 },
292 )
293 }
294
295 #[inline]
296 pub fn quadlinear_vec4(&self, lin_x: f32, lin_y: f32, lin_z: f32, lin_w: f32) -> Vector4f {
297 self.quadlinear(
298 lin_x,
299 lin_y,
300 lin_z,
301 lin_w,
302 Fetch4Vec4 {
303 array: self.array,
304 x_stride: self.x_stride,
305 y_stride: self.y_stride,
306 z_stride: self.z_stride,
307 },
308 )
309 }
310
    /// Pyramidal interpolation at normalized coordinates `lin_x..lin_w`.
    ///
    /// Within each w slab the cube cell is split into three pyramids chosen
    /// by comparing the fractional offsets (dr, dg, db); the selected
    /// pyramid is evaluated as a chain of fused multiply-adds. The two slab
    /// results (at w and w_n) are then linearly interpolated along w.
    #[cfg(feature = "options")]
    #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
    #[inline(always)]
    fn pyramid<
        T: From<f32>
            + Add<T, Output = T>
            + Mul<T, Output = T>
            + FusedMultiplyAdd<T>
            + Sub<T, Output = T>
            + Copy
            + FusedMultiplyNegAdd<T>,
    >(
        &self,
        lin_x: f32,
        lin_y: f32,
        lin_z: f32,
        lin_w: f32,
        r: impl Fetcher4<T>,
    ) -> T {
        let lin_x = lin_x.max(0.0);
        let lin_y = lin_y.max(0.0);
        let lin_z = lin_z.max(0.0);
        let lin_w = lin_w.max(0.0);

        // Scale from normalized [0, 1] input to grid-cell coordinates.
        let scale_x = (self.grid_size[0] as i32 - 1) as f32;
        let scale_y = (self.grid_size[1] as i32 - 1) as f32;
        let scale_z = (self.grid_size[2] as i32 - 1) as f32;
        let scale_w = (self.grid_size[3] as i32 - 1) as f32;

        // Lower and upper cell corners, clamped to the last grid point.
        let x = (lin_x * scale_x).floor().min(scale_x) as i32;
        let y = (lin_y * scale_y).floor().min(scale_y) as i32;
        let z = (lin_z * scale_z).floor().min(scale_z) as i32;
        let w = (lin_w * scale_w).floor().min(scale_w) as i32;

        let x_n = (lin_x * scale_x).ceil().min(scale_x) as i32;
        let y_n = (lin_y * scale_y).ceil().min(scale_y) as i32;
        let z_n = (lin_z * scale_z).ceil().min(scale_z) as i32;
        let w_n = (lin_w * scale_w).ceil().min(scale_w) as i32;

        // Fractional offsets inside the cell.
        let dr = lin_x * scale_x - x as f32;
        let dg = lin_y * scale_y - y as f32;
        let db = lin_z * scale_z - z as f32;
        let dw = lin_w * scale_w - w as f32;

        let c0 = r.fetch(x, y, z, w);

        // Pyramid evaluation inside the w slab.
        let w0 = if dr > db && dg > db {
            let x0 = r.fetch(x_n, y_n, z_n, w);
            let x1 = r.fetch(x_n, y_n, z, w);
            let x2 = r.fetch(x_n, y, z, w);
            let x3 = r.fetch(x, y_n, z, w);

            let c1 = x0 - x1;
            let c2 = x2 - c0;
            let c3 = x3 - c0;
            let c4 = c0 - x3 - x2 + x1;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(dr * dg))
        } else if db > dr && dg > dr {
            let x0 = r.fetch(x, y, z_n, w);
            let x1 = r.fetch(x_n, y_n, z_n, w);
            let x2 = r.fetch(x, y_n, z_n, w);
            let x3 = r.fetch(x, y_n, z, w);

            let c1 = x0 - c0;
            let c2 = x1 - x2;
            let c3 = x3 - c0;
            let c4 = c0 - x3 - x0 + x2;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(dg * db))
        } else {
            let x0 = r.fetch(x, y, z_n, w);
            let x1 = r.fetch(x_n, y, z, w);
            let x2 = r.fetch(x_n, y, z_n, w);
            let x3 = r.fetch(x_n, y_n, z_n, w);

            let c1 = x0 - c0;
            let c2 = x1 - c0;
            let c3 = x3 - x2;
            let c4 = c0 - x1 - x0 + x2;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(db * dr))
        };

        let c0 = r.fetch(x, y, z, w_n);

        // Identical pyramid evaluation inside the w_n slab.
        let w1 = if dr > db && dg > db {
            let x0 = r.fetch(x_n, y_n, z_n, w_n);
            let x1 = r.fetch(x_n, y_n, z, w_n);
            let x2 = r.fetch(x_n, y, z, w_n);
            let x3 = r.fetch(x, y_n, z, w_n);

            let c1 = x0 - x1;
            let c2 = x2 - c0;
            let c3 = x3 - c0;
            let c4 = c0 - x3 - x2 + x1;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(dr * dg))
        } else if db > dr && dg > dr {
            let x0 = r.fetch(x, y, z_n, w_n);
            let x1 = r.fetch(x_n, y_n, z_n, w_n);
            let x2 = r.fetch(x, y_n, z_n, w_n);
            let x3 = r.fetch(x, y_n, z, w_n);

            let c1 = x0 - c0;
            let c2 = x1 - x2;
            let c3 = x3 - c0;
            let c4 = c0 - x3 - x0 + x2;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(dg * db))
        } else {
            let x0 = r.fetch(x, y, z_n, w_n);
            let x1 = r.fetch(x_n, y, z, w_n);
            let x2 = r.fetch(x_n, y, z_n, w_n);
            let x3 = r.fetch(x_n, y_n, z_n, w_n);

            let c1 = x0 - c0;
            let c2 = x1 - c0;
            let c3 = x3 - x2;
            let c4 = c0 - x1 - x0 + x2;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(db * dr))
        };
        // Lerp between the two slabs along w: w0 * (1 - dw) + w1 * dw.
        w0.neg_mla(w0, T::from(dw)).mla(w1, T::from(dw))
    }
454
455 #[cfg(feature = "options")]
456 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
457 #[inline]
458 pub fn pyramid_vec3(&self, lin_x: f32, lin_y: f32, lin_z: f32, lin_w: f32) -> Vector3f {
459 self.pyramid(
460 lin_x,
461 lin_y,
462 lin_z,
463 lin_w,
464 Fetch4Vec3 {
465 array: self.array,
466 x_stride: self.x_stride,
467 y_stride: self.y_stride,
468 z_stride: self.z_stride,
469 },
470 )
471 }
472
473 #[cfg(feature = "options")]
474 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
475 #[inline]
476 pub fn pyramid_vec4(&self, lin_x: f32, lin_y: f32, lin_z: f32, lin_w: f32) -> Vector4f {
477 self.pyramid(
478 lin_x,
479 lin_y,
480 lin_z,
481 lin_w,
482 Fetch4Vec4 {
483 array: self.array,
484 x_stride: self.x_stride,
485 y_stride: self.y_stride,
486 z_stride: self.z_stride,
487 },
488 )
489 }
490
    /// Prism interpolation at normalized coordinates `lin_x..lin_w`.
    ///
    /// Within each w slab the cube cell is split into two triangular prisms
    /// by the `db >= dr` diagonal; the selected prism is evaluated as a
    /// chain of fused multiply-adds. The two slab results (at w and w_n)
    /// are then linearly interpolated along w.
    #[cfg(feature = "options")]
    #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
    #[inline(always)]
    fn prism<
        T: From<f32>
            + Add<T, Output = T>
            + Mul<T, Output = T>
            + FusedMultiplyAdd<T>
            + Sub<T, Output = T>
            + Copy
            + FusedMultiplyNegAdd<T>,
    >(
        &self,
        lin_x: f32,
        lin_y: f32,
        lin_z: f32,
        lin_w: f32,
        r: impl Fetcher4<T>,
    ) -> T {
        let lin_x = lin_x.max(0.0);
        let lin_y = lin_y.max(0.0);
        let lin_z = lin_z.max(0.0);
        let lin_w = lin_w.max(0.0);

        // Scale from normalized [0, 1] input to grid-cell coordinates.
        let scale_x = (self.grid_size[0] as i32 - 1) as f32;
        let scale_y = (self.grid_size[1] as i32 - 1) as f32;
        let scale_z = (self.grid_size[2] as i32 - 1) as f32;
        let scale_w = (self.grid_size[3] as i32 - 1) as f32;

        // Lower and upper cell corners, clamped to the last grid point.
        let x = (lin_x * scale_x).floor().min(scale_x) as i32;
        let y = (lin_y * scale_y).floor().min(scale_y) as i32;
        let z = (lin_z * scale_z).floor().min(scale_z) as i32;
        let w = (lin_w * scale_w).floor().min(scale_w) as i32;

        let x_n = (lin_x * scale_x).ceil().min(scale_x) as i32;
        let y_n = (lin_y * scale_y).ceil().min(scale_y) as i32;
        let z_n = (lin_z * scale_z).ceil().min(scale_z) as i32;
        let w_n = (lin_w * scale_w).ceil().min(scale_w) as i32;

        // Fractional offsets inside the cell.
        let dr = lin_x * scale_x - x as f32;
        let dg = lin_y * scale_y - y as f32;
        let db = lin_z * scale_z - z as f32;
        let dw = lin_w * scale_w - w as f32;

        let c0 = r.fetch(x, y, z, w);

        // Prism evaluation inside the w slab.
        let w0 = if db >= dr {
            let x0 = r.fetch(x, y, z_n, w);
            let x1 = r.fetch(x_n, y, z_n, w);
            let x2 = r.fetch(x, y_n, z, w);
            let x3 = r.fetch(x, y_n, z_n, w);
            let x4 = r.fetch(x_n, y_n, z_n, w);

            let c1 = x0 - c0;
            let c2 = x1 - x0;
            let c3 = x2 - c0;
            let c4 = c0 - x2 - x0 + x3;
            let c5 = x0 - x3 - x1 + x4;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            let s3 = s2.mla(c4, T::from(dg * db));
            s3.mla(c5, T::from(dr * dg))
        } else {
            let x0 = r.fetch(x_n, y, z, w);
            let x1 = r.fetch(x_n, y, z_n, w);
            let x2 = r.fetch(x, y_n, z, w);
            let x3 = r.fetch(x_n, y_n, z, w);
            let x4 = r.fetch(x_n, y_n, z_n, w);

            let c1 = x1 - x0;
            let c2 = x0 - c0;
            let c3 = x2 - c0;
            let c4 = x0 - x3 - x1 + x4;
            let c5 = c0 - x2 - x0 + x3;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            let s3 = s2.mla(c4, T::from(dg * db));
            s3.mla(c5, T::from(dr * dg))
        };

        let c0 = r.fetch(x, y, z, w_n);

        // Identical prism evaluation inside the w_n slab.
        let w1 = if db >= dr {
            let x0 = r.fetch(x, y, z_n, w_n);
            let x1 = r.fetch(x_n, y, z_n, w_n);
            let x2 = r.fetch(x, y_n, z, w_n);
            let x3 = r.fetch(x, y_n, z_n, w_n);
            let x4 = r.fetch(x_n, y_n, z_n, w_n);

            let c1 = x0 - c0;
            let c2 = x1 - x0;
            let c3 = x2 - c0;
            let c4 = c0 - x2 - x0 + x3;
            let c5 = x0 - x3 - x1 + x4;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            let s3 = s2.mla(c4, T::from(dg * db));
            s3.mla(c5, T::from(dr * dg))
        } else {
            let x0 = r.fetch(x_n, y, z, w_n);
            let x1 = r.fetch(x_n, y, z_n, w_n);
            let x2 = r.fetch(x, y_n, z, w_n);
            let x3 = r.fetch(x_n, y_n, z, w_n);
            let x4 = r.fetch(x_n, y_n, z_n, w_n);

            let c1 = x1 - x0;
            let c2 = x0 - c0;
            let c3 = x2 - c0;
            let c4 = x0 - x3 - x1 + x4;
            let c5 = c0 - x2 - x0 + x3;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            let s3 = s2.mla(c4, T::from(dg * db));
            s3.mla(c5, T::from(dr * dg))
        };
        // Lerp between the two slabs along w: w0 * (1 - dw) + w1 * dw.
        w0.neg_mla(w0, T::from(dw)).mla(w1, T::from(dw))
    }
616
617 #[cfg(feature = "options")]
618 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
619 #[inline]
620 pub fn prism_vec3(&self, lin_x: f32, lin_y: f32, lin_z: f32, lin_w: f32) -> Vector3f {
621 self.prism(
622 lin_x,
623 lin_y,
624 lin_z,
625 lin_w,
626 Fetch4Vec3 {
627 array: self.array,
628 x_stride: self.x_stride,
629 y_stride: self.y_stride,
630 z_stride: self.z_stride,
631 },
632 )
633 }
634
635 #[cfg(feature = "options")]
636 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
637 #[inline]
638 pub fn prism_vec4(&self, lin_x: f32, lin_y: f32, lin_z: f32, lin_w: f32) -> Vector4f {
639 self.prism(
640 lin_x,
641 lin_y,
642 lin_z,
643 lin_w,
644 Fetch4Vec4 {
645 array: self.array,
646 x_stride: self.x_stride,
647 y_stride: self.y_stride,
648 z_stride: self.z_stride,
649 },
650 )
651 }
652
    /// Tetrahedral interpolation at normalized coordinates `lin_x..lin_w`.
    ///
    /// Within each w slab the cube cell is split into six tetrahedra chosen
    /// by the ordering of the fractional offsets (rx, ry, rz); the selected
    /// tetrahedron is evaluated with three fused multiply-adds. The two
    /// slab results (at w and w_n) are then linearly interpolated along w.
    #[cfg(feature = "options")]
    #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
    #[inline(always)]
    fn tetra<
        T: From<f32>
            + Add<T, Output = T>
            + Mul<T, Output = T>
            + FusedMultiplyAdd<T>
            + Sub<T, Output = T>
            + Copy
            + FusedMultiplyNegAdd<T>,
    >(
        &self,
        lin_x: f32,
        lin_y: f32,
        lin_z: f32,
        lin_w: f32,
        r: impl Fetcher4<T>,
    ) -> T {
        let lin_x = lin_x.max(0.0);
        let lin_y = lin_y.max(0.0);
        let lin_z = lin_z.max(0.0);
        let lin_w = lin_w.max(0.0);

        // Scale from normalized [0, 1] input to grid-cell coordinates.
        let scale_x = (self.grid_size[0] as i32 - 1) as f32;
        let scale_y = (self.grid_size[1] as i32 - 1) as f32;
        let scale_z = (self.grid_size[2] as i32 - 1) as f32;
        let scale_w = (self.grid_size[3] as i32 - 1) as f32;

        // Lower and upper cell corners, clamped to the last grid point.
        let x = (lin_x * scale_x).floor().min(scale_x) as i32;
        let y = (lin_y * scale_y).floor().min(scale_y) as i32;
        let z = (lin_z * scale_z).floor().min(scale_z) as i32;
        let w = (lin_w * scale_w).floor().min(scale_w) as i32;

        let x_n = (lin_x * scale_x).ceil().min(scale_x) as i32;
        let y_n = (lin_y * scale_y).ceil().min(scale_y) as i32;
        let z_n = (lin_z * scale_z).ceil().min(scale_z) as i32;
        let w_n = (lin_w * scale_w).ceil().min(scale_w) as i32;

        // Fractional offsets inside the cell.
        let rx = lin_x * scale_x - x as f32;
        let ry = lin_y * scale_y - y as f32;
        let rz = lin_z * scale_z - z as f32;
        let rw = lin_w * scale_w - w as f32;

        // Tetrahedron selection and evaluation inside the w slab.
        let c0 = r.fetch(x, y, z, w);
        let c2;
        let c1;
        let c3;
        if rx >= ry {
            if ry >= rz {
                // rx >= ry >= rz
                c1 = r.fetch(x_n, y, z, w) - c0;
                c2 = r.fetch(x_n, y_n, z, w) - r.fetch(x_n, y, z, w);
                c3 = r.fetch(x_n, y_n, z_n, w) - r.fetch(x_n, y_n, z, w);
            } else if rx >= rz {
                // rx >= rz > ry
                c1 = r.fetch(x_n, y, z, w) - c0;
                c2 = r.fetch(x_n, y_n, z_n, w) - r.fetch(x_n, y, z_n, w);
                c3 = r.fetch(x_n, y, z_n, w) - r.fetch(x_n, y, z, w);
            } else {
                // rz > rx >= ry
                c1 = r.fetch(x_n, y, z_n, w) - r.fetch(x, y, z_n, w);
                c2 = r.fetch(x_n, y_n, z_n, w) - r.fetch(x_n, y, z_n, w);
                c3 = r.fetch(x, y, z_n, w) - c0;
            }
        } else if rx >= rz {
            // ry > rx >= rz
            c1 = r.fetch(x_n, y_n, z, w) - r.fetch(x, y_n, z, w);
            c2 = r.fetch(x, y_n, z, w) - c0;
            c3 = r.fetch(x_n, y_n, z_n, w) - r.fetch(x_n, y_n, z, w);
        } else if ry >= rz {
            // ry >= rz > rx
            c1 = r.fetch(x_n, y_n, z_n, w) - r.fetch(x, y_n, z_n, w);
            c2 = r.fetch(x, y_n, z, w) - c0;
            c3 = r.fetch(x, y_n, z_n, w) - r.fetch(x, y_n, z, w);
        } else {
            // rz > ry > rx
            c1 = r.fetch(x_n, y_n, z_n, w) - r.fetch(x, y_n, z_n, w);
            c2 = r.fetch(x, y_n, z_n, w) - r.fetch(x, y, z_n, w);
            c3 = r.fetch(x, y, z_n, w) - c0;
        }
        let s0 = c0.mla(c1, T::from(rx));
        let s1 = s0.mla(c2, T::from(ry));
        let w0 = s1.mla(c3, T::from(rz));

        // Identical selection and evaluation inside the w_n slab.
        let c0 = r.fetch(x, y, z, w_n);
        let c2;
        let c1;
        let c3;
        if rx >= ry {
            if ry >= rz {
                c1 = r.fetch(x_n, y, z, w_n) - c0;
                c2 = r.fetch(x_n, y_n, z, w_n) - r.fetch(x_n, y, z, w_n);
                c3 = r.fetch(x_n, y_n, z_n, w_n) - r.fetch(x_n, y_n, z, w_n);
            } else if rx >= rz {
                c1 = r.fetch(x_n, y, z, w_n) - c0;
                c2 = r.fetch(x_n, y_n, z_n, w_n) - r.fetch(x_n, y, z_n, w_n);
                c3 = r.fetch(x_n, y, z_n, w_n) - r.fetch(x_n, y, z, w_n);
            } else {
                c1 = r.fetch(x_n, y, z_n, w_n) - r.fetch(x, y, z_n, w_n);
                c2 = r.fetch(x_n, y_n, z_n, w_n) - r.fetch(x_n, y, z_n, w_n);
                c3 = r.fetch(x, y, z_n, w_n) - c0;
            }
        } else if rx >= rz {
            c1 = r.fetch(x_n, y_n, z, w_n) - r.fetch(x, y_n, z, w_n);
            c2 = r.fetch(x, y_n, z, w_n) - c0;
            c3 = r.fetch(x_n, y_n, z_n, w_n) - r.fetch(x_n, y_n, z, w_n);
        } else if ry >= rz {
            c1 = r.fetch(x_n, y_n, z_n, w_n) - r.fetch(x, y_n, z_n, w_n);
            c2 = r.fetch(x, y_n, z, w_n) - c0;
            c3 = r.fetch(x, y_n, z_n, w_n) - r.fetch(x, y_n, z, w_n);
        } else {
            c1 = r.fetch(x_n, y_n, z_n, w_n) - r.fetch(x, y_n, z_n, w_n);
            c2 = r.fetch(x, y_n, z_n, w_n) - r.fetch(x, y, z_n, w_n);
            c3 = r.fetch(x, y, z_n, w_n) - c0;
        }
        let s0 = c0.mla(c1, T::from(rx));
        let s1 = s0.mla(c2, T::from(ry));
        let w1 = s1.mla(c3, T::from(rz));
        // Lerp between the two slabs along w: w0 * (1 - rw) + w1 * rw.
        w0.neg_mla(w0, T::from(rw)).mla(w1, T::from(rw))
    }
780
781 #[cfg(feature = "options")]
782 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
783 #[inline]
784 pub fn tetra_vec3(&self, lin_x: f32, lin_y: f32, lin_z: f32, lin_w: f32) -> Vector3f {
785 self.tetra(
786 lin_x,
787 lin_y,
788 lin_z,
789 lin_w,
790 Fetch4Vec3 {
791 array: self.array,
792 x_stride: self.x_stride,
793 y_stride: self.y_stride,
794 z_stride: self.z_stride,
795 },
796 )
797 }
798
799 #[cfg(feature = "options")]
800 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
801 #[inline]
802 pub fn tetra_vec4(&self, lin_x: f32, lin_y: f32, lin_z: f32, lin_w: f32) -> Vector4f {
803 self.tetra(
804 lin_x,
805 lin_y,
806 lin_z,
807 lin_w,
808 Fetch4Vec4 {
809 array: self.array,
810 x_stride: self.x_stride,
811 y_stride: self.y_stride,
812 z_stride: self.z_stride,
813 },
814 )
815 }
816}
817
/// View over a flattened 3-D colour lookup table (CLUT).
///
/// The grid is stored row-major as `[x][y][z]` with `channels` `f32`
/// values per node; the z axis is contiguous (implicit stride 1).
pub struct Cube<'a> {
    // Flattened grid data.
    array: &'a [f32],
    // Node stride (in nodes, not f32 elements) along the x axis.
    x_stride: u32,
    // Node stride along the y axis.
    y_stride: u32,
    // Grid points per dimension, ordered [x, y, z].
    grid_size: [u8; 3],
}
827
/// Fetches one CLUT node at integer lattice coordinates `(x, y, z)`.
pub(crate) trait ArrayFetch<T> {
    fn fetch(&self, x: i32, y: i32, z: i32) -> T;
}
831
/// [`ArrayFetch`] backend reading 3-channel nodes out of a flat CLUT slice.
struct ArrayFetchVector3f<'a> {
    // Flattened grid data, 3 f32 values per node.
    array: &'a [f32],
    // Node strides mirrored from the owning Cube (z stride is 1).
    x_stride: u32,
    y_stride: u32,
}
837
838impl ArrayFetch<Vector3f> for ArrayFetchVector3f<'_> {
839 #[inline(always)]
840 fn fetch(&self, x: i32, y: i32, z: i32) -> Vector3f {
841 let start = (x as u32 * self.x_stride + y as u32 * self.y_stride + z as u32) as usize * 3;
842 let k = &self.array[start..start + 3];
843 Vector3f {
844 v: [k[0], k[1], k[2]],
845 }
846 }
847}
848
/// [`ArrayFetch`] backend reading 4-channel nodes out of a flat CLUT slice.
struct ArrayFetchVector4f<'a> {
    // Flattened grid data, 4 f32 values per node.
    array: &'a [f32],
    // Node strides mirrored from the owning Cube (z stride is 1).
    x_stride: u32,
    y_stride: u32,
}
854
855impl ArrayFetch<Vector4f> for ArrayFetchVector4f<'_> {
856 #[inline(always)]
857 fn fetch(&self, x: i32, y: i32, z: i32) -> Vector4f {
858 let start = (x as u32 * self.x_stride + y as u32 * self.y_stride + z as u32) as usize * 4;
859 let k = &self.array[start..start + 4];
860 Vector4f {
861 v: [k[0], k[1], k[2], k[3]],
862 }
863 }
864}
865
866impl Cube<'_> {
867 pub fn new(array: &[f32], grid_size: usize, channels: usize) -> Result<Cube<'_>, CmsError> {
868 if array.is_empty() || grid_size == 0 {
869 return Ok(Cube {
870 array,
871 x_stride: 0,
872 y_stride: 0,
873 grid_size: [0, 0, 0],
874 });
875 }
876 let y_stride = grid_size;
877 let x_stride = y_stride * y_stride;
878
879 let last_index = (grid_size - 1)
880 .safe_mul(x_stride)?
881 .safe_add((grid_size - 1).safe_mul(y_stride)?)?
882 .safe_add(grid_size - 1)?
883 .safe_mul(channels)?;
884
885 if last_index >= array.len() {
886 return Err(CmsError::MalformedClut(MalformedSize {
887 size: array.len(),
888 expected: last_index,
889 }));
890 }
891
892 Ok(Cube {
893 array,
894 x_stride: x_stride as u32,
895 y_stride: y_stride as u32,
896 grid_size: [grid_size as u8, grid_size as u8, grid_size as u8],
897 })
898 }
899
900 pub fn new_cube(
901 array: &[f32],
902 grid_size: [u8; 3],
903 channels: usize,
904 ) -> Result<Cube<'_>, CmsError> {
905 if array.is_empty() || grid_size[0] == 0 || grid_size[1] == 0 || grid_size[2] == 0 {
906 return Ok(Cube {
907 array,
908 x_stride: 0,
909 y_stride: 0,
910 grid_size,
911 });
912 }
913 let y_stride = grid_size[2] as u32;
914 let x_stride = y_stride * grid_size[1] as u32;
915 let last_index = (grid_size[0] as usize - 1)
916 .safe_mul(x_stride as usize)?
917 .safe_add((grid_size[1] as usize - 1).safe_mul(y_stride as usize)?)?
918 .safe_add(grid_size[2] as usize - 1)?
919 .safe_mul(channels)?;
920
921 if last_index >= array.len() {
922 return Err(CmsError::MalformedClut(MalformedSize {
923 size: array.len(),
924 expected: last_index,
925 }));
926 }
927
928 Ok(Cube {
929 array,
930 x_stride,
931 y_stride,
932 grid_size,
933 })
934 }
935
    /// Trilinear interpolation at normalized coordinates `lin_x..lin_z`:
    /// lerp along x, then y, then z between the 8 surrounding grid nodes,
    /// each lerp expressed as `c * (1 - d) + c_next * d` via fused
    /// multiply-adds.
    #[inline(always)]
    fn trilinear<
        T: Copy
            + From<f32>
            + Sub<T, Output = T>
            + Mul<T, Output = T>
            + Add<T, Output = T>
            + FusedMultiplyNegAdd<T>
            + FusedMultiplyAdd<T>,
    >(
        &self,
        lin_x: f32,
        lin_y: f32,
        lin_z: f32,
        fetch: impl ArrayFetch<T>,
    ) -> T {
        // Clamp negatives; the `.min(scale)` below clamps the upper side.
        let lin_x = lin_x.max(0.0);
        let lin_y = lin_y.max(0.0);
        let lin_z = lin_z.max(0.0);

        // Scale from normalized [0, 1] input to grid-cell coordinates.
        let scale_x = (self.grid_size[0] as i32 - 1) as f32;
        let scale_y = (self.grid_size[1] as i32 - 1) as f32;
        let scale_z = (self.grid_size[2] as i32 - 1) as f32;

        // Lower and upper cell corners, clamped to the last grid point.
        let x = (lin_x * scale_x).floor().min(scale_x) as i32;
        let y = (lin_y * scale_y).floor().min(scale_y) as i32;
        let z = (lin_z * scale_z).floor().min(scale_z) as i32;

        let x_n = (lin_x * scale_x).ceil().min(scale_x) as i32;
        let y_n = (lin_y * scale_y).ceil().min(scale_y) as i32;
        let z_n = (lin_z * scale_z).ceil().min(scale_z) as i32;

        // Fractional position inside the cell along each axis.
        let x_d = T::from(lin_x * scale_x - x as f32);
        let y_d = T::from(lin_y * scale_y - y as f32);
        let z_d = T::from(lin_z * scale_z - z as f32);

        // The 8 corners of the cell (cXYZ = corner at offset X, Y, Z).
        let c000 = fetch.fetch(x, y, z);
        let c100 = fetch.fetch(x_n, y, z);
        let c010 = fetch.fetch(x, y_n, z);
        let c110 = fetch.fetch(x_n, y_n, z);
        let c001 = fetch.fetch(x, y, z_n);
        let c101 = fetch.fetch(x_n, y, z_n);
        let c011 = fetch.fetch(x, y_n, z_n);
        let c111 = fetch.fetch(x_n, y_n, z_n);

        // Collapse x: each pair lerped as c * (1 - x_d) + c_next * x_d.
        let c00 = c000.neg_mla(c000, x_d).mla(c100, x_d);
        let c10 = c010.neg_mla(c010, x_d).mla(c110, x_d);
        let c01 = c001.neg_mla(c001, x_d).mla(c101, x_d);
        let c11 = c011.neg_mla(c011, x_d).mla(c111, x_d);

        // Collapse y.
        let c0 = c00.neg_mla(c00, y_d).mla(c10, y_d);
        let c1 = c01.neg_mla(c01, y_d).mla(c11, y_d);

        // Collapse z.
        c0.neg_mla(c0, z_d).mla(c1, z_d)
    }
991
    /// Pyramidal interpolation at normalized coordinates `lin_x..lin_z`:
    /// the cell is split into three pyramids chosen by comparing the
    /// fractional offsets (dr, dg, db); the selected pyramid is evaluated
    /// as a chain of fused multiply-adds.
    #[cfg(feature = "options")]
    #[inline]
    fn pyramid<
        T: Copy
            + From<f32>
            + Sub<T, Output = T>
            + Mul<T, Output = T>
            + Add<T, Output = T>
            + FusedMultiplyAdd<T>,
    >(
        &self,
        lin_x: f32,
        lin_y: f32,
        lin_z: f32,
        fetch: impl ArrayFetch<T>,
    ) -> T {
        let lin_x = lin_x.max(0.0);
        let lin_y = lin_y.max(0.0);
        let lin_z = lin_z.max(0.0);

        // Scale from normalized [0, 1] input to grid-cell coordinates.
        let scale_x = (self.grid_size[0] as i32 - 1) as f32;
        let scale_y = (self.grid_size[1] as i32 - 1) as f32;
        let scale_z = (self.grid_size[2] as i32 - 1) as f32;

        // Lower and upper cell corners, clamped to the last grid point.
        let x = (lin_x * scale_x).floor().min(scale_x) as i32;
        let y = (lin_y * scale_y).floor().min(scale_y) as i32;
        let z = (lin_z * scale_z).floor().min(scale_z) as i32;

        let x_n = (lin_x * scale_x).ceil().min(scale_x) as i32;
        let y_n = (lin_y * scale_y).ceil().min(scale_y) as i32;
        let z_n = (lin_z * scale_z).ceil().min(scale_z) as i32;

        // Fractional offsets inside the cell.
        let dr = lin_x * scale_x - x as f32;
        let dg = lin_y * scale_y - y as f32;
        let db = lin_z * scale_z - z as f32;

        let c0 = fetch.fetch(x, y, z);

        if dr > db && dg > db {
            let x0 = fetch.fetch(x_n, y_n, z_n);
            let x1 = fetch.fetch(x_n, y_n, z);
            let x2 = fetch.fetch(x_n, y, z);
            let x3 = fetch.fetch(x, y_n, z);

            let c1 = x0 - x1;
            let c2 = x2 - c0;
            let c3 = x3 - c0;
            let c4 = c0 - x3 - x2 + x1;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(dr * dg))
        } else if db > dr && dg > dr {
            let x0 = fetch.fetch(x, y, z_n);
            let x1 = fetch.fetch(x_n, y_n, z_n);
            let x2 = fetch.fetch(x, y_n, z_n);
            let x3 = fetch.fetch(x, y_n, z);

            let c1 = x0 - c0;
            let c2 = x1 - x2;
            let c3 = x3 - c0;
            let c4 = c0 - x3 - x0 + x2;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(dg * db))
        } else {
            let x0 = fetch.fetch(x, y, z_n);
            let x1 = fetch.fetch(x_n, y, z);
            let x2 = fetch.fetch(x_n, y, z_n);
            let x3 = fetch.fetch(x_n, y_n, z_n);

            let c1 = x0 - c0;
            let c2 = x1 - c0;
            let c3 = x3 - x2;
            let c4 = c0 - x1 - x0 + x2;

            let s0 = c0.mla(c1, T::from(db));
            let s1 = s0.mla(c2, T::from(dr));
            let s2 = s1.mla(c3, T::from(dg));
            s2.mla(c4, T::from(db * dr))
        }
    }
1077
1078 #[cfg(feature = "options")]
1079 #[inline]
1080 fn tetra<
1081 T: Copy
1082 + From<f32>
1083 + Sub<T, Output = T>
1084 + Mul<T, Output = T>
1085 + Add<T, Output = T>
1086 + FusedMultiplyAdd<T>,
1087 >(
1088 &self,
1089 lin_x: f32,
1090 lin_y: f32,
1091 lin_z: f32,
1092 fetch: impl ArrayFetch<T>,
1093 ) -> T {
1094 let lin_x = lin_x.max(0.0);
1095 let lin_y = lin_y.max(0.0);
1096 let lin_z = lin_z.max(0.0);
1097
1098 let scale_x = (self.grid_size[0] as i32 - 1) as f32;
1099 let scale_y = (self.grid_size[1] as i32 - 1) as f32;
1100 let scale_z = (self.grid_size[2] as i32 - 1) as f32;
1101
1102 let x = (lin_x * scale_x).floor().min(scale_x) as i32;
1103 let y = (lin_y * scale_y).floor().min(scale_y) as i32;
1104 let z = (lin_z * scale_z).floor().min(scale_z) as i32;
1105
1106 let x_n = (lin_x * scale_x).ceil().min(scale_x) as i32;
1107 let y_n = (lin_y * scale_y).ceil().min(scale_y) as i32;
1108 let z_n = (lin_z * scale_z).ceil().min(scale_z) as i32;
1109
1110 let rx = lin_x * scale_x - x as f32;
1111 let ry = lin_y * scale_y - y as f32;
1112 let rz = lin_z * scale_z - z as f32;
1113
1114 let c0 = fetch.fetch(x, y, z);
1115 let c2;
1116 let c1;
1117 let c3;
1118 if rx >= ry {
1119 if ry >= rz {
1120 c1 = fetch.fetch(x_n, y, z) - c0;
1122 c2 = fetch.fetch(x_n, y_n, z) - fetch.fetch(x_n, y, z);
1123 c3 = fetch.fetch(x_n, y_n, z_n) - fetch.fetch(x_n, y_n, z);
1124 } else if rx >= rz {
1125 c1 = fetch.fetch(x_n, y, z) - c0;
1127 c2 = fetch.fetch(x_n, y_n, z_n) - fetch.fetch(x_n, y, z_n);
1128 c3 = fetch.fetch(x_n, y, z_n) - fetch.fetch(x_n, y, z);
1129 } else {
1130 c1 = fetch.fetch(x_n, y, z_n) - fetch.fetch(x, y, z_n);
1132 c2 = fetch.fetch(x_n, y_n, z_n) - fetch.fetch(x_n, y, z_n);
1133 c3 = fetch.fetch(x, y, z_n) - c0;
1134 }
1135 } else if rx >= rz {
1136 c1 = fetch.fetch(x_n, y_n, z) - fetch.fetch(x, y_n, z);
1138 c2 = fetch.fetch(x, y_n, z) - c0;
1139 c3 = fetch.fetch(x_n, y_n, z_n) - fetch.fetch(x_n, y_n, z);
1140 } else if ry >= rz {
1141 c1 = fetch.fetch(x_n, y_n, z_n) - fetch.fetch(x, y_n, z_n);
1143 c2 = fetch.fetch(x, y_n, z) - c0;
1144 c3 = fetch.fetch(x, y_n, z_n) - fetch.fetch(x, y_n, z);
1145 } else {
1146 c1 = fetch.fetch(x_n, y_n, z_n) - fetch.fetch(x, y_n, z_n);
1148 c2 = fetch.fetch(x, y_n, z_n) - fetch.fetch(x, y, z_n);
1149 c3 = fetch.fetch(x, y, z_n) - c0;
1150 }
1151 let s0 = c0.mla(c1, T::from(rx));
1152 let s1 = s0.mla(c2, T::from(ry));
1153 s1.mla(c3, T::from(rz))
1154 }
1155
1156 #[cfg(feature = "options")]
1157 #[inline]
1158 fn prism<
1159 T: Copy
1160 + From<f32>
1161 + Sub<T, Output = T>
1162 + Mul<T, Output = T>
1163 + Add<T, Output = T>
1164 + FusedMultiplyAdd<T>,
1165 >(
1166 &self,
1167 lin_x: f32,
1168 lin_y: f32,
1169 lin_z: f32,
1170 fetch: impl ArrayFetch<T>,
1171 ) -> T {
1172 let lin_x = lin_x.max(0.0);
1173 let lin_y = lin_y.max(0.0);
1174 let lin_z = lin_z.max(0.0);
1175
1176 let scale_x = (self.grid_size[0] as i32 - 1) as f32;
1177 let scale_y = (self.grid_size[1] as i32 - 1) as f32;
1178 let scale_z = (self.grid_size[2] as i32 - 1) as f32;
1179
1180 let x = (lin_x * scale_x).floor().min(scale_x) as i32;
1181 let y = (lin_y * scale_y).floor().min(scale_y) as i32;
1182 let z = (lin_z * scale_z).floor().min(scale_z) as i32;
1183
1184 let x_n = (lin_x * scale_x).ceil().min(scale_x) as i32;
1185 let y_n = (lin_y * scale_y).ceil().min(scale_y) as i32;
1186 let z_n = (lin_z * scale_z).ceil().min(scale_z) as i32;
1187
1188 let dr = lin_x * scale_x - x as f32;
1189 let dg = lin_y * scale_y - y as f32;
1190 let db = lin_z * scale_z - z as f32;
1191
1192 let c0 = fetch.fetch(x, y, z);
1193
1194 if db >= dr {
1195 let x0 = fetch.fetch(x, y, z_n);
1196 let x1 = fetch.fetch(x_n, y, z_n);
1197 let x2 = fetch.fetch(x, y_n, z);
1198 let x3 = fetch.fetch(x, y_n, z_n);
1199 let x4 = fetch.fetch(x_n, y_n, z_n);
1200
1201 let c1 = x0 - c0;
1202 let c2 = x1 - x0;
1203 let c3 = x2 - c0;
1204 let c4 = c0 - x2 - x0 + x3;
1205 let c5 = x0 - x3 - x1 + x4;
1206
1207 let s0 = c0.mla(c1, T::from(db));
1208 let s1 = s0.mla(c2, T::from(dr));
1209 let s2 = s1.mla(c3, T::from(dg));
1210 let s3 = s2.mla(c4, T::from(dg * db));
1211 s3.mla(c5, T::from(dr * dg))
1212 } else {
1213 let x0 = fetch.fetch(x_n, y, z);
1214 let x1 = fetch.fetch(x_n, y, z_n);
1215 let x2 = fetch.fetch(x, y_n, z);
1216 let x3 = fetch.fetch(x_n, y_n, z);
1217 let x4 = fetch.fetch(x_n, y_n, z_n);
1218
1219 let c1 = x1 - x0;
1220 let c2 = x0 - c0;
1221 let c3 = x2 - c0;
1222 let c4 = x0 - x3 - x1 + x4;
1223 let c5 = c0 - x2 - x0 + x3;
1224
1225 let s0 = c0.mla(c1, T::from(db));
1226 let s1 = s0.mla(c2, T::from(dr));
1227 let s2 = s1.mla(c3, T::from(dg));
1228 let s3 = s2.mla(c4, T::from(dg * db));
1229 s3.mla(c5, T::from(dr * dg))
1230 }
1231 }
1232
1233 pub fn trilinear_vec3(&self, lin_x: f32, lin_y: f32, lin_z: f32) -> Vector3f {
1234 self.trilinear(
1235 lin_x,
1236 lin_y,
1237 lin_z,
1238 ArrayFetchVector3f {
1239 array: self.array,
1240 x_stride: self.x_stride,
1241 y_stride: self.y_stride,
1242 },
1243 )
1244 }
1245
1246 #[cfg(feature = "options")]
1247 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
1248 pub fn prism_vec3(&self, lin_x: f32, lin_y: f32, lin_z: f32) -> Vector3f {
1249 self.prism(
1250 lin_x,
1251 lin_y,
1252 lin_z,
1253 ArrayFetchVector3f {
1254 array: self.array,
1255 x_stride: self.x_stride,
1256 y_stride: self.y_stride,
1257 },
1258 )
1259 }
1260
1261 #[cfg(feature = "options")]
1262 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
1263 pub fn pyramid_vec3(&self, lin_x: f32, lin_y: f32, lin_z: f32) -> Vector3f {
1264 self.pyramid(
1265 lin_x,
1266 lin_y,
1267 lin_z,
1268 ArrayFetchVector3f {
1269 array: self.array,
1270 x_stride: self.x_stride,
1271 y_stride: self.y_stride,
1272 },
1273 )
1274 }
1275
1276 #[cfg(feature = "options")]
1277 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
1278 pub fn tetra_vec3(&self, lin_x: f32, lin_y: f32, lin_z: f32) -> Vector3f {
1279 self.tetra(
1280 lin_x,
1281 lin_y,
1282 lin_z,
1283 ArrayFetchVector3f {
1284 array: self.array,
1285 x_stride: self.x_stride,
1286 y_stride: self.y_stride,
1287 },
1288 )
1289 }
1290
1291 pub fn trilinear_vec4(&self, lin_x: f32, lin_y: f32, lin_z: f32) -> Vector4f {
1292 self.trilinear(
1293 lin_x,
1294 lin_y,
1295 lin_z,
1296 ArrayFetchVector4f {
1297 array: self.array,
1298 x_stride: self.x_stride,
1299 y_stride: self.y_stride,
1300 },
1301 )
1302 }
1303
1304 #[cfg(feature = "options")]
1305 pub fn tetra_vec4(&self, lin_x: f32, lin_y: f32, lin_z: f32) -> Vector4f {
1306 self.tetra(
1307 lin_x,
1308 lin_y,
1309 lin_z,
1310 ArrayFetchVector4f {
1311 array: self.array,
1312 x_stride: self.x_stride,
1313 y_stride: self.y_stride,
1314 },
1315 )
1316 }
1317
1318 #[cfg(feature = "options")]
1319 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
1320 pub fn pyramid_vec4(&self, lin_x: f32, lin_y: f32, lin_z: f32) -> Vector4f {
1321 self.pyramid(
1322 lin_x,
1323 lin_y,
1324 lin_z,
1325 ArrayFetchVector4f {
1326 array: self.array,
1327 x_stride: self.x_stride,
1328 y_stride: self.y_stride,
1329 },
1330 )
1331 }
1332
1333 #[cfg(feature = "options")]
1334 #[cfg_attr(docsrs, doc(cfg(feature = "options")))]
1335 pub fn prism_vec4(&self, lin_x: f32, lin_y: f32, lin_z: f32) -> Vector4f {
1336 self.prism(
1337 lin_x,
1338 lin_y,
1339 lin_z,
1340 ArrayFetchVector4f {
1341 array: self.array,
1342 x_stride: self.x_stride,
1343 y_stride: self.y_stride,
1344 },
1345 )
1346 }
1347}