zune_jpeg/decoder.rs
/*
 * Copyright (c) 2023.
 *
 * This software is free software;
 *
 * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license
 */

//! Main image logic.
#![allow(clippy::doc_markdown)]

use alloc::string::ToString;
use alloc::vec::Vec;
use alloc::{format, vec};

use zune_core::bytestream::{ZByteReader, ZReaderTrait};
use zune_core::colorspace::ColorSpace;
use zune_core::log::{error, trace, warn};
use zune_core::options::DecoderOptions;

use crate::color_convert::choose_ycbcr_to_rgb_convert_func;
use crate::components::{Components, SampleRatios};
use crate::errors::{DecodeErrors, UnsupportedSchemes};
use crate::headers::{
    parse_app1, parse_app13, parse_app14, parse_app2, parse_dqt, parse_huffman, parse_sos,
    parse_start_of_frame
};
use crate::huffman::HuffmanTable;
use crate::idct::choose_idct_func;
use crate::marker::Marker;
use crate::misc::SOFMarkers;
use crate::upsampler::{
    choose_horizontal_samp_function, choose_hv_samp_function, choose_v_samp_function,
    generic_sampler, upsample_no_op
};

/// Maximum components
pub(crate) const MAX_COMPONENTS: usize = 4;

/// Maximum image dimensions supported.
pub(crate) const MAX_DIMENSIONS: usize = 1 << 27;

/// Color conversion function that can convert YCbCr colorspace to RGB(A/X) for
/// 16 values
///
/// The following guarantees are made to functions of this type:
///
/// 1. The `&[i16]` slices passed contain 16 items
///
/// 2. The slices are passed in the following order:
///    `y, cb, cr`
///
/// 3. The `&mut [u8]` output slice is zero initialized
///
/// 4. The `&mut usize` points to the position in the output where new values
///    should be written
///
/// The function should
/// 1. Carry out color conversion
/// 2. Update `&mut usize` with the new position
pub type ColorConvert16Ptr = fn(&[i16; 16], &[i16; 16], &[i16; 16], &mut [u8], &mut usize);

/// IDCT function prototype
///
/// This encapsulates a dequantize and IDCT function which will carry out the
/// following operations:
///
/// 1. Multiply each 64-element coefficient block with its quantization table
/// 2. Carry out the IDCT (type 3 DCT) on each block of 64 coefficients
pub type IDCTPtr = fn(&mut [i32; 64], &mut [i16], usize);

/// An encapsulation of an ICC chunk
pub(crate) struct ICCChunk {
    pub(crate) seq_no: u8,
    pub(crate) num_markers: u8,
    pub(crate) data: Vec<u8>
}

/// A JPEG Decoder Instance.
#[allow(clippy::upper_case_acronyms, clippy::struct_excessive_bools)]
pub struct JpegDecoder<T: ZReaderTrait> {
    /// Struct to hold image information parsed from the headers
    pub(crate) info: ImageInfo,
    /// Quantization tables; set to `None` once the tables
    /// have been moved to the `components` field
    pub(crate) qt_tables: [Option<[i32; 64]>; MAX_COMPONENTS],
    /// DC Huffman Tables with a maximum of 4 tables for each component
    pub(crate) dc_huffman_tables: [Option<HuffmanTable>; MAX_COMPONENTS],
    /// AC Huffman Tables with a maximum of 4 tables for each component
    pub(crate) ac_huffman_tables: [Option<HuffmanTable>; MAX_COMPONENTS],
    /// Image components, holds information like DC prediction and quantization
    /// tables of a component
    pub(crate) components: Vec<Components>,
    /// Maximum horizontal sampling factor of all channels in the image
    pub(crate) h_max: usize,
    /// Maximum vertical sampling factor of all channels in the image
    pub(crate) v_max: usize,
    /// MCU width (interleaved scans)
    pub(crate) mcu_width: usize,
    /// MCU height (interleaved scans)
    pub(crate) mcu_height: usize,
    /// Number of MCU's in the x plane
    pub(crate) mcu_x: usize,
    /// Number of MCU's in the y plane
    pub(crate) mcu_y: usize,
    /// Is the image interleaved?
    pub(crate) is_interleaved: bool,
    pub(crate) sub_sample_ratio: SampleRatios,
    /// Image input colorspace, should be YCbCr for a sane image, might be
    /// grayscale too
    pub(crate) input_colorspace: ColorSpace,
    // Progressive image details
    /// Is the image progressive?
    pub(crate) is_progressive: bool,

    /// Start of spectral scan
    pub(crate) spec_start: u8,
    /// End of spectral scan
    pub(crate) spec_end: u8,
    /// Successive approximation bit position high
    pub(crate) succ_high: u8,
    /// Successive approximation bit position low
    pub(crate) succ_low: u8,
    /// Number of components in the current scan
    pub(crate) num_scans: u8,
    // Function pointers, for pointy stuff.
    /// Dequantize and IDCT function
    // Which function to run is determined at runtime; statically it's
    // initialized to a platform independent one and during initialization
    // of this struct, we check if we can switch to a faster one which
    // depends on certain CPU extensions.
    pub(crate) idct_func: IDCTPtr,
    // Color convert function which acts on 16 YCbCr values
    pub(crate) color_convert_16: ColorConvert16Ptr,
    pub(crate) z_order: [usize; MAX_COMPONENTS],
    /// Restart interval, read from the DRI marker
    pub(crate) restart_interval: usize,
    pub(crate) todo: usize,
    // decoder options
    pub(crate) options: DecoderOptions,
    // byte-stream
    pub(crate) stream: ZByteReader<T>,
    // Indicates whether headers have been decoded
    pub(crate) headers_decoded: bool,
    pub(crate) seen_sof: bool,
    // exif data, lifted from app1
    pub(crate) exif_data: Option<Vec<u8>>,

    pub(crate) icc_data: Vec<ICCChunk>,
    pub(crate) is_mjpeg: bool,
    pub(crate) coeff: usize // Solves some weird bug :)
}

impl<T> JpegDecoder<T>
where
    T: ZReaderTrait
{
    #[allow(clippy::redundant_field_names)]
    fn default(options: DecoderOptions, buffer: T) -> Self {
        let color_convert = choose_ycbcr_to_rgb_convert_func(ColorSpace::RGB, &options).unwrap();
        JpegDecoder {
            info: ImageInfo::default(),
            qt_tables: [None, None, None, None],
            dc_huffman_tables: [None, None, None, None],
            ac_huffman_tables: [None, None, None, None],
            components: vec![],
            // Interleaved information
            h_max: 1,
            v_max: 1,
            mcu_height: 0,
            mcu_width: 0,
            mcu_x: 0,
            mcu_y: 0,
            is_interleaved: false,
            sub_sample_ratio: SampleRatios::None,
            is_progressive: false,
            spec_start: 0,
            spec_end: 0,
            succ_high: 0,
            succ_low: 0,
            num_scans: 0,
            idct_func: choose_idct_func(&options),
            color_convert_16: color_convert,
            input_colorspace: ColorSpace::YCbCr,
            z_order: [0; MAX_COMPONENTS],
            restart_interval: 0,
            todo: 0x7fff_ffff,
            options: options,
            stream: ZByteReader::new(buffer),
            headers_decoded: false,
            seen_sof: false,
            exif_data: None,
            icc_data: vec![],
            is_mjpeg: false,
            coeff: 1
        }
    }
    /// Decode a buffer already in memory
    ///
    /// The buffer should be a valid jpeg file, perhaps created by the command
    /// `std::fs::read()` or a JPEG file downloaded from the internet.
    ///
    /// # Errors
    /// See [`DecodeErrors`] for an explanation
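    ///
    /// # Example
    /// A minimal sketch of the usual flow; the file path is a placeholder:
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("image.jpg").unwrap();
    /// let mut decoder = JpegDecoder::new(&data);
    /// // pixels are returned in the output colorspace set in the decoder options
    /// let pixels = decoder.decode().unwrap();
    /// ```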
    pub fn decode(&mut self) -> Result<Vec<u8>, DecodeErrors> {
        self.decode_headers()?;
        let size = self.output_buffer_size().unwrap();
        let mut out = vec![0; size];
        self.decode_into(&mut out)?;
        Ok(out)
    }

    /// Create a new Decoder instance
    ///
    /// # Arguments
    /// - `stream`: The raw bytes of a jpeg file.
    #[must_use]
    #[allow(clippy::new_without_default)]
    pub fn new(stream: T) -> JpegDecoder<T> {
        JpegDecoder::default(DecoderOptions::default(), stream)
    }

    /// Returns the image information
    ///
    /// This **must** be called after a call to [`decode`] or [`decode_headers`],
    /// otherwise it will return `None`
    ///
    /// # Returns
    /// - `Some(info)`: Image information, width, height, number of components
    /// - `None`: Indicates image headers haven't been decoded
    ///
    /// [`decode`]: JpegDecoder::decode
    /// [`decode_headers`]: JpegDecoder::decode_headers
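    ///
    /// # Example
    /// A minimal sketch; the file path is a placeholder:
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("a_valid.jpeg").unwrap();
    /// let mut decoder = JpegDecoder::new(&data);
    /// decoder.decode_headers().unwrap();
    ///
    /// let info = decoder.info().unwrap();
    /// println!("{} x {}, {} components", info.width, info.height, info.components);
    /// ```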
    #[must_use]
    pub fn info(&self) -> Option<ImageInfo> {
        // The only way to tell whether the stored information is valid is to
        // check that the headers were decoded; width and height are only set
        // after a successful header parse.
        if !self.headers_decoded {
            return None;
        }

        return Some(self.info.clone());
    }

    /// Return the number of bytes required to hold a decoded image frame
    /// with the configured output transformations
    ///
    /// # Returns
    /// - `Some(usize)`: Minimum size for a buffer needed to decode the image
    /// - `None`: Indicates the image headers were not decoded, or the image
    ///   dimensions would overflow a `usize`
    ///
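    /// # Example
    /// A minimal sketch showing how the size is typically used to allocate an
    /// output buffer; the file path is a placeholder:
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("a_valid.jpeg").unwrap();
    /// let mut decoder = JpegDecoder::new(&data);
    /// decoder.decode_headers().unwrap();
    ///
    /// let mut out = vec![0_u8; decoder.output_buffer_size().unwrap()];
    /// decoder.decode_into(&mut out).unwrap();
    /// ```
    ///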
    #[must_use]
    pub fn output_buffer_size(&self) -> Option<usize> {
        return if self.headers_decoded {
            Some(
                usize::from(self.width())
                    .checked_mul(usize::from(self.height()))?
                    .checked_mul(self.options.jpeg_get_out_colorspace().num_components())?
            )
        } else {
            None
        };
    }

    /// Get a reference to the decoder options
    /// for the decoder instance
    ///
    /// This can be used to modify options before actual decoding
    /// but after initial creation
    ///
    /// # Example
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let mut decoder = JpegDecoder::new(&[]);
    /// // get current options
    /// let options = decoder.get_options();
    /// // modify them (works because `DecoderOptions` implements `Copy`)
    /// let new_options = options.set_max_width(10);
    /// // set them back
    /// decoder.set_options(new_options);
    /// ```
    #[must_use]
    pub const fn get_options(&self) -> &DecoderOptions {
        &self.options
    }
    /// Return the input colorspace of the image
    ///
    /// This indicates the colorspace that is present in
    /// the image, which may be different from the colorspace
    /// the output will be transformed to
    ///
    /// # Returns
    /// - `Some(Colorspace)`: Input colorspace
    /// - `None`: Indicates the headers weren't decoded
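    ///
    /// # Example
    /// A minimal sketch; the file path is a placeholder:
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("a_valid.jpeg").unwrap();
    /// let mut decoder = JpegDecoder::new(&data);
    /// decoder.decode_headers().unwrap();
    ///
    /// println!("input colorspace: {:?}", decoder.get_input_colorspace());
    /// ```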
    #[must_use]
    pub fn get_input_colorspace(&self) -> Option<ColorSpace> {
        return if self.headers_decoded { Some(self.input_colorspace) } else { None };
    }
    /// Set decoder options
    ///
    /// This can be used to set new options even after initialization
    /// but before decoding.
    ///
    /// This does not bear any significance after decoding an image
    ///
    /// # Arguments
    /// - `options`: New decoder options
    ///
    /// # Example
    /// Set the maximum number of jpeg progressive passes to 4
    ///
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    /// let mut decoder = JpegDecoder::new(&[]);
    /// // this works because DecoderOptions implements `Copy`
    /// let options = decoder.get_options().jpeg_set_max_scans(4);
    /// // set the new options
    /// decoder.set_options(options);
    /// // now decode
    /// decoder.decode().unwrap();
    /// ```
    pub fn set_options(&mut self, options: DecoderOptions) {
        self.options = options;
    }
    /// Decode JPEG headers
    ///
    /// This routine takes care of parsing supported headers from a JPEG
    /// image
    ///
    /// # Supported Headers
    /// - APP(0)
    /// - SOF(0)
    /// - DQT -> Quantization tables
    /// - DHT -> Huffman tables
    /// - SOS -> Start of Scan
    /// # Unsupported Headers
    /// - SOF(n) -> Images which are not baseline/progressive
    /// - DAC -> Images using Arithmetic coding tables
    /// - JPG(n)
    fn decode_headers_internal(&mut self) -> Result<(), DecodeErrors> {
        if self.headers_decoded {
            trace!("Headers decoded!");
            return Ok(());
        }
        // Match the output colorspace here.
        // We know this will only be called once per image, so it makes sense.
        // We only care for YCbCr to RGB/RGBA (and BGR/BGRA) here;
        // in case one is using another colorspace, may god help you.
        let out_colorspace = self.options.jpeg_get_out_colorspace();

        if matches!(
            out_colorspace,
            ColorSpace::BGR | ColorSpace::BGRA | ColorSpace::RGB | ColorSpace::RGBA
        ) {
            self.color_convert_16 = choose_ycbcr_to_rgb_convert_func(
                self.options.jpeg_get_out_colorspace(),
                &self.options
            )
            .unwrap();
        }
        // First two bytes should be the jpeg SOI marker
        let magic_bytes = self.stream.get_u16_be_err()?;

        let mut last_byte = 0;
        let mut bytes_before_marker = 0;

        if magic_bytes != 0xffd8 {
            return Err(DecodeErrors::IllegalMagicBytes(magic_bytes));
        }

        loop {
            // read a byte
            let mut m = self.stream.get_u8_err()?;

            // AND OF COURSE some images will have fill bytes in their marker
            // bitstreams because why not.
            //
            // I am disappointed as a man.
            if (m == 0xFF || m == 0) && last_byte == 0xFF {
                // This handles the edge case where
                // images have markers with fill bytes (0xFF)
                // or byte stuffing (0).
                // I.e. 0xFF 0xFF 0xDA
                // and
                // 0xFF 0 0xDA
                // It should ignore those fill bytes and take 0xDA
                // I don't know why such images exist
                // but they do.
                // so this is for you (with love)
                while m == 0xFF || m == 0x0 {
                    last_byte = m;
                    m = self.stream.get_u8_err()?;
                }
            }
            // Last byte should be 0xFF to confirm existence of a marker, since markers look
            // like 0xFF (some marker data)
            if last_byte == 0xFF {
                let marker = Marker::from_u8(m);
                if let Some(n) = marker {
                    if bytes_before_marker > 3 {
                        if self.options.get_strict_mode()
                        /*No reason to use this*/
                        {
                            return Err(DecodeErrors::FormatStatic(
                                "[strict-mode]: Extra bytes between headers"
                            ));
                        }

                        error!(
                            "Extra bytes {} before marker 0xFF{:X}",
                            bytes_before_marker - 3,
                            m
                        );
                    }

                    bytes_before_marker = 0;

                    self.parse_marker_inner(n)?;

                    if n == Marker::SOS {
                        self.headers_decoded = true;
                        trace!("Input colorspace {:?}", self.input_colorspace);
                        return Ok(());
                    }
                } else {
                    bytes_before_marker = 0;

                    warn!("Marker 0xFF{:X} not known", m);

                    let length = self.stream.get_u16_be_err()?;

                    if length < 2 {
                        return Err(DecodeErrors::Format(format!(
                            "Found a marker with invalid length : {length}"
                        )));
                    }

                    warn!("Skipping {} bytes", length - 2);
                    self.stream.skip((length - 2) as usize);
                }
            }
            last_byte = m;
            bytes_before_marker += 1;
        }
    }
    #[allow(clippy::too_many_lines)]
    pub(crate) fn parse_marker_inner(&mut self, m: Marker) -> Result<(), DecodeErrors> {
        match m {
            Marker::SOF(0..=2) => {
                let marker = {
                    // choose marker
                    if m == Marker::SOF(0) || m == Marker::SOF(1) {
                        SOFMarkers::BaselineDct
                    } else {
                        self.is_progressive = true;
                        SOFMarkers::ProgressiveDctHuffman
                    }
                };

                trace!("Image encoding scheme =`{:?}`", marker);
                // get components
                parse_start_of_frame(marker, self)?;
            }
            // Start of Frame Segments not supported
            Marker::SOF(v) => {
                let feature = UnsupportedSchemes::from_int(v);

                if let Some(feature) = feature {
                    return Err(DecodeErrors::Unsupported(feature));
                }

                return Err(DecodeErrors::Format("Unsupported image format".to_string()));
            }
            // APP(0) segment
            Marker::APP(0) => {
                let mut length = self.stream.get_u16_be_err()?;

                if length < 2 {
                    return Err(DecodeErrors::Format(format!(
                        "Found a marker with invalid length:{length}\n"
                    )));
                }
                // skip for now
                if length > 5 && self.stream.has(5) {
                    let mut buffer = [0u8; 5];
                    self.stream.read_exact(&mut buffer).unwrap();
                    if &buffer == b"AVI1\0" {
                        self.is_mjpeg = true;
                    }
                    length -= 5;
                }
                self.stream.skip(length.saturating_sub(2) as usize);

                //parse_app(buf, m, &mut self.info)?;
            }
            Marker::APP(1) => {
                parse_app1(self)?;
            }

            Marker::APP(2) => {
                parse_app2(self)?;
            }
            // Quantization tables
            Marker::DQT => {
                parse_dqt(self)?;
            }
            // Huffman tables
            Marker::DHT => {
                parse_huffman(self)?;
            }
            // Start of Scan Data
            Marker::SOS => {
                parse_sos(self)?;

                // break after reading the start of scan.
                // what follows is the image data
                return Ok(());
            }
            Marker::EOI => return Err(DecodeErrors::FormatStatic("Premature End of image")),

            Marker::DAC | Marker::DNL => {
                return Err(DecodeErrors::Format(format!(
                    "Parsing of the following header `{m:?}` is not supported,\
                cannot continue"
                )));
            }
            Marker::DRI => {
                trace!("DRI marker present");

                if self.stream.get_u16_be_err()? != 4 {
                    return Err(DecodeErrors::Format(
                        "Bad DRI length, Corrupt JPEG".to_string()
                    ));
                }

                self.restart_interval = usize::from(self.stream.get_u16_be_err()?);
                self.todo = self.restart_interval;
            }
            Marker::APP(13) => {
                parse_app13(self)?;
            }
            Marker::APP(14) => {
                parse_app14(self)?;
            }
            _ => {
                warn!(
                    "Capabilities for processing marker \"{:?}\" not implemented",
                    m
                );

                let length = self.stream.get_u16_be_err()?;

                if length < 2 {
                    return Err(DecodeErrors::Format(format!(
                        "Found a marker with invalid length:{length}\n"
                    )));
                }
                warn!("Skipping {} bytes", length - 2);
                self.stream.skip((length - 2) as usize);
            }
        }
        Ok(())
    }
    /// Get the embedded ICC profile if it exists
    /// and is correct
    ///
    /// One does not need to decode the whole image to extract this;
    /// calling [`decode_headers`] for an image with an ICC profile
    /// is enough to extract it
    ///
    /// # Returns
    /// - `Some(Vec<u8>)`: The raw ICC profile of the image
    /// - `None`: May indicate an error in the ICC profile, non-existence of
    ///   an ICC profile, or that the headers weren't decoded.
    ///
    /// [`decode_headers`]: Self::decode_headers
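    ///
    /// # Example
    /// A minimal sketch; the file path is a placeholder:
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("a_valid.jpeg").unwrap();
    /// let mut decoder = JpegDecoder::new(&data);
    /// decoder.decode_headers().unwrap();
    ///
    /// if let Some(icc) = decoder.icc_profile() {
    ///     println!("ICC profile is {} bytes", icc.len());
    /// }
    /// ```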
    #[must_use]
    pub fn icc_profile(&self) -> Option<Vec<u8>> {
        let mut marker_present: [Option<&ICCChunk>; 256] = [None; 256];

        if !self.headers_decoded {
            return None;
        }
        let num_markers = self.icc_data.len();

        if num_markers == 0 || num_markers >= 255 {
            return None;
        }
        // check validity
        for chunk in &self.icc_data {
            if usize::from(chunk.num_markers) != num_markers {
                // all the lengths must match
                return None;
            }
            if chunk.seq_no == 0 {
                warn!("Zero sequence number in ICC, corrupt ICC chunk");
                return None;
            }
            if marker_present[usize::from(chunk.seq_no)].is_some() {
                // duplicate seq_no
                warn!("Duplicate sequence number in ICC, corrupt chunk");
                return None;
            }

            marker_present[usize::from(chunk.seq_no)] = Some(chunk);
        }
        let mut data = Vec::with_capacity(1000);
        // assemble the data now
        for chunk in marker_present.get(1..=num_markers).unwrap() {
            if let Some(ch) = chunk {
                data.extend_from_slice(&ch.data);
            } else {
                warn!("Missing icc sequence number, corrupt ICC chunk");
                return None;
            }
        }

        Some(data)
    }
    /// Return the exif data for the file
    ///
    /// This returns the raw exif data starting at the
    /// TIFF header
    ///
    /// # Returns
    /// - `Some(data)`: The raw exif data, if present in the image
    /// - `None`: May indicate the following
    ///
    /// 1. The image doesn't have exif data
    /// 2. The image headers haven't been decoded
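    ///
    /// # Example
    /// A minimal sketch; the file path is a placeholder:
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("a_valid.jpeg").unwrap();
    /// let mut decoder = JpegDecoder::new(&data);
    /// decoder.decode_headers().unwrap();
    ///
    /// if let Some(exif) = decoder.exif() {
    ///     println!("Exif chunk is {} bytes", exif.len());
    /// }
    /// ```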
    #[must_use]
    pub fn exif(&self) -> Option<&Vec<u8>> {
        return self.exif_data.as_ref();
    }
    /// Get the output colorspace the image pixels will be decoded into
    ///
    /// # Note
    /// This should only be relied upon after decoding headers,
    /// as markers such as Adobe APP14 may dictate a different colorspace
    /// than the one requested.
    ///
    /// Calling `decode_headers` is sufficient to know what colorspace the
    /// output is; if this is called after `decode`, it indicates the colorspace
    /// the output is currently in
    ///
    /// Additionally, not all input->output colorspace mappings are supported,
    /// but all input colorspaces can map to the RGB colorspace, so that's a safe bet
    /// if one is handling image formats
    ///
    /// # Returns
    /// - `Some(Colorspace)`: If headers have been decoded, the colorspace the
    ///   output array will be in
    /// - `None`: Indicates the headers haven't been decoded
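    ///
    /// # Example
    /// A minimal sketch; the file path is a placeholder:
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("a_valid.jpeg").unwrap();
    /// let mut decoder = JpegDecoder::new(&data);
    /// decoder.decode_headers().unwrap();
    ///
    /// println!("output colorspace: {:?}", decoder.get_output_colorspace());
    /// ```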
    #[must_use]
    pub fn get_output_colorspace(&self) -> Option<ColorSpace> {
        return if self.headers_decoded {
            Some(self.options.jpeg_get_out_colorspace())
        } else {
            None
        };
    }

    /// Decode into a pre-allocated buffer
    ///
    /// It is an error if the buffer size is smaller than
    /// [`output_buffer_size()`](Self::output_buffer_size)
    ///
    /// If the buffer is bigger than expected, we ignore the end padding bytes
    ///
    /// # Example
    ///
    /// - Read headers and then alloc a buffer big enough to hold the image
    ///
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    /// let mut decoder = JpegDecoder::new(&[]);
    /// // before we get output, we must decode the headers to get width,
    /// // height, and input colorspace
    /// decoder.decode_headers().unwrap();
    ///
    /// let mut out = vec![0; decoder.output_buffer_size().unwrap()];
    /// // write into out
    /// decoder.decode_into(&mut out).unwrap();
    /// ```
    pub fn decode_into(&mut self, out: &mut [u8]) -> Result<(), DecodeErrors> {
        self.decode_headers_internal()?;

        let expected_size = self.output_buffer_size().unwrap();

        if out.len() < expected_size {
            // too small of a size
            return Err(DecodeErrors::TooSmallOutput(expected_size, out.len()));
        }

        // ensure we don't touch anyone else's scratch space
        let out_len = core::cmp::min(out.len(), expected_size);
        let out = &mut out[0..out_len];

        if self.is_progressive {
            self.decode_mcu_ycbcr_progressive(out)
        } else {
            self.decode_mcu_ycbcr_baseline(out)
        }
    }

    /// Read only headers from a jpeg image buffer
    ///
    /// This allows you to extract important information like
    /// image width and height without decoding the full image
    ///
    /// # Examples
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let img_data = std::fs::read("a_valid.jpeg").unwrap();
    /// let mut decoder = JpegDecoder::new(&img_data);
    /// decoder.decode_headers().unwrap();
    ///
    /// println!("Total decoder dimensions are : {:?} pixels", decoder.dimensions());
    /// println!("Number of components in the image are {}", decoder.info().unwrap().components);
    /// ```
    /// # Errors
    /// See [`DecodeErrors`] for a list of possible errors during decoding
    pub fn decode_headers(&mut self) -> Result<(), DecodeErrors> {
        self.decode_headers_internal()?;
        Ok(())
    }
    /// Create a new decoder with the specified options to be used for decoding
    /// an image
    ///
    /// # Arguments
    /// - `buf`: The input buffer from which we will pull compressed jpeg bytes
    /// - `options`: Options specific to this decoder instance
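    ///
    /// # Example
    /// A minimal sketch; `DecoderOptions` comes from the `zune_core` crate and
    /// the file path is a placeholder:
    /// ```no_run
    /// use zune_core::options::DecoderOptions;
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("a_valid.jpeg").unwrap();
    /// let options = DecoderOptions::default();
    /// let mut decoder = JpegDecoder::new_with_options(&data, options);
    /// let pixels = decoder.decode().unwrap();
    /// ```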
    #[must_use]
    pub fn new_with_options(buf: T, options: DecoderOptions) -> JpegDecoder<T> {
        JpegDecoder::default(options, buf)
    }

    /// Set up-sampling routines in case an image is down sampled
    pub(crate) fn set_upsampling(&mut self) -> Result<(), DecodeErrors> {
        // no sampling, return early
        // check if horizontal max == 1
        if self.h_max == self.v_max && self.h_max == 1 {
            return Ok(());
        }
        match (self.h_max, self.v_max) {
            (1, 1) => {
                self.sub_sample_ratio = SampleRatios::None;
            }
            (1, 2) => {
                self.sub_sample_ratio = SampleRatios::V;
            }
            (2, 1) => {
                self.sub_sample_ratio = SampleRatios::H;
            }
            (2, 2) => {
                self.sub_sample_ratio = SampleRatios::HV;
            }
            (hs, vs) => {
                self.sub_sample_ratio = SampleRatios::Generic(hs, vs)
                // return Err(DecodeErrors::Format(format!(
                //     "Unknown down-sampling method ({hs},{vs}), cannot continue")
                // ))
            }
        }

        for comp in self.components.iter_mut() {
            let hs = self.h_max / comp.horizontal_sample;
            let vs = self.v_max / comp.vertical_sample;

            let samp_factor = match (hs, vs) {
                (1, 1) => {
                    comp.sample_ratio = SampleRatios::None;
                    upsample_no_op
                }
                (2, 1) => {
                    comp.sample_ratio = SampleRatios::H;
                    choose_horizontal_samp_function(self.options.get_use_unsafe())
                }
                (1, 2) => {
                    comp.sample_ratio = SampleRatios::V;
                    choose_v_samp_function(self.options.get_use_unsafe())
                }
                (2, 2) => {
                    comp.sample_ratio = SampleRatios::HV;
                    choose_hv_samp_function(self.options.get_use_unsafe())
                }
                (hs, vs) => {
                    comp.sample_ratio = SampleRatios::Generic(hs, vs);
                    generic_sampler()
                }
            };
            comp.setup_upsample_scanline();
            comp.up_sampler = samp_factor;
        }

        return Ok(());
    }
    /// Get the width of the image as a u16
    ///
    /// The width lies between 1 and 65535
    #[must_use]
    pub(crate) fn width(&self) -> u16 {
        self.info.width
    }

    /// Get the height of the image as a u16
    ///
    /// The height lies between 1 and 65535
    #[must_use]
    pub(crate) fn height(&self) -> u16 {
        self.info.height
    }

    /// Get image dimensions as a tuple of width and height,
    /// or `None` if the image headers haven't been decoded.
    ///
    /// # Returns
    /// - `Some((width, height))`: Image dimensions
    /// - `None`: The image headers haven't been decoded
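    ///
    /// # Example
    /// A minimal sketch; the file path is a placeholder:
    /// ```no_run
    /// use zune_jpeg::JpegDecoder;
    ///
    /// let data = std::fs::read("a_valid.jpeg").unwrap();
    /// let mut decoder = JpegDecoder::new(&data);
    /// decoder.decode_headers().unwrap();
    ///
    /// let (width, height) = decoder.dimensions().unwrap();
    /// println!("{} x {}", width, height);
    /// ```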
    #[must_use]
    pub const fn dimensions(&self) -> Option<(usize, usize)> {
        return if self.headers_decoded {
            Some((self.info.width as usize, self.info.height as usize))
        } else {
            None
        };
    }
}

/// A struct representing Image Information
#[derive(Default, Clone, Eq, PartialEq)]
#[allow(clippy::module_name_repetitions)]
pub struct ImageInfo {
    /// Width of the image
    pub width: u16,
    /// Height of the image
    pub height: u16,
    /// Pixel density
    pub pixel_density: u8,
    /// Start of frame markers
    pub sof: SOFMarkers,
    /// Horizontal sample density
    pub x_density: u16,
    /// Vertical sample density
    pub y_density: u16,
    /// Number of components
    pub components: u8,
    /// Raw IPTC data, if present
    pub iptc_data: Option<Vec<u8>>
}

impl ImageInfo {
    /// Set width of the image
    ///
    /// Found in the start of frame
    pub(crate) fn set_width(&mut self, width: u16) {
        self.width = width;
    }

    /// Set height of the image
    ///
    /// Found in the start of frame
    pub(crate) fn set_height(&mut self, height: u16) {
        self.height = height;
    }

    /// Set the image density
    ///
    /// Found in the start of frame
    pub(crate) fn set_density(&mut self, density: u8) {
        self.pixel_density = density;
    }

    /// Set image Start of frame marker
    ///
    /// Found in the Start of frame header
    pub(crate) fn set_sof_marker(&mut self, marker: SOFMarkers) {
        self.sof = marker;
    }

    /// Set image x-density (dots per pixel)
    ///
    /// Found in the APP(0) marker
    #[allow(dead_code)]
    pub(crate) fn set_x(&mut self, sample: u16) {
        self.x_density = sample;
    }

    /// Set image y-density
    ///
    /// Found in the APP(0) marker
    #[allow(dead_code)]
    pub(crate) fn set_y(&mut self, sample: u16) {
        self.y_density = sample;
    }
}