use std::fmt::Debug;
use std::io::Seek;
use std::iter::Peekable;
use std::ops::Not;
use std::sync::mpsc;
use std::collections::BTreeMap;

use rayon_core::{ThreadPool, ThreadPoolBuildError};

use crate::block::UncompressedBlock;
use crate::block::chunk::Chunk;
use crate::compression::Compression;
use crate::error::{Error, Result, UnitResult, usize_to_u64};
use crate::io::{Data, Tracking, Write};
use crate::meta::{Headers, MetaData, OffsetTables};
use crate::meta::attribute::LineOrder;

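/// Write the meta data and all chunks to the byte writer.
/// Writes the meta data up front, lets the callback write the chunks through the
/// provided [`ChunkWriter`], and finally fills in the offset tables.
///
/// A minimal usage sketch, writing to an in-memory buffer. The `headers` and `blocks`
/// values are assumed to exist already, and the `exr::` paths are assumptions about
/// how this module is exposed by the crate:
/// ```no_run
/// # use exr::block::UncompressedBlock;
/// # use exr::block::writer::{write_chunks_with, ChunksWriter};
/// # use exr::meta::Headers;
/// # use exr::error::UnitResult;
/// # fn demo(headers: Headers, blocks: Vec<(usize, UncompressedBlock)>) -> UnitResult {
/// let buffered_output = std::io::Cursor::new(Vec::new());
///
/// write_chunks_with(buffered_output, headers, true, |meta_data, chunk_writer| {
///     // report progress while compressing and writing all blocks on this thread
///     chunk_writer
///         .on_progress(|progress| println!("written {:.0}%", progress * 100.0))
///         .compress_all_blocks_sequential(&meta_data, blocks.into_iter())
/// })
/// # }
/// ```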
pub fn write_chunks_with<W: Write + Seek>(
    buffered_write: W, headers: Headers, pedantic: bool,
    write_chunks: impl FnOnce(MetaData, &mut ChunkWriter<W>) -> UnitResult
) -> UnitResult {
    let (meta, mut writer) = ChunkWriter::new_for_buffered(buffered_write, headers, pedantic)?;
    write_chunks(meta, &mut writer)?;
    writer.complete_meta_data()
}

/// Writes the chunks of an image to a byte writer.
/// Reserves space for the offset tables directly after the meta data
/// and fills them in once every chunk has been written.
#[derive(Debug)]
#[must_use]
pub struct ChunkWriter<W> {
    header_count: usize,
    byte_writer: Tracking<W>,
    chunk_indices_byte_location: std::ops::Range<usize>,
    chunk_indices_increasing_y: OffsetTables,
    chunk_count: usize,
}

/// A [`ChunksWriter`] wrapper that reports its progress
/// by calling the provided closure after each written chunk.
#[derive(Debug)]
#[must_use]
pub struct OnProgressChunkWriter<'w, W, F> {
    chunk_writer: &'w mut W,
    written_chunks: usize,
    on_progress: F,
}

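/// A consumer of compressed image chunks.
/// Implemented by [`ChunkWriter`] and [`OnProgressChunkWriter`].
/// The provided methods compress uncompressed blocks either on the current thread
/// or on a thread pool before writing the resulting chunks.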
pub trait ChunksWriter: Sized {

    /// The total number of chunks that the complete file will contain.
    fn total_chunks_count(&self) -> usize;

    /// Write a single compressed chunk. The index is the block index
    /// within its header, counted in increasing-y order.
    fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult;

    /// Wrap this writer so that a progress callback is invoked after each written chunk.
    fn on_progress<F>(&mut self, on_progress: F) -> OnProgressChunkWriter<'_, Self, F> where F: FnMut(f64) {
        OnProgressChunkWriter { chunk_writer: self, written_chunks: 0, on_progress }
    }

    /// Obtain a compressor that compresses blocks on the current thread.
    fn sequential_blocks_compressor<'w>(&'w mut self, meta: &'w MetaData) -> SequentialBlocksCompressor<'w, Self> {
        SequentialBlocksCompressor::new(meta, self)
    }

    /// Obtain a compressor that compresses blocks on a thread pool.
    /// Returns `None` if parallel compression is not available or not beneficial.
    fn parallel_blocks_compressor<'w>(&'w mut self, meta: &'w MetaData) -> Option<ParallelBlocksCompressor<'w, Self>> {
        ParallelBlocksCompressor::new(meta, self)
    }

    /// Compress all blocks on the current thread and write them to this writer.
    fn compress_all_blocks_sequential(mut self, meta: &MetaData, blocks: impl Iterator<Item=(usize, UncompressedBlock)>) -> UnitResult {
        let mut writer = self.sequential_blocks_compressor(meta);

        for (index_in_header_increasing_y, block) in blocks {
            writer.compress_block(index_in_header_increasing_y, block)?;
        }

        Ok(())
    }

    /// Compress all blocks on a thread pool and write them to this writer.
    /// Falls back to sequential compression if no parallel compressor is available.
    fn compress_all_blocks_parallel(mut self, meta: &MetaData, blocks: impl Iterator<Item=(usize, UncompressedBlock)>) -> UnitResult {
        let mut parallel_writer = match self.parallel_blocks_compressor(meta) {
            None => return self.compress_all_blocks_sequential(meta, blocks),
            Some(writer) => writer,
        };

        for (index_in_header_increasing_y, block) in blocks {
            parallel_writer.add_block_to_compression_queue(index_in_header_increasing_y, block)?;
        }

        Ok(())
    }
}


impl<W> ChunksWriter for ChunkWriter<W> where W: Write + Seek {

    fn total_chunks_count(&self) -> usize { self.chunk_count }

    fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult {
        let header_chunk_indices = &mut self.chunk_indices_increasing_y[chunk.layer_index];

        if index_in_header_increasing_y >= header_chunk_indices.len() {
            return Err(Error::invalid("too large chunk index"));
        }

        let chunk_index_slot = &mut header_chunk_indices[index_in_header_increasing_y];
        if *chunk_index_slot != 0 {
            return Err(Error::invalid(format!("chunk at index {} is already written", index_in_header_increasing_y)));
        }

        *chunk_index_slot = usize_to_u64(self.byte_writer.byte_position());
        chunk.write(&mut self.byte_writer, self.header_count)?;
        Ok(())
    }
}

impl<W> ChunkWriter<W> where W: Write + Seek {

    /// Write the meta data and reserve space for the offset tables.
    fn new_for_buffered(buffered_byte_writer: W, headers: Headers, pedantic: bool) -> Result<(MetaData, Self)> {
        let mut write = Tracking::new(buffered_byte_writer);
        let requirements = MetaData::write_validating_to_buffered(&mut write, headers.as_slice(), pedantic)?;

        // the offset tables store one u64 entry per chunk, over all headers
        let offset_table_size: usize = headers.iter().map(|header| header.chunk_count).sum();

        let offset_table_start_byte = write.byte_position();
        let offset_table_end_byte = write.byte_position() + offset_table_size * u64::BYTE_SIZE;

        // skip the offset tables for now; they are filled in by `complete_meta_data`
        write.seek_write_to(offset_table_end_byte)?;

        let header_count = headers.len();
        let chunk_indices_increasing_y = headers.iter()
            .map(|header| vec![0_u64; header.chunk_count]).collect();

        let meta_data = MetaData { requirements, headers };

        Ok((meta_data, ChunkWriter {
            header_count,
            byte_writer: write,
            chunk_count: offset_table_size,
            chunk_indices_byte_location: offset_table_start_byte .. offset_table_end_byte,
            chunk_indices_increasing_y,
        }))
    }

    /// Seek back and fill in the offset tables, then flush the writer.
    /// Returns an error if not all chunks have been written.
    fn complete_meta_data(mut self) -> UnitResult {
        if self.chunk_indices_increasing_y.iter().flatten().any(|&index| index == 0) {
            return Err(Error::invalid("some chunks are not written yet"))
        }

        debug_assert_ne!(self.byte_writer.byte_position(), self.chunk_indices_byte_location.end, "offset table has already been updated");
        self.byte_writer.seek_write_to(self.chunk_indices_byte_location.start)?;

        for table in self.chunk_indices_increasing_y {
            u64::write_slice(&mut self.byte_writer, table.as_slice())?;
        }

        self.byte_writer.flush()?;
        Ok(())
    }

}


impl<'w, W, F> ChunksWriter for OnProgressChunkWriter<'w, W, F> where W: 'w + ChunksWriter, F: FnMut(f64) {
    fn total_chunks_count(&self) -> usize {
        self.chunk_writer.total_chunks_count()
    }

    fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult {
        let total_chunks = self.total_chunks_count();
        let on_progress = &mut self.on_progress;

        if self.written_chunks == 0 { on_progress(0.0); }

        self.chunk_writer.write_chunk(index_in_header_increasing_y, chunk)?;

        self.written_chunks += 1;

        on_progress({
            if self.written_chunks == total_chunks { 1.0 }
            else { self.written_chunks as f64 / total_chunks as f64 }
        });

        Ok(())
    }
}


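/// Writes chunks to the inner [`ChunksWriter`] in the order given by their index in the file.
/// Chunks that arrive out of order are stashed in a map until all of their predecessors
/// have been written. Sorting is skipped entirely when every header
/// specifies [`LineOrder::Unspecified`].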
#[derive(Debug)]
#[must_use]
pub struct SortedBlocksWriter<'w, W> {
    chunk_writer: &'w mut W,
    pending_chunks: BTreeMap<usize, (usize, Chunk)>,
    unwritten_chunk_indices: Peekable<std::ops::Range<usize>>,
    requires_sorting: bool,
}


impl<'w, W> SortedBlocksWriter<'w, W> where W: ChunksWriter {

    pub fn new(meta_data: &MetaData, chunk_writer: &'w mut W) -> SortedBlocksWriter<'w, W> {
        // sorting is only required if at least one header demands a specific line order
        let requires_sorting = meta_data.headers.iter()
            .any(|header| header.line_order != LineOrder::Unspecified);

        let total_chunk_count = chunk_writer.total_chunks_count();

        SortedBlocksWriter {
            pending_chunks: BTreeMap::new(),
            unwritten_chunk_indices: (0 .. total_chunk_count).peekable(),
            requires_sorting,
            chunk_writer
        }
    }

    /// Write the chunk, or stash it until all chunks with a lower file index have been written.
    pub fn write_or_stash_chunk(&mut self, chunk_index_in_file: usize, chunk_y_index: usize, chunk: Chunk) -> UnitResult {
        if self.requires_sorting.not() {
            return self.chunk_writer.write_chunk(chunk_y_index, chunk);
        }

        // write this chunk now if it is the next expected chunk in the file
        if self.unwritten_chunk_indices.peek() == Some(&chunk_index_in_file) {
            self.chunk_writer.write_chunk(chunk_y_index, chunk)?;
            self.unwritten_chunk_indices.next().expect("peeked chunk index is missing");

            // then write all directly following chunks that have already been stashed
            while let Some((next_chunk_y_index, next_chunk)) = self
                .unwritten_chunk_indices.peek().cloned()
                .and_then(|id| self.pending_chunks.remove(&id))
            {
                self.chunk_writer.write_chunk(next_chunk_y_index, next_chunk)?;
                self.unwritten_chunk_indices.next().expect("peeked chunk index is missing");
            }
        }

        else {
            self.pending_chunks.insert(chunk_index_in_file, (chunk_y_index, chunk));
        }

        Ok(())
    }

    pub fn inner_chunks_writer(&self) -> &W {
        &self.chunk_writer
    }
}


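/// Compresses blocks on the current thread and writes the resulting chunks
/// to the wrapped [`ChunksWriter`], one block at a time.
///
/// A minimal sketch of driving it manually (assuming `chunk_writer`, `meta` and the
/// `blocks` iterator already exist; this is what `compress_all_blocks_sequential` does):
/// ```ignore
/// let mut compressor = chunk_writer.sequential_blocks_compressor(&meta);
/// for (index_in_header_increasing_y, block) in blocks {
///     compressor.compress_block(index_in_header_increasing_y, block)?;
/// }
/// ```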
#[derive(Debug)]
#[must_use]
pub struct SequentialBlocksCompressor<'w, W> {
    meta: &'w MetaData,
    chunks_writer: &'w mut W,
}

impl<'w, W> SequentialBlocksCompressor<'w, W> where W: 'w + ChunksWriter {

    pub fn new(meta: &'w MetaData, chunks_writer: &'w mut W) -> Self { Self { meta, chunks_writer } }

    pub fn inner_chunks_writer(&'w self) -> &'w W { self.chunks_writer }

    /// Compress a single block on this thread and write the resulting chunk.
    pub fn compress_block(&mut self, index_in_header_increasing_y: usize, block: UncompressedBlock) -> UnitResult {
        self.chunks_writer.write_chunk(
            index_in_header_increasing_y,
            block.compress_to_chunk(&self.meta.headers)?
        )
    }
}

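/// Compresses blocks on a rayon thread pool and writes the resulting chunks
/// to the wrapped [`ChunksWriter`], restoring the original chunk order before writing.
///
/// A minimal sketch of driving it manually (assuming `chunk_writer`, `meta` and the
/// `blocks` iterator already exist; `compress_all_blocks_parallel` wraps this pattern,
/// including the sequential fallback when the constructor returns `None`):
/// ```ignore
/// let mut compressor = chunk_writer.parallel_blocks_compressor(&meta)
///     .expect("parallel compression unavailable, use the sequential compressor instead");
///
/// for (index_in_header_increasing_y, block) in blocks {
///     // blocks are queued for compression; finished chunks are written back in
///     // file order once the queue fills up or all blocks have been queued
///     compressor.add_block_to_compression_queue(index_in_header_increasing_y, block)?;
/// }
/// ```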
#[derive(Debug)]
#[must_use]
pub struct ParallelBlocksCompressor<'w, W> {
    meta: &'w MetaData,
    sorted_writer: SortedBlocksWriter<'w, W>,

    sender: mpsc::Sender<Result<(usize, usize, Chunk)>>,
    receiver: mpsc::Receiver<Result<(usize, usize, Chunk)>>,
    pool: rayon_core::ThreadPool,

    currently_compressing_count: usize,
    written_chunk_count: usize,
    max_threads: usize,
    next_incoming_chunk_index: usize,
}

impl<'w, W> ParallelBlocksCompressor<'w, W> where W: 'w + ChunksWriter {

    /// Create a compressor with a default rayon thread pool.
    /// Returns `None` if parallel compression would not be beneficial or the pool cannot be built.
    pub fn new(meta: &'w MetaData, chunks_writer: &'w mut W) -> Option<Self> {
        Self::new_with_thread_pool(meta, chunks_writer, || {
            rayon_core::ThreadPoolBuilder::new()
                .thread_name(|index| format!("OpenEXR Block Compressor Thread #{}", index))
                .build()
        })
    }

    /// Create a compressor with a custom thread pool.
    /// Returns `None` if parallel compression would not be beneficial or the pool cannot be built.
    pub fn new_with_thread_pool<CreatePool>(
        meta: &'w MetaData, chunks_writer: &'w mut W, try_create_thread_pool: CreatePool)
        -> Option<Self>
        where CreatePool: FnOnce() -> std::result::Result<ThreadPool, ThreadPoolBuildError>
    {
        // uncompressed data does not benefit from parallel compression
        if meta.headers.iter().all(|head| head.compression == Compression::Uncompressed) {
            return None;
        }

        let pool = match try_create_thread_pool() {
            Ok(pool) => pool,

            // fall back to sequential compression if the thread pool cannot be created
            Err(_) => return None,
        };

        // queue up to two more blocks than there are threads
        let max_threads = pool.current_num_threads().max(1).min(chunks_writer.total_chunks_count()) + 2;

        let (send, recv) = mpsc::channel();

        Some(Self {
            sorted_writer: SortedBlocksWriter::new(meta, chunks_writer),
            next_incoming_chunk_index: 0,
            currently_compressing_count: 0,
            written_chunk_count: 0,
            sender: send,
            receiver: recv,
            max_threads,
            pool,
            meta,
        })
    }

    pub fn inner_chunks_writer(&'w self) -> &'w W { self.sorted_writer.inner_chunks_writer() }

    /// Wait for the next compressed chunk and write or stash it.
    fn write_next_queued_chunk(&mut self) -> UnitResult {
        debug_assert!(self.currently_compressing_count > 0, "cannot wait for chunks as there are none left");

        let some_compressed_chunk = self.receiver.recv()
            .expect("cannot receive compressed block");

        self.currently_compressing_count -= 1;
        let (chunk_file_index, chunk_y_index, chunk) = some_compressed_chunk?;
        self.sorted_writer.write_or_stash_chunk(chunk_file_index, chunk_y_index, chunk)?;

        self.written_chunk_count += 1;
        Ok(())
    }

    /// Wait for all currently queued chunks and write them.
    pub fn write_all_queued_chunks(&mut self) -> UnitResult {
        while self.currently_compressing_count > 0 {
            self.write_next_queued_chunk()?;
        }

        debug_assert_eq!(self.currently_compressing_count, 0, "counter does not match block count");
        Ok(())
    }

    /// Queue a block for compression on the thread pool.
    /// Waits for a previously queued block if too many blocks are currently in flight.
    pub fn add_block_to_compression_queue(&mut self, index_in_header_increasing_y: usize, block: UncompressedBlock) -> UnitResult {

        // wait for a running compression task before queuing more work
        if self.currently_compressing_count >= self.max_threads {
            self.write_next_queued_chunk()?;
        }

        let index_in_file = self.next_incoming_chunk_index;
        let sender = self.sender.clone();
        let meta = self.meta.clone();

        self.pool.spawn(move || {
            let compressed_or_err = block.compress_to_chunk(&meta.headers);

            // a send error only occurs if the receiver was dropped, in which case the result is discarded anyway
            let _ = sender.send(compressed_or_err.map(move |compressed| (index_in_file, index_in_header_increasing_y, compressed)));
        });

        self.currently_compressing_count += 1;
        self.next_incoming_chunk_index += 1;

        // once the last block has been queued, wait for and write all remaining chunks
        if self.written_chunk_count + self.currently_compressing_count == self.inner_chunks_writer().total_chunks_count() {
            self.write_all_queued_chunks()?;
            debug_assert_eq!(
                self.written_chunk_count, self.inner_chunks_writer().total_chunks_count(),
                "written chunk count mismatch"
            );
        }

        Ok(())
    }
}