net/filemanager_thread.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::fs::File;
use std::io::{BufRead, BufReader, Seek, SeekFrom};
use std::ops::Index;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};

use embedder_traits::{
    EmbedderControlId, EmbedderControlResponse, FilePickerRequest, GenericEmbedderProxy,
    SelectedFile,
};
use headers::{ContentLength, ContentRange, ContentType, HeaderMap, HeaderMapExt, Range};
use http::header::{self, HeaderValue};
use ipc_channel::ipc::IpcSender;
use log::warn;
use mime::{self, Mime};
use net_traits::blob_url_store::{BlobBuf, BlobURLStoreError};
use net_traits::filemanager_thread::{
    FileManagerResult, FileManagerThreadError, FileManagerThreadMsg, FileTokenCheck,
    ReadFileProgress, RelativePos,
};
use net_traits::http_percent_encode;
use net_traits::response::{Response, ResponseBody};
use parking_lot::{Mutex, RwLock};
use rustc_hash::{FxHashMap, FxHashSet};
use servo_arc::Arc as ServoArc;
use servo_url::ImmutableOrigin;
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use tokio::sync::mpsc::UnboundedSender as TokioSender;
use tokio::task::yield_now;
use uuid::Uuid;

use crate::async_runtime::spawn_task;
use crate::embedder::NetToEmbedderMsg;
use crate::fetch::methods::{CancellationListener, Data, RangeRequestBounds};
use crate::protocols::get_range_request_bounds;

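/// Size of the buffered chunks used when streaming file contents, both to the
/// fetch machinery (`fetch_file_in_chunks`) and to script (`read_file_in_chunks`).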
pub const FILE_CHUNK_SIZE: usize = 32768; // 32 KB

/// FileManagerStore's entry
struct FileStoreEntry {
    /// Origin of the entry's "creator"
    origin: ImmutableOrigin,
    /// Backend implementation
    file_impl: FileImpl,
    /// Number of FileID holders that use this ID to
    /// index this entry in `FileManagerStore`.
    /// Reference holders include a FileStoreEntry or
    /// a script-side File-based Blob.
    refs: AtomicUsize,
    /// UUIDs only become valid blob URIs when explicitly requested
    /// by the user with createObjectURL. Validity can be revoked as well.
    /// (The UUID is the one that maps to this entry in `FileManagerStore`)
    is_valid_url: AtomicBool,
    /// UUIDs of fetch instances that acquired an interest in this file,
    /// when the url was still valid.
    outstanding_tokens: FxHashSet<Uuid>,
}

#[derive(Clone)]
struct FileMetaData {
    path: PathBuf,
    size: u64,
}

/// File backend implementation
#[derive(Clone)]
enum FileImpl {
    /// Metadata of on-disk file
    MetaDataOnly(FileMetaData),
    /// In-memory Blob buffer object
    Memory(BlobBuf),
    /// A reference to parent entry in `FileManagerStore`,
    /// representing a sliced version of the parent entry data
    Sliced(Uuid, RelativePos),
}

#[derive(Clone)]
pub struct FileManager {
    embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>,
    store: Arc<FileManagerStore>,
}

impl FileManager {
    pub fn new(embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>) -> FileManager {
        FileManager {
            embedder_proxy,
            store: Arc::new(FileManagerStore::new()),
        }
    }

    fn read_file(
        &self,
        sender: IpcSender<FileManagerResult<ReadFileProgress>>,
        id: Uuid,
        origin: ImmutableOrigin,
    ) {
        let store = self.store.clone();
        spawn_task(async move {
            if let Err(e) = store.try_read_file(&sender, id, origin).await {
                let _ = sender.send(Err(FileManagerThreadError::BlobURLStoreError(e)));
            }
        });
    }

    pub(crate) fn get_token_for_file(&self, file_id: &Uuid) -> FileTokenCheck {
        self.store.get_token_for_file(file_id)
    }

    pub(crate) fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
        self.store.invalidate_token(token, file_id);
    }

    /// Read a file for the Fetch implementation.
    /// It gets the required headers synchronously and reads the actual content
    /// in a separate task.
    #[expect(clippy::too_many_arguments)]
    pub(crate) fn fetch_file(
        &self,
        done_sender: &mut TokioSender<Data>,
        cancellation_listener: Arc<CancellationListener>,
        id: Uuid,
        file_token: &FileTokenCheck,
        origin: ImmutableOrigin,
        response: &mut Response,
        range: Option<Range>,
    ) -> Result<(), BlobURLStoreError> {
        self.fetch_blob_buf(
            done_sender,
            cancellation_listener,
            &id,
            file_token,
            &origin,
            BlobBounds::Unresolved(range),
            response,
        )
    }

    pub fn promote_memory(
        &self,
        id: Uuid,
        blob_buf: BlobBuf,
        set_valid: bool,
        origin: ImmutableOrigin,
    ) {
        self.store.promote_memory(id, blob_buf, set_valid, origin);
    }

    /// Message handler
    pub fn handle(&self, msg: FileManagerThreadMsg) {
        match msg {
            FileManagerThreadMsg::SelectFiles(control_id, file_picker_request, response_sender) => {
                let store = self.store.clone();
                let embedder = self.embedder_proxy.clone();
                spawn_task(async move {
                    let embedder_control_msg = store
                        .select_files(control_id, file_picker_request, embedder)
                        .await;
                    response_sender.send(embedder_control_msg).unwrap();
                });
            },
            FileManagerThreadMsg::ReadFile(sender, id, origin) => {
                self.read_file(sender, id, origin);
            },
            FileManagerThreadMsg::PromoteMemory(id, blob_buf, set_valid, origin) => {
                self.promote_memory(id, blob_buf, set_valid, origin);
            },
            FileManagerThreadMsg::AddSlicedURLEntry(id, rel_pos, sender, origin) => {
                self.store.add_sliced_url_entry(id, rel_pos, sender, origin);
            },
            FileManagerThreadMsg::DecRef(id, origin, sender) => {
                let _ = sender.send(self.store.dec_ref(&id, &origin));
            },
            FileManagerThreadMsg::RevokeBlobURL(id, origin, sender) => {
                let _ = sender.send(self.store.set_blob_url_validity(false, &id, &origin));
            },
            FileManagerThreadMsg::ActivateBlobURL(id, sender, origin) => {
                let _ = sender.send(self.store.set_blob_url_validity(true, &id, &origin));
            },
        }
    }

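    /// Stream the contents of `reader` into `res_body` and to `done_sender`,
    /// chunk by chunk, stopping early if the fetch is cancelled or the end of
    /// the requested `range` is reached.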
    pub fn fetch_file_in_chunks(
        &self,
        done_sender: &mut TokioSender<Data>,
        mut reader: BufReader<File>,
        res_body: ServoArc<Mutex<ResponseBody>>,
        cancellation_listener: Arc<CancellationListener>,
        range: RelativePos,
    ) {
        let done_sender = done_sender.clone();
        spawn_task(async move {
            loop {
                if cancellation_listener.cancelled() {
                    *res_body.lock() = ResponseBody::Done(vec![]);
                    let _ = done_sender.send(Data::Cancelled);
                    return;
                }
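                // Read the next buffered chunk. `length` is the number of
                // bytes to consume from the reader afterwards; it is forced to
                // 0 once the requested range has been fully delivered, which
                // ends the loop below.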
                let length = {
                    let buffer = reader.fill_buf().unwrap().to_vec();
                    let mut buffer_len = buffer.len();
                    if let ResponseBody::Receiving(ref mut body) = *res_body.lock() {
                        let offset = usize::min(
                            {
                                if let Some(end) = range.end {
                                    // HTTP Range requests are specified with closed ranges,
                                    // while Rust uses half-open ranges. We add +1 here so
                                    // we don't skip the last requested byte.
                                    let remaining_bytes =
                                        end as usize - range.start as usize - body.len() + 1;
                                    if remaining_bytes <= FILE_CHUNK_SIZE {
                                        // This is the last chunk so we set buffer
                                        // len to 0 to break the reading loop.
                                        buffer_len = 0;
                                        remaining_bytes
                                    } else {
                                        FILE_CHUNK_SIZE
                                    }
                                } else {
                                    FILE_CHUNK_SIZE
                                }
                            },
                            buffer.len(),
                        );
                        let chunk = &buffer[0..offset];
                        body.extend_from_slice(chunk);
                        let _ = done_sender.send(Data::Payload(chunk.to_vec()));
                    }
                    buffer_len
                };
                if length == 0 {
                    let mut body = res_body.lock();
                    let completed_body = match *body {
                        ResponseBody::Receiving(ref mut body) => std::mem::take(body),
                        _ => vec![],
                    };
                    *body = ResponseBody::Done(completed_body);
                    let _ = done_sender.send(Data::Done);
                    break;
                }
                reader.consume(length);
                yield_now().await
            }
        });
    }

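    /// Look up the backing `FileImpl` for `id`, set the response headers
    /// accordingly, and stream the requested byte range of the blob to
    /// `done_sender`. Sliced entries recurse into their parent entry with
    /// already-resolved bounds.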
    #[expect(clippy::too_many_arguments)]
    fn fetch_blob_buf(
        &self,
        done_sender: &mut TokioSender<Data>,
        cancellation_listener: Arc<CancellationListener>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &ImmutableOrigin,
        bounds: BlobBounds,
        response: &mut Response,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.store.get_impl(id, file_token, origin_in)?;
        // Only a Fetch Blob Range Request carries an unresolved range, and only
        // in that case do we care about the Range header.
        let mut is_range_requested = false;
        match file_impl {
            FileImpl::Memory(buf) => {
                let bounds = match bounds {
                    BlobBounds::Unresolved(range) => {
                        if range.is_some() {
                            is_range_requested = true;
                        }
                        get_range_request_bounds(range, buf.size)
                    },
                    BlobBounds::Resolved(bounds) => bounds,
                };
                let range = bounds
                    .get_final(Some(buf.size))
                    .map_err(|_| BlobURLStoreError::InvalidRange)?;

                let range = range.to_abs_blob_range(buf.size as usize);
                let len = range.len() as u64;
                let content_range = if is_range_requested {
                    ContentRange::bytes(range.start as u64..range.end as u64, buf.size).ok()
                } else {
                    None
                };

                set_headers(
                    &mut response.headers,
                    len,
                    buf.type_string.parse().unwrap_or(mime::TEXT_PLAIN),
                    /* filename */ None,
                    content_range,
                );

                let mut bytes = vec![];
                bytes.extend_from_slice(buf.bytes.index(range));

                let _ = done_sender.send(Data::Payload(bytes));
                let _ = done_sender.send(Data::Done);

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                        Concretely, here we open the file again, and it might no longer
                        have the same underlying state (meta-info plus content) as at the
                        time create_entry was called.
                */

                let file = File::open(&metadata.path)
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                let mut is_range_requested = false;
                let bounds = match bounds {
                    BlobBounds::Unresolved(range) => {
                        if range.is_some() {
                            is_range_requested = true;
                        }
                        get_range_request_bounds(range, metadata.size)
                    },
                    BlobBounds::Resolved(bounds) => bounds,
                };
                let range = bounds
                    .get_final(Some(metadata.size))
                    .map_err(|_| BlobURLStoreError::InvalidRange)?;

                let mut reader = BufReader::with_capacity(FILE_CHUNK_SIZE, file);
                if reader.seek(SeekFrom::Start(range.start as u64)).is_err() {
                    return Err(BlobURLStoreError::External(
                        "Failed to seek to the start of the requested range".into(),
                    ));
                }

                let filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let content_range = if is_range_requested {
                    let abs_range = range.to_abs_blob_range(metadata.size as usize);
                    ContentRange::bytes(abs_range.start as u64..abs_range.end as u64, metadata.size)
                        .ok()
                } else {
                    None
                };
                set_headers(
                    &mut response.headers,
                    metadata.size,
                    mime_guess::from_path(metadata.path)
                        .first()
                        .unwrap_or(mime::TEXT_PLAIN),
                    filename,
                    content_range,
                );

                self.fetch_file_in_chunks(
                    &mut done_sender.clone(),
                    reader,
                    response.body.clone(),
                    cancellation_listener,
                    range,
                );

                Ok(())
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // No need to re-check validity on the recursive call: it has
                // already been checked for the requesting URL if necessary.
                let bounds = RangeRequestBounds::Final(
                    RelativePos::full_range().slice_inner(&inner_rel_pos),
                );
                self.fetch_blob_buf(
                    done_sender,
                    cancellation_listener,
                    &parent_id,
                    file_token,
                    origin_in,
                    BlobBounds::Resolved(bounds),
                    response,
                )
            },
        }
    }
}

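/// Byte bounds of a blob fetch: either a raw `Range` header that still has to
/// be resolved against the blob's size, or bounds that have already been
/// resolved (e.g. by a parent `Sliced` entry).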
enum BlobBounds {
    Unresolved(Option<Range>),
    Resolved(RangeRequestBounds),
}

/// File manager's data store. It maintains a thread-safe mapping
/// from FileID to FileStoreEntry, where each entry might have a different backend implementation.
/// Access to the content is encapsulated as methods of this struct.
struct FileManagerStore {
    entries: RwLock<FxHashMap<Uuid, FileStoreEntry>>,
}

impl FileManagerStore {
    fn new() -> Self {
        FileManagerStore {
            entries: RwLock::new(FxHashMap::default()),
        }
    }

    /// Copy out the file's backend implementation.
    fn get_impl(
        &self,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &ImmutableOrigin,
    ) -> Result<FileImpl, BlobURLStoreError> {
        match self.entries.read().get(id) {
            Some(entry) => {
                if *origin_in != entry.origin {
                    Err(BlobURLStoreError::InvalidOrigin)
                } else {
                    match file_token {
                        FileTokenCheck::NotRequired => Ok(entry.file_impl.clone()),
                        FileTokenCheck::Required(token) => {
                            if entry.outstanding_tokens.contains(token) {
                                return Ok(entry.file_impl.clone());
                            }
                            Err(BlobURLStoreError::InvalidFileID)
                        },
                        FileTokenCheck::ShouldFail => Err(BlobURLStoreError::InvalidFileID),
                    }
                }
            },
            None => Err(BlobURLStoreError::InvalidFileID),
        }
    }

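    /// Drop a previously issued fetch token for `file_id`, and remove the
    /// entry entirely once it has no references, no outstanding tokens and no
    /// valid blob URL left.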
    fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
        if let FileTokenCheck::Required(token) = token {
            let mut entries = self.entries.write();
            if let Some(entry) = entries.get_mut(file_id) {
                entry.outstanding_tokens.remove(token);

                // Check if there are references left.
                let zero_refs = entry.refs.load(Ordering::Acquire) == 0;

                // Check if no other fetch has acquired a token for this file.
                let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                // Check if there is still a blob URL outstanding.
                let valid = entry.is_valid_url.load(Ordering::Acquire);

                // Can we remove this file?
                let do_remove = zero_refs && no_outstanding_tokens && !valid;

                if do_remove {
                    entries.remove(file_id);
                }
            }
        }
    }

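    /// Issue a fetch token for `file_id`. For sliced entries the token is
    /// registered on the parent entry; if the entry is missing or its blob URL
    /// is no longer valid, the check is marked as `ShouldFail`.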
    pub(crate) fn get_token_for_file(&self, file_id: &Uuid) -> FileTokenCheck {
        let mut entries = self.entries.write();
        let parent_id = match entries.get(file_id) {
            Some(entry) => {
                if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                    Some(*parent_id)
                } else {
                    None
                }
            },
            None => return FileTokenCheck::ShouldFail,
        };
        let file_id = match parent_id.as_ref() {
            Some(id) => id,
            None => file_id,
        };
        if let Some(entry) = entries.get_mut(file_id) {
            if !entry.is_valid_url.load(Ordering::Acquire) {
                return FileTokenCheck::ShouldFail;
            }
            let token = Uuid::new_v4();
            entry.outstanding_tokens.insert(token);
            return FileTokenCheck::Required(token);
        }
        FileTokenCheck::ShouldFail
    }

    fn insert(&self, id: Uuid, entry: FileStoreEntry) {
        self.entries.write().insert(id, entry);
    }

    fn remove(&self, id: &Uuid) {
        self.entries.write().remove(id);
    }

    fn inc_ref(&self, id: &Uuid, origin_in: &ImmutableOrigin) -> Result<(), BlobURLStoreError> {
        match self.entries.read().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    entry.refs.fetch_add(1, Ordering::Relaxed);
                    Ok(())
                } else {
                    Err(BlobURLStoreError::InvalidOrigin)
                }
            },
            None => Err(BlobURLStoreError::InvalidFileID),
        }
    }

    fn add_sliced_url_entry(
        &self,
        parent_id: Uuid,
        rel_pos: RelativePos,
        sender: IpcSender<Result<Uuid, BlobURLStoreError>>,
        origin_in: ImmutableOrigin,
    ) {
        match self.inc_ref(&parent_id, &origin_in) {
            Ok(_) => {
                let new_id = Uuid::new_v4();
                self.insert(
                    new_id,
                    FileStoreEntry {
                        origin: origin_in,
                        file_impl: FileImpl::Sliced(parent_id, rel_pos),
                        refs: AtomicUsize::new(1),
                        // Valid here since AddSlicedURLEntry implies URL creation
                        // from a BlobImpl::Sliced
                        is_valid_url: AtomicBool::new(true),
                        outstanding_tokens: Default::default(),
                    },
                );

                // We assume that the returned id will be held by BlobImpl::File
                let _ = sender.send(Ok(new_id));
            },
            Err(e) => {
                let _ = sender.send(Err(e));
            },
        }
    }

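    /// Ask the embedder to show a file picker for `file_picker_request` and
    /// register a `FileStoreEntry` for every selected path. If creating any
    /// entry fails, the already-registered ones are removed again and an empty
    /// selection is returned.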
    async fn select_files(
        &self,
        control_id: EmbedderControlId,
        file_picker_request: FilePickerRequest,
        embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>,
    ) -> EmbedderControlResponse {
        let (sender, receiver) = tokio::sync::oneshot::channel();

        let origin = file_picker_request.origin.clone();
        embedder_proxy.send(NetToEmbedderMsg::SelectFiles(
            control_id,
            file_picker_request,
            sender,
        ));

        let paths = match receiver.await {
            Ok(Some(result)) => result,
            Ok(None) => {
                return EmbedderControlResponse::FilePicker(None);
            },
            Err(error) => {
                warn!("Failed to receive files from embedder ({:?}).", error);
                return EmbedderControlResponse::FilePicker(None);
            },
        };

        let mut failed = false;
        let files: Vec<_> = paths
            .into_iter()
            .filter_map(|path| match self.create_entry(&path, origin.clone()) {
                Ok(entry) => Some(entry),
                Err(error) => {
                    failed = true;
                    warn!("Failed to create entry for selected file: {error:?}");
                    None
                },
            })
            .collect();

        // From <https://w3c.github.io/webdriver/#dfn-element-send-keys>:
        //
        // > Step 8.5: Verify that each file given by the user exists. If any do not,
        // > return error with error code invalid argument.
        //
        // WebDriver expects that if any of the files isn't found we don't select any files.
        if failed {
            for file in files.iter() {
                self.remove(&file.id);
            }
            return EmbedderControlResponse::FilePicker(Some(Vec::new()));
        }

        EmbedderControlResponse::FilePicker(Some(files))
    }

    fn create_entry(
        &self,
        file_path: &Path,
        origin: ImmutableOrigin,
    ) -> Result<SelectedFile, FileManagerThreadError> {
        use net_traits::filemanager_thread::FileManagerThreadError::FileSystemError;

        let file = File::open(file_path).map_err(|e| FileSystemError(e.to_string()))?;
        let metadata = file
            .metadata()
            .map_err(|e| FileSystemError(e.to_string()))?;
        let modified = metadata
            .modified()
            .map_err(|e| FileSystemError(e.to_string()))?;
        let file_size = metadata.len();
        let file_name = file_path
            .file_name()
            .ok_or(FileSystemError("Invalid filepath".to_string()))?;

        let file_impl = FileImpl::MetaDataOnly(FileMetaData {
            path: file_path.to_path_buf(),
            size: file_size,
        });

        let id = Uuid::new_v4();

        self.insert(
            id,
            FileStoreEntry {
                origin,
                file_impl,
                refs: AtomicUsize::new(1),
                // Invalid here since create_entry is called by file selection
                is_valid_url: AtomicBool::new(false),
                outstanding_tokens: Default::default(),
            },
        );

        let filename_path = Path::new(file_name);
        let type_string = match mime_guess::from_path(filename_path).first() {
            Some(x) => format!("{}", x),
            None => "".to_string(),
        };

        Ok(SelectedFile {
            id,
            filename: filename_path.to_path_buf(),
            modified,
            size: file_size,
            type_string,
        })
    }

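    /// Read the data backing `id`, restricted to `rel_pos`, and stream it over
    /// IPC as `ReadFileProgress` messages. Sliced entries recurse into their
    /// parent entry with the composed relative position.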
    async fn get_blob_buf(
        &self,
        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &ImmutableOrigin,
        rel_pos: RelativePos,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.get_impl(id, file_token, origin_in)?;
        match file_impl {
            FileImpl::Memory(buf) => {
                let range = rel_pos.to_abs_range(buf.size as usize);
                let buf = BlobBuf {
                    filename: None,
                    type_string: buf.type_string,
                    size: range.len() as u64,
                    bytes: buf.bytes.index(range).to_vec(),
                };

                let _ = sender.send(Ok(ReadFileProgress::Meta(buf)));
                let _ = sender.send(Ok(ReadFileProgress::EOF));

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                        Concretely, here we open the file again, and it might no longer
                        have the same underlying state (meta-info plus content) as at the
                        time create_entry was called.
                */

                let opt_filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let mime = mime_guess::from_path(metadata.path.clone()).first();
                let range = rel_pos.to_abs_range(metadata.size as usize);

                let mut file = tokio::fs::File::open(&metadata.path)
                    .await
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                let seeked_start = file
                    .seek(SeekFrom::Start(range.start as u64))
                    .await
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;

                if seeked_start == (range.start as u64) {
                    let type_string = match mime {
                        Some(x) => format!("{}", x),
                        None => "".to_string(),
                    };

                    read_file_in_chunks(sender, file, range.len(), opt_filename, type_string).await;
                    Ok(())
                } else {
                    Err(BlobURLStoreError::InvalidEntry)
                }
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // No need to re-check validity on the recursive call: it has
                // already been checked for the requesting URL if necessary.
                Box::pin(self.get_blob_buf(
                    sender,
                    &parent_id,
                    file_token,
                    origin_in,
                    rel_pos.slice_inner(&inner_rel_pos),
                ))
                .await
            },
        }
    }

    // Convenience wrapper over get_blob_buf
    async fn try_read_file(
        &self,
        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
        id: Uuid,
        origin_in: ImmutableOrigin,
    ) -> Result<(), BlobURLStoreError> {
        self.get_blob_buf(
            sender,
            &id,
            &FileTokenCheck::NotRequired,
            &origin_in,
            RelativePos::full_range(),
        )
        .await
    }

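    /// Decrement the reference count of the entry for `id`. When the last
    /// reference is dropped and no valid blob URL or outstanding fetch token
    /// keeps the entry alive, remove it and recursively release the parent of
    /// a sliced entry.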
    fn dec_ref(&self, id: &Uuid, origin_in: &ImmutableOrigin) -> Result<(), BlobURLStoreError> {
        let (do_remove, opt_parent_id) = match self.entries.read().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    let old_refs = entry.refs.fetch_sub(1, Ordering::Release);

                    if old_refs > 1 {
                        // Not the last reference, so there is no need to touch the parent.
                        (false, None)
                    } else {
                        // This was the last reference; if the entry is a slice,
                        // dec_ref on the parent later if necessary.
                        let is_valid = entry.is_valid_url.load(Ordering::Acquire);

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = !is_valid && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id))
                        } else {
                            (do_remove, None)
                        }
                    }
                } else {
                    return Err(BlobURLStoreError::InvalidOrigin);
                }
            },
            None => return Err(BlobURLStoreError::InvalidFileID),
        };

        // Remove the entry if its last reference is gone and it is
        // not part of a valid Blob URL.
        if do_remove {
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }

        Ok(())
    }

    fn promote_memory(
        &self,
        id: Uuid,
        blob_buf: BlobBuf,
        set_valid: bool,
        origin: ImmutableOrigin,
    ) {
        self.insert(
            id,
            FileStoreEntry {
                origin,
                file_impl: FileImpl::Memory(blob_buf),
                refs: AtomicUsize::new(1),
                is_valid_url: AtomicBool::new(set_valid),
                outstanding_tokens: Default::default(),
            },
        );
    }

    fn set_blob_url_validity(
        &self,
        validity: bool,
        id: &Uuid,
        origin_in: &ImmutableOrigin,
    ) -> Result<(), BlobURLStoreError> {
        let (do_remove, opt_parent_id, res) = match self.entries.read().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    entry.is_valid_url.store(validity, Ordering::Release);

                    if !validity {
                        // Check if it is the last possible reference
                        // since refs only accounts for blob id holders
                        // and store entry id holders
                        let zero_refs = entry.refs.load(Ordering::Acquire) == 0;

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = zero_refs && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id), Ok(()))
                        } else {
                            (do_remove, None, Ok(()))
                        }
                    } else {
                        (false, None, Ok(()))
                    }
                } else {
                    (false, None, Err(BlobURLStoreError::InvalidOrigin))
                }
            },
            None => (false, None, Err(BlobURLStoreError::InvalidFileID)),
        };

        if do_remove {
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }
        res
    }
}

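/// Stream the contents of `file` over IPC: first a `Meta` message carrying the
/// blob metadata together with the first chunk, then `Partial` messages of up
/// to `FILE_CHUNK_SIZE` bytes, and finally `EOF`.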
async fn read_file_in_chunks(
    sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
    mut file: tokio::fs::File,
    size: usize,
    opt_filename: Option<String>,
    type_string: String,
) {
    // First chunk
    let mut buf = vec![0; FILE_CHUNK_SIZE];
    match file.read(&mut buf).await {
        Ok(n) => {
            buf.truncate(n);
            let blob_buf = BlobBuf {
                filename: opt_filename,
                type_string,
                size: size as u64,
                bytes: buf,
            };
            let _ = sender.send(Ok(ReadFileProgress::Meta(blob_buf)));
        },
        Err(e) => {
            let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
            return;
        },
    }

    // Send the remaining chunks
    loop {
        let mut buf = vec![0; FILE_CHUNK_SIZE];
        match file.read(&mut buf).await {
            Ok(0) => {
                let _ = sender.send(Ok(ReadFileProgress::EOF));
                return;
            },
            Ok(n) => {
                buf.truncate(n);
                let _ = sender.send(Ok(ReadFileProgress::Partial(buf)));
            },
            Err(e) => {
                let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
                return;
            },
        }
    }
}

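/// Fill in the response headers for a file/blob response: `Content-Length`,
/// an optional `Content-Range`, `Content-Type`, and, when a filename is
/// available, a `Content-Disposition` header carrying it.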
fn set_headers(
    headers: &mut HeaderMap,
    content_length: u64,
    mime: Mime,
    filename: Option<String>,
    content_range: Option<ContentRange>,
) {
    headers.typed_insert(ContentLength(content_length));
    if let Some(content_range) = content_range {
        headers.typed_insert(content_range);
    }
    headers.typed_insert(ContentType::from(mime.clone()));
    let name = match filename {
        Some(name) => name,
        None => return,
    };
    let charset = mime.get_param(mime::CHARSET);
    let charset = charset
        .map(|c| c.as_ref().into())
        .unwrap_or("us-ascii".to_owned());
    // TODO(eijebong): Replace this once the typed header is there
    //                 https://github.com/hyperium/headers/issues/8
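    // Produces e.g. `inline; filename="photo.png"` when the charset is UTF-8;
    // for other charsets the name is percent-encoded via the `filename*=` form.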
    headers.insert(
        header::CONTENT_DISPOSITION,
        HeaderValue::from_bytes(
            format!(
                "inline; {}",
                if charset.to_lowercase() == "utf-8" {
                    format!(
                        "filename=\"{}\"",
                        String::from_utf8(name.as_bytes().into()).unwrap()
                    )
                } else {
                    format!(
                        "filename*=\"{}\"''{}",
                        charset,
                        http_percent_encode(name.as_bytes())
                    )
                }
            )
            .as_bytes(),
        )
        .unwrap(),
    );
}