// net/filemanager_thread.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5use std::fs::File;
6use std::io::{BufRead, BufReader, Seek, SeekFrom};
7use std::ops::Index;
8use std::path::{Path, PathBuf};
9use std::sync::Arc;
10use std::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};
11
12use embedder_traits::{
13    EmbedderControlId, EmbedderControlResponse, FilePickerRequest, GenericEmbedderProxy,
14    SelectedFile,
15};
16use headers::{ContentLength, ContentRange, ContentType, HeaderMap, HeaderMapExt, Range};
17use http::header::{self, HeaderValue};
18use ipc_channel::ipc::IpcSender;
19use log::warn;
20use mime::{self, Mime};
21use net_traits::blob_url_store::{BlobBuf, BlobTokenCommunicator, BlobURLStoreError};
22use net_traits::filemanager_thread::{
23    FileManagerResult, FileManagerThreadError, FileManagerThreadMsg, FileTokenCheck,
24    GetTokenForFileReply, ReadFileProgress, RelativePos,
25};
26use net_traits::http_percent_encode;
27use net_traits::response::{Response, ResponseBody};
28use parking_lot::{Mutex, RwLock};
29use rustc_hash::{FxHashMap, FxHashSet};
30use servo_arc::Arc as ServoArc;
31use servo_base::generic_channel::GenericSender;
32use servo_url::ImmutableOrigin;
33use tokio::io::{AsyncReadExt, AsyncSeekExt};
34use tokio::sync::mpsc::UnboundedSender as TokioSender;
35use tokio::task::yield_now;
36use uuid::Uuid;
37
38use crate::async_runtime::spawn_task;
39use crate::embedder::NetToEmbedderMsg;
40use crate::fetch::methods::{CancellationListener, Data, RangeRequestBounds};
41use crate::protocols::get_range_request_bounds;
42
43pub const FILE_CHUNK_SIZE: usize = 32768; // 32 KB
44
/// FileManagerStore's entry
struct FileStoreEntry {
    /// Origin of the entry's "creator"
    origin: ImmutableOrigin,
    /// Backend implementation
    file_impl: FileImpl,
    /// Number of live holders of the FileID that is used to
    /// index this entry in `FileManagerStore`.
    /// Reference holders include a FileStoreEntry (a sliced child
    /// keeps its parent alive) or a script-side File-based Blob.
    refs: AtomicUsize,
    /// UUIDs only become valid blob URIs when explicitly requested
    /// by the user with createObjectURL. Validity can be revoked as well.
    /// (The UUID is the one that maps to this entry in `FileManagerStore`)
    is_valid_url: AtomicBool,
    /// UUIDs of fetch instances that acquired an interest in this file,
    /// when the url was still valid. While any token is outstanding the
    /// entry is kept alive even after its refcount reaches zero.
    outstanding_tokens: FxHashSet<Uuid>,
}
64
/// Location and size of an on-disk file backing a store entry.
#[derive(Clone)]
struct FileMetaData {
    // Path of the file on disk.
    path: PathBuf,
    // Size in bytes, captured when the entry was created.
    size: u64,
}
70
/// File backend implementation
#[derive(Clone)]
enum FileImpl {
    /// Metadata of on-disk file; content is read from disk on demand.
    MetaDataOnly(FileMetaData),
    /// In-memory Blob buffer object
    Memory(BlobBuf),
    /// A reference to parent entry in `FileManagerStore`,
    /// representing a sliced version of the parent entry data
    Sliced(Uuid, RelativePos),
}
82
/// Cheaply-cloneable front end of the file manager: message handling,
/// file reading for Fetch, and blob URL/token bookkeeping all go through here.
#[derive(Clone)]
pub struct FileManager {
    /// Channel used to forward requests (e.g. file selection) to the embedder.
    embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>,
    /// Shared backing store mapping UUIDs to file/blob entries.
    store: Arc<FileManagerStore>,
    /// Senders handed to script for revoking/refreshing blob access tokens.
    blob_token_communicator: Arc<Mutex<BlobTokenCommunicator>>,
}
89
impl FileManager {
    /// Create a file manager backed by a fresh, empty store.
    pub fn new(
        embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>,
        blob_token_communicator: Arc<Mutex<BlobTokenCommunicator>>,
    ) -> FileManager {
        FileManager {
            embedder_proxy,
            store: Arc::new(FileManagerStore::new()),
            blob_token_communicator,
        }
    }

    /// Spawn a task that streams the entry `id` to `sender` as
    /// `ReadFileProgress` messages; a store error is forwarded to the
    /// same sender instead of being dropped.
    fn read_file(
        &self,
        sender: IpcSender<FileManagerResult<ReadFileProgress>>,
        id: Uuid,
        origin: ImmutableOrigin,
    ) {
        let store = self.store.clone();
        spawn_task(async move {
            if let Err(e) = store.try_read_file(&sender, id, origin).await {
                let _ = sender.send(Err(FileManagerThreadError::BlobURLStoreError(e)));
            }
        });
    }

    /// Delegate token issuance to the store (see
    /// `FileManagerStore::get_token_for_file`).
    pub(crate) fn get_token_for_file(&self, file_id: &Uuid, allow_revoked: bool) -> FileTokenCheck {
        self.store.get_token_for_file(file_id, allow_revoked)
    }

    /// Delegate token invalidation to the store; may drop the entry when
    /// it was the last thing keeping it alive.
    pub(crate) fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
        self.store.invalidate_token(token, file_id);
    }

    /// Read a file for the Fetch implementation.
    /// It gets the required headers synchronously and reads the actual content
    /// in a separate thread.
    #[expect(clippy::too_many_arguments)]
    pub(crate) fn fetch_file(
        &self,
        done_sender: &mut TokioSender<Data>,
        cancellation_listener: Arc<CancellationListener>,
        id: Uuid,
        file_token: &FileTokenCheck,
        origin: ImmutableOrigin,
        response: &mut Response,
        range: Option<Range>,
    ) -> Result<(), BlobURLStoreError> {
        // The Range header (if any) is resolved against the blob size
        // inside fetch_blob_buf, hence `Unresolved` here.
        self.fetch_blob_buf(
            done_sender,
            cancellation_listener,
            &id,
            file_token,
            &origin,
            BlobBounds::Unresolved(range),
            response,
        )
    }

    /// Insert an in-memory blob into the store under `id`; `set_valid`
    /// controls whether the id is immediately usable as a blob URL.
    pub fn promote_memory(
        &self,
        id: Uuid,
        blob_buf: BlobBuf,
        set_valid: bool,
        origin: ImmutableOrigin,
    ) {
        self.store.promote_memory(id, blob_buf, set_valid, origin);
    }

    /// Message handler: dispatch one `FileManagerThreadMsg` to the store.
    pub fn handle(&self, msg: FileManagerThreadMsg) {
        match msg {
            FileManagerThreadMsg::SelectFiles(control_id, file_picker_request, response_sender) => {
                // File selection goes through the embedder, which may block
                // on user interaction, so it runs on a spawned task.
                let store = self.store.clone();
                let embedder = self.embedder_proxy.clone();
                spawn_task(async move {
                    let embedder_control_msg = store
                        .select_files(control_id, file_picker_request, embedder)
                        .await;
                    // NOTE(review): unwrap panics if the requester hung up;
                    // presumably that cannot happen here — confirm.
                    response_sender.send(embedder_control_msg).unwrap();
                });
            },
            FileManagerThreadMsg::ReadFile(sender, id, origin) => {
                self.read_file(sender, id, origin);
            },
            FileManagerThreadMsg::PromoteMemory(id, blob_buf, set_valid, origin) => {
                self.promote_memory(id, blob_buf, set_valid, origin);
            },
            FileManagerThreadMsg::AddSlicedURLEntry(id, rel_pos, sender, origin) => {
                self.store.add_sliced_url_entry(id, rel_pos, sender, origin);
            },
            FileManagerThreadMsg::DecRef(id, origin, sender) => {
                let _ = sender.send(self.store.dec_ref(&id, &origin));
            },
            FileManagerThreadMsg::RevokeBlobURL(id, origin, sender) => {
                let _ = sender.send(self.store.set_blob_url_validity(false, &id, &origin));
            },
            FileManagerThreadMsg::ActivateBlobURL(id, sender, origin) => {
                let _ = sender.send(self.store.set_blob_url_validity(true, &id, &origin));
            },
            FileManagerThreadMsg::GetTokenForFile(id, _origin, sender) => {
                // Only a `Required` check carries a token; revoked/missing
                // entries yield `None` here.
                let token = match self.get_token_for_file(&id, false) {
                    FileTokenCheck::Required(token) => Some(token),
                    _ => None,
                };

                let communicator = self.blob_token_communicator.lock();
                let _ = sender.send(GetTokenForFileReply {
                    token,
                    revoke_sender: communicator.revoke_sender.clone(),
                    refresh_sender: communicator.refresh_token_sender.clone(),
                });
            },
            FileManagerThreadMsg::RevokeTokenForFile(token, id) => {
                self.invalidate_token(&FileTokenCheck::Required(token), &id);
            },
        }
    }

    /// Spawn a task that copies `range` of the already-seeked `reader` into
    /// `res_body` and `done_sender`, one buffered chunk per iteration,
    /// yielding to the runtime between chunks. Stops early (sending
    /// `Data::Cancelled`) if the fetch is cancelled.
    pub fn fetch_file_in_chunks(
        &self,
        done_sender: &mut TokioSender<Data>,
        mut reader: BufReader<File>,
        res_body: ServoArc<Mutex<ResponseBody>>,
        cancellation_listener: Arc<CancellationListener>,
        range: RelativePos,
    ) {
        let done_sender = done_sender.clone();
        spawn_task(async move {
            loop {
                if cancellation_listener.cancelled() {
                    *res_body.lock() = ResponseBody::Done(vec![]);
                    let _ = done_sender.send(Data::Cancelled);
                    return;
                }
                // `length` is how much of the reader's buffer we consumed;
                // 0 signals the end of the requested range (or EOF).
                let length = {
                    // NOTE(review): fill_buf().unwrap() panics on an I/O
                    // error mid-read (e.g. device failure) — consider
                    // surfacing it through the body instead; confirm intent.
                    let buffer = reader.fill_buf().unwrap().to_vec();
                    let mut buffer_len = buffer.len();
                    if let ResponseBody::Receiving(ref mut body) = *res_body.lock() {
                        let offset = usize::min(
                            {
                                if let Some(end) = range.end {
                                    // HTTP Range requests are specified with closed ranges,
                                    // while Rust uses half-open ranges. We add +1 here so
                                    // we don't skip the last requested byte.
                                    let remaining_bytes =
                                        end as usize - range.start as usize - body.len() + 1;
                                    if remaining_bytes <= FILE_CHUNK_SIZE {
                                        // This is the last chunk so we set buffer
                                        // len to 0 to break the reading loop.
                                        buffer_len = 0;
                                        remaining_bytes
                                    } else {
                                        FILE_CHUNK_SIZE
                                    }
                                } else {
                                    FILE_CHUNK_SIZE
                                }
                            },
                            buffer.len(),
                        );
                        let chunk = &buffer[0..offset];
                        body.extend_from_slice(chunk);
                        let _ = done_sender.send(Data::Payload(chunk.to_vec()));
                    }
                    buffer_len
                };
                if length == 0 {
                    // Range exhausted (or file EOF): seal the body and
                    // notify the fetch pipeline that we are done.
                    let mut body = res_body.lock();
                    let completed_body = match *body {
                        ResponseBody::Receiving(ref mut body) => std::mem::take(body),
                        _ => vec![],
                    };
                    *body = ResponseBody::Done(completed_body);
                    let _ = done_sender.send(Data::Done);
                    break;
                }
                reader.consume(length);
                // Cooperatively yield so one large file read cannot
                // starve other tasks on the runtime.
                yield_now().await
            }
        });
    }

    /// Resolve `bounds` against the entry `id`, set the response headers
    /// (length, type, filename, Content-Range) and stream the selected
    /// bytes to `done_sender`. Sliced entries recurse into their parent
    /// with already-resolved bounds.
    #[expect(clippy::too_many_arguments)]
    fn fetch_blob_buf(
        &self,
        done_sender: &mut TokioSender<Data>,
        cancellation_listener: Arc<CancellationListener>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &ImmutableOrigin,
        bounds: BlobBounds,
        response: &mut Response,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.store.get_impl(id, file_token, origin_in)?;
        /*
           Only Fetch Blob Range Request would have unresolved range, and only in that case we care about range header.
        */
        let mut is_range_requested = false;
        match file_impl {
            FileImpl::Memory(buf) => {
                let bounds = match bounds {
                    BlobBounds::Unresolved(range) => {
                        if range.is_some() {
                            is_range_requested = true;
                        }
                        get_range_request_bounds(range, buf.size)
                    },
                    BlobBounds::Resolved(bounds) => bounds,
                };
                let range = bounds
                    .get_final(Some(buf.size))
                    .map_err(|_| BlobURLStoreError::InvalidRange)?;

                let range = range.to_abs_blob_range(buf.size as usize);
                let len = range.len() as u64;
                // Content-Range is only emitted when the caller actually
                // sent a Range header.
                let content_range = if is_range_requested {
                    ContentRange::bytes(range.start as u64..range.end as u64, buf.size).ok()
                } else {
                    None
                };

                set_headers(
                    &mut response.headers,
                    len,
                    buf.type_string.parse().unwrap_or(mime::TEXT_PLAIN),
                    /* filename */ None,
                    content_range,
                );

                // In-memory blobs are small enough to send in one payload.
                let mut bytes = vec![];
                bytes.extend_from_slice(buf.bytes.index(range));

                let _ = done_sender.send(Data::Payload(bytes));
                let _ = done_sender.send(Data::Done);

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                        Concretely, here we create another file, and this file might not
                        has the same underlying file state (meta-info plus content) as the time
                        create_entry is called.
                */

                let file = File::open(&metadata.path)
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                // Shadows the outer flag; only this arm's value is used below.
                let mut is_range_requested = false;
                let bounds = match bounds {
                    BlobBounds::Unresolved(range) => {
                        if range.is_some() {
                            is_range_requested = true;
                        }
                        get_range_request_bounds(range, metadata.size)
                    },
                    BlobBounds::Resolved(bounds) => bounds,
                };
                let range = bounds
                    .get_final(Some(metadata.size))
                    .map_err(|_| BlobURLStoreError::InvalidRange)?;

                let mut reader = BufReader::with_capacity(FILE_CHUNK_SIZE, file);
                if reader.seek(SeekFrom::Start(range.start as u64)).is_err() {
                    // NOTE(review): this error text looks unrelated to a
                    // seek failure — possibly copy-pasted; confirm.
                    return Err(BlobURLStoreError::External(
                        "Unexpected method for blob".into(),
                    ));
                }

                let filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let content_range = if is_range_requested {
                    let abs_range = range.to_abs_blob_range(metadata.size as usize);
                    ContentRange::bytes(abs_range.start as u64..abs_range.end as u64, metadata.size)
                        .ok()
                } else {
                    None
                };
                set_headers(
                    &mut response.headers,
                    metadata.size,
                    mime_guess::from_path(metadata.path)
                        .first()
                        .unwrap_or(mime::TEXT_PLAIN),
                    filename,
                    content_range,
                );

                // The actual disk content is streamed asynchronously.
                self.fetch_file_in_chunks(
                    &mut done_sender.clone(),
                    reader,
                    response.body.clone(),
                    cancellation_listener,
                    range,
                );

                Ok(())
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // Next time we don't need to check validity since
                // we have already done that for requesting URL if necessary.
                let bounds = RangeRequestBounds::Final(
                    RelativePos::full_range().slice_inner(&inner_rel_pos),
                );
                self.fetch_blob_buf(
                    done_sender,
                    cancellation_listener,
                    &parent_id,
                    file_token,
                    origin_in,
                    BlobBounds::Resolved(bounds),
                    response,
                )
            },
        }
    }
}
410
/// Byte bounds requested for a blob read.
enum BlobBounds {
    /// An optional HTTP `Range` header that still has to be resolved
    /// against the blob's size (Fetch blob range requests).
    Unresolved(Option<Range>),
    /// Bounds that have already been resolved to concrete positions.
    Resolved(RangeRequestBounds),
}
415
/// File manager's data store. It maintains a thread-safe mapping
/// from FileID to FileStoreEntry which might have different backend implementation.
/// Access to the content is encapsulated as methods of this struct.
struct FileManagerStore {
    /// UUID -> entry map, guarded by a reader-writer lock.
    entries: RwLock<FxHashMap<Uuid, FileStoreEntry>>,
}
422
423impl FileManagerStore {
424    fn new() -> Self {
425        FileManagerStore {
426            entries: RwLock::new(FxHashMap::default()),
427        }
428    }
429
430    /// Copy out the file backend implementation content
431    fn get_impl(
432        &self,
433        id: &Uuid,
434        file_token: &FileTokenCheck,
435        origin_in: &ImmutableOrigin,
436    ) -> Result<FileImpl, BlobURLStoreError> {
437        match self.entries.read().get(id) {
438            Some(entry) => {
439                if *origin_in != entry.origin {
440                    Err(BlobURLStoreError::InvalidOrigin)
441                } else {
442                    match file_token {
443                        FileTokenCheck::NotRequired => Ok(entry.file_impl.clone()),
444                        FileTokenCheck::Required(token) => {
445                            if entry.outstanding_tokens.contains(token) {
446                                return Ok(entry.file_impl.clone());
447                            }
448                            Err(BlobURLStoreError::InvalidFileID)
449                        },
450                        FileTokenCheck::ShouldFail => Err(BlobURLStoreError::InvalidFileID),
451                    }
452                }
453            },
454            None => Err(BlobURLStoreError::InvalidFileID),
455        }
456    }
457
458    fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
459        if let FileTokenCheck::Required(token) = token {
460            let mut entries = self.entries.write();
461            if let Some(entry) = entries.get_mut(file_id) {
462                entry.outstanding_tokens.remove(token);
463
464                // Check if there are references left.
465                let zero_refs = entry.refs.load(Ordering::Acquire) == 0;
466
467                // Check if no other fetch has acquired a token for this file.
468                let no_outstanding_tokens = entry.outstanding_tokens.is_empty();
469
470                // Check if there is still a blob URL outstanding.
471                let valid = entry.is_valid_url.load(Ordering::Acquire);
472
473                // Can we remove this file?
474                let do_remove = zero_refs && no_outstanding_tokens && !valid;
475
476                if do_remove {
477                    entries.remove(file_id);
478                }
479            }
480        }
481    }
482
483    pub(crate) fn get_token_for_file(&self, file_id: &Uuid, allow_revoked: bool) -> FileTokenCheck {
484        let mut entries = self.entries.write();
485        let parent_id = match entries.get(file_id) {
486            Some(entry) => {
487                if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
488                    Some(*parent_id)
489                } else {
490                    None
491                }
492            },
493            None => return FileTokenCheck::ShouldFail,
494        };
495        let file_id = parent_id.as_ref().unwrap_or(file_id);
496
497        if let Some(entry) = entries.get_mut(file_id) {
498            if !allow_revoked && !entry.is_valid_url.load(Ordering::Acquire) {
499                log::warn!("Refusing to grant token for revoked blob url: {file_id:?}");
500                return FileTokenCheck::ShouldFail;
501            }
502            let token = Uuid::new_v4();
503            entry.outstanding_tokens.insert(token);
504            return FileTokenCheck::Required(token);
505        }
506        FileTokenCheck::ShouldFail
507    }
508
509    fn insert(&self, id: Uuid, entry: FileStoreEntry) {
510        self.entries.write().insert(id, entry);
511    }
512
513    fn remove(&self, id: &Uuid) {
514        self.entries.write().remove(id);
515    }
516
517    fn inc_ref(&self, id: &Uuid, origin_in: &ImmutableOrigin) -> Result<(), BlobURLStoreError> {
518        match self.entries.read().get(id) {
519            Some(entry) => {
520                if entry.origin == *origin_in {
521                    entry.refs.fetch_add(1, Ordering::Relaxed);
522                    Ok(())
523                } else {
524                    Err(BlobURLStoreError::InvalidOrigin)
525                }
526            },
527            None => Err(BlobURLStoreError::InvalidFileID),
528        }
529    }
530
531    fn add_sliced_url_entry(
532        &self,
533        parent_id: Uuid,
534        rel_pos: RelativePos,
535        sender: GenericSender<Result<Uuid, BlobURLStoreError>>,
536        origin_in: ImmutableOrigin,
537    ) {
538        match self.inc_ref(&parent_id, &origin_in) {
539            Ok(_) => {
540                let new_id = Uuid::new_v4();
541                self.insert(
542                    new_id,
543                    FileStoreEntry {
544                        origin: origin_in,
545                        file_impl: FileImpl::Sliced(parent_id, rel_pos),
546                        refs: AtomicUsize::new(1),
547                        // Valid here since AddSlicedURLEntry implies URL creation
548                        // from a BlobImpl::Sliced
549                        is_valid_url: AtomicBool::new(true),
550                        outstanding_tokens: Default::default(),
551                    },
552                );
553
554                // We assume that the returned id will be held by BlobImpl::File
555                let _ = sender.send(Ok(new_id));
556            },
557            Err(e) => {
558                let _ = sender.send(Err(e));
559            },
560        }
561    }
562
563    async fn select_files(
564        &self,
565        control_id: EmbedderControlId,
566        file_picker_request: FilePickerRequest,
567        embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>,
568    ) -> EmbedderControlResponse {
569        let (sender, receiver) = tokio::sync::oneshot::channel();
570
571        let origin = file_picker_request.origin.clone();
572        embedder_proxy.send(NetToEmbedderMsg::SelectFiles(
573            control_id,
574            file_picker_request,
575            sender,
576        ));
577
578        let paths = match receiver.await {
579            Ok(Some(result)) => result,
580            Ok(None) => {
581                return EmbedderControlResponse::FilePicker(None);
582            },
583            Err(error) => {
584                warn!("Failed to receive files from embedder ({:?}).", error);
585                return EmbedderControlResponse::FilePicker(None);
586            },
587        };
588
589        let mut failed = false;
590        let files: Vec<_> = paths
591            .into_iter()
592            .filter_map(|path| match self.create_entry(&path, origin.clone()) {
593                Ok(entry) => Some(entry),
594                Err(error) => {
595                    failed = true;
596                    warn!("Failed to create entry for selected file: {error:?}");
597                    None
598                },
599            })
600            .collect();
601
602        // From <https://w3c.github.io/webdriver/#dfn-element-send-keys>:
603        //
604        // > Step 8.5: Verify that each file given by the user exists. If any do not,
605        // > return error with error code invalid argument.
606        //
607        // WebDriver expects that if any of the files isn't found we don't select any files.
608        if failed {
609            for file in files.iter() {
610                self.remove(&file.id);
611            }
612            return EmbedderControlResponse::FilePicker(Some(Vec::new()));
613        }
614
615        EmbedderControlResponse::FilePicker(Some(files))
616    }
617
618    fn create_entry(
619        &self,
620        file_path: &Path,
621        origin: ImmutableOrigin,
622    ) -> Result<SelectedFile, FileManagerThreadError> {
623        use net_traits::filemanager_thread::FileManagerThreadError::FileSystemError;
624
625        let file = File::open(file_path).map_err(|e| FileSystemError(e.to_string()))?;
626        let metadata = file
627            .metadata()
628            .map_err(|e| FileSystemError(e.to_string()))?;
629        let modified = metadata
630            .modified()
631            .map_err(|e| FileSystemError(e.to_string()))?;
632        let file_size = metadata.len();
633        let file_name = file_path
634            .file_name()
635            .ok_or(FileSystemError("Invalid filepath".to_string()))?;
636
637        let file_impl = FileImpl::MetaDataOnly(FileMetaData {
638            path: file_path.to_path_buf(),
639            size: file_size,
640        });
641
642        let id = Uuid::new_v4();
643
644        self.insert(
645            id,
646            FileStoreEntry {
647                origin,
648                file_impl,
649                refs: AtomicUsize::new(1),
650                // Invalid here since create_entry is called by file selection
651                is_valid_url: AtomicBool::new(false),
652                outstanding_tokens: Default::default(),
653            },
654        );
655
656        let filename_path = Path::new(file_name);
657        let type_string = match mime_guess::from_path(filename_path).first() {
658            Some(x) => format!("{}", x),
659            None => "".to_string(),
660        };
661
662        Ok(SelectedFile {
663            id,
664            filename: filename_path.to_path_buf(),
665            modified,
666            size: file_size,
667            type_string,
668        })
669    }
670
    /// Stream the content of entry `id`, restricted to `rel_pos`, to
    /// `sender` as `ReadFileProgress` messages (`Meta` with the first
    /// payload, then `PartialPayload`s, then `EOF` — see
    /// `read_file_in_chunks`). Sliced entries recurse into their parent
    /// with a composed position.
    async fn get_blob_buf(
        &self,
        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &ImmutableOrigin,
        rel_pos: RelativePos,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.get_impl(id, file_token, origin_in)?;
        match file_impl {
            FileImpl::Memory(buf) => {
                // In-memory blobs are sent in a single Meta message.
                let range = rel_pos.to_abs_range(buf.size as usize);
                let buf = BlobBuf {
                    filename: None,
                    type_string: buf.type_string,
                    size: range.len() as u64,
                    bytes: buf.bytes.index(range).to_vec(),
                };

                let _ = sender.send(Ok(ReadFileProgress::Meta(buf)));
                let _ = sender.send(Ok(ReadFileProgress::EOF));

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                        Concretely, here we create another file, and this file might not
                        has the same underlying file state (meta-info plus content) as the time
                        create_entry is called.
                */

                let opt_filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let mime = mime_guess::from_path(metadata.path.clone()).first();
                let range = rel_pos.to_abs_range(metadata.size as usize);

                // Async (tokio) file I/O here, unlike the sync reads used
                // by the fetch path.
                let mut file = tokio::fs::File::open(&metadata.path)
                    .await
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                let seeked_start = file
                    .seek(SeekFrom::Start(range.start as u64))
                    .await
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;

                // A short seek means the file is smaller than the entry's
                // recorded size; treat that as an invalid entry.
                if seeked_start == (range.start as u64) {
                    let type_string = match mime {
                        Some(x) => format!("{}", x),
                        None => "".to_string(),
                    };

                    read_file_in_chunks(sender, file, range.len(), opt_filename, type_string).await;
                    Ok(())
                } else {
                    Err(BlobURLStoreError::InvalidEntry)
                }
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // Next time we don't need to check validity since
                // we have already done that for requesting URL if necessary.
                // Box::pin is required for async recursion.
                Box::pin(self.get_blob_buf(
                    sender,
                    &parent_id,
                    file_token,
                    origin_in,
                    rel_pos.slice_inner(&inner_rel_pos),
                ))
                .await
            },
        }
    }
745
746    // Convenient wrapper over get_blob_buf
747    async fn try_read_file(
748        &self,
749        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
750        id: Uuid,
751        origin_in: ImmutableOrigin,
752    ) -> Result<(), BlobURLStoreError> {
753        self.get_blob_buf(
754            sender,
755            &id,
756            &FileTokenCheck::NotRequired,
757            &origin_in,
758            RelativePos::full_range(),
759        )
760        .await
761    }
762
    /// Drop one reference to `id`. When the last reference goes away and
    /// the entry has neither a valid blob URL nor outstanding fetch
    /// tokens, the entry is removed — and, for a sliced entry, the
    /// parent's refcount is decremented in turn.
    fn dec_ref(&self, id: &Uuid, origin_in: &ImmutableOrigin) -> Result<(), BlobURLStoreError> {
        let (do_remove, opt_parent_id) = match self.entries.read().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    // Release pairs with the Acquire fence below so that the
                    // thread that observes refs hitting zero also sees all
                    // prior writes to the entry.
                    let old_refs = entry.refs.fetch_sub(1, Ordering::Release);

                    if old_refs > 1 {
                        // not the last reference, no need to touch parent
                        (false, None)
                    } else {
                        // last reference, and if it has a reference to parent id
                        // dec_ref on parent later if necessary
                        let is_valid = entry.is_valid_url.load(Ordering::Acquire);

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = !is_valid && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id))
                        } else {
                            (do_remove, None)
                        }
                    }
                } else {
                    return Err(BlobURLStoreError::InvalidOrigin);
                }
            },
            None => return Err(BlobURLStoreError::InvalidFileID),
        };

        // Trigger removing if its last reference is gone and it is
        // not a part of a valid Blob URL
        if do_remove {
            // Synchronize with the Release decrement above before the
            // entry's data is dropped.
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }

        Ok(())
    }
809
810    fn promote_memory(
811        &self,
812        id: Uuid,
813        blob_buf: BlobBuf,
814        set_valid: bool,
815        origin: ImmutableOrigin,
816    ) {
817        self.insert(
818            id,
819            FileStoreEntry {
820                origin,
821                file_impl: FileImpl::Memory(blob_buf),
822                refs: AtomicUsize::new(1),
823                is_valid_url: AtomicBool::new(set_valid),
824                outstanding_tokens: Default::default(),
825            },
826        );
827    }
828
    /// Marks the entry `id` as being (or no longer being) the target of a
    /// valid blob URL. Revoking validity (`validity == false`) may remove the
    /// entry outright when nothing else keeps it alive: zero refs and no
    /// outstanding fetch tokens.
    ///
    /// Returns `InvalidOrigin` when `origin_in` does not match the entry's
    /// creator origin, `InvalidFileID` when `id` is unknown.
    fn set_blob_url_validity(
        &self,
        validity: bool,
        id: &Uuid,
        origin_in: &ImmutableOrigin,
    ) -> Result<(), BlobURLStoreError> {
        // Decide under the read lock; the temporary guard is dropped at the
        // end of the `match`, before `self.remove(id)` below re-enters the map.
        let (do_remove, opt_parent_id, res) = match self.entries.read().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    entry.is_valid_url.store(validity, Ordering::Release);

                    if !validity {
                        // Check if it is the last possible reference
                        // since refs only accounts for blob id holders
                        // and store entry id holders
                        let zero_refs = entry.refs.load(Ordering::Acquire) == 0;

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = zero_refs && no_outstanding_tokens;

                        // A slice holds a reference on its parent; record the
                        // parent id so that reference can be released below.
                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id), Ok(()))
                        } else {
                            (do_remove, None, Ok(()))
                        }
                    } else {
                        // Marking valid never triggers removal.
                        (false, None, Ok(()))
                    }
                } else {
                    (false, None, Err(BlobURLStoreError::InvalidOrigin))
                }
            },
            None => (false, None, Err(BlobURLStoreError::InvalidFileID)),
        };

        if do_remove {
            // Pairs with the Release store/decrements elsewhere so the entry's
            // contents are fully visible to the thread removing it.
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            // Removing a slice releases the reference it held on its parent.
            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }
        res
    }
877}
878
879async fn read_file_in_chunks(
880    sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
881    mut file: tokio::fs::File,
882    size: usize,
883    opt_filename: Option<String>,
884    type_string: String,
885) {
886    // First chunk
887    let mut buf = vec![0; FILE_CHUNK_SIZE];
888    match file.read(&mut buf).await {
889        Ok(n) => {
890            buf.truncate(n);
891            let blob_buf = BlobBuf {
892                filename: opt_filename,
893                type_string,
894                size: size as u64,
895                bytes: buf,
896            };
897            let _ = sender.send(Ok(ReadFileProgress::Meta(blob_buf)));
898        },
899        Err(e) => {
900            let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
901            return;
902        },
903    }
904
905    // Send the remaining chunks
906    loop {
907        let mut buf = vec![0; FILE_CHUNK_SIZE];
908        match file.read(&mut buf).await {
909            Ok(0) => {
910                let _ = sender.send(Ok(ReadFileProgress::EOF));
911                return;
912            },
913            Ok(n) => {
914                buf.truncate(n);
915                let _ = sender.send(Ok(ReadFileProgress::Partial(buf)));
916            },
917            Err(e) => {
918                let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
919                return;
920            },
921        }
922    }
923}
924
925fn set_headers(
926    headers: &mut HeaderMap,
927    content_length: u64,
928    mime: Mime,
929    filename: Option<String>,
930    content_range: Option<ContentRange>,
931) {
932    headers.typed_insert(ContentLength(content_length));
933    if let Some(content_range) = content_range {
934        headers.typed_insert(content_range);
935    }
936    headers.typed_insert(ContentType::from(mime.clone()));
937    let name = match filename {
938        Some(name) => name,
939        None => return,
940    };
941    let charset = mime.get_param(mime::CHARSET);
942    let charset = charset
943        .map(|c| c.as_ref().into())
944        .unwrap_or("us-ascii".to_owned());
945    // TODO(eijebong): Replace this once the typed header is there
946    //                 https://github.com/hyperium/headers/issues/8
947    headers.insert(
948        header::CONTENT_DISPOSITION,
949        HeaderValue::from_bytes(
950            format!(
951                "inline; {}",
952                if charset.to_lowercase() == "utf-8" {
953                    format!(
954                        "filename=\"{}\"",
955                        String::from_utf8(name.as_bytes().into()).unwrap()
956                    )
957                } else {
958                    format!(
959                        "filename*=\"{}\"''{}",
960                        charset,
961                        http_percent_encode(name.as_bytes())
962                    )
963                }
964            )
965            .as_bytes(),
966        )
967        .unwrap(),
968    );
969}