// net/filemanager_thread.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5use std::fs::File;
6use std::io::{BufRead, BufReader, Read, Seek, SeekFrom};
7use std::ops::Index;
8use std::path::{Path, PathBuf};
9use std::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};
10use std::sync::{Arc, Weak};
11
12use base::generic_channel;
13use base::threadpool::ThreadPool;
14use embedder_traits::{
15    EmbedderControlId, EmbedderControlResponse, EmbedderMsg, EmbedderProxy, FilePickerRequest,
16    SelectedFile,
17};
18use headers::{ContentLength, ContentRange, ContentType, HeaderMap, HeaderMapExt, Range};
19use http::header::{self, HeaderValue};
20use ipc_channel::ipc::IpcSender;
21use log::warn;
22use mime::{self, Mime};
23use net_traits::blob_url_store::{BlobBuf, BlobURLStoreError};
24use net_traits::filemanager_thread::{
25    FileManagerResult, FileManagerThreadError, FileManagerThreadMsg, FileOrigin, FileTokenCheck,
26    ReadFileProgress, RelativePos,
27};
28use net_traits::http_percent_encode;
29use net_traits::response::{Response, ResponseBody};
30use parking_lot::{Mutex, RwLock};
31use rustc_hash::{FxHashMap, FxHashSet};
32use servo_arc::Arc as ServoArc;
33use tokio::sync::mpsc::UnboundedSender as TokioSender;
34use url::Url;
35use uuid::Uuid;
36
37use crate::fetch::methods::{CancellationListener, Data, RangeRequestBounds};
38use crate::protocols::get_range_request_bounds;
39
/// Size of each chunk used when streaming file contents: 32 KiB.
pub const FILE_CHUNK_SIZE: usize = 32768; // 32 KB
41
/// FileManagerStore's entry: one logical file (on disk, in memory, or a
/// slice of another entry) plus the bookkeeping that decides when it can
/// be removed from the store.
struct FileStoreEntry {
    /// Origin of the entry's "creator"
    origin: FileOrigin,
    /// Backend implementation
    file_impl: FileImpl,
    /// Number of FileID holders that the ID is used to
    /// index this entry in `FileManagerStore`.
    /// Reference holders include a FileStoreEntry or
    /// a script-side File-based Blob
    refs: AtomicUsize,
    /// UUIDs only become valid blob URIs when explicitly requested
    /// by the user with createObjectURL. Validity can be revoked as well.
    /// (The UUID is the one that maps to this entry in `FileManagerStore`)
    is_valid_url: AtomicBool,
    /// UUIDs of fetch instances that acquired an interest in this file,
    /// when the url was still valid.
    outstanding_tokens: FxHashSet<Uuid>,
}
61
/// Metadata describing the on-disk file backing a `MetaDataOnly` entry.
#[derive(Clone)]
struct FileMetaData {
    /// Path of the file on disk.
    path: PathBuf,
    /// File size in bytes, as recorded when the entry was created.
    size: u64,
}
67
/// File backend implementation
#[derive(Clone)]
enum FileImpl {
    /// Metadata of on-disk file; its contents are read on demand.
    MetaDataOnly(FileMetaData),
    /// In-memory Blob buffer object
    Memory(BlobBuf),
    /// A reference to parent entry in `FileManagerStore`,
    /// representing a sliced version of the parent entry data
    Sliced(Uuid, RelativePos),
}
79
/// Front-end handle of the file manager: clones share one
/// `FileManagerStore` and dispatch blocking work onto a thread pool.
#[derive(Clone)]
pub struct FileManager {
    /// Channel used to ask the embedder to show file pickers.
    embedder_proxy: EmbedderProxy,
    /// Shared store mapping file IDs to their backing entries.
    store: Arc<FileManagerStore>,
    /// Weak handle to the thread pool; upgraded on each use so work is
    /// skipped cleanly after CoreResourceManager has exited.
    thread_pool: Weak<ThreadPool>,
}
86
87impl FileManager {
88    pub fn new(embedder_proxy: EmbedderProxy, pool_handle: Weak<ThreadPool>) -> FileManager {
89        FileManager {
90            embedder_proxy,
91            store: Arc::new(FileManagerStore::new()),
92            thread_pool: pool_handle,
93        }
94    }
95
96    pub fn read_file(
97        &self,
98        sender: IpcSender<FileManagerResult<ReadFileProgress>>,
99        id: Uuid,
100        origin: FileOrigin,
101    ) {
102        let store = self.store.clone();
103        self.thread_pool
104            .upgrade()
105            .map(|pool| {
106                pool.spawn(move || {
107                    if let Err(e) = store.try_read_file(&sender, id, origin) {
108                        let _ = sender.send(Err(FileManagerThreadError::BlobURLStoreError(e)));
109                    }
110                });
111            })
112            .unwrap_or_else(|| {
113                warn!("FileManager tried to read a file after CoreResourceManager has exited.");
114            });
115    }
116
117    pub fn get_token_for_file(&self, file_id: &Uuid) -> FileTokenCheck {
118        self.store.get_token_for_file(file_id)
119    }
120
121    pub fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
122        self.store.invalidate_token(token, file_id);
123    }
124
125    /// Read a file for the Fetch implementation.
126    /// It gets the required headers synchronously and reads the actual content
127    /// in a separate thread.
128    #[allow(clippy::too_many_arguments)]
129    pub fn fetch_file(
130        &self,
131        done_sender: &mut TokioSender<Data>,
132        cancellation_listener: Arc<CancellationListener>,
133        id: Uuid,
134        file_token: &FileTokenCheck,
135        origin: FileOrigin,
136        response: &mut Response,
137        range: Option<Range>,
138    ) -> Result<(), BlobURLStoreError> {
139        self.fetch_blob_buf(
140            done_sender,
141            cancellation_listener,
142            &id,
143            file_token,
144            &origin,
145            BlobBounds::Unresolved(range),
146            response,
147        )
148    }
149
150    pub fn promote_memory(&self, id: Uuid, blob_buf: BlobBuf, set_valid: bool, origin: FileOrigin) {
151        self.store.promote_memory(id, blob_buf, set_valid, origin);
152    }
153
154    /// Message handler
155    pub fn handle(&self, msg: FileManagerThreadMsg) {
156        match msg {
157            FileManagerThreadMsg::SelectFiles(control_id, file_picker_request, response_sender) => {
158                let store = self.store.clone();
159                let embedder = self.embedder_proxy.clone();
160                self.thread_pool
161                    .upgrade()
162                    .map(|pool| {
163                        pool.spawn(move || {
164                            store.select_files(control_id, file_picker_request, response_sender, embedder);
165                        });
166                    })
167                    .unwrap_or_else(|| {
168                        warn!(
169                            "FileManager tried to select multiple files after CoreResourceManager has exited."
170                        );
171                    });
172            },
173            FileManagerThreadMsg::ReadFile(sender, id, origin) => {
174                self.read_file(sender, id, origin);
175            },
176            FileManagerThreadMsg::PromoteMemory(id, blob_buf, set_valid, origin) => {
177                self.promote_memory(id, blob_buf, set_valid, origin);
178            },
179            FileManagerThreadMsg::AddSlicedURLEntry(id, rel_pos, sender, origin) => {
180                self.store.add_sliced_url_entry(id, rel_pos, sender, origin);
181            },
182            FileManagerThreadMsg::DecRef(id, origin, sender) => {
183                let _ = sender.send(self.store.dec_ref(&id, &origin));
184            },
185            FileManagerThreadMsg::RevokeBlobURL(id, origin, sender) => {
186                let _ = sender.send(self.store.set_blob_url_validity(false, &id, &origin));
187            },
188            FileManagerThreadMsg::ActivateBlobURL(id, sender, origin) => {
189                let _ = sender.send(self.store.set_blob_url_validity(true, &id, &origin));
190            },
191        }
192    }
193
194    pub fn fetch_file_in_chunks(
195        &self,
196        done_sender: &mut TokioSender<Data>,
197        mut reader: BufReader<File>,
198        res_body: ServoArc<Mutex<ResponseBody>>,
199        cancellation_listener: Arc<CancellationListener>,
200        range: RelativePos,
201    ) {
202        let done_sender = done_sender.clone();
203        self.thread_pool
204            .upgrade()
205            .map(|pool| {
206                pool.spawn(move || {
207                    loop {
208                        if cancellation_listener.cancelled() {
209                            *res_body.lock() = ResponseBody::Done(vec![]);
210                            let _ = done_sender.send(Data::Cancelled);
211                            return;
212                        }
213                        let length = {
214                            let buffer = reader.fill_buf().unwrap().to_vec();
215                            let mut buffer_len = buffer.len();
216                            if let ResponseBody::Receiving(ref mut body) = *res_body.lock()
217                            {
218                                let offset = usize::min(
219                                    {
220                                        if let Some(end) = range.end {
221                                            // HTTP Range requests are specified with closed ranges,
222                                            // while Rust uses half-open ranges. We add +1 here so
223                                            // we don't skip the last requested byte.
224                                            let remaining_bytes =
225                                                end as usize - range.start as usize - body.len() +
226                                                    1;
227                                            if remaining_bytes <= FILE_CHUNK_SIZE {
228                                                // This is the last chunk so we set buffer
229                                                // len to 0 to break the reading loop.
230                                                buffer_len = 0;
231                                                remaining_bytes
232                                            } else {
233                                                FILE_CHUNK_SIZE
234                                            }
235                                        } else {
236                                            FILE_CHUNK_SIZE
237                                        }
238                                    },
239                                    buffer.len(),
240                                );
241                                let chunk = &buffer[0..offset];
242                                body.extend_from_slice(chunk);
243                                let _ = done_sender.send(Data::Payload(chunk.to_vec()));
244                            }
245                            buffer_len
246                        };
247                        if length == 0 {
248                            let mut body = res_body.lock();
249                            let completed_body = match *body {
250                                ResponseBody::Receiving(ref mut body) => std::mem::take(body),
251                                _ => vec![],
252                            };
253                            *body = ResponseBody::Done(completed_body);
254                            let _ = done_sender.send(Data::Done);
255                            break;
256                        }
257                        reader.consume(length);
258                    }
259                });
260            })
261            .unwrap_or_else(|| {
262                warn!("FileManager tried to fetch a file in chunks after CoreResourceManager has exited.");
263            });
264    }
265
266    #[allow(clippy::too_many_arguments)]
267    fn fetch_blob_buf(
268        &self,
269        done_sender: &mut TokioSender<Data>,
270        cancellation_listener: Arc<CancellationListener>,
271        id: &Uuid,
272        file_token: &FileTokenCheck,
273        origin_in: &FileOrigin,
274        bounds: BlobBounds,
275        response: &mut Response,
276    ) -> Result<(), BlobURLStoreError> {
277        let file_impl = self.store.get_impl(id, file_token, origin_in)?;
278        /*
279           Only Fetch Blob Range Request would have unresolved range, and only in that case we care about range header.
280        */
281        let mut is_range_requested = false;
282        match file_impl {
283            FileImpl::Memory(buf) => {
284                let bounds = match bounds {
285                    BlobBounds::Unresolved(range) => {
286                        if range.is_some() {
287                            is_range_requested = true;
288                        }
289                        get_range_request_bounds(range, buf.size)
290                    },
291                    BlobBounds::Resolved(bounds) => bounds,
292                };
293                let range = bounds
294                    .get_final(Some(buf.size))
295                    .map_err(|_| BlobURLStoreError::InvalidRange)?;
296
297                let range = range.to_abs_blob_range(buf.size as usize);
298                let len = range.len() as u64;
299                let content_range = if is_range_requested {
300                    ContentRange::bytes(range.start as u64..range.end as u64, buf.size).ok()
301                } else {
302                    None
303                };
304
305                set_headers(
306                    &mut response.headers,
307                    len,
308                    buf.type_string.parse().unwrap_or(mime::TEXT_PLAIN),
309                    /* filename */ None,
310                    content_range,
311                );
312
313                let mut bytes = vec![];
314                bytes.extend_from_slice(buf.bytes.index(range));
315
316                let _ = done_sender.send(Data::Payload(bytes));
317                let _ = done_sender.send(Data::Done);
318
319                Ok(())
320            },
321            FileImpl::MetaDataOnly(metadata) => {
322                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
323                        Concretely, here we create another file, and this file might not
324                        has the same underlying file state (meta-info plus content) as the time
325                        create_entry is called.
326                */
327
328                let file = File::open(&metadata.path)
329                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
330                let mut is_range_requested = false;
331                let bounds = match bounds {
332                    BlobBounds::Unresolved(range) => {
333                        if range.is_some() {
334                            is_range_requested = true;
335                        }
336                        get_range_request_bounds(range, metadata.size)
337                    },
338                    BlobBounds::Resolved(bounds) => bounds,
339                };
340                let range = bounds
341                    .get_final(Some(metadata.size))
342                    .map_err(|_| BlobURLStoreError::InvalidRange)?;
343
344                let mut reader = BufReader::with_capacity(FILE_CHUNK_SIZE, file);
345                if reader.seek(SeekFrom::Start(range.start as u64)).is_err() {
346                    return Err(BlobURLStoreError::External(
347                        "Unexpected method for blob".into(),
348                    ));
349                }
350
351                let filename = metadata
352                    .path
353                    .file_name()
354                    .and_then(|osstr| osstr.to_str())
355                    .map(|s| s.to_string());
356
357                let content_range = if is_range_requested {
358                    let abs_range = range.to_abs_blob_range(metadata.size as usize);
359                    ContentRange::bytes(abs_range.start as u64..abs_range.end as u64, metadata.size)
360                        .ok()
361                } else {
362                    None
363                };
364                set_headers(
365                    &mut response.headers,
366                    metadata.size,
367                    mime_guess::from_path(metadata.path)
368                        .first()
369                        .unwrap_or(mime::TEXT_PLAIN),
370                    filename,
371                    content_range,
372                );
373
374                self.fetch_file_in_chunks(
375                    &mut done_sender.clone(),
376                    reader,
377                    response.body.clone(),
378                    cancellation_listener,
379                    range,
380                );
381
382                Ok(())
383            },
384            FileImpl::Sliced(parent_id, inner_rel_pos) => {
385                // Next time we don't need to check validity since
386                // we have already done that for requesting URL if necessary.
387                let bounds = RangeRequestBounds::Final(
388                    RelativePos::full_range().slice_inner(&inner_rel_pos),
389                );
390                self.fetch_blob_buf(
391                    done_sender,
392                    cancellation_listener,
393                    &parent_id,
394                    file_token,
395                    origin_in,
396                    BlobBounds::Resolved(bounds),
397                    response,
398                )
399            },
400        }
401    }
402}
403
/// Range bounds for a blob fetch: either a raw HTTP `Range` header still to
/// be resolved against the blob's size, or already-resolved bounds.
enum BlobBounds {
    /// An optional HTTP `Range` header, not yet resolved to concrete offsets.
    Unresolved(Option<Range>),
    /// Bounds that have already been resolved (e.g. for sliced blobs).
    Resolved(RangeRequestBounds),
}
408
/// File manager's data store. It maintains a thread-safe mapping
/// from FileID to FileStoreEntry which might have different backend implementation.
/// Access to the content is encapsulated as methods of this struct.
struct FileManagerStore {
    /// All live entries, keyed by their file UUID.
    entries: RwLock<FxHashMap<Uuid, FileStoreEntry>>,
}
415
416impl FileManagerStore {
417    fn new() -> Self {
418        FileManagerStore {
419            entries: RwLock::new(FxHashMap::default()),
420        }
421    }
422
423    /// Copy out the file backend implementation content
424    pub fn get_impl(
425        &self,
426        id: &Uuid,
427        file_token: &FileTokenCheck,
428        origin_in: &FileOrigin,
429    ) -> Result<FileImpl, BlobURLStoreError> {
430        match self.entries.read().get(id) {
431            Some(entry) => {
432                if *origin_in != *entry.origin {
433                    Err(BlobURLStoreError::InvalidOrigin)
434                } else {
435                    match file_token {
436                        FileTokenCheck::NotRequired => Ok(entry.file_impl.clone()),
437                        FileTokenCheck::Required(token) => {
438                            if entry.outstanding_tokens.contains(token) {
439                                return Ok(entry.file_impl.clone());
440                            }
441                            Err(BlobURLStoreError::InvalidFileID)
442                        },
443                        FileTokenCheck::ShouldFail => Err(BlobURLStoreError::InvalidFileID),
444                    }
445                }
446            },
447            None => Err(BlobURLStoreError::InvalidFileID),
448        }
449    }
450
451    pub fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
452        if let FileTokenCheck::Required(token) = token {
453            let mut entries = self.entries.write();
454            if let Some(entry) = entries.get_mut(file_id) {
455                entry.outstanding_tokens.remove(token);
456
457                // Check if there are references left.
458                let zero_refs = entry.refs.load(Ordering::Acquire) == 0;
459
460                // Check if no other fetch has acquired a token for this file.
461                let no_outstanding_tokens = entry.outstanding_tokens.is_empty();
462
463                // Check if there is still a blob URL outstanding.
464                let valid = entry.is_valid_url.load(Ordering::Acquire);
465
466                // Can we remove this file?
467                let do_remove = zero_refs && no_outstanding_tokens && !valid;
468
469                if do_remove {
470                    entries.remove(file_id);
471                }
472            }
473        }
474    }
475
476    pub fn get_token_for_file(&self, file_id: &Uuid) -> FileTokenCheck {
477        let mut entries = self.entries.write();
478        let parent_id = match entries.get(file_id) {
479            Some(entry) => {
480                if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
481                    Some(*parent_id)
482                } else {
483                    None
484                }
485            },
486            None => return FileTokenCheck::ShouldFail,
487        };
488        let file_id = match parent_id.as_ref() {
489            Some(id) => id,
490            None => file_id,
491        };
492        if let Some(entry) = entries.get_mut(file_id) {
493            if !entry.is_valid_url.load(Ordering::Acquire) {
494                return FileTokenCheck::ShouldFail;
495            }
496            let token = Uuid::new_v4();
497            entry.outstanding_tokens.insert(token);
498            return FileTokenCheck::Required(token);
499        }
500        FileTokenCheck::ShouldFail
501    }
502
503    fn insert(&self, id: Uuid, entry: FileStoreEntry) {
504        self.entries.write().insert(id, entry);
505    }
506
507    fn remove(&self, id: &Uuid) {
508        self.entries.write().remove(id);
509    }
510
511    fn inc_ref(&self, id: &Uuid, origin_in: &FileOrigin) -> Result<(), BlobURLStoreError> {
512        match self.entries.read().get(id) {
513            Some(entry) => {
514                if entry.origin == *origin_in {
515                    entry.refs.fetch_add(1, Ordering::Relaxed);
516                    Ok(())
517                } else {
518                    Err(BlobURLStoreError::InvalidOrigin)
519                }
520            },
521            None => Err(BlobURLStoreError::InvalidFileID),
522        }
523    }
524
525    fn add_sliced_url_entry(
526        &self,
527        parent_id: Uuid,
528        rel_pos: RelativePos,
529        sender: IpcSender<Result<Uuid, BlobURLStoreError>>,
530        origin_in: FileOrigin,
531    ) {
532        match self.inc_ref(&parent_id, &origin_in) {
533            Ok(_) => {
534                let new_id = Uuid::new_v4();
535                self.insert(
536                    new_id,
537                    FileStoreEntry {
538                        origin: origin_in,
539                        file_impl: FileImpl::Sliced(parent_id, rel_pos),
540                        refs: AtomicUsize::new(1),
541                        // Valid here since AddSlicedURLEntry implies URL creation
542                        // from a BlobImpl::Sliced
543                        is_valid_url: AtomicBool::new(true),
544                        outstanding_tokens: Default::default(),
545                    },
546                );
547
548                // We assume that the returned id will be held by BlobImpl::File
549                let _ = sender.send(Ok(new_id));
550            },
551            Err(e) => {
552                let _ = sender.send(Err(e));
553            },
554        }
555    }
556
557    fn select_files(
558        &self,
559        control_id: EmbedderControlId,
560        file_picker_request: FilePickerRequest,
561        response_sender: IpcSender<EmbedderControlResponse>,
562        embedder_proxy: EmbedderProxy,
563    ) {
564        let (ipc_sender, ipc_receiver) =
565            generic_channel::channel().expect("Failed to create IPC channel!");
566
567        let origin = file_picker_request.origin.clone();
568        embedder_proxy.send(EmbedderMsg::SelectFiles(
569            control_id,
570            file_picker_request,
571            ipc_sender,
572        ));
573
574        let paths = match ipc_receiver.recv() {
575            Ok(Some(result)) => result,
576            Ok(None) => {
577                let _ = response_sender.send(EmbedderControlResponse::FilePicker(None));
578                return;
579            },
580            Err(error) => {
581                warn!("Failed to receive files from embedder ({:?}).", error);
582                let _ = response_sender.send(EmbedderControlResponse::FilePicker(None));
583                return;
584            },
585        };
586
587        let mut failed = false;
588        let files: Vec<_> = paths
589            .into_iter()
590            .filter_map(|path| match self.create_entry(&path, &origin) {
591                Ok(entry) => Some(entry),
592                Err(error) => {
593                    failed = true;
594                    warn!("Failed to create entry for selected file: {error:?}");
595                    None
596                },
597            })
598            .collect();
599
600        // From <https://w3c.github.io/webdriver/#dfn-element-send-keys>:
601        //
602        // > Step 8.5: Verify that each file given by the user exists. If any do not,
603        // > return error with error code invalid argument.
604        //
605        // WebDriver expects that if any of the files isn't found we don't select any files.
606        if failed {
607            for file in files.iter() {
608                self.remove(&file.id);
609            }
610            let _ = response_sender.send(EmbedderControlResponse::FilePicker(Some(Vec::new())));
611            return;
612        }
613
614        let _ = response_sender.send(EmbedderControlResponse::FilePicker(Some(files)));
615    }
616
617    fn create_entry(
618        &self,
619        file_path: &Path,
620        origin: &str,
621    ) -> Result<SelectedFile, FileManagerThreadError> {
622        use net_traits::filemanager_thread::FileManagerThreadError::FileSystemError;
623
624        let file = File::open(file_path).map_err(|e| FileSystemError(e.to_string()))?;
625        let metadata = file
626            .metadata()
627            .map_err(|e| FileSystemError(e.to_string()))?;
628        let modified = metadata
629            .modified()
630            .map_err(|e| FileSystemError(e.to_string()))?;
631        let file_size = metadata.len();
632        let file_name = file_path
633            .file_name()
634            .ok_or(FileSystemError("Invalid filepath".to_string()))?;
635
636        let file_impl = FileImpl::MetaDataOnly(FileMetaData {
637            path: file_path.to_path_buf(),
638            size: file_size,
639        });
640
641        let id = Uuid::new_v4();
642
643        self.insert(
644            id,
645            FileStoreEntry {
646                origin: origin.to_string(),
647                file_impl,
648                refs: AtomicUsize::new(1),
649                // Invalid here since create_entry is called by file selection
650                is_valid_url: AtomicBool::new(false),
651                outstanding_tokens: Default::default(),
652            },
653        );
654
655        let filename_path = Path::new(file_name);
656        let type_string = match mime_guess::from_path(filename_path).first() {
657            Some(x) => format!("{}", x),
658            None => "".to_string(),
659        };
660
661        Ok(SelectedFile {
662            id,
663            filename: filename_path.to_path_buf(),
664            modified,
665            size: file_size,
666            type_string,
667        })
668    }
669
    /// Resolve `id` to its backend and stream the bytes within `rel_pos`
    /// to `sender` as `ReadFileProgress` messages. Memory blobs are sent in
    /// one `Meta` message; on-disk files are streamed via
    /// `read_file_in_chunks`; sliced entries recurse into their parent with
    /// a composed relative position.
    fn get_blob_buf(
        &self,
        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &FileOrigin,
        rel_pos: RelativePos,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.get_impl(id, file_token, origin_in)?;
        match file_impl {
            FileImpl::Memory(buf) => {
                // Slice the in-memory buffer down to the requested range.
                let range = rel_pos.to_abs_range(buf.size as usize);
                let buf = BlobBuf {
                    filename: None,
                    type_string: buf.type_string,
                    size: range.len() as u64,
                    bytes: buf.bytes.index(range).to_vec(),
                };

                let _ = sender.send(Ok(ReadFileProgress::Meta(buf)));
                let _ = sender.send(Ok(ReadFileProgress::EOF));

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                        Concretely, here we create another file, and this file might not
                        has the same underlying file state (meta-info plus content) as the time
                        create_entry is called.
                */

                let opt_filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let mime = mime_guess::from_path(metadata.path.clone()).first();
                let range = rel_pos.to_abs_range(metadata.size as usize);

                let mut file = File::open(&metadata.path)
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                let seeked_start = file
                    .seek(SeekFrom::Start(range.start as u64))
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;

                // Only stream if the seek actually landed on the requested
                // start offset; otherwise the entry is treated as invalid.
                if seeked_start == (range.start as u64) {
                    let type_string = match mime {
                        Some(x) => format!("{}", x),
                        None => "".to_string(),
                    };

                    read_file_in_chunks(sender, &mut file, range.len(), opt_filename, type_string);
                    Ok(())
                } else {
                    Err(BlobURLStoreError::InvalidEntry)
                }
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // Next time we don't need to check validity since
                // we have already done that for requesting URL if necessary
                self.get_blob_buf(
                    sender,
                    &parent_id,
                    file_token,
                    origin_in,
                    rel_pos.slice_inner(&inner_rel_pos),
                )
            },
        }
    }
741
742    // Convenient wrapper over get_blob_buf
743    fn try_read_file(
744        &self,
745        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
746        id: Uuid,
747        origin_in: FileOrigin,
748    ) -> Result<(), BlobURLStoreError> {
749        self.get_blob_buf(
750            sender,
751            &id,
752            &FileTokenCheck::NotRequired,
753            &origin_in,
754            RelativePos::full_range(),
755        )
756    }
757
    /// Drop one reference to `id`. When the last reference goes away and the
    /// entry has neither a valid blob URL nor outstanding fetch tokens, the
    /// entry is removed — and, for sliced entries, the parent's refcount is
    /// decremented recursively.
    fn dec_ref(&self, id: &Uuid, origin_in: &FileOrigin) -> Result<(), BlobURLStoreError> {
        // Decide under the read lock; actual removal happens after it is
        // released (via `self.remove`, which takes the write lock).
        let (do_remove, opt_parent_id) = match self.entries.read().get(id) {
            Some(entry) => {
                if *entry.origin == *origin_in {
                    // `fetch_sub` returns the previous value.
                    let old_refs = entry.refs.fetch_sub(1, Ordering::Release);

                    if old_refs > 1 {
                        // not the last reference, no need to touch parent
                        (false, None)
                    } else {
                        // last reference, and if it has a reference to parent id
                        // dec_ref on parent later if necessary
                        let is_valid = entry.is_valid_url.load(Ordering::Acquire);

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = !is_valid && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id))
                        } else {
                            (do_remove, None)
                        }
                    }
                } else {
                    return Err(BlobURLStoreError::InvalidOrigin);
                }
            },
            None => return Err(BlobURLStoreError::InvalidFileID),
        };

        // Trigger removing if its last reference is gone and it is
        // not a part of a valid Blob URL
        if do_remove {
            // Acquire fence pairs with the Release `fetch_sub` above, so all
            // writes made while holding a reference are visible before removal.
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }

        Ok(())
    }
804
805    fn promote_memory(&self, id: Uuid, blob_buf: BlobBuf, set_valid: bool, origin: FileOrigin) {
806        // parse to check sanity
807        if Url::parse(&origin).is_err() {
808            return;
809        }
810        self.insert(
811            id,
812            FileStoreEntry {
813                origin,
814                file_impl: FileImpl::Memory(blob_buf),
815                refs: AtomicUsize::new(1),
816                is_valid_url: AtomicBool::new(set_valid),
817                outstanding_tokens: Default::default(),
818            },
819        );
820    }
821
    /// Mark the Blob URL backed by `id` as valid (`validity == true`) or
    /// revoked. Revocation may remove the entry outright when no references
    /// or fetch tokens remain, then decrements a sliced blob's parent.
    fn set_blob_url_validity(
        &self,
        validity: bool,
        id: &Uuid,
        origin_in: &FileOrigin,
    ) -> Result<(), BlobURLStoreError> {
        // Decide under the read lock; removal happens below after the guard
        // (a temporary in the match scrutinee) has been dropped, so that
        // `remove` can take the write lock without deadlocking.
        let (do_remove, opt_parent_id, res) = match self.entries.read().get(id) {
            Some(entry) => {
                // Only the origin that created the entry may change validity.
                if *entry.origin == *origin_in {
                    // Release pairs with the Acquire fence before `remove`.
                    entry.is_valid_url.store(validity, Ordering::Release);

                    if !validity {
                        // Check if it is the last possible reference
                        // since refs only accounts for blob id holders
                        // and store entry id holders
                        let zero_refs = entry.refs.load(Ordering::Acquire) == 0;

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = zero_refs && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            // A slice keeps its parent alive; report the
                            // parent id so its refcount is dropped too.
                            (do_remove, Some(*parent_id), Ok(()))
                        } else {
                            (do_remove, None, Ok(()))
                        }
                    } else {
                        // Marking as valid never removes anything.
                        (false, None, Ok(()))
                    }
                } else {
                    (false, None, Err(BlobURLStoreError::InvalidOrigin))
                }
            },
            None => (false, None, Err(BlobURLStoreError::InvalidFileID)),
        };

        if do_remove {
            // Synchronizes with the Release store above.
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }
        res
    }
870}
871
872fn read_file_in_chunks(
873    sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
874    file: &mut File,
875    size: usize,
876    opt_filename: Option<String>,
877    type_string: String,
878) {
879    // First chunk
880    let mut buf = vec![0; FILE_CHUNK_SIZE];
881    match file.read(&mut buf) {
882        Ok(n) => {
883            buf.truncate(n);
884            let blob_buf = BlobBuf {
885                filename: opt_filename,
886                type_string,
887                size: size as u64,
888                bytes: buf,
889            };
890            let _ = sender.send(Ok(ReadFileProgress::Meta(blob_buf)));
891        },
892        Err(e) => {
893            let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
894            return;
895        },
896    }
897
898    // Send the remaining chunks
899    loop {
900        let mut buf = vec![0; FILE_CHUNK_SIZE];
901        match file.read(&mut buf) {
902            Ok(0) => {
903                let _ = sender.send(Ok(ReadFileProgress::EOF));
904                return;
905            },
906            Ok(n) => {
907                buf.truncate(n);
908                let _ = sender.send(Ok(ReadFileProgress::Partial(buf)));
909            },
910            Err(e) => {
911                let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
912                return;
913            },
914        }
915    }
916}
917
918fn set_headers(
919    headers: &mut HeaderMap,
920    content_length: u64,
921    mime: Mime,
922    filename: Option<String>,
923    content_range: Option<ContentRange>,
924) {
925    headers.typed_insert(ContentLength(content_length));
926    if let Some(content_range) = content_range {
927        headers.typed_insert(content_range);
928    }
929    headers.typed_insert(ContentType::from(mime.clone()));
930    let name = match filename {
931        Some(name) => name,
932        None => return,
933    };
934    let charset = mime.get_param(mime::CHARSET);
935    let charset = charset
936        .map(|c| c.as_ref().into())
937        .unwrap_or("us-ascii".to_owned());
938    // TODO(eijebong): Replace this once the typed header is there
939    //                 https://github.com/hyperium/headers/issues/8
940    headers.insert(
941        header::CONTENT_DISPOSITION,
942        HeaderValue::from_bytes(
943            format!(
944                "inline; {}",
945                if charset.to_lowercase() == "utf-8" {
946                    format!(
947                        "filename=\"{}\"",
948                        String::from_utf8(name.as_bytes().into()).unwrap()
949                    )
950                } else {
951                    format!(
952                        "filename*=\"{}\"''{}",
953                        charset,
954                        http_percent_encode(name.as_bytes())
955                    )
956                }
957            )
958            .as_bytes(),
959        )
960        .unwrap(),
961    );
962}