net/filemanager_thread.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::{BufRead, BufReader, Read, Seek, SeekFrom};
use std::ops::Index;
use std::path::{Path, PathBuf};
use std::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, RwLock, Weak};

use base::generic_channel;
use base::id::WebViewId;
use embedder_traits::{EmbedderMsg, EmbedderProxy, FilterPattern};
use headers::{ContentLength, ContentRange, ContentType, HeaderMap, HeaderMapExt, Range};
use http::header::{self, HeaderValue};
use ipc_channel::ipc::IpcSender;
use log::warn;
use mime::{self, Mime};
use net_traits::blob_url_store::{BlobBuf, BlobURLStoreError};
use net_traits::filemanager_thread::{
    FileManagerResult, FileManagerThreadError, FileManagerThreadMsg, FileOrigin, FileTokenCheck,
    ReadFileProgress, RelativePos, SelectedFile,
};
use net_traits::http_percent_encode;
use net_traits::response::{Response, ResponseBody};
use servo_arc::Arc as ServoArc;
use servo_config::pref;
use tokio::sync::mpsc::UnboundedSender as TokioSender;
use url::Url;
use uuid::Uuid;

use crate::fetch::methods::{CancellationListener, Data, RangeRequestBounds};
use crate::protocols::get_range_request_bounds;
use crate::resource_thread::CoreResourceThreadPool;

pub const FILE_CHUNK_SIZE: usize = 32768; // 32 KB
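// This chunk size bounds every read in both streaming paths below:
// `read_file_in_chunks` (progress messages back to script) and
// `fetch_file_in_chunks` (response body payloads for Fetch).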

/// An entry in the `FileManagerStore`.
struct FileStoreEntry {
    /// Origin of the entry's "creator"
    origin: FileOrigin,
    /// Backend implementation
    file_impl: FileImpl,
    /// Number of `FileID` holders that use this ID to index the entry
    /// in `FileManagerStore`. Reference holders include a `FileStoreEntry`
    /// or a script-side File-based Blob.
    refs: AtomicUsize,
    /// UUIDs only become valid blob URIs when explicitly requested
    /// by the user with createObjectURL. Validity can be revoked as well.
    /// (The UUID is the one that maps to this entry in `FileManagerStore`.)
    is_valid_url: AtomicBool,
    /// UUIDs of fetch instances that acquired an interest in this file
    /// while the URL was still valid.
    outstanding_tokens: HashSet<Uuid>,
}
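
// An entry stays in the store until all three conditions hold: its `refs`
// count has dropped to zero, it has no outstanding fetch tokens, and its
// blob URL has been revoked (or was never activated). See `dec_ref`,
// `invalidate_token`, and `set_blob_url_validity` below.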

#[derive(Clone)]
struct FileMetaData {
    path: PathBuf,
    size: u64,
}

/// File backend implementation
#[derive(Clone)]
enum FileImpl {
    /// Metadata of on-disk file
    MetaDataOnly(FileMetaData),
    /// In-memory Blob buffer object
    Memory(BlobBuf),
    /// A reference to parent entry in `FileManagerStore`,
    /// representing a sliced version of the parent entry data
    Sliced(Uuid, RelativePos),
}
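
// A `Sliced` entry (created via `add_sliced_url_entry`, e.g. when a URL is
// requested for a script-side sliced Blob) holds only the parent's UUID and a
// `RelativePos`; reads recurse into the parent entry after composing positions
// with `RelativePos::slice_inner` (see `fetch_blob_buf` and `get_blob_buf`).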

#[derive(Clone)]
pub struct FileManager {
    embedder_proxy: EmbedderProxy,
    store: Arc<FileManagerStore>,
    thread_pool: Weak<CoreResourceThreadPool>,
}

impl FileManager {
    pub fn new(
        embedder_proxy: EmbedderProxy,
        pool_handle: Weak<CoreResourceThreadPool>,
    ) -> FileManager {
        FileManager {
            embedder_proxy,
            store: Arc::new(FileManagerStore::new()),
            thread_pool: pool_handle,
        }
    }

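    /// Read the file identified by `id` on the resource thread pool,
    /// streaming `ReadFileProgress` messages back over `sender`. Failures
    /// are reported to the sender as a `BlobURLStoreError`.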
    pub fn read_file(
        &self,
        sender: IpcSender<FileManagerResult<ReadFileProgress>>,
        id: Uuid,
        origin: FileOrigin,
    ) {
        let store = self.store.clone();
        self.thread_pool
            .upgrade()
            .map(|pool| {
                pool.spawn(move || {
                    if let Err(e) = store.try_read_file(&sender, id, origin) {
                        let _ = sender.send(Err(FileManagerThreadError::BlobURLStoreError(e)));
                    }
                });
            })
            .unwrap_or_else(|| {
                warn!("FileManager tried to read a file after CoreResourceManager has exited.");
            });
    }

    pub fn get_token_for_file(&self, file_id: &Uuid) -> FileTokenCheck {
        self.store.get_token_for_file(file_id)
    }

    pub fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
        self.store.invalidate_token(token, file_id);
    }

    /// Read a file for the Fetch implementation.
    /// It gets the required headers synchronously and reads the actual content
    /// in a separate thread.
    #[allow(clippy::too_many_arguments)]
    pub fn fetch_file(
        &self,
        done_sender: &mut TokioSender<Data>,
        cancellation_listener: Arc<CancellationListener>,
        id: Uuid,
        file_token: &FileTokenCheck,
        origin: FileOrigin,
        response: &mut Response,
        range: Option<Range>,
    ) -> Result<(), BlobURLStoreError> {
        self.fetch_blob_buf(
            done_sender,
            cancellation_listener,
            &id,
            file_token,
            &origin,
            BlobBounds::Unresolved(range),
            response,
        )
    }

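    /// Insert an in-memory blob buffer into the store under `id`, optionally
    /// marking it as a valid blob URL right away (`set_valid`).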
    pub fn promote_memory(&self, id: Uuid, blob_buf: BlobBuf, set_valid: bool, origin: FileOrigin) {
        self.store.promote_memory(id, blob_buf, set_valid, origin);
    }

    /// Message handler
    pub fn handle(&self, msg: FileManagerThreadMsg) {
        match msg {
            FileManagerThreadMsg::SelectFile(webview_id, filter, sender, origin, opt_test_path) => {
                let store = self.store.clone();
                let embedder = self.embedder_proxy.clone();
                self.thread_pool
                    .upgrade()
                    .map(|pool| {
                        pool.spawn(move || {
                            store.select_file(webview_id, filter, sender, origin, opt_test_path, embedder);
                        });
                    })
                    .unwrap_or_else(|| {
                        warn!(
                            "FileManager tried to select a file after CoreResourceManager has exited."
                        );
                    });
            },
            FileManagerThreadMsg::SelectFiles(
                webview_id,
                filter,
                sender,
                origin,
                opt_test_paths,
            ) => {
                let store = self.store.clone();
                let embedder = self.embedder_proxy.clone();
                self.thread_pool
                    .upgrade()
                    .map(|pool| {
                        pool.spawn(move || {
                            store.select_files(webview_id, filter, sender, origin, opt_test_paths, embedder);
                        });
                    })
                    .unwrap_or_else(|| {
                        warn!(
                            "FileManager tried to select multiple files after CoreResourceManager has exited."
                        );
                    });
            },
            FileManagerThreadMsg::ReadFile(sender, id, origin) => {
                self.read_file(sender, id, origin);
            },
            FileManagerThreadMsg::PromoteMemory(id, blob_buf, set_valid, origin) => {
                self.promote_memory(id, blob_buf, set_valid, origin);
            },
            FileManagerThreadMsg::AddSlicedURLEntry(id, rel_pos, sender, origin) => {
                self.store.add_sliced_url_entry(id, rel_pos, sender, origin);
            },
            FileManagerThreadMsg::DecRef(id, origin, sender) => {
                let _ = sender.send(self.store.dec_ref(&id, &origin));
            },
            FileManagerThreadMsg::RevokeBlobURL(id, origin, sender) => {
                let _ = sender.send(self.store.set_blob_url_validity(false, &id, &origin));
            },
            FileManagerThreadMsg::ActivateBlobURL(id, sender, origin) => {
                let _ = sender.send(self.store.set_blob_url_validity(true, &id, &origin));
            },
        }
    }

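    /// Stream a file-backed response body on the thread pool: each chunk read
    /// from `reader` is appended to `res_body` and forwarded as
    /// `Data::Payload`, `Data::Done` is sent once the requested range is
    /// exhausted, and cancellation empties the body and sends
    /// `Data::Cancelled`.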
    pub fn fetch_file_in_chunks(
        &self,
        done_sender: &mut TokioSender<Data>,
        mut reader: BufReader<File>,
        res_body: ServoArc<Mutex<ResponseBody>>,
        cancellation_listener: Arc<CancellationListener>,
        range: RelativePos,
    ) {
        let done_sender = done_sender.clone();
        self.thread_pool
            .upgrade()
            .map(|pool| {
                pool.spawn(move || {
                    loop {
                        if cancellation_listener.cancelled() {
                            *res_body.lock().unwrap() = ResponseBody::Done(vec![]);
                            let _ = done_sender.send(Data::Cancelled);
                            return;
                        }
                        let length = {
                            let buffer = reader.fill_buf().unwrap().to_vec();
                            let mut buffer_len = buffer.len();
                            if let ResponseBody::Receiving(ref mut body) = *res_body.lock().unwrap()
                            {
                                let offset = usize::min(
                                    {
                                        if let Some(end) = range.end {
                                            // HTTP Range requests are specified with closed ranges,
                                            // while Rust uses half-open ranges. We add +1 here so
                                            // we don't skip the last requested byte.
                                            let remaining_bytes =
                                                end as usize - range.start as usize - body.len() +
                                                    1;
                                            if remaining_bytes <= FILE_CHUNK_SIZE {
                                                // This is the last chunk so we set buffer
                                                // len to 0 to break the reading loop.
                                                buffer_len = 0;
                                                remaining_bytes
                                            } else {
                                                FILE_CHUNK_SIZE
                                            }
                                        } else {
                                            FILE_CHUNK_SIZE
                                        }
                                    },
                                    buffer.len(),
                                );
                                let chunk = &buffer[0..offset];
                                body.extend_from_slice(chunk);
                                let _ = done_sender.send(Data::Payload(chunk.to_vec()));
                            }
                            buffer_len
                        };
                        if length == 0 {
                            let mut body = res_body.lock().unwrap();
                            let completed_body = match *body {
                                ResponseBody::Receiving(ref mut body) => std::mem::take(body),
                                _ => vec![],
                            };
                            *body = ResponseBody::Done(completed_body);
                            let _ = done_sender.send(Data::Done);
                            break;
                        }
                        reader.consume(length);
                    }
                });
            })
            .unwrap_or_else(|| {
                warn!("FileManager tried to fetch a file in chunks after CoreResourceManager has exited.");
            });
    }

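    /// Resolve the blob's backing implementation and stream its bytes to
    /// `done_sender`: memory-backed blobs are sent as a single payload,
    /// file-backed blobs are streamed in chunks, and sliced blobs recurse
    /// into their parent with already-resolved bounds.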
    #[allow(clippy::too_many_arguments)]
    fn fetch_blob_buf(
        &self,
        done_sender: &mut TokioSender<Data>,
        cancellation_listener: Arc<CancellationListener>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &FileOrigin,
        bounds: BlobBounds,
        response: &mut Response,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.store.get_impl(id, file_token, origin_in)?;
        // Only a Fetch Blob Range request carries an unresolved range, and only
        // in that case do we care about the Range header.
        let mut is_range_requested = false;
        match file_impl {
            FileImpl::Memory(buf) => {
                let bounds = match bounds {
                    BlobBounds::Unresolved(range) => {
                        if range.is_some() {
                            is_range_requested = true;
                        }
                        get_range_request_bounds(range, buf.size)
                    },
                    BlobBounds::Resolved(bounds) => bounds,
                };
                let range = bounds
                    .get_final(Some(buf.size))
                    .map_err(|_| BlobURLStoreError::InvalidRange)?;

                let range = range.to_abs_blob_range(buf.size as usize);
                let len = range.len() as u64;
                let content_range = if is_range_requested {
                    ContentRange::bytes(range.start as u64..range.end as u64, buf.size).ok()
                } else {
                    None
                };

                set_headers(
                    &mut response.headers,
                    len,
                    buf.type_string.parse().unwrap_or(mime::TEXT_PLAIN),
                    /* filename */ None,
                    content_range,
                );

                let mut bytes = vec![];
                bytes.extend_from_slice(buf.bytes.index(range));

                let _ = done_sender.send(Data::Payload(bytes));
                let _ = done_sender.send(Data::Done);

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                        Concretely, we open another file handle here, and the file might not
                        have the same underlying state (meta-info plus content) as at the time
                        create_entry was called.
                */

                let file = File::open(&metadata.path)
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                let mut is_range_requested = false;
                let bounds = match bounds {
                    BlobBounds::Unresolved(range) => {
                        if range.is_some() {
                            is_range_requested = true;
                        }
                        get_range_request_bounds(range, metadata.size)
                    },
                    BlobBounds::Resolved(bounds) => bounds,
                };
                let range = bounds
                    .get_final(Some(metadata.size))
                    .map_err(|_| BlobURLStoreError::InvalidRange)?;

                let mut reader = BufReader::with_capacity(FILE_CHUNK_SIZE, file);
                if reader.seek(SeekFrom::Start(range.start as u64)).is_err() {
                    return Err(BlobURLStoreError::External(
                        "Unexpected method for blob".into(),
                    ));
                }

                let filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let content_range = if is_range_requested {
                    let abs_range = range.to_abs_blob_range(metadata.size as usize);
                    ContentRange::bytes(abs_range.start as u64..abs_range.end as u64, metadata.size)
                        .ok()
                } else {
                    None
                };
                set_headers(
                    &mut response.headers,
                    metadata.size,
                    mime_guess::from_path(metadata.path)
                        .first()
                        .unwrap_or(mime::TEXT_PLAIN),
                    filename,
                    content_range,
                );

                self.fetch_file_in_chunks(
                    &mut done_sender.clone(),
                    reader,
                    response.body.clone(),
                    cancellation_listener,
                    range,
                );

                Ok(())
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // We don't need to check validity again here, since that was
                // already done for the requesting URL if necessary.
                let bounds = RangeRequestBounds::Final(
                    RelativePos::full_range().slice_inner(&inner_rel_pos),
                );
                self.fetch_blob_buf(
                    done_sender,
                    cancellation_listener,
                    &parent_id,
                    file_token,
                    origin_in,
                    BlobBounds::Resolved(bounds),
                    response,
                )
            },
        }
    }
}

enum BlobBounds {
    Unresolved(Option<Range>),
    Resolved(RangeRequestBounds),
}
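
// `Unresolved` carries the raw HTTP `Range` header (if any) that still has to
// be resolved against the blob's total size; `Resolved` carries bounds that
// were already computed, which is how a sliced blob recurses into its parent.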

/// File manager's data store. It maintains a thread-safe mapping
/// from FileID to FileStoreEntry, which may have different backend implementations.
/// Access to the content is encapsulated as methods of this struct.
struct FileManagerStore {
    entries: RwLock<HashMap<Uuid, FileStoreEntry>>,
}

impl FileManagerStore {
    fn new() -> Self {
        FileManagerStore {
            entries: RwLock::new(HashMap::new()),
        }
    }

    /// Copy out the file backend implementation content
    pub fn get_impl(
        &self,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &FileOrigin,
    ) -> Result<FileImpl, BlobURLStoreError> {
        match self.entries.read().unwrap().get(id) {
            Some(entry) => {
                if *origin_in != *entry.origin {
                    Err(BlobURLStoreError::InvalidOrigin)
                } else {
                    match file_token {
                        FileTokenCheck::NotRequired => Ok(entry.file_impl.clone()),
                        FileTokenCheck::Required(token) => {
                            if entry.outstanding_tokens.contains(token) {
                                return Ok(entry.file_impl.clone());
                            }
                            Err(BlobURLStoreError::InvalidFileID)
                        },
                        FileTokenCheck::ShouldFail => Err(BlobURLStoreError::InvalidFileID),
                    }
                }
            },
            None => Err(BlobURLStoreError::InvalidFileID),
        }
    }

    pub fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
        if let FileTokenCheck::Required(token) = token {
            let mut entries = self.entries.write().unwrap();
            if let Some(entry) = entries.get_mut(file_id) {
                entry.outstanding_tokens.remove(token);

                // Check if there are references left.
                let zero_refs = entry.refs.load(Ordering::Acquire) == 0;

                // Check if no other fetch has acquired a token for this file.
                let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                // Check if there is still a blob URL outstanding.
                let valid = entry.is_valid_url.load(Ordering::Acquire);

                // Can we remove this file?
                let do_remove = zero_refs && no_outstanding_tokens && !valid;

                if do_remove {
                    entries.remove(file_id);
                }
            }
        }
    }

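    /// Issue a fetch token for `file_id`. A sliced entry is resolved to its
    /// parent first; a token is only handed out while the entry's blob URL is
    /// still valid, otherwise `FileTokenCheck::ShouldFail` is returned.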
    pub fn get_token_for_file(&self, file_id: &Uuid) -> FileTokenCheck {
        let mut entries = self.entries.write().unwrap();
        let parent_id = match entries.get(file_id) {
            Some(entry) => {
                if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                    Some(*parent_id)
                } else {
                    None
                }
            },
            None => return FileTokenCheck::ShouldFail,
        };
        let file_id = match parent_id.as_ref() {
            Some(id) => id,
            None => file_id,
        };
        if let Some(entry) = entries.get_mut(file_id) {
            if !entry.is_valid_url.load(Ordering::Acquire) {
                return FileTokenCheck::ShouldFail;
            }
            let token = Uuid::new_v4();
            entry.outstanding_tokens.insert(token);
            return FileTokenCheck::Required(token);
        }
        FileTokenCheck::ShouldFail
    }

    fn insert(&self, id: Uuid, entry: FileStoreEntry) {
        self.entries.write().unwrap().insert(id, entry);
    }

    fn remove(&self, id: &Uuid) {
        self.entries.write().unwrap().remove(id);
    }

    fn inc_ref(&self, id: &Uuid, origin_in: &FileOrigin) -> Result<(), BlobURLStoreError> {
        match self.entries.read().unwrap().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    entry.refs.fetch_add(1, Ordering::Relaxed);
                    Ok(())
                } else {
                    Err(BlobURLStoreError::InvalidOrigin)
                }
            },
            None => Err(BlobURLStoreError::InvalidFileID),
        }
    }

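    /// Register a new entry that is a slice of `parent_id`: the parent's
    /// reference count is bumped, the slice is stored as a `FileImpl::Sliced`
    /// with a valid blob URL, and the new UUID is sent back over `sender`.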
    fn add_sliced_url_entry(
        &self,
        parent_id: Uuid,
        rel_pos: RelativePos,
        sender: IpcSender<Result<Uuid, BlobURLStoreError>>,
        origin_in: FileOrigin,
    ) {
        match self.inc_ref(&parent_id, &origin_in) {
            Ok(_) => {
                let new_id = Uuid::new_v4();
                self.insert(
                    new_id,
                    FileStoreEntry {
                        origin: origin_in,
                        file_impl: FileImpl::Sliced(parent_id, rel_pos),
                        refs: AtomicUsize::new(1),
                        // Valid here since AddSlicedURLEntry implies URL creation
                        // from a BlobImpl::Sliced
                        is_valid_url: AtomicBool::new(true),
                        outstanding_tokens: Default::default(),
                    },
                );

                // We assume that the returned id will be held by BlobImpl::File
                let _ = sender.send(Ok(new_id));
            },
            Err(e) => {
                let _ = sender.send(Err(e));
            },
        }
    }

    fn query_files_from_embedder(
        &self,
        webview_id: WebViewId,
        patterns: Vec<FilterPattern>,
        multiple_files: bool,
        embedder_proxy: EmbedderProxy,
    ) -> Option<Vec<PathBuf>> {
        let (ipc_sender, ipc_receiver) =
            generic_channel::channel().expect("Failed to create IPC channel!");
        embedder_proxy.send(EmbedderMsg::SelectFiles(
            webview_id,
            patterns,
            multiple_files,
            ipc_sender,
        ));
        match ipc_receiver.recv() {
            Ok(result) => result,
            Err(e) => {
                warn!("Failed to receive files from embedder ({:?}).", e);
                None
            },
        }
    }

    fn select_file(
        &self,
        webview_id: WebViewId,
        patterns: Vec<FilterPattern>,
        sender: IpcSender<FileManagerResult<SelectedFile>>,
        origin: FileOrigin,
        opt_test_path: Option<PathBuf>,
        embedder_proxy: EmbedderProxy,
    ) {
        // Only honour opt_test_path (a testing convenience) when the
        // select_files testing preference is enabled, to ensure process-level
        // security against compromised script; otherwise ask the embedder.
        let opt_s = if pref!(dom_testing_html_input_element_select_files_enabled) {
            opt_test_path
        } else {
            self.query_files_from_embedder(webview_id, patterns, false, embedder_proxy)
                .and_then(|mut x| x.pop())
        };

        match opt_s {
            Some(s) => {
                let selected_path = Path::new(&s);
                let result = self.create_entry(selected_path, &origin);
                let _ = sender.send(result);
            },
            None => {
                let _ = sender.send(Err(FileManagerThreadError::UserCancelled));
            },
        }
    }

    fn select_files(
        &self,
        webview_id: WebViewId,
        patterns: Vec<FilterPattern>,
        sender: IpcSender<FileManagerResult<Vec<SelectedFile>>>,
        origin: FileOrigin,
        opt_test_paths: Option<Vec<PathBuf>>,
        embedder_proxy: EmbedderProxy,
    ) {
        // Only honour opt_test_paths (a testing convenience) when the
        // select_files testing preference is enabled, to ensure process-level
        // security against compromised script; otherwise ask the embedder.
        let opt_v = if pref!(dom_testing_html_input_element_select_files_enabled) {
            opt_test_paths
        } else {
            self.query_files_from_embedder(webview_id, patterns, true, embedder_proxy)
        };

        match opt_v {
            Some(v) => {
                let mut selected_paths = vec![];

                for s in &v {
                    selected_paths.push(Path::new(s));
                }

                let mut replies = vec![];

                for path in selected_paths {
                    match self.create_entry(path, &origin) {
                        Ok(triple) => replies.push(triple),
                        Err(e) => {
                            let _ = sender.send(Err(e));
                            return;
                        },
                    };
                }

                let _ = sender.send(Ok(replies));
            },
            None => {
                let _ = sender.send(Err(FileManagerThreadError::UserCancelled));
            },
        }
    }

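    /// Open the selected file and record it as a `MetaDataOnly` entry, returning
    /// the `SelectedFile` descriptor (id, filename, modification time, size, and
    /// guessed MIME type) that is sent back to script.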
    fn create_entry(
        &self,
        file_path: &Path,
        origin: &str,
    ) -> Result<SelectedFile, FileManagerThreadError> {
        use net_traits::filemanager_thread::FileManagerThreadError::FileSystemError;

        let file = File::open(file_path).map_err(|e| FileSystemError(e.to_string()))?;
        let metadata = file
            .metadata()
            .map_err(|e| FileSystemError(e.to_string()))?;
        let modified = metadata
            .modified()
            .map_err(|e| FileSystemError(e.to_string()))?;
        let file_size = metadata.len();
        let file_name = file_path
            .file_name()
            .ok_or(FileSystemError("Invalid filepath".to_string()))?;

        let file_impl = FileImpl::MetaDataOnly(FileMetaData {
            path: file_path.to_path_buf(),
            size: file_size,
        });

        let id = Uuid::new_v4();

        self.insert(
            id,
            FileStoreEntry {
                origin: origin.to_string(),
                file_impl,
                refs: AtomicUsize::new(1),
                // Invalid here since create_entry is called by file selection
                is_valid_url: AtomicBool::new(false),
                outstanding_tokens: Default::default(),
            },
        );

        let filename_path = Path::new(file_name);
        let type_string = match mime_guess::from_path(filename_path).first() {
            Some(x) => format!("{}", x),
            None => "".to_string(),
        };

        Ok(SelectedFile {
            id,
            filename: filename_path.to_path_buf(),
            modified,
            size: file_size,
            type_string,
        })
    }

    fn get_blob_buf(
        &self,
        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &FileOrigin,
        rel_pos: RelativePos,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.get_impl(id, file_token, origin_in)?;
        match file_impl {
            FileImpl::Memory(buf) => {
                let range = rel_pos.to_abs_range(buf.size as usize);
                let buf = BlobBuf {
                    filename: None,
                    type_string: buf.type_string,
                    size: range.len() as u64,
                    bytes: buf.bytes.index(range).to_vec(),
                };

                let _ = sender.send(Ok(ReadFileProgress::Meta(buf)));
                let _ = sender.send(Ok(ReadFileProgress::EOF));

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                        Concretely, we open another file handle here, and the file might not
                        have the same underlying state (meta-info plus content) as at the time
                        create_entry was called.
                */

                let opt_filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let mime = mime_guess::from_path(metadata.path.clone()).first();
                let range = rel_pos.to_abs_range(metadata.size as usize);

                let mut file = File::open(&metadata.path)
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                let seeked_start = file
                    .seek(SeekFrom::Start(range.start as u64))
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;

                if seeked_start == (range.start as u64) {
                    let type_string = match mime {
                        Some(x) => format!("{}", x),
                        None => "".to_string(),
                    };

                    read_file_in_chunks(sender, &mut file, range.len(), opt_filename, type_string);
                    Ok(())
                } else {
                    Err(BlobURLStoreError::InvalidEntry)
                }
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // We don't need to check validity again here, since that was
                // already done for the requesting URL if necessary.
                self.get_blob_buf(
                    sender,
                    &parent_id,
                    file_token,
                    origin_in,
                    rel_pos.slice_inner(&inner_rel_pos),
                )
            },
        }
    }

    /// Convenient wrapper over `get_blob_buf`, reading the whole file without
    /// a token check.
    fn try_read_file(
        &self,
        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
        id: Uuid,
        origin_in: FileOrigin,
    ) -> Result<(), BlobURLStoreError> {
        self.get_blob_buf(
            sender,
            &id,
            &FileTokenCheck::NotRequired,
            &origin_in,
            RelativePos::full_range(),
        )
    }

    fn dec_ref(&self, id: &Uuid, origin_in: &FileOrigin) -> Result<(), BlobURLStoreError> {
        let (do_remove, opt_parent_id) = match self.entries.read().unwrap().get(id) {
            Some(entry) => {
                if *entry.origin == *origin_in {
                    let old_refs = entry.refs.fetch_sub(1, Ordering::Release);

                    if old_refs > 1 {
                        // Not the last reference, so no need to touch the parent.
                        (false, None)
                    } else {
                        // This was the last reference; if the entry refers to a
                        // parent id, dec_ref on the parent later if necessary.
                        let is_valid = entry.is_valid_url.load(Ordering::Acquire);

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = !is_valid && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id))
                        } else {
                            (do_remove, None)
                        }
                    }
                } else {
                    return Err(BlobURLStoreError::InvalidOrigin);
                }
            },
            None => return Err(BlobURLStoreError::InvalidFileID),
        };

        // Trigger removal if the last reference is gone and the entry is not
        // part of a valid Blob URL.
        if do_remove {
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }

        Ok(())
    }

    fn promote_memory(&self, id: Uuid, blob_buf: BlobBuf, set_valid: bool, origin: FileOrigin) {
        // Parse the origin to check sanity.
        if Url::parse(&origin).is_err() {
            return;
        }
        self.insert(
            id,
            FileStoreEntry {
                origin,
                file_impl: FileImpl::Memory(blob_buf),
                refs: AtomicUsize::new(1),
                is_valid_url: AtomicBool::new(set_valid),
                outstanding_tokens: Default::default(),
            },
        );
    }

    fn set_blob_url_validity(
        &self,
        validity: bool,
        id: &Uuid,
        origin_in: &FileOrigin,
    ) -> Result<(), BlobURLStoreError> {
        let (do_remove, opt_parent_id, res) = match self.entries.read().unwrap().get(id) {
            Some(entry) => {
                if *entry.origin == *origin_in {
                    entry.is_valid_url.store(validity, Ordering::Release);

                    if !validity {
                        // Check if this is the last possible reference, since
                        // `refs` only accounts for blob id holders and store
                        // entry id holders.
                        let zero_refs = entry.refs.load(Ordering::Acquire) == 0;

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = zero_refs && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id), Ok(()))
                        } else {
                            (do_remove, None, Ok(()))
                        }
                    } else {
                        (false, None, Ok(()))
                    }
                } else {
                    (false, None, Err(BlobURLStoreError::InvalidOrigin))
                }
            },
            None => (false, None, Err(BlobURLStoreError::InvalidFileID)),
        };

        if do_remove {
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }
        res
    }
}

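/// Stream a file to script in `FILE_CHUNK_SIZE` pieces over the IPC sender:
/// the first chunk is wrapped in `ReadFileProgress::Meta` together with the
/// blob's size, type and filename, subsequent chunks are sent as `Partial`,
/// and `EOF` terminates the stream. I/O errors are reported as
/// `FileSystemError` and end the stream early.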
fn read_file_in_chunks(
    sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
    file: &mut File,
    size: usize,
    opt_filename: Option<String>,
    type_string: String,
) {
    // First chunk
    let mut buf = vec![0; FILE_CHUNK_SIZE];
    match file.read(&mut buf) {
        Ok(n) => {
            buf.truncate(n);
            let blob_buf = BlobBuf {
                filename: opt_filename,
                type_string,
                size: size as u64,
                bytes: buf,
            };
            let _ = sender.send(Ok(ReadFileProgress::Meta(blob_buf)));
        },
        Err(e) => {
            let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
            return;
        },
    }

    // Send the remaining chunks
    loop {
        let mut buf = vec![0; FILE_CHUNK_SIZE];
        match file.read(&mut buf) {
            Ok(0) => {
                let _ = sender.send(Ok(ReadFileProgress::EOF));
                return;
            },
            Ok(n) => {
                buf.truncate(n);
                let _ = sender.send(Ok(ReadFileProgress::Partial(buf)));
            },
            Err(e) => {
                let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
                return;
            },
        }
    }
}

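/// Fill in the response headers for a blob body: Content-Length, an optional
/// Content-Range, Content-Type, and, when a filename is known, a
/// Content-Disposition header (a plain `filename=` form for UTF-8 charsets,
/// a percent-encoded `filename*=` form otherwise).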
fn set_headers(
    headers: &mut HeaderMap,
    content_length: u64,
    mime: Mime,
    filename: Option<String>,
    content_range: Option<ContentRange>,
) {
    headers.typed_insert(ContentLength(content_length));
    if let Some(content_range) = content_range {
        headers.typed_insert(content_range);
    }
    headers.typed_insert(ContentType::from(mime.clone()));
    let name = match filename {
        Some(name) => name,
        None => return,
    };
    let charset = mime.get_param(mime::CHARSET);
    let charset = charset
        .map(|c| c.as_ref().into())
        .unwrap_or("us-ascii".to_owned());
    // TODO(eijebong): Replace this once the typed header is there
    //                 https://github.com/hyperium/headers/issues/8
    headers.insert(
        header::CONTENT_DISPOSITION,
        HeaderValue::from_bytes(
            format!(
                "inline; {}",
                if charset.to_lowercase() == "utf-8" {
                    format!(
                        "filename=\"{}\"",
                        String::from_utf8(name.as_bytes().into()).unwrap()
                    )
                } else {
                    format!(
                        "filename*=\"{}\"''{}",
                        charset,
                        http_percent_encode(name.as_bytes())
                    )
                }
            )
            .as_bytes(),
        )
        .unwrap(),
    );
}