/// Store is responsible for managing the in-memory hashmaps of questions and answers by providing
/// initialization and read/write functions, and file I/O operations to persist these questions
/// TODO - Results returning errors should use specified types, not strings
use self::{
    answer::{Answer, AnswerDTO},
    question::{Question, QuestionDTO},
};
use crate::*;

const QUESTIONS_DB_PATH: &str = "./questions.json";
const ANSWERS_DB_PATH: &str = "./answers.json";

#[derive(Debug)]
pub struct Store {
    answers_file: File,
    questions_file: File,
    answers: HashMap<u8, Answer>,
    questions: HashMap<u8, Question>,
}

impl Store {
    // Upon initialization, we need to read questions.json and answers.json if they exist and populate our hashmaps from them.
    // Otherwise we create both files.
    // JSON formatting and I/O errors possible here are semi-handled with a message, but ultimately we will panic in those cases.
    // TODO - make this less copy/paste like
    pub fn new() -> Self {
        let questions_file: File = File::create_new(QUESTIONS_DB_PATH)
            .or_else(|e| {
                if e.kind() == ErrorKind::AlreadyExists {
                    File::options()
                        .read(true)
                        .write(true)
                        .open(QUESTIONS_DB_PATH)
                } else {
                    Err(e)
                }
            })
            .unwrap();
        let questions_json = std::io::read_to_string(&questions_file)
            .expect("could not get json from questions file");
        // Perhaps there is a more efficient/clever way aside from reading the json to a vector and mapping the vector to a hashmap.
        let questions_vec: Vec<QuestionDTO> =
            serde_json::from_str(&questions_json).expect("can't read questions.json");
        let questions: HashMap<u8, Question> = questions_vec
            .into_iter()
            .map(|question_dto: QuestionDTO| question_dto.to_entity())
            .collect();

        let answers_file: File = File::create_new(ANSWERS_DB_PATH)
            .or_else(|e| {
                if e.kind() == ErrorKind::AlreadyExists {
                    File::options().read(true).write(true).open(ANSWERS_DB_PATH)
                } else {
                    Err(e)
                }
            })
            .unwrap();
        let answers_json = std::io::read_to_string(&answers_file)
            .expect("could not get json from answers file");
        let answers_vec: Vec<AnswerDTO> =
            serde_json::from_str(&answers_json).expect("can't read answers.json");
        let answers: HashMap<u8, Answer> = answers_vec
            .into_iter()
            .map(|answer_dto: AnswerDTO| answer_dto.to_entity())
            .collect();

        Store {
            questions,
            answers,
            answers_file,
            questions_file,
        }
    }

    // Take the content of the questions hashmap, convert it to a vector of question DTOs and overwrite the file with these contents.
    // Not the most efficient approach if we are just adding or deleting a single question, but it does the job at our current scale.
    // TODO - pretty print before writing
    fn write_questions_file(&mut self) {
        let questions: &HashMap<u8, Question> = &self.questions;
        let questions_vec: Vec<QuestionDTO> = questions
            .iter()
            .map(|q: (&u8, &Question)| q.1.to_dto(*q.0))
            .collect();
        let json: String = serde_json::to_string(&questions_vec).unwrap();
        let mut f: &File = &self.questions_file;
        match f
            .rewind()
            .and(f.write_all(json.as_bytes()))
            .and(f.sync_all())
            .and(f.set_len(f.stream_position().unwrap()))
        {
            Ok(()) => (),
            _ => panic!("Could not write file"),
        }
    }

    // Take the content of the answers hashmap, convert it to a vector of answer DTOs and overwrite the file with these contents.
    fn write_answers_file(&mut self) {
        let answers: &HashMap<u8, Answer> = &self.answers;
        let answers_vec: Vec<AnswerDTO> = answers
            .iter()
            .map(|q: (&u8, &Answer)| q.1.to_dto(*q.0))
            .collect();
        let json: String = serde_json::to_string(&answers_vec).unwrap();
        let mut f: &File = &self.answers_file;
        match f
            .rewind()
            .and(f.write_all(json.as_bytes()))
            .and(f.sync_all())
            .and(f.set_len(f.stream_position().unwrap()))
        {
            Ok(()) => (),
            _ => panic!("Could not write file"),
        }
    }
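    // The rewind/write/sync/truncate sequence is duplicated in write_questions_file and
    // write_answers_file above. A minimal sketch of how that sequence could be shared;
    // `write_json_file` is a hypothetical helper name, not part of the original API,
    // and the methods above do not call it.
    #[allow(dead_code)]
    fn write_json_file(file: &File, json: &str) -> std::io::Result<()> {
        let mut f: &File = file;
        f.rewind()?;
        f.write_all(json.as_bytes())?;
        f.sync_all()?;
        // Truncate any leftover bytes from a previously longer file
        f.set_len(f.stream_position()?)?;
        Ok(())
    }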
    pub fn add_question(&mut self, id: u8, question: Question) -> Result<Question, String> {
        if self.questions.contains_key(&id) {
            return Err(format!("Question with id {} already exists", id));
        }
        match self.questions.insert(id, question.clone()) {
            None => {
                self.write_questions_file();
                Ok(question)
            }
            // Looks backwards, but insert must return None since the key cannot already exist
            _ => Err("Server Error".to_string()),
        }
    }

    pub fn remove_question(&mut self, id: u8) -> Result<Question, String> {
        match self.questions.remove(&id) {
            Some(question) => {
                self.write_questions_file();
                Ok(question)
            }
            None => Err(format!("Question with id {} does not exist", id)),
        }
    }

    pub fn fetch_one_question(&self, id: u8) -> Result<Question, String> {
        match self.questions.get(&id) {
            Some(question) => Ok(question.clone()),
            None => Err(format!("Question with id {} does not exist", id)),
        }
    }

    // By nature of the hashmap, pagination does not follow id order
    pub fn fetch_many_questions(&self, start: usize, size: usize) -> Vec<QuestionDTO> {
        self.questions
            .iter()
            .map(|q| q.1.to_dto(*q.0))
            .skip(start)
            .take(size)
            .collect()
    }

    pub fn update_question(&mut self, id: u8, question: Question) -> Result<Question, String> {
        if !self.questions.contains_key(&id) {
            return Err(format!("Question with id {} does not exist", id));
        }
        match self.questions.insert(id, question) {
            // insert returns the previous value at the key, which must exist here
            Some(question) => {
                self.write_questions_file();
                Ok(question)
            }
            None => Err("Server Error".to_string()),
        }
    }

    pub fn add_answer(&mut self, id: u8, answer: Answer) -> Result<Answer, String> {
        if self.answers.contains_key(&id) {
            return Err(format!("Answer with id {} already exists", id));
        }
        match self.answers.insert(id, answer.clone()) {
            None => {
                self.write_answers_file();
                Ok(answer)
            }
            _ => Err("Server Error".to_string()),
        }
    }
}
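
// The header TODO asks for specified error types instead of String. A minimal sketch of what
// that could look like; `StoreError` is a hypothetical type, not part of the original code,
// and the methods above still return Result<_, String>.
#[allow(dead_code)]
#[derive(Debug)]
pub enum StoreError {
    AlreadyExists(u8),
    NotFound(u8),
    ServerError,
}

impl std::fmt::Display for StoreError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            StoreError::AlreadyExists(id) => write!(f, "item with id {} already exists", id),
            StoreError::NotFound(id) => write!(f, "item with id {} does not exist", id),
            StoreError::ServerError => write!(f, "Server Error"),
        }
    }
}

impl std::error::Error for StoreError {}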