diff --git a/CHANGELOG.md b/CHANGELOG.md index 672df2ad..987f7ae2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), ### Changed +- Change layout of internal database for request and UI state storage + - This _shouldn't_ have any user impact, it's just a technical improvement. If you notice any issues such as missing or incorrect request history, please [let me know](https://github.com/LucasPickering/slumber/issues/new?assignees=&labels=bug&projects=&template=bug_report.md) - Upgrade to Rust 1.80 - Disable unavailable menu actions [#222](https://github.com/LucasPickering/slumber/issues/222) diff --git a/crates/slumber_core/src/db.rs b/crates/slumber_core/src/db.rs index 96a40909..45e0d95a 100644 --- a/crates/slumber_core/src/db.rs +++ b/crates/slumber_core/src/db.rs @@ -1,24 +1,22 @@ //! The database is responsible for persisting data, including requests and //! responses. +mod convert; +mod migrations; + use crate::{ collection::{ProfileId, RecipeId}, + db::convert::{CollectionPath, JsonEncoded, SqlWrap}, http::{Exchange, ExchangeSummary, RequestId}, util::{DataDirectory, ResultTraced}, }; use anyhow::{anyhow, Context}; use derive_more::Display; -use reqwest::StatusCode; -use rusqlite::{ - named_params, - types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, ValueRef}, - Connection, DatabaseName, OptionalExtension, Row, ToSql, -}; -use rusqlite_migration::{Migrations, M}; +use rusqlite::{named_params, Connection, DatabaseName, OptionalExtension}; use serde::{de::DeserializeOwned, Serialize}; use std::{ fmt::Debug, - ops::Deref, + ops::DerefMut, path::{Path, PathBuf}, sync::{Arc, Mutex}, }; @@ -36,6 +34,8 @@ use uuid::Uuid; /// to a collection should have an FK column to the `collections` table. /// /// This uses an `Arc` internally, so it's safe and cheap to clone. 
+/// +/// Schema is defined in [migrations] #[derive(Clone, Debug)] pub struct Database { /// Data is stored in a sqlite DB. Mutex is needed for multi-threaded @@ -46,12 +46,6 @@ pub struct Database { connection: Arc>, } -/// A unique ID for a collection. This is generated when the collection is -/// inserted into the DB. -#[derive(Copy, Clone, Debug, Display)] -#[cfg_attr(test, derive(Eq, Hash, PartialEq))] -pub struct CollectionId(Uuid); - impl Database { const FILE: &'static str = "state.sqlite"; @@ -82,58 +76,12 @@ impl Database { /// Apply database migrations fn migrate(connection: &mut Connection) -> anyhow::Result<()> { - let migrations = Migrations::new(vec![ - M::up( - // Path is the *canonicalzed* path to a collection file, - // guaranteeing it will be stable and unique - "CREATE TABLE collections ( - id UUID PRIMARY KEY NOT NULL, - path BLOB NOT NULL UNIQUE - )", - ) - .down("DROP TABLE collections"), - M::up( - // The request state kind is a bit hard to map to tabular data. - // Everything that we need to query on (HTTP status code, - // end_time, etc.) is in its own column. Therequest/response - // will be serialized into msgpack bytes - "CREATE TABLE requests ( - id UUID PRIMARY KEY NOT NULL, - collection_id UUID NOT NULL, - profile_id TEXT, - recipe_id TEXT NOT NULL, - start_time TEXT NOT NULL, - end_time TEXT NOT NULL, - request BLOB NOT NULL, - response BLOB NOT NULL, - status_code INTEGER NOT NULL, - FOREIGN KEY(collection_id) REFERENCES collections(id) - )", - ) - .down("DROP TABLE requests"), - M::up( - // keys+values will be serialized as msgpack - "CREATE TABLE ui_state ( - key BLOB NOT NULL, - collection_id UUID NOT NULL, - value BLOB NOT NULL, - PRIMARY KEY (key, collection_id), - FOREIGN KEY(collection_id) REFERENCES collections(id) - )", - ) - .down("DROP TABLE ui_state"), - // This is a sledgehammer migration. Added when we switch from - // rmp_serde::to_vec to rmp_serde::to_vec_named. 
This affected the - // serialization of all binary blobs, so there's no easy way to - // migrate it all. It's easiest just to wipe it all out. - M::up("DELETE FROM requests; DELETE FROM ui_state;").down(""), - ]); - migrations.to_latest(connection)?; + migrations::migrations().to_latest(connection)?; Ok(()) } /// Get a reference to the DB connection. Panics if the lock is poisoned - fn connection(&self) -> impl '_ + Deref { + fn connection(&self) -> impl '_ + DerefMut { self.connection.lock().expect("Connection lock poisoned") } @@ -141,7 +89,9 @@ impl Database { pub fn collections(&self) -> anyhow::Result> { self.connection() .prepare("SELECT path FROM collections")? - .query_map([], |row| Ok(row.get::<_, ByteEncoded<_>>("path")?.0)) + .query_map([], |row| { + Ok(row.get::<_, CollectionPath>("path")?.into()) + }) .context("Error fetching collections")? .collect::>>() .context("Error extracting collection data") @@ -189,22 +139,22 @@ impl Database { // Update each table in individually connection .execute( - "UPDATE requests SET collection_id = :target + "UPDATE requests_v2 SET collection_id = :target WHERE collection_id = :source", named_params! {":source": source, ":target": target}, ) - .context("Error migrating table `requests`") + .context("Error migrating table `requests_v2`") .traced()?; connection .execute( // Overwrite UI state. Maybe this isn't the best UX, but sqlite // doesn't provide an "UPDATE OR DELETE" so this is easiest and // still reasonable - "UPDATE OR REPLACE ui_state SET collection_id = :target + "UPDATE OR REPLACE ui_state_v2 SET collection_id = :target WHERE collection_id = :source", named_params! {":source": source, ":target": target}, ) - .context("Error migrating table `ui_state`") + .context("Error migrating table `ui_state_v2`") .traced()?; connection @@ -234,7 +184,7 @@ impl Database { "INSERT INTO collections (id, path) VALUES (:id, :path) ON CONFLICT(path) DO NOTHING", named_params! 
{ - ":id": CollectionId(Uuid::new_v4()), + ":id": CollectionId::new(), ":path": &path, }, ) @@ -290,7 +240,7 @@ impl CollectionDatabase { self.database .connection() .query_row( - "SELECT * FROM requests + "SELECT * FROM requests_v2 WHERE collection_id = :collection_id AND id = :request_id ORDER BY start_time DESC LIMIT 1", @@ -325,7 +275,7 @@ impl CollectionDatabase { .connection() .query_row( // `IS` needed for profile_id so `None` will match `NULL` - "SELECT * FROM requests + "SELECT * FROM requests_v2 WHERE collection_id = :collection_id AND profile_id IS :profile_id AND recipe_id = :recipe_id @@ -364,19 +314,36 @@ impl CollectionDatabase { .connection() .execute( "INSERT INTO - requests ( + requests_v2 ( id, collection_id, profile_id, recipe_id, start_time, end_time, - request, - response, - status_code + method, + url, + request_headers, + request_body, + status_code, + response_headers, + response_body ) - VALUES (:id, :collection_id, :profile_id, :recipe_id, - :start_time, :end_time, :request, :response, :status_code)", + VALUES ( + :id, + :collection_id, + :profile_id, + :recipe_id, + :start_time, + :end_time, + :method, + :url, + :request_headers, + :request_body, + :status_code, + :response_headers, + :response_body + )", named_params! 
{ ":id": exchange.id, ":collection_id": self.collection_id, @@ -384,9 +351,15 @@ impl CollectionDatabase { ":recipe_id": &exchange.request.recipe_id, ":start_time": &exchange.start_time, ":end_time": &exchange.end_time, - ":request": &ByteEncoded(&*exchange.request), - ":response": &ByteEncoded(&*exchange.response), + + ":method": exchange.request.method.as_str(), + ":url": exchange.request.url.as_str(), + ":request_headers": SqlWrap(&exchange.request.headers), + ":request_body": exchange.request.body.as_deref(), + ":status_code": exchange.response.status.as_u16(), + ":response_headers": SqlWrap(&exchange.response.headers), + ":response_body": exchange.response.body.bytes(), }, ) .context(format!( @@ -411,7 +384,7 @@ impl CollectionDatabase { self.database .connection() .prepare( - "SELECT id, start_time, end_time, status_code FROM requests + "SELECT id, start_time, end_time, status_code FROM requests_v2 WHERE collection_id = :collection_id AND profile_id IS :profile_id AND recipe_id = :recipe_id @@ -431,8 +404,13 @@ impl CollectionDatabase { .context("Error extracting request history") } - /// Get the value of a UI state field - pub fn get_ui(&self, key: K) -> anyhow::Result> + /// Get the value of a UI state field. Key type is included as part of the + /// key, to disambiguate between keys of identical structure + pub fn get_ui( + &self, + key_type: &str, + key: K, + ) -> anyhow::Result> where K: Debug + Serialize, V: Debug + DeserializeOwned, @@ -441,14 +419,17 @@ impl CollectionDatabase { .database .connection() .query_row( - "SELECT value FROM ui_state - WHERE collection_id = :collection_id AND key = :key", + "SELECT value FROM ui_state_v2 + WHERE collection_id = :collection_id + AND key_type = :key_type + AND key = :key", named_params! 
{ ":collection_id": self.collection_id, - ":key": ByteEncoded(&key), + ":key_type": key_type, + ":key": JsonEncoded(&key), }, |row| { - let value: ByteEncoded = row.get("value")?; + let value: JsonEncoded = row.get("value")?; Ok(value.0) }, ) @@ -460,7 +441,12 @@ impl CollectionDatabase { } /// Set the value of a UI state field - pub fn set_ui(&self, key: K, value: V) -> anyhow::Result<()> + pub fn set_ui( + &self, + key_type: &str, + key: K, + value: V, + ) -> anyhow::Result<()> where K: Debug + Serialize, V: Debug + Serialize, @@ -470,13 +456,14 @@ impl CollectionDatabase { .connection() .execute( // Upsert! - "INSERT INTO ui_state (collection_id, key, value) - VALUES (:collection_id, :key, :value) + "INSERT INTO ui_state_v2 (collection_id, key_type, key, value) + VALUES (:collection_id, :key_type, :key, :value) ON CONFLICT DO UPDATE SET value = excluded.value", named_params! { ":collection_id": self.collection_id, - ":key": ByteEncoded(key), - ":value": ByteEncoded(value), + ":key_type": key_type, + ":key": JsonEncoded(key), + ":value": JsonEncoded(value), }, ) .context("Error saving UI state to database") @@ -490,6 +477,18 @@ impl CollectionDatabase { } } +/// A unique ID for a collection. This is generated when the collection is +/// inserted into the DB. 
+#[derive(Copy, Clone, Debug, Display)] +#[cfg_attr(test, derive(Eq, Hash, PartialEq))] +pub struct CollectionId(Uuid); + +impl CollectionId { + fn new() -> Self { + Self(Uuid::new_v4()) + } +} + /// Create an in-memory DB, only for testing #[cfg(any(test, feature = "test"))] impl crate::test_util::Factory for Database { @@ -513,157 +512,6 @@ impl crate::test_util::Factory for CollectionDatabase { } } -impl ToSql for CollectionId { - fn to_sql(&self) -> rusqlite::Result> { - self.0.to_sql() - } -} - -impl FromSql for CollectionId { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - Ok(Self(Uuid::column_result(value)?)) - } -} - -impl ToSql for ProfileId { - fn to_sql(&self) -> rusqlite::Result> { - self.deref().to_sql() - } -} - -impl FromSql for ProfileId { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - Ok(String::column_result(value)?.into()) - } -} - -impl ToSql for RequestId { - fn to_sql(&self) -> rusqlite::Result> { - self.0.to_sql() - } -} - -impl FromSql for RequestId { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - Ok(Self(Uuid::column_result(value)?)) - } -} - -impl ToSql for RecipeId { - fn to_sql(&self) -> rusqlite::Result> { - self.deref().to_sql() - } -} - -impl FromSql for RecipeId { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - Ok(String::column_result(value)?.into()) - } -} - -/// Neat little wrapper for a collection path, to make sure it gets -/// canonicalized and serialized/deserialized consistently -#[derive(Debug, Display)] -#[display("{}", _0.0.display())] -struct CollectionPath(ByteEncoded); - -impl From for PathBuf { - fn from(path: CollectionPath) -> Self { - path.0 .0 - } -} - -impl TryFrom<&Path> for CollectionPath { - type Error = anyhow::Error; - - fn try_from(path: &Path) -> Result { - path.canonicalize() - .context(format!("Error canonicalizing path {path:?}")) - .traced() - .map(|path| Self(ByteEncoded(path))) - } -} - -impl ToSql for CollectionPath { - fn to_sql(&self) -> 
rusqlite::Result> { - self.0.to_sql() - } -} - -impl FromSql for CollectionPath { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - ByteEncoded::::column_result(value).map(Self) - } -} - -/// A wrapper to serialize/deserialize a value as msgpack for DB storage -#[derive(Debug)] -struct ByteEncoded(T); - -impl ToSql for ByteEncoded { - fn to_sql(&self) -> rusqlite::Result> { - let bytes = rmp_serde::to_vec_named(&self.0).map_err(|err| { - rusqlite::Error::ToSqlConversionFailure(Box::new(err)) - })?; - Ok(ToSqlOutput::Owned(bytes.into())) - } -} - -impl FromSql for ByteEncoded { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - let bytes = value.as_blob()?; - let value: T = rmp_serde::from_slice(bytes) - .map_err(|err| FromSqlError::Other(Box::new(err)))?; - Ok(Self(value)) - } -} - -/// Convert from `SELECT * FROM requests` -impl<'a, 'b> TryFrom<&'a Row<'b>> for Exchange { - type Error = rusqlite::Error; - - fn try_from(row: &'a Row<'b>) -> Result { - Ok(Self { - id: row.get("id")?, - start_time: row.get("start_time")?, - end_time: row.get("end_time")?, - // Deserialize from bytes - request: Arc::new(row.get::<_, ByteEncoded<_>>("request")?.0), - response: Arc::new(row.get::<_, ByteEncoded<_>>("response")?.0), - }) - } -} - -/// Convert from SQL row -impl<'a, 'b> TryFrom<&'a Row<'b>> for ExchangeSummary { - type Error = rusqlite::Error; - - fn try_from(row: &'a Row<'b>) -> Result { - // Use a wrapper struct to deserialize the status code from an int - struct StatusCodeWrapper(StatusCode); - fn other(error: T) -> FromSqlError - where - T: 'static + std::error::Error + Send + Sync, - { - FromSqlError::Other(Box::new(error)) - } - impl FromSql for StatusCodeWrapper { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - let code: u16 = value.as_i64()?.try_into().map_err(other)?; - let code = StatusCode::from_u16(code as u16).map_err(other)?; - Ok(Self(code)) - } - } - - Ok(Self { - id: row.get("id")?, - start_time: row.get("start_time")?, - 
end_time: row.get("end_time")?, - status: row.get::<_, StatusCodeWrapper>("status_code")?.0, - }) - } -} - #[cfg(test)] mod tests { use super::*; @@ -685,11 +533,12 @@ mod tests { Exchange::factory((Some("profile1".into()), "recipe1".into())); let profile_id = exchange1.request.profile_id.as_ref(); let recipe_id = &exchange1.request.recipe_id; + let key_type = "MyKey"; let ui_key = "key1"; collection1.insert_exchange(&exchange1).unwrap(); - collection1.set_ui(ui_key, "value1").unwrap(); + collection1.set_ui(key_type, ui_key, "value1").unwrap(); collection2.insert_exchange(&exchange2).unwrap(); - collection2.set_ui(ui_key, "value2").unwrap(); + collection2.set_ui(key_type, ui_key, "value2").unwrap(); // Sanity checks assert_eq!( @@ -701,7 +550,7 @@ mod tests { exchange1.id ); assert_eq!( - collection1.get_ui::<_, String>(ui_key).unwrap(), + collection1.get_ui::<_, String>(key_type, ui_key).unwrap(), Some("value1".into()) ); assert_eq!( @@ -713,7 +562,7 @@ mod tests { exchange2.id ); assert_eq!( - collection2.get_ui::<_, String>(ui_key).unwrap(), + collection2.get_ui::<_, String>(key_type, ui_key).unwrap(), Some("value2".into()) ); @@ -730,7 +579,7 @@ mod tests { exchange2.id ); assert_eq!( - collection1.get_ui::<_, String>(ui_key).unwrap(), + collection1.get_ui::<_, String>(key_type, ui_key).unwrap(), Some("value2".into()) ); @@ -892,16 +741,17 @@ mod tests { .into_collection(Path::new("Cargo.toml")) .unwrap(); + let key_type = "MyKey"; let ui_key = "key1"; - collection1.set_ui(ui_key, "value1").unwrap(); - collection2.set_ui(ui_key, "value2").unwrap(); + collection1.set_ui(key_type, ui_key, "value1").unwrap(); + collection2.set_ui(key_type, ui_key, "value2").unwrap(); assert_eq!( - collection1.get_ui::<_, String>(ui_key).unwrap(), + collection1.get_ui::<_, String>(key_type, ui_key).unwrap(), Some("value1".into()) ); assert_eq!( - collection2.get_ui::<_, String>(ui_key).unwrap(), + collection2.get_ui::<_, String>(key_type, ui_key).unwrap(), Some("value2".into()) ); 
} diff --git a/crates/slumber_core/src/db/convert.rs b/crates/slumber_core/src/db/convert.rs new file mode 100644 index 00000000..1abd9b09 --- /dev/null +++ b/crates/slumber_core/src/db/convert.rs @@ -0,0 +1,350 @@ +//! Implementations to convert between Rust types and SQL data + +use crate::{ + collection::{ProfileId, RecipeId}, + db::CollectionId, + http::{ + Exchange, ExchangeSummary, RequestId, RequestRecord, ResponseRecord, + }, + util::ResultTraced, +}; +use anyhow::Context; +use bytes::Bytes; +use core::str; +use derive_more::Display; +use reqwest::{ + header::{HeaderMap, HeaderName, HeaderValue}, + Method, StatusCode, +}; +use rusqlite::{ + types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, ValueRef}, + Row, ToSql, +}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{ + fmt::Debug, + ops::Deref, + path::{Path, PathBuf}, + str::Utf8Error, + sync::Arc, +}; +use thiserror::Error; +use url::Url; +use uuid::Uuid; +use winnow::{ + combinator::{repeat, terminated}, + token::take_while, + PResult, Parser, +}; + +impl ToSql for CollectionId { + fn to_sql(&self) -> rusqlite::Result> { + self.0.to_sql() + } +} + +impl FromSql for CollectionId { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + Ok(Self(Uuid::column_result(value)?)) + } +} + +impl ToSql for ProfileId { + fn to_sql(&self) -> rusqlite::Result> { + self.deref().to_sql() + } +} + +impl FromSql for ProfileId { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + Ok(String::column_result(value)?.into()) + } +} + +impl ToSql for RequestId { + fn to_sql(&self) -> rusqlite::Result> { + self.0.to_sql() + } +} + +impl FromSql for RequestId { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + Ok(Self(Uuid::column_result(value)?)) + } +} + +impl ToSql for RecipeId { + fn to_sql(&self) -> rusqlite::Result> { + self.deref().to_sql() + } +} + +impl FromSql for RecipeId { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + Ok(String::column_result(value)?.into()) + 
} +} + +/// Wrapper to serialize paths as strings in the DB. This is flawed because +/// paths aren't guaranteed to be UTF-8 on either Windows or Linux, but in +/// practice they always should be. The alternative would be to serialize them +/// as raw bytes, but on Windows that requires converting to/from UTF-16 which +/// is even more complicated. +/// +/// Note: In the past (pre-1.8.0) this was encoded via MessagePack, which relied +/// on the `Serialize`/`Deserialize` implementation, which has the same +/// restrictions (it defers to the OS encoding). +#[derive(Debug, Display)] +#[display("{}", _0.display())] +pub struct CollectionPath(PathBuf); + +impl CollectionPath { + /// Create a `CollectionPath` from a path known to already be canonicalized. + /// Useful when decoding from an existing DB row. + pub fn from_canonical(path: PathBuf) -> Self { + Self(path) + } +} + +impl From for PathBuf { + fn from(value: CollectionPath) -> Self { + value.0 + } +} + +#[cfg(test)] +impl From for CollectionPath { + fn from(path: PathBuf) -> Self { + Self(path) + } +} + +/// Canonicalize paths during creation to deduplicate potential differences due +/// to symlinks, cwd, etc. +impl TryFrom<&Path> for CollectionPath { + type Error = anyhow::Error; + + fn try_from(path: &Path) -> Result { + path.canonicalize() + .context(format!("Error canonicalizing path {path:?}")) + .traced() + .map(Self) + } +} + +/// Serialize path as UTF-8 +impl ToSql for CollectionPath { + fn to_sql(&self) -> rusqlite::Result> { + #[derive(Debug, Error)] + #[error("Collection path `{0:?}` is not valid UTF-8")] + struct PathStringifyError(PathBuf); + + self.0 + .to_str() + .ok_or_else(|| { + rusqlite::Error::ToSqlConversionFailure( + PathStringifyError(self.0.clone()).into(), + ) + })?
+ .as_bytes() + .to_sql() + } +} + +/// Deserialize path from UTF-8 +impl FromSql for CollectionPath { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + #[derive(Debug, Error)] + #[error("Error parsing collection path as UTF-8")] + struct PathParseError(Utf8Error); + + let path = str::from_utf8(value.as_blob()?) + .map_err(PathParseError) + .map_err(error_other)? + .to_owned(); + Ok(Self(path.into())) + } +} + +/// A wrapper to serialize/deserialize a value as JSON for DB storage +#[derive(Debug)] +pub struct JsonEncoded(pub T); + +impl ToSql for JsonEncoded { + fn to_sql(&self) -> rusqlite::Result> { + let s = serde_json::to_string(&self.0).map_err(|err| { + rusqlite::Error::ToSqlConversionFailure(Box::new(err)) + })?; + Ok(ToSqlOutput::Owned(s.into())) + } +} + +impl FromSql for JsonEncoded { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + let s = value.as_str()?; + let value: T = serde_json::from_str(s).map_err(error_other)?; + Ok(Self(value)) + } +} + +/// Convert from `SELECT * FROM requests_v2` +impl<'a, 'b> TryFrom<&'a Row<'b>> for Exchange { + type Error = rusqlite::Error; + + fn try_from(row: &'a Row<'b>) -> Result { + let id: RequestId = row.get("id")?; + Ok(Self { + id, + start_time: row.get("start_time")?, + end_time: row.get("end_time")?, + request: Arc::new(RequestRecord { + id, + profile_id: row.get("profile_id")?, + recipe_id: row.get("recipe_id")?, + // Use wrappers for all of these to specify the conversion + method: row.get::<_, SqlWrap<_>>("method")?.0, + url: row.get::<_, SqlWrap<_>>("url")?.0, + headers: row.get::<_, SqlWrap>("request_headers")?.0, + body: row + .get::<_, Option>>("request_body")? + .map(|wrap| wrap.0), + }), + response: Arc::new(ResponseRecord { + status: row.get::<_, SqlWrap>("status_code")?.0, + headers: row + .get::<_, SqlWrap>("response_headers")? + .0, + body: row.get::<_, SqlWrap>("response_body")?.0.into(), + }), + }) + } +} + +/// Convert from `SELECT ... 
FROM requests_v2` +impl<'a, 'b> TryFrom<&'a Row<'b>> for ExchangeSummary { + type Error = rusqlite::Error; + + fn try_from(row: &'a Row<'b>) -> Result { + Ok(Self { + id: row.get("id")?, + start_time: row.get("start_time")?, + end_time: row.get("end_time")?, + status: row.get::<_, SqlWrap>("status_code")?.0, + }) + } +} + +/// A wrapper to define `ToSql`/`FromSql` impls on foreign types, to get around +/// the orphan rule +pub struct SqlWrap(pub T); + +impl FromSql for SqlWrap { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + value.as_str()?.parse().map(Self).map_err(error_other) + } +} + +impl FromSql for SqlWrap { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + value.as_str()?.parse().map(Self).map_err(error_other) + } +} + +impl FromSql for SqlWrap { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + // Clone is necessary because the bytes live in sqlite FFI land + let bytes = value.as_blob()?.to_owned(); + Ok(Self(bytes.into())) + } +} + +impl FromSql for SqlWrap { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + let code: u16 = value.as_i64()?.try_into().map_err(error_other)?; + let code = StatusCode::from_u16(code as u16).map_err(error_other)?; + Ok(Self(code)) + } +} + +// Serialize header map using the same format it gets in HTTP: key:value, one +// entry per line. 
The spec disallows colons in keys and newlines in values so +// it's safe to use both as delimiters + +/// Char between header name and value +const HEADER_FIELD_DELIM: u8 = b':'; +/// Char between header lines +/// https://www.rfc-editor.org/rfc/rfc9110.html#name-field-values +const HEADER_LINE_DELIM: u8 = b'\n'; + +impl<'a> ToSql for SqlWrap<&'a HeaderMap> { + fn to_sql(&self) -> rusqlite::Result> { + // We know the exact capacity we'll need so we can avoid reallocations + let capacity = self + .0 + .iter() + .map(|(name, value)| { + // Include extra bytes for the delimiters + name.as_str().as_bytes().len() + 1 + value.as_bytes().len() + 1 + }) + .sum(); + let mut buf: Vec = Vec::with_capacity(capacity); + + for (name, value) in self.0.iter() { + buf.extend(name.as_str().as_bytes()); + buf.push(HEADER_FIELD_DELIM); + buf.extend(value.as_bytes()); + buf.push(HEADER_LINE_DELIM); + } + + Ok(buf.into()) + } +} + +impl FromSql for SqlWrap { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + fn header_line( + input: &mut &[u8], + ) -> PResult<(HeaderName, HeaderValue)> { + ( + terminated( + take_while(1.., |c| c != HEADER_FIELD_DELIM) + .try_map(HeaderName::from_bytes), + HEADER_FIELD_DELIM, + ), + terminated( + take_while(1.., |c| c != HEADER_LINE_DELIM) + .try_map(HeaderValue::from_bytes), + HEADER_LINE_DELIM, + ), + ) + .parse_next(input) + } + + let bytes = value.as_blob()?; + let lines = repeat(0.., header_line) + .fold(HeaderMap::new, |mut acc, (name, value)| { + acc.insert(name, value); + acc + }) + .parse(bytes) + .map_err(|error| { + /// This is the only way I could figure out to convert the parse + /// error to something that implements `std::error::Error` + /// https://github.com/winnow-rs/winnow/discussions/329 + #[derive(Debug, Error)] + #[error("{0}")] + struct HeaderParseError(String); + + error_other(HeaderParseError(error.to_string())) + })?; + Ok(Self(lines)) + } +} + +/// Create an `Other` variant of [FromSqlError] +fn error_other(error: T)
-> FromSqlError +where + T: 'static + std::error::Error + Send + Sync, +{ + FromSqlError::Other(Box::new(error)) +} diff --git a/crates/slumber_core/src/db/migrations.rs b/crates/slumber_core/src/db/migrations.rs new file mode 100644 index 00000000..ba5bb178 --- /dev/null +++ b/crates/slumber_core/src/db/migrations.rs @@ -0,0 +1,583 @@ +use crate::{ + db::{ + convert::{CollectionPath, SqlWrap}, + CollectionId, + }, + http::Exchange, + util::ResultTraced, +}; +use anyhow::Context; +use rusqlite::{ + named_params, + types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, ValueRef}, + Row, ToSql, Transaction, +}; +use rusqlite_migration::{HookResult, Migrations, M}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +/// Get all DB migrations in history +pub fn migrations() -> Migrations<'static> { + // There's no need for any down migrations here, because we have no + // mechanism for going backwards + Migrations::new(vec![ + M::up( + // Path is the *canonicalized* path to a collection file, + // guaranteeing it will be stable and unique + "CREATE TABLE collections ( + id UUID PRIMARY KEY NOT NULL, + path BLOB NOT NULL UNIQUE + )", + ), + M::up( + // WARNING: this has been totally abolished by a later migration + // The request state kind is a bit hard to map to tabular data. + // Everything that we need to query on (HTTP status code, + end_time, etc.) is in its own column.
The request/response + // will be serialized into msgpack bytes + "CREATE TABLE requests ( + id UUID PRIMARY KEY NOT NULL, + collection_id UUID NOT NULL, + profile_id TEXT, + recipe_id TEXT NOT NULL, + start_time TEXT NOT NULL, + end_time TEXT NOT NULL, + request BLOB NOT NULL, + response BLOB NOT NULL, + status_code INTEGER NOT NULL, + FOREIGN KEY(collection_id) REFERENCES collections(id) + )", + ), + M::up( + // keys+values will be serialized as msgpack + "CREATE TABLE ui_state ( + key BLOB NOT NULL, + collection_id UUID NOT NULL, + value BLOB NOT NULL, + PRIMARY KEY (key, collection_id), + FOREIGN KEY(collection_id) REFERENCES collections(id) + )", + ), + // This is a sledgehammer migration. Added when we switch from + // rmp_serde::to_vec to rmp_serde::to_vec_named. This affected the + // serialization of all binary blobs, so there's no easy way to + // migrate it all. It's easiest just to wipe it all out. + M::up("DELETE FROM requests; DELETE FROM ui_state;"), + // New table that flattens everything into its own column. This makes + // it easy to browse data in the sqlite CLI, and gives better control + // over migrations in the future if we add more fields. + M::up_with_hook( + "CREATE TABLE requests_v2 ( + id UUID PRIMARY KEY NOT NULL, + collection_id UUID NOT NULL, + profile_id TEXT, + recipe_id TEXT NOT NULL, + start_time TEXT NOT NULL, + end_time TEXT NOT NULL, + + method TEXT NOT NULL, + url TEXT_NOT NULL, + request_headers BLOB NOT NULL, + request_body BLOB, + + status_code INTEGER NOT NULL, + response_headers BLOB NOT NULL, + response_body BLOB NOT NULL, + + FOREIGN KEY(collection_id) REFERENCES collections(id) + )", + migrate_requests_v2_up, + ), + // UI state is now JSON encoded, instead of msgpack. 
This makes it + // easier to browse, and the size payment should be minimal because + // the key/value structure is simple + M::up_with_hook( + "CREATE TABLE ui_state_v2 ( + collection_id UUID NOT NULL, + key_type TEXT NOT NULL, + key TEXT NOT NULL, + value TEXT NOT NULL, + PRIMARY KEY (collection_id, key_type, key), + FOREIGN KEY(collection_id) REFERENCES collections(id) + )", + migrate_ui_state_v2_up, + ), + // Encode collection path as text instead of MessagePacking it. We + // could change the type of the path column to TEXT, but sqlite doesn't + // support modifying column type. We could add a new column, but it + // also doesn't support dropping columns with UNIQUE so the old + // one would still be there + M::up_with_hook("", migrate_collection_paths), + ]) +} + +/// Post-up hook to copy data from the `requests` table to `requests_v2`. This +/// will leave the old table around, so we can recover user data if something +/// goes wrong. We'll delete it in a later migration. +/// +/// To be removed in https://github.com/LucasPickering/slumber/issues/306 +fn migrate_requests_v2_up(transaction: &Transaction) -> HookResult { + fn load_exchange( + row: &Row<'_>, + ) -> Result<(CollectionId, Exchange), rusqlite::Error> { + let collection_id = row.get("collection_id")?; + let exchange = Exchange { + id: row.get("id")?, + start_time: row.get("start_time")?, + end_time: row.get("end_time")?, + // Deserialize from bytes + request: Arc::new(row.get::<_, ByteEncoded<_>>("request")?.0), + response: Arc::new(row.get::<_, ByteEncoded<_>>("response")?.0), + }; + Ok((collection_id, exchange)) + } + + info!("Migrating table `requests` -> `requests_v2`"); + let mut select_stmt = transaction.prepare("SELECT * FROM requests")?; + let mut insert_stmt = transaction.prepare( + "INSERT INTO requests_v2 ( + id, + collection_id, + profile_id, + recipe_id, + start_time, + end_time, + method, + url, + request_headers, + request_body, + status_code, + response_headers, + response_body + ) 
VALUES ( + :id, + :collection_id, + :profile_id, + :recipe_id, + :start_time, + :end_time, + :method, + :url, + :request_headers, + :request_body, + :status_code, + :response_headers, + :response_body + )", + )?; + + for result in select_stmt.query_map([], load_exchange)? { + let Ok((collection_id, exchange)) = result + .context("Error migrating from `requests` -> `requests_v2`") + .traced() + else { + // Skip any conversions that fail so we don't kill everything + continue; + }; + + info!( + %collection_id, + ?exchange, + "Copying row from `requests` -> `requests_v2`", + ); + insert_stmt.execute(named_params! { + ":id": exchange.id, + ":collection_id": collection_id, + ":profile_id": &exchange.request.profile_id, + ":recipe_id": &exchange.request.recipe_id, + ":start_time": &exchange.start_time, + ":end_time": &exchange.end_time, + + ":method": exchange.request.method.as_str(), + ":url": exchange.request.url.as_str(), + ":request_headers": SqlWrap(&exchange.request.headers), + ":request_body": exchange.request.body.as_deref(), + + ":status_code": exchange.response.status.as_u16(), + ":response_headers": SqlWrap(&exchange.response.headers), + ":response_body": exchange.response.body.bytes(), + })?; + } + + Ok(()) +} + +/// Copy rows from ui_state -> ui_state_v2. Drop the old table since, unlike +/// requests, it's not a huge deal if we lose some data. 
+/// +/// To be removed in https://github.com/LucasPickering/slumber/issues/306 +fn migrate_ui_state_v2_up(transaction: &Transaction) -> HookResult { + #[derive(Debug)] + struct V1Row { + collection_id: CollectionId, + key_type: String, + key: serde_json::Value, + value: serde_json::Value, + } + + fn load_row(row: &Row) -> Result { + // Key is encoded as a tuple of (type name, key) + let ByteEncoded((key_type, key)): ByteEncoded<( + String, + serde_json::Value, + )> = row.get("key")?; + Ok(V1Row { + collection_id: row.get("collection_id")?, + key_type, + key, + value: row.get::<_, ByteEncoded>("value")?.0, + }) + } + + info!("Migrating table `ui_state` -> `ui_state_v2`"); + let mut select_stmt = transaction.prepare("SELECT * FROM ui_state")?; + let mut insert_stmt = transaction.prepare( + "INSERT INTO ui_state_v2 (collection_id, key_type, key, value) + VALUES (:collection_id, :key_type, :key, :value)", + )?; + + for result in select_stmt.query_map([], load_row)? { + let Ok(row) = result + .context("Error migrating from `ui_state` -> `ui_state_v2`") + .traced() + else { + // Skip any conversions that fail so we don't kill everything + continue; + }; + + info!(?row, "Copying row from `ui_state` -> `ui_state_v2`"); + insert_stmt.execute(named_params! { + ":collection_id": row.collection_id, + ":key_type": row.key_type, + ":key": row.key.to_string(), + ":value": row.value.to_string(), + })?; + } + + info!("Dropping table `ui_state`"); + transaction.execute("DROP TABLE ui_state", [])?; + Ok(()) +} + +/// Migrate `collections.path` from MessagePack encoding to strings. +/// Theoretically if there is a stored path with non-UTF-8 bytes, that will +/// cause a failure here. In practice though, those are extremely rare so we're +/// really just lopping off the msgpack prefix bytes. 
+/// +/// To be removed in https://github.com/LucasPickering/slumber/issues/306 +fn migrate_collection_paths(transaction: &Transaction) -> HookResult { + fn load_row( + row: &Row, + ) -> Result<(CollectionId, CollectionPath), rusqlite::Error> { + let id = row.get("id")?; + let path = CollectionPath::from_canonical( + row.get::<_, ByteEncoded>("path")?.0, + ); + Ok((id, path)) + } + + info!("Migrating table `collections` from MessagePack to UTF-8"); + let mut select_stmt = transaction.prepare("SELECT * FROM collections")?; + let mut update_stmt = transaction + .prepare("UPDATE collections SET path = :path WHERE id = :id")?; + for result in select_stmt.query_map([], load_row)? { + // If something goes wrong here we want to crash, because missing a + // migration on a collection is pretty bad. It means the entire + // collection history would be invisible to the user. + let (id, path) = result?; + update_stmt.execute(named_params! {":id": id, ":path": path})?; + } + + Ok(()) +} + +/// A wrapper to serialize/deserialize a value as msgpack for DB storage. We +/// don't use this for any live schemas, just keeping it around for migrations +/// from old data formats. 
+/// +/// To be removed in https://github.com/LucasPickering/slumber/issues/306 +#[derive(Debug)] +pub struct ByteEncoded(pub T); + +impl ToSql for ByteEncoded { + fn to_sql(&self) -> rusqlite::Result> { + let bytes = rmp_serde::to_vec_named(&self.0).map_err(|err| { + rusqlite::Error::ToSqlConversionFailure(Box::new(err)) + })?; + Ok(ToSqlOutput::Owned(bytes.into())) + } +} + +impl FromSql for ByteEncoded { + fn column_result(value: ValueRef<'_>) -> FromSqlResult { + let bytes = value.as_blob()?; + let value: T = rmp_serde::from_slice(bytes) + .map_err(|error| FromSqlError::Other(Box::new(error)))?; + Ok(Self(value)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + db::convert::{CollectionPath, JsonEncoded}, + http::{RequestRecord, ResponseRecord}, + test_util::Factory, + util::get_repo_root, + }; + use itertools::Itertools; + use reqwest::{Method, StatusCode}; + use rstest::{fixture, rstest}; + use rusqlite::Connection; + use serde_json::json; + + const MIGRATION_COLLECTIONS: usize = 1; + const MIGRATION_ALL_V1: usize = 4; + const MIGRATION_REQUESTS_V2: usize = MIGRATION_ALL_V1 + 1; + const MIGRATION_UI_STATE_V2: usize = MIGRATION_REQUESTS_V2 + 1; + const MIGRATION_COLLECTION_PATHS: usize = MIGRATION_UI_STATE_V2 + 1; + + #[fixture] + fn collection_path() -> CollectionPath { + get_repo_root().join("slumber.yml").into() + } + + #[fixture] + fn connection() -> Connection { + let mut connection = Connection::open_in_memory().unwrap(); + migrations() + .to_version(&mut connection, MIGRATION_COLLECTIONS) + .unwrap(); + + connection + } + + /// Test copying data requests -> requests_v2 + #[rstest] + fn test_migrate_requests_v2( + collection_path: CollectionPath, + mut connection: Connection, + ) { + let migrations = migrations(); + migrations + .to_version(&mut connection, MIGRATION_ALL_V1) + .unwrap(); + + let collection_id = CollectionId::new(); + connection + .execute( + "INSERT INTO collections (id, path) VALUES (:id, :path)", + named_params! 
{ + ":id": collection_id, + ":path": collection_path, + }, + ) + .unwrap(); + + let exchanges = [ + Exchange::factory(( + RequestRecord { + method: Method::GET, + ..RequestRecord::factory(()) + }, + ResponseRecord::factory(StatusCode::NOT_FOUND), + )), + Exchange::factory(( + RequestRecord { + method: Method::POST, + ..RequestRecord::factory(()) + }, + ResponseRecord { + body: json!({"username": "ted"}).into(), + ..ResponseRecord::factory(StatusCode::CREATED) + }, + )), + Exchange::factory(( + RequestRecord { + method: Method::DELETE, + ..RequestRecord::factory(()) + }, + ResponseRecord::factory(StatusCode::BAD_REQUEST), + )), + ]; + for exchange in &exchanges { + connection + .execute( + "INSERT INTO + requests ( + id, + collection_id, + profile_id, + recipe_id, + start_time, + end_time, + request, + response, + status_code + ) + VALUES ( + :id, :collection_id, :profile_id, :recipe_id, + :start_time, :end_time, :request, :response, + :status_code + )", + named_params! { + ":id": exchange.id, + ":collection_id": &collection_id, + ":profile_id": &exchange.request.profile_id, + ":recipe_id": &exchange.request.recipe_id, + ":start_time": &exchange.start_time, + ":end_time": &exchange.end_time, + ":request": &ByteEncoded(&*exchange.request), + ":response": &ByteEncoded(&*exchange.response), + ":status_code": exchange.response.status.as_u16(), + }, + ) + .unwrap(); + } + + migrations + .to_version(&mut connection, MIGRATION_REQUESTS_V2) + .unwrap(); + + // Make sure we didn't delete anything from the old table + let count = connection + .query_row("SELECT COUNT(*) FROM requests", [], |row| { + row.get::<_, usize>(0) + }) + .unwrap(); + assert_eq!(count, exchanges.len()); + + let mut stmt = connection.prepare("SELECT * FROM requests_v2").unwrap(); + let migrated: Vec = stmt + .query_map::([], |row| row.try_into()) + .unwrap() + .try_collect() + .unwrap(); + assert_eq!(&migrated, &exchanges); + } + + /// Test copying data ui_state -> ui_state_v2 + #[rstest] + fn 
test_migrate_ui_state_v2( + collection_path: CollectionPath, + mut connection: Connection, + ) { + let migrations = migrations(); + migrations + .to_version(&mut connection, MIGRATION_ALL_V1) + .unwrap(); + + let collection_id = CollectionId::new(); + connection + .execute( + "INSERT INTO collections (id, path) VALUES (:id, :path)", + named_params! { + ":id": collection_id, + ":path": collection_path, + }, + ) + .unwrap(); + + let rows = [ + ("Scalar".to_owned(), json!(null), json!(3)), + ("StringKey".to_owned(), json!("k1"), json!({"a": 1})), + ("StringKey".to_owned(), json!("k2"), json!({"b": 2})), + ("StringKey".to_owned(), json!("k3"), json!({"c": 3})), + ("MapKey".to_owned(), json!({"key": "k1"}), json!([1, 2, 3])), + ("MapKey".to_owned(), json!({"key": "k2"}), json!([4, 5, 6])), + ("MapKey".to_owned(), json!({"key": "k3"}), json!([7, 8, 9])), + ]; + + for (key_type, key, value) in &rows { + connection + .execute( + "INSERT INTO + ui_state (collection_id, key, value) + VALUES (:collection_id, :key, :value)", + named_params! 
{ + ":collection_id": collection_id, + ":key": ByteEncoded((key_type, key)), + ":value": ByteEncoded(value), + }, + ) + .unwrap(); + } + + migrations + .to_version(&mut connection, MIGRATION_UI_STATE_V2) + .unwrap(); + + // Make sure we dropped the old table + let count = connection + .query_row( + "SELECT COUNT(*) FROM sqlite_master \ + WHERE type = 'table' AND name = 'ui_state'", + [], + |row| row.get::<_, usize>(0), + ) + .unwrap(); + assert_eq!(count, 0, "Expected `ui_state` table to be dropped"); + + let mut stmt = connection.prepare("SELECT * FROM ui_state_v2").unwrap(); + let migrated: Vec<(String, serde_json::Value, serde_json::Value)> = + stmt.query_map([], |row| { + Ok(( + row.get("key_type")?, + row.get::<_, JsonEncoded<_>>("key")?.0, + row.get::<_, JsonEncoded<_>>("value")?.0, + )) + }) + .unwrap() + .try_collect() + .unwrap(); + assert_eq!(&migrated, &rows); + } + + /// Test migration collection paths off of MessagePack + #[rstest] + fn test_migration_collection_paths(mut connection: Connection) { + let migrations = migrations(); + migrations + .to_version(&mut connection, MIGRATION_ALL_V1) + .unwrap(); + + let repo_root = get_repo_root(); + let collections = [ + (CollectionId::new(), repo_root.join("slumber.yml")), + (CollectionId::new(), repo_root.join("README.md")), + (CollectionId::new(), repo_root.join("üťf-8.txt")), + ]; + + // Insert in old format + for (id, path) in &collections { + connection + .execute( + "INSERT INTO collections (id, path) VALUES (:id, :path)", + named_params! 
{ + ":id": id, + ":path": ByteEncoded(path), + }, + ) + .unwrap(); + } + + migrations + .to_version(&mut connection, MIGRATION_COLLECTION_PATHS) + .unwrap(); + + let mut stmt = connection.prepare("SELECT * FROM collections").unwrap(); + let migrated: Vec<(CollectionId, PathBuf)> = stmt + .query_map([], |row| { + Ok(( + row.get("id")?, + row.get::<_, CollectionPath>("path")?.into(), + )) + }) + .unwrap() + .try_collect() + .unwrap(); + assert_eq!(&migrated, &collections); + } +} diff --git a/crates/slumber_core/src/http/models.rs b/crates/slumber_core/src/http/models.rs index 9ad6d67a..64d0923c 100644 --- a/crates/slumber_core/src/http/models.rs +++ b/crates/slumber_core/src/http/models.rs @@ -175,12 +175,13 @@ impl From<&Exchange> for ExchangeSummary { /// - Each [reqwest::Request] can only exist once (from creation to sending), /// whereas a record can be hung onto after the launch to keep showing it on /// screen. -/// - This is serialized/deserializable, for persistence /// - This stores additional Slumber-specific metadata /// /// This intentionally does *not* implement `Clone`, because request data could /// potentially be large so we want to be intentional about duplicating it only /// when necessary. 
+/// +/// Remove serde impls in https://github.com/LucasPickering/slumber/issues/306 #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test"), derive(PartialEq))] pub struct RequestRecord { @@ -292,13 +293,18 @@ impl crate::test_util::Factory<(Option, RecipeId)> for RequestRecord { fn factory((profile_id, recipe_id): (Option, RecipeId)) -> Self { + use crate::test_util::header_map; Self { id: RequestId::new(), profile_id, recipe_id, method: reqwest::Method::GET, url: "http://localhost/url".parse().unwrap(), - headers: HeaderMap::new(), + headers: header_map([ + ("Accept", "application/json"), + ("Content-Type", "application/json"), + ("User-Agent", "slumber"), + ]), body: None, } } @@ -315,6 +321,17 @@ impl crate::test_util::Factory for ResponseRecord { } } +#[cfg(any(test, feature = "test"))] +impl crate::test_util::Factory for ResponseRecord { + fn factory(status: StatusCode) -> Self { + Self { + status, + headers: HeaderMap::new(), + body: ResponseBody::default(), + } + } +} + #[cfg(any(test, feature = "test"))] impl crate::test_util::Factory for Exchange { fn factory(_: ()) -> Self { @@ -334,8 +351,17 @@ impl crate::test_util::Factory for Exchange { #[cfg(any(test, feature = "test"))] impl crate::test_util::Factory<(Option, RecipeId)> for Exchange { fn factory(params: (Option, RecipeId)) -> Self { - let request = RequestRecord::factory(params); - let response = ResponseRecord::factory(()); + Self::factory(( + RequestRecord::factory(params), + ResponseRecord::factory(()), + )) + } +} + +/// Customize profile and recipe ID +#[cfg(any(test, feature = "test"))] +impl crate::test_util::Factory<(RequestRecord, ResponseRecord)> for Exchange { + fn factory((request, response): (RequestRecord, ResponseRecord)) -> Self { Self { id: request.id, request: request.into(), @@ -349,11 +375,12 @@ impl crate::test_util::Factory<(Option, RecipeId)> for Exchange { /// A resolved HTTP response, with all content loaded and ready to be displayed /// to the 
user. A simpler alternative to [reqwest::Response], because there's /// no way to access all resolved data on that type at once. Resolving the -/// response body requires moving the response. This also provides serialization -/// and deserialization so responses can be persisted and re-accessed later. +/// response body requires moving the response. /// /// This intentionally does not implement Clone, because responses could /// potentially be very large. +/// +/// Remove serde impls in https://github.com/LucasPickering/slumber/issues/306 #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test"), derive(PartialEq))] pub struct ResponseRecord { @@ -519,6 +546,13 @@ impl From<&[u8]> for ResponseBody { } } +#[cfg(test)] +impl From for ResponseBody { + fn from(value: serde_json::Value) -> Self { + Self::new(value.to_string().into()) + } +} + #[cfg(any(test, feature = "test"))] impl PartialEq for ResponseBody { fn eq(&self, other: &Self) -> bool { diff --git a/crates/slumber_core/src/template.rs b/crates/slumber_core/src/template.rs index 2330c102..3e0ca203 100644 --- a/crates/slumber_core/src/template.rs +++ b/crates/slumber_core/src/template.rs @@ -359,11 +359,7 @@ mod tests { ..ResponseRecord::factory(()) }; database - .insert_exchange(&Exchange { - request: request.into(), - response: response.into(), - ..Exchange::factory(()) - }) + .insert_exchange(&Exchange::factory((request, response))) .unwrap(); let selector = selector.map(|s| s.parse().unwrap()); let recipe = Recipe { diff --git a/crates/slumber_tui/src/view/component/root.rs b/crates/slumber_tui/src/view/component/root.rs index 4451b078..76a325c8 100644 --- a/crates/slumber_tui/src/view/component/root.rs +++ b/crates/slumber_tui/src/view/component/root.rs @@ -324,7 +324,11 @@ mod tests { harness.database.insert_exchange(&new_exchange).unwrap(); harness .database - .set_ui(&SelectedRequestKey, RequestId::new()) + .set_ui( + SelectedRequestKey::type_name(), + &SelectedRequestKey, + 
RequestId::new(), + ) .unwrap(); let component = TestComponent::new(harness, Root::new(&collection), ()); diff --git a/crates/slumber_tui/src/view/context.rs b/crates/slumber_tui/src/view/context.rs index 0050fe9a..ee400c22 100644 --- a/crates/slumber_tui/src/view/context.rs +++ b/crates/slumber_tui/src/view/context.rs @@ -151,7 +151,7 @@ where K::Value: Debug + Serialize + DeserializeOwned, { fn load_persisted(key: &K) -> Option { - Self::with_database(|database| database.get_ui((K::type_name(), key))) + Self::with_database(|database| database.get_ui(K::type_name(), key)) // Error is already traced in the DB, nothing to do with it here .ok() .flatten() @@ -159,7 +159,7 @@ where fn store_persisted(key: &K, value: K::Value) { Self::with_database(|database| { - database.set_ui((K::type_name(), key), value) + database.set_ui(K::type_name(), key, value) }) // Error is already traced in the DB, nothing to do with it here .ok();