diff --git a/.github/workflows/sqlx-cli.yml b/.github/workflows/sqlx-cli.yml
new file mode 100644
index 0000000000..2c3c4bb7ef
--- /dev/null
+++ b/.github/workflows/sqlx-cli.yml
@@ -0,0 +1,85 @@
+name: sqlx-cli
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+
+jobs:
+  # tests `cargo sqlx prepare` using `examples/postgres/todos/`
+  test-prepare:
+    runs-on: ubuntu-latest
+
+    services:
+      postgres:
+        image: postgres:12
+        env:
+          POSTGRES_USER: postgres
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_DB: todos
+        ports:
+          # will assign a random free host port
+          - 5432/tcp
+        # needed because the postgres container does not provide a healthcheck
+        options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
+
+    steps:
+      - uses: actions/checkout@v1
+
+      # Rust ------------------------------------------------
+
+      - name: Install Rust toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          profile: minimal
+          override: true
+
+      - name: Cache target/
+        uses: actions/cache@v1
+        with:
+          path: target
+          key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
+
+      - name: Load schema
+        working-directory: examples/postgres/todos
+        env:
+          # the in-container port is always 5432
+          DATABASE_URL: postgres://postgres:postgres@localhost:5432/todos
+        run: |
+          export CONTAINER_ID=$(docker ps --filter "ancestor=postgres:12" --format "{{.ID}}")
+          docker cp schema.sql $CONTAINER_ID:/schema.sql
+          docker exec $CONTAINER_ID bash -c "psql -d $DATABASE_URL -f ./schema.sql"
+
+      - name: install sqlx-cli
+        run: cargo install -f --path sqlx-cli/
+
+      - name: test `cargo sqlx prepare [--check]`
+        working-directory: examples/postgres/todos/
+        env:
+          DATABASE_URL: postgres://postgres:postgres@localhost:${{ job.services.postgres.ports[5432] }}/todos
+        run: |
+          cargo sqlx prepare &&
+          cargo sqlx prepare --check
+
+      # now we have no connection to the database, we should still be able to build
+      - name: build example without DB
+        working-directory: examples/postgres/todos/
+        run: |
+          cargo clean -p sqlx-example-postgres-todos &&
+          cargo build
+
+      # check that the application works without rebuilding it
+      # (single quotes so the shell does not treat the backticks as command substitution)
+      - name: run example
+        env:
+          DATABASE_URL: postgres://postgres:postgres@localhost:${{ job.services.postgres.ports[5432] }}/todos
+        run: |
+          ./target/debug/sqlx-example-postgres-todos add 'test if `cargo sqlx prepare` worked' &&
+          ./target/debug/sqlx-example-postgres-todos done 1
+
+      - name: Prepare build directory for cache
+        run: |
+          find ./target/debug -maxdepth 1 -type f -delete \
+          && rm -fr ./target/debug/{deps,.fingerprint}/*sqlx* \
+          && rm -f ./target/.rustc_info.json
diff --git a/Cargo.lock b/Cargo.lock
index 6fd861c2ff..5752a3b01d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -299,20 +299,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1"
 
 [[package]]
-name = "cargo-sqlx"
-version = "0.1.0"
+name = "cargo_metadata"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8de60b887edf6d74370fc8eb177040da4847d971d6234c7b13a6da324ef0caf"
 dependencies = [
- "anyhow",
- "async-trait",
- "chrono",
- "console",
- "dialoguer",
- "dotenv",
- "futures 0.3.4",
- "sqlx",
- "structopt",
- "tokio 0.2.13",
- "url 2.1.1",
+ "semver",
+ "serde",
+ "serde_derive",
+ "serde_json",
 ]
 
 [[package]]
@@ -1647,6 +1642,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 dependencies = [
  "semver-parser",
+ "serde",
 ]
 
 [[package]]
@@ -1657,18 +1653,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
 
 [[package]]
 name = "serde"
-version = "1.0.105"
+version = "1.0.110"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff"
+checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.105"
+version = "1.0.110"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8"
+checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1677,10 +1673,11 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.50"
+version = "1.0.53"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78a7a12c167809363ec3bd7329fc0a3369056996de43c4b37ef3cd54a6ce4867"
+checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2"
 dependencies = [
+ "indexmap",
  "itoa",
  "ryu",
  "serde",
@@ -1808,6 +1805,27 @@ dependencies = [
  "trybuild",
 ]
 
+[[package]]
+name = "sqlx-cli"
+version = "0.0.1"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "cargo_metadata",
+ "chrono",
+ "console",
+ "dialoguer",
+ "dotenv",
+ "futures 0.3.4",
+ "glob",
+ "serde",
+ "serde_json",
+ "sqlx",
+ "structopt",
+ "tokio 0.2.13",
+ "url 2.1.1",
+]
+
 [[package]]
 name = "sqlx-core"
 version = "0.3.5"
@@ -1876,6 +1894,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "async-std",
+ "dotenv",
  "futures 0.3.4",
  "paw",
  "sqlx",
@@ -1928,10 +1947,13 @@ dependencies = [
  "dotenv",
  "futures 0.3.4",
  "heck",
- "lazy_static",
+ "hex",
+ "once_cell",
  "proc-macro2",
  "quote",
+ "serde",
  "serde_json",
+ "sha2",
  "sqlx-core",
  "syn",
  "tokio 0.2.13",
diff --git a/Cargo.toml b/Cargo.toml
index 392ba2595a..ec492b23d8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,7 +4,7 @@ members = [
     "sqlx-core",
     "sqlx-macros",
     "sqlx-test",
-    "cargo-sqlx",
+    "sqlx-cli",
     "examples/mysql/todos",
     "examples/postgres/listen",
     "examples/postgres/todos",
@@ -39,6 +39,9 @@ default = [ "macros", "runtime-async-std" ]
 macros = [ "sqlx-macros" ]
 tls = [ "sqlx-core/tls" ]
 
+# offline building support in `sqlx-macros`
+offline = ["sqlx-macros/offline", "sqlx-core/offline"]
+
 # intended mainly for CI and docs
 all = [ "tls", "all-database", "all-type" ]
 all-database = [ "mysql", "sqlite", "postgres" ]
diff --git a/cargo-sqlx/README.md b/cargo-sqlx/README.md
deleted file mode 100644
index 9a0f34b055..0000000000
--- a/cargo-sqlx/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# cargo-sqlx
-
-Sqlx migrator runs all `*.sql` files under `migrations` folder and remembers which ones has been run.
-
-Database url is supplied through either env variable or `.env` file containing `DATABASE_URL="postgres://postgres:postgres@localhost/realworld"`.
-
-##### Commands
-- `add <name>` - add new migration to your migrations folder named `<timestamp>_<name>.sql`
-- `database` - create or drop database based on connection string
-- `run` - Runs all migrations in your migrations folder
-
-
-##### Limitations
-- No down migrations! If you need down migrations, there are other more feature complete migrators to use.
diff --git a/cargo-sqlx/src/database_migrator.rs b/cargo-sqlx/src/database_migrator.rs
deleted file mode 100644
index 28b2021b6b..0000000000
--- a/cargo-sqlx/src/database_migrator.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-use anyhow::Result;
-use async_trait::async_trait;
-
-#[async_trait]
-pub trait MigrationTransaction {
-    async fn commit(self: Box<Self>) -> Result<()>;
-    async fn rollback(self: Box<Self>) -> Result<()>;
-    async fn check_if_applied(&mut self, migration: &str) -> Result<bool>;
-    async fn execute_migration(&mut self, migration_sql: &str) -> Result<()>;
-    async fn save_applied_migration(&mut self, migration_name: &str) -> Result<()>;
-}
-
-#[async_trait]
-pub trait DatabaseMigrator {
-    // Misc info
-    fn database_type(&self) -> String;
-    fn get_database_name(&self) -> Result<String>;
-
-    // Features
-    fn can_migrate_database(&self) -> bool;
-    fn can_create_database(&self) -> bool;
-    fn can_drop_database(&self) -> bool;
-
-    // Database creation
-    async fn check_if_database_exists(&self, db_name: &str) -> Result<bool>;
-    async fn create_database(&self, db_name: &str) -> Result<()>;
-    async fn drop_database(&self, db_name: &str) -> Result<()>;
-
-    // Migration
-    async fn create_migration_table(&self) -> Result<()>;
-    async fn get_migrations(&self) -> Result<Vec<String>>;
-    async fn begin_migration(&self) -> Result<Box<dyn MigrationTransaction>>;
-}
diff --git a/cargo-sqlx/src/main.rs b/cargo-sqlx/src/main.rs
deleted file mode 100644
index 05fbd6c63e..0000000000
--- a/cargo-sqlx/src/main.rs
+++ /dev/null
@@ -1,345 +0,0 @@
-use std::env;
-use std::fs;
-use std::fs::File;
-use std::io::prelude::*;
-use url::Url;
-
-use dotenv::dotenv;
-
-use structopt::StructOpt;
-
-use anyhow::{anyhow, Context, Result};
-use console::style;
-use dialoguer::Confirmation;
-
-mod database_migrator;
-mod mysql;
-mod postgres;
-mod sqlite;
-
-use database_migrator::DatabaseMigrator;
-use mysql::MySql;
-use postgres::Postgres;
-use sqlite::Sqlite;
-
-const MIGRATION_FOLDER: &'static str = "migrations";
-
-/// Sqlx commandline tool
-#[derive(StructOpt, Debug)]
-#[structopt(name = "Sqlx")]
-enum Opt {
-    #[structopt(alias = "mig")]
-    Migrate(MigrationCommand),
-
-    #[structopt(alias = "db")]
-    Database(DatabaseCommand),
-}
-
-/// Adds and runs migrations. Alias: mig
-#[derive(StructOpt, Debug)]
-#[structopt(name = "Sqlx migrator")]
-enum MigrationCommand {
-    /// Add new migration with name <timestamp>_<name>.sql
-    Add { name: String },
-
-    /// Run all migrations
-    Run,
-
-    /// List all migrations
-    List,
-}
-
-/// Create or drops database depending on your connection string. Alias: db
-#[derive(StructOpt, Debug)]
-#[structopt(name = "Sqlx migrator")]
-enum DatabaseCommand {
-    /// Create database in url
-    Create,
-
-    /// Drop database in url
-    Drop,
-}
-
-#[tokio::main]
-async fn main() -> Result<()> {
-    dotenv().ok();
-
-    let db_url_raw = env::var("DATABASE_URL").context("Failed to find 'DATABASE_URL'")?;
-
-    let db_url = Url::parse(&db_url_raw)?;
-
-    // This code is taken from: https://github.com/launchbadge/sqlx/blob/master/sqlx-macros/src/lib.rs#L63
-    match db_url.scheme() {
-        #[cfg(feature = "sqlite")]
-        "sqlite" => run_command(&Sqlite::new(db_url_raw)).await?,
-        #[cfg(not(feature = "sqlite"))]
-        "sqlite" => return Err(anyhow!("Not implemented. DATABASE_URL {} has the scheme of a SQLite database but the `sqlite` feature of sqlx was not enabled",
-            db_url)),
-
-        #[cfg(feature = "postgres")]
-        "postgresql" | "postgres" => run_command(&Postgres::new(db_url_raw)).await?,
-        #[cfg(not(feature = "postgres"))]
-        "postgresql" | "postgres" => return Err(anyhow!("DATABASE_URL {} has the scheme of a Postgres database but the `postgres` feature of sqlx was not enabled",
-            db_url)),
-
-        #[cfg(feature = "mysql")]
-        "mysql" | "mariadb" => run_command(&MySql::new(db_url_raw)).await?,
-        #[cfg(not(feature = "mysql"))]
-        "mysql" | "mariadb" => return Err(anyhow!(
-            "DATABASE_URL {} has the scheme of a MySQL/MariaDB database but the `mysql` feature of sqlx was not enabled",
-            db_url
-        )),
-
-        scheme => return Err(anyhow!("unexpected scheme {:?} in DATABASE_URL {}", scheme, db_url)),
-    }
-
-    println!("All done!");
-    Ok(())
-}
-
-async fn run_command(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    let opt = Opt::from_args();
-
-    match opt {
-        Opt::Migrate(command) => match command {
-            MigrationCommand::Add { name } => add_migration_file(&name)?,
-            MigrationCommand::Run => run_migrations(migrator).await?,
-            MigrationCommand::List => list_migrations(migrator).await?,
-        },
-        Opt::Database(command) => match command {
-            DatabaseCommand::Create => run_create_database(migrator).await?,
-            DatabaseCommand::Drop => run_drop_database(migrator).await?,
-        },
-    };
-
-    Ok(())
-}
-
-async fn run_create_database(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    if !migrator.can_create_database() {
-        return Err(anyhow!(
-            "Database creation is not implemented for {}",
-            migrator.database_type()
-        ));
-    }
-
-    let db_name = migrator.get_database_name()?;
-    let db_exists = migrator.check_if_database_exists(&db_name).await?;
-
-    if !db_exists {
-        println!("Creating database: {}", db_name);
-        Ok(migrator.create_database(&db_name).await?)
-    } else {
-        println!("Database already exists, aborting");
-        Ok(())
-    }
-}
-
-async fn run_drop_database(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    if !migrator.can_drop_database() {
-        return Err(anyhow!(
-            "Database drop is not implemented for {}",
-            migrator.database_type()
-        ));
-    }
-
-    let db_name = migrator.get_database_name()?;
-    let db_exists = migrator.check_if_database_exists(&db_name).await?;
-
-    if db_exists {
-        if !Confirmation::new()
-            .with_text(&format!(
-                "\nAre you sure you want to drop the database: {}?",
-                db_name
-            ))
-            .default(false)
-            .interact()?
-        {
-            println!("Aborting");
-            return Ok(());
-        }
-
-        println!("Dropping database: {}", db_name);
-        Ok(migrator.drop_database(&db_name).await?)
-    } else {
-        println!("Database does not exists, aborting");
-        Ok(())
-    }
-}
-
-fn add_migration_file(name: &str) -> Result<()> {
-    use chrono::prelude::*;
-    use std::path::PathBuf;
-
-    fs::create_dir_all(MIGRATION_FOLDER).context("Unable to create migrations directory")?;
-
-    let dt = Utc::now();
-    let mut file_name = dt.format("%Y-%m-%d_%H-%M-%S").to_string();
-    file_name.push_str("_");
-    file_name.push_str(name);
-    file_name.push_str(".sql");
-
-    let mut path = PathBuf::new();
-    path.push(MIGRATION_FOLDER);
-    path.push(&file_name);
-
-    let mut file = File::create(path).context("Failed to create file")?;
-    file.write_all(b"-- Add migration script here")
-        .context("Could not write to file")?;
-
-    println!("Created migration: '{}'", file_name);
-    Ok(())
-}
-
-pub struct Migration {
-    pub name: String,
-    pub sql: String,
-}
-
-fn load_migrations() -> Result<Vec<Migration>> {
-    let entries = fs::read_dir(&MIGRATION_FOLDER).context("Could not find 'migrations' dir")?;
-
-    let mut migrations = Vec::new();
-
-    for e in entries {
-        if let Ok(e) = e {
-            if let Ok(meta) = e.metadata() {
-                if !meta.is_file() {
-                    continue;
-                }
-
-                if let Some(ext) = e.path().extension() {
-                    if ext != "sql" {
-                        println!("Wrong ext: {:?}", ext);
-                        continue;
-                    }
-                } else {
-                    continue;
-                }
-
-                let mut file = File::open(e.path())
-                    .with_context(|| format!("Failed to open: '{:?}'", e.file_name()))?;
-                let mut contents = String::new();
-                file.read_to_string(&mut contents)
-                    .with_context(|| format!("Failed to read: '{:?}'", e.file_name()))?;
-
-                migrations.push(Migration {
-                    name: e.file_name().to_str().unwrap().to_string(),
-                    sql: contents,
-                });
-            }
-        }
-    }
-
-    migrations.sort_by(|a, b| a.name.partial_cmp(&b.name).unwrap());
-
-    Ok(migrations)
-}
-
-async fn run_migrations(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    if !migrator.can_migrate_database() {
-        return Err(anyhow!(
-            "Database migrations not supported for {}",
-            migrator.database_type()
-        ));
-    }
-
-    migrator.create_migration_table().await?;
-
-    let migrations = load_migrations()?;
-
-    for mig in migrations.iter() {
-        let mut tx = migrator.begin_migration().await?;
-
-        if tx.check_if_applied(&mig.name).await? {
-            println!("Already applied migration: '{}'", mig.name);
-            continue;
-        }
-        println!("Applying migration: '{}'", mig.name);
-
-        tx.execute_migration(&mig.sql)
-            .await
-            .with_context(|| format!("Failed to run migration {:?}", &mig.name))?;
-
-        tx.save_applied_migration(&mig.name)
-            .await
-            .context("Failed to insert migration")?;
-
-        tx.commit().await.context("Failed")?;
-    }
-
-    Ok(())
-}
-
-async fn list_migrations(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    if !migrator.can_migrate_database() {
-        return Err(anyhow!(
-            "Database migrations not supported for {}",
-            migrator.database_type()
-        ));
-    }
-
-    let file_migrations = load_migrations()?;
-
-    if migrator
-        .check_if_database_exists(&migrator.get_database_name()?)
-        .await?
-    {
-        let applied_migrations = migrator.get_migrations().await.unwrap_or_else(|_| {
-            println!("Could not retrive data from migration table");
-            Vec::new()
-        });
-
-        let mut width = 0;
-        for mig in file_migrations.iter() {
-            width = std::cmp::max(width, mig.name.len());
-        }
-        for mig in file_migrations.iter() {
-            let status = if applied_migrations
-                .iter()
-                .find(|&m| mig.name == *m)
-                .is_some()
-            {
-                style("Applied").green()
-            } else {
-                style("Not Applied").yellow()
-            };
-
-            println!("{:width$}\t{}", mig.name, status, width = width);
-        }
-
-        let orphans = check_for_orphans(file_migrations, applied_migrations);
-
-        if let Some(orphans) = orphans {
-            println!("\nFound migrations applied in the database that does not have a corresponding migration file:");
-            for name in orphans {
-                println!("{:width$}\t{}", name, style("Orphan").red(), width = width);
-            }
-        }
-    } else {
-        println!("No database found, listing migrations");
-
-        for mig in file_migrations {
-            println!("{}", mig.name);
-        }
-    }
-
-    Ok(())
-}
-
-fn check_for_orphans(
-    file_migrations: Vec<Migration>,
-    applied_migrations: Vec<String>,
-) -> Option<Vec<String>> {
-    let orphans: Vec<String> = applied_migrations
-        .iter()
-        .filter(|m| !file_migrations.iter().any(|fm| fm.name == **m))
-        .cloned()
-        .collect();
-
-    if orphans.len() > 0 {
-        Some(orphans)
-    } else {
-        None
-    }
-}
diff --git a/examples/postgres/todos/Cargo.toml b/examples/postgres/todos/Cargo.toml
index 8136d43636..4295b6b042 100644
--- a/examples/postgres/todos/Cargo.toml
+++ b/examples/postgres/todos/Cargo.toml
@@ -9,5 +9,6 @@ anyhow = "1.0"
 async-std = { version = "1.4.0", features = [ "attributes" ] }
 futures = "0.3"
 paw = "1.0"
-sqlx = { path = "../../../", features = ["postgres"] }
+sqlx = { path = "../../../", features = ["postgres", "offline"] }
 structopt = { version = "0.3", features = ["paw"] }
+dotenv = "0.15.0"
diff --git a/cargo-sqlx/.gitignore b/sqlx-cli/.gitignore
similarity index 100%
rename from cargo-sqlx/.gitignore
rename to sqlx-cli/.gitignore
diff --git a/cargo-sqlx/Cargo.toml b/sqlx-cli/Cargo.toml
similarity index 57%
rename from cargo-sqlx/Cargo.toml
rename to sqlx-cli/Cargo.toml
index 230ec435d8..27f578c24d 100644
--- a/cargo-sqlx/Cargo.toml
+++ b/sqlx-cli/Cargo.toml
@@ -1,23 +1,33 @@
 [package]
-name = "cargo-sqlx"
-version = "0.1.0"
-description = "Simple postgres migrator without support for down migration"
-authors = ["Jesper Axelsson <jesperaxe@gmail.com>"]
+name = "sqlx-cli"
+version = "0.0.1"
+description = "Command-line utility for SQLx, the Rust SQL toolkit."
+authors = [
+    "Jesper Axelsson <jesperaxe@gmail.com>",
+    "Austin Bonander <austin@launchbadge.com>",
+]
 edition = "2018"
 readme = "README.md"
 homepage = "https://github.com/launchbadge/sqlx"
 repository = "https://github.com/launchbadge/sqlx"
 keywords = ["database", "postgres", "database-management", "migration"]
 categories = ["database", "command-line-utilities"]
+license = "MIT OR Apache-2.0"
+default-run = "sqlx"
 
 [[bin]]
 name = "sqlx"
-path = "src/main.rs"
+path = "src/bin/sqlx.rs"
+
+# enables invocation as `cargo sqlx`; required for `prepare` subcommand
+[[bin]]
+name = "cargo-sqlx"
+path = "src/bin/cargo-sqlx.rs"
 
 [dependencies]
 dotenv = "0.15"
 tokio = { version = "0.2", features = ["macros"] }
-sqlx = { version = "0.3", path = "..", default-features = false, features = [ "runtime-tokio" ] }
+sqlx = { version = "0.3", path = "..", default-features = false, features = [ "runtime-tokio", "offline" ] }
 futures = "0.3"
 structopt = "0.3"
 chrono = "0.4"
@@ -26,11 +36,14 @@ url = { version = "2.1.1", default-features = false }
 async-trait = "0.1.30"
 console = "0.10.0"
 dialoguer = "0.5.0"
+serde_json = { version = "1.0.53", features = ["preserve_order"] }
+serde = "1.0.110"
+glob = "0.3.0"
+cargo_metadata = "0.10.0"
 
 [features]
 default = [ "postgres", "sqlite", "mysql" ]
-
 # database
 mysql = [ "sqlx/mysql" ]
 postgres = [ "sqlx/postgres" ]
diff --git a/sqlx-cli/README.md b/sqlx-cli/README.md
new file mode 100644
index 0000000000..8bd25a6d23
--- /dev/null
+++ b/sqlx-cli/README.md
@@ -0,0 +1,65 @@
+# SQLx CLI
+
+SQLx's associated command-line utility for managing databases, migrations, and enabling "offline"
+mode with `sqlx::query!()` and friends.
+
+### Installation
+
+```bash
+$ cargo install sqlx-cli
+```
+
+### Commands
+
+All commands require `DATABASE_URL` to be set, either in the environment or in a `.env` file
+in the current working directory.
+
+The `database` and `migrate` subcommands currently support only Postgres; MySQL and SQLite
+support is TODO.
+
+For more details, run `sqlx --help`.
+
+```dotenv
+# Postgres
+DATABASE_URL=postgres://postgres@localhost/my_database
+```
+
+#### Create/drop the database at `DATABASE_URL`
+
+```bash
+sqlx database create
+sqlx database drop
+```
+
+#### Create and run migrations
+
+```bash
+$ sqlx migrate add <name>
+```
+Creates a new file in `migrations/<timestamp>_<name>.sql`. Add your database schema changes to
+this new file.
+
+---
+```bash
+$ sqlx migrate run
+```
+Compares the migration history of the running database against the `migrations/` folder and runs
+any scripts that are still pending.
+
+##### Note: Down-Migrations
+Down-migrations are currently not planned, as their utility seems dubious, but we welcome
+contributions (discussion or code) on the matter.
+
+#### Enable building in "offline" mode with `query!()`
+Note: must be run as `cargo sqlx`.
+
+```bash
+cargo sqlx prepare
+```
+Saves query data to `sqlx-data.json` in the current directory; check this file into version control
+and an active database connection will no longer be needed to build your project.
+
+---
+```bash
+cargo sqlx prepare --check
+```
+Exits with a nonzero exit status if the data in `sqlx-data.json` is out of date with the current
+database schema and queries in the project. Intended for use in Continuous Integration.
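+
+For illustration, `prepare` covers any use of the checked query macros. A query like the one
+below is a sketch only: the `todos` table and the function are hypothetical stand-ins for your
+own schema, but once `sqlx-data.json` is checked in and the project enables sqlx's `offline`
+feature, code of this shape should compile with no database reachable:
+
+```rust
+// hypothetical example; assumes a table `todos (id BIGSERIAL PRIMARY KEY, description TEXT NOT NULL)`
+async fn add_todo(conn: &mut sqlx::PgConnection, description: String) -> anyhow::Result<i64> {
+    // `query!()` is checked against the live database when online,
+    // or against `sqlx-data.json` when building offline
+    let rec = sqlx::query!(
+        "INSERT INTO todos ( description ) VALUES ( $1 ) RETURNING id",
+        description
+    )
+    .fetch_one(conn)
+    .await?;
+
+    Ok(rec.id)
+}
+```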
diff --git a/sqlx-cli/src/bin/cargo-sqlx.rs b/sqlx-cli/src/bin/cargo-sqlx.rs
new file mode 100644
index 0000000000..2215c6f7d1
--- /dev/null
+++ b/sqlx-cli/src/bin/cargo-sqlx.rs
@@ -0,0 +1,18 @@
+use sqlx_cli::Command;
+use structopt::{clap, StructOpt};
+
+use std::env;
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    // when invoked as `cargo sqlx [...]` the args we see are `[...]/cargo-sqlx sqlx prepare`
+    // so we want to notch out that superfluous "sqlx"
+    let args = env::args_os().skip(2);
+
+    let matches = Command::clap()
+        .bin_name("cargo sqlx")
+        .setting(clap::AppSettings::NoBinaryName)
+        .get_matches_from(args);
+
+    sqlx_cli::run(Command::from_clap(&matches)).await
+}
diff --git a/sqlx-cli/src/bin/sqlx.rs b/sqlx-cli/src/bin/sqlx.rs
new file mode 100644
index 0000000000..2c6997ee42
--- /dev/null
+++ b/sqlx-cli/src/bin/sqlx.rs
@@ -0,0 +1,8 @@
+use sqlx_cli::Command;
+use structopt::StructOpt;
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    // no special handling here
+    sqlx_cli::run(Command::from_args()).await
+}
diff --git a/sqlx-cli/src/db.rs b/sqlx-cli/src/db.rs
new file mode 100644
index 0000000000..4ff30c3a5d
--- /dev/null
+++ b/sqlx-cli/src/db.rs
@@ -0,0 +1,59 @@
+use dialoguer::Confirmation;
+
+use anyhow::bail;
+
+pub async fn run_create() -> anyhow::Result<()> {
+    let migrator = crate::migrator::get()?;
+
+    if !migrator.can_create_database() {
+        bail!(
+            "Database creation is not implemented for {}",
+            migrator.database_type()
+        );
+    }
+
+    let db_name = migrator.get_database_name()?;
+    let db_exists = migrator.check_if_database_exists(&db_name).await?;
+
+    if !db_exists {
+        println!("Creating database: {}", db_name);
+        Ok(migrator.create_database(&db_name).await?)
+    } else {
+        println!("Database already exists, aborting");
+        Ok(())
+    }
+}
+
+pub async fn run_drop() -> anyhow::Result<()> {
+    let migrator = crate::migrator::get()?;
+
+    if !migrator.can_drop_database() {
+        bail!(
+            "Database drop is not implemented for {}",
+            migrator.database_type()
+        );
+    }
+
+    let db_name = migrator.get_database_name()?;
+    let db_exists = migrator.check_if_database_exists(&db_name).await?;
+
+    if db_exists {
+        if !Confirmation::new()
+            .with_text(&format!(
+                "\nAre you sure you want to drop the database: {}?",
+                db_name
+            ))
+            .default(false)
+            .interact()?
+        {
+            println!("Aborting");
+            return Ok(());
+        }
+
+        println!("Dropping database: {}", db_name);
+        Ok(migrator.drop_database(&db_name).await?)
+    } else {
+        println!("Database does not exist, aborting");
+        Ok(())
+    }
+}
diff --git a/sqlx-cli/src/lib.rs b/sqlx-cli/src/lib.rs
new file mode 100644
index 0000000000..c4b93dd2b2
--- /dev/null
+++ b/sqlx-cli/src/lib.rs
@@ -0,0 +1,96 @@
+use dotenv::dotenv;
+
+use structopt::StructOpt;
+
+mod migrator;
+
+mod db;
+mod migration;
+mod prepare;
+
+/// Sqlx commandline tool
+#[derive(StructOpt, Debug)]
+#[structopt(name = "Sqlx")]
+pub enum Command {
+    #[structopt(alias = "mig")]
+    Migrate(MigrationCommand),
+
+    #[structopt(alias = "db")]
+    Database(DatabaseCommand),
+
+    /// Enables offline mode for a project utilizing `query!()` and related macros.
+    /// May only be run as `cargo sqlx prepare`.
+    ///
+    /// Saves data for all invocations of `query!()` and friends in the project so that it may be
+    /// built in offline mode, i.e. so compilation does not require connecting to a running database.
+    /// Outputs to `sqlx-data.json` in the current directory, overwriting it if it already exists.
+    ///
+    /// Offline mode can be activated simply by removing `DATABASE_URL` from the environment or
+    /// building without a `.env` file.
+    #[structopt(alias = "prep")]
+    Prepare {
+        /// If this flag is passed, instead of overwriting `sqlx-data.json` in the current directory,
+        /// that file is loaded and compared against the current output of the prepare step; if
+        /// there is a mismatch, an error is reported and the process exits with a nonzero exit code.
+        ///
+        /// Intended for use in CI.
+        #[structopt(long)]
+        check: bool,
+
+        /// Arguments to pass to `cargo rustc`; only Cargo arguments (those preceding `--` in
+        /// `cargo rustc ... -- ...`) are supported.
+        #[structopt(name = "Cargo args", last = true)]
+        cargo_args: Vec<String>,
+    },
+}
+
+/// Adds and runs migrations. Alias: mig
+#[derive(StructOpt, Debug)]
+#[structopt(name = "Sqlx migrator")]
+pub enum MigrationCommand {
+    /// Add new migration with name <timestamp>_<name>.sql
+    Add { name: String },
+
+    /// Run all migrations
+    Run,
+
+    /// List all migrations
+    List,
+}
+
+/// Creates or drops the database depending on your connection string. Alias: db
+#[derive(StructOpt, Debug)]
+#[structopt(name = "Sqlx migrator")]
+pub enum DatabaseCommand {
+    /// Create database in url
+    Create,
+
+    /// Drop database in url
+    Drop,
+}
+
+pub async fn run(cmd: Command) -> anyhow::Result<()> {
+    dotenv().ok();
+
+    match cmd {
+        Command::Migrate(migrate) => match migrate {
+            MigrationCommand::Add { name } => migration::add_file(&name)?,
+            MigrationCommand::Run => migration::run().await?,
+            MigrationCommand::List => migration::list().await?,
+        },
+        Command::Database(database) => match database {
+            DatabaseCommand::Create => db::run_create().await?,
+            DatabaseCommand::Drop => db::run_drop().await?,
+        },
+        Command::Prepare {
+            check: false,
+            cargo_args,
+        } => prepare::run(cargo_args)?,
+        Command::Prepare {
+            check: true,
+            cargo_args,
+        } => prepare::check(cargo_args)?,
+    };
+
+    Ok(())
+}
diff --git a/sqlx-cli/src/migration.rs b/sqlx-cli/src/migration.rs
new file mode 100644
index 0000000000..7e9bd85804
--- /dev/null
+++ b/sqlx-cli/src/migration.rs
@@ -0,0 +1,187 @@
+use anyhow::{bail, Context};
+use console::style;
+use std::fs::{self, File};
+use std::io::{Read, Write};
+
+const MIGRATION_FOLDER: &'static str = "migrations";
+
+pub struct Migration {
+    pub name: String,
+    pub sql: String,
+}
+
+pub fn add_file(name: &str) -> anyhow::Result<()> {
+    use chrono::prelude::*;
+    use std::path::PathBuf;
+
+    fs::create_dir_all(MIGRATION_FOLDER).context("Unable to create migrations directory")?;
+
+    let dt = Utc::now();
+    let mut file_name = dt.format("%Y-%m-%d_%H-%M-%S").to_string();
+    file_name.push_str("_");
+    file_name.push_str(name);
+    file_name.push_str(".sql");
+
+    let mut path = PathBuf::new();
+    path.push(MIGRATION_FOLDER);
+    path.push(&file_name);
+
+    let mut file = File::create(path).context("Failed to create file")?;
+    file.write_all(b"-- Add migration script here")
+        .context("Could not write to file")?;
+
+    println!("Created migration: '{}'", file_name);
+    Ok(())
+}
+
+pub async fn run() -> anyhow::Result<()> {
+    let migrator = crate::migrator::get()?;
+
+    if !migrator.can_migrate_database() {
+        bail!(
+            "Database migrations not supported for {}",
+            migrator.database_type()
+        );
+    }
+
+    migrator.create_migration_table().await?;
+
+    let migrations = load_migrations()?;
+
+    for mig in migrations.iter() {
+        let mut tx = migrator.begin_migration().await?;
+
+        if tx.check_if_applied(&mig.name).await? {
+            println!("Already applied migration: '{}'", mig.name);
+            continue;
+        }
+        println!("Applying migration: '{}'", mig.name);
+
+        tx.execute_migration(&mig.sql)
+            .await
+            .with_context(|| format!("Failed to run migration {:?}", &mig.name))?;
+
+        tx.save_applied_migration(&mig.name)
+            .await
+            .context("Failed to insert migration")?;
+
+        tx.commit().await.context("Failed")?;
+    }
+
+    Ok(())
+}
+
+pub async fn list() -> anyhow::Result<()> {
+    let migrator = crate::migrator::get()?;
+
+    if !migrator.can_migrate_database() {
+        bail!(
+            "Database migrations not supported for {}",
+            migrator.database_type()
+        );
+    }
+
+    let file_migrations = load_migrations()?;
+
+    if migrator
+        .check_if_database_exists(&migrator.get_database_name()?)
+        .await?
+    {
+        let applied_migrations = migrator.get_migrations().await.unwrap_or_else(|_| {
+            println!("Could not retrieve data from migration table");
+            Vec::new()
+        });
+
+        let mut width = 0;
+        for mig in file_migrations.iter() {
+            width = std::cmp::max(width, mig.name.len());
+        }
+        for mig in file_migrations.iter() {
+            let status = if applied_migrations
+                .iter()
+                .find(|&m| mig.name == *m)
+                .is_some()
+            {
+                style("Applied").green()
+            } else {
+                style("Not Applied").yellow()
+            };
+
+            println!("{:width$}\t{}", mig.name, status, width = width);
+        }
+
+        let orphans = check_for_orphans(file_migrations, applied_migrations);
+
+        if let Some(orphans) = orphans {
+            println!("\nFound migrations applied in the database that do not have a corresponding migration file:");
+            for name in orphans {
+                println!("{:width$}\t{}", name, style("Orphan").red(), width = width);
+            }
+        }
+    } else {
+        println!("No database found, listing migrations");
+
+        for mig in file_migrations {
+            println!("{}", mig.name);
+        }
+    }
+
+    Ok(())
+}
+
+fn load_migrations() -> anyhow::Result<Vec<Migration>> {
+    let entries = fs::read_dir(&MIGRATION_FOLDER).context("Could not find 'migrations' dir")?;
+
+    let mut migrations = Vec::new();
+
+    for e in entries {
+        if let Ok(e) = e {
+            if let Ok(meta) = e.metadata() {
+                if !meta.is_file() {
+                    continue;
+                }
+
+                if let Some(ext) = e.path().extension() {
+                    if ext != "sql" {
+                        println!("Wrong ext: {:?}", ext);
+                        continue;
+                    }
+                } else {
+                    continue;
+                }
+
+                let mut file = File::open(e.path())
+                    .with_context(|| format!("Failed to open: '{:?}'", e.file_name()))?;
+                let mut contents = String::new();
+                file.read_to_string(&mut contents)
+                    .with_context(|| format!("Failed to read: '{:?}'", e.file_name()))?;
+
+                migrations.push(Migration {
+                    name: e.file_name().to_str().unwrap().to_string(),
+                    sql: contents,
+                });
+            }
+        }
+    }
+
+    migrations.sort_by(|a, b| a.name.partial_cmp(&b.name).unwrap());
+
+    Ok(migrations)
+}
+
+fn check_for_orphans(
+    file_migrations: Vec<Migration>,
+    applied_migrations: Vec<String>,
+) -> Option<Vec<String>> {
+    let orphans: Vec<String> = applied_migrations
+        .iter()
+        .filter(|m| !file_migrations.iter().any(|fm| fm.name == **m))
+        .cloned()
+        .collect();
+
+    if orphans.len() > 0 {
+        Some(orphans)
+    } else {
+        None
+    }
+}
diff --git a/sqlx-cli/src/migrator/mod.rs b/sqlx-cli/src/migrator/mod.rs
new file mode 100644
index 0000000000..111ed633ef
--- /dev/null
+++ b/sqlx-cli/src/migrator/mod.rs
@@ -0,0 +1,75 @@
+use anyhow::{bail, Context, Result};
+use async_trait::async_trait;
+use std::env;
+use url::Url;
+
+#[cfg(feature = "mysql")]
+mod mysql;
+
+#[cfg(feature = "postgres")]
+mod postgres;
+
+#[cfg(feature = "sqlite")]
+mod sqlite;
+
+#[async_trait]
+pub trait MigrationTransaction {
+    async fn commit(self: Box<Self>) -> Result<()>;
+    async fn rollback(self: Box<Self>) -> Result<()>;
+    async fn check_if_applied(&mut self, migration: &str) -> Result<bool>;
+    async fn execute_migration(&mut self, migration_sql: &str) -> Result<()>;
+    async fn save_applied_migration(&mut self, migration_name: &str) -> Result<()>;
+}
+
+#[async_trait]
+pub trait DatabaseMigrator {
+    // Misc info
+    fn database_type(&self) -> String;
+    fn get_database_name(&self) -> Result<String>;
+
+    // Features
+    fn can_migrate_database(&self) -> bool;
+    fn can_create_database(&self) -> bool;
+    fn can_drop_database(&self) -> bool;
+
+    // Database creation
+    async fn check_if_database_exists(&self, db_name: &str) -> Result<bool>;
+    async fn create_database(&self, db_name: &str) -> Result<()>;
+    async fn drop_database(&self, db_name: &str) -> Result<()>;
+
+    // Migration
+    async fn create_migration_table(&self) -> Result<()>;
+    async fn get_migrations(&self) -> Result<Vec<String>>;
+    async fn begin_migration(&self) -> Result<Box<dyn MigrationTransaction>>;
+}
+
+pub fn get() -> Result<Box<dyn DatabaseMigrator>> {
+    let db_url_raw = env::var("DATABASE_URL").context("Failed to find 'DATABASE_URL'")?;
+
+    let db_url = Url::parse(&db_url_raw)?;
+
+    // This code is taken from: https://github.com/launchbadge/sqlx/blob/master/sqlx-macros/src/lib.rs#L63
+    match db_url.scheme() {
+        #[cfg(feature = "sqlite")]
+        "sqlite" => Ok(Box::new(self::sqlite::Sqlite::new(db_url_raw))),
+        #[cfg(not(feature = "sqlite"))]
+        "sqlite" => bail!("Not implemented. DATABASE_URL {} has the scheme of a SQLite database but the `sqlite` feature of sqlx was not enabled",
+            db_url),
+
+        #[cfg(feature = "postgres")]
+        "postgresql" | "postgres" => Ok(Box::new(self::postgres::Postgres::new(db_url_raw))),
+        #[cfg(not(feature = "postgres"))]
+        "postgresql" | "postgres" => bail!("DATABASE_URL {} has the scheme of a Postgres database but the `postgres` feature of sqlx was not enabled",
+            db_url),
+
+        #[cfg(feature = "mysql")]
+        "mysql" | "mariadb" => Ok(Box::new(self::mysql::MySql::new(db_url_raw))),
+        #[cfg(not(feature = "mysql"))]
+        "mysql" | "mariadb" => bail!(
+            "DATABASE_URL {} has the scheme of a MySQL/MariaDB database but the `mysql` feature of sqlx was not enabled",
+            db_url
+        ),
+
+        scheme => bail!("unexpected scheme {:?} in DATABASE_URL {}", scheme, db_url),
+    }
+}
diff --git a/cargo-sqlx/src/mysql.rs b/sqlx-cli/src/migrator/mysql.rs
similarity index 98%
rename from cargo-sqlx/src/mysql.rs
rename to sqlx-cli/src/migrator/mysql.rs
index 2cb87bd88d..7cb38d5c44 100644
--- a/cargo-sqlx/src/mysql.rs
+++ b/sqlx-cli/src/migrator/mysql.rs
@@ -9,7 +9,7 @@ use sqlx::Row;
 use anyhow::{anyhow, Context, Result};
 use async_trait::async_trait;
 
-use crate::database_migrator::{DatabaseMigrator, MigrationTransaction};
+use super::{DatabaseMigrator, MigrationTransaction};
 
 pub struct MySql {
     pub db_url: String,
diff --git a/cargo-sqlx/src/postgres.rs b/sqlx-cli/src/migrator/postgres.rs
similarity index 88%
rename from cargo-sqlx/src/postgres.rs
rename to sqlx-cli/src/migrator/postgres.rs
index f9ac40d4ba..a6278fc68a 100644
--- a/cargo-sqlx/src/postgres.rs
+++ b/sqlx-cli/src/migrator/postgres.rs
@@ -9,7 +9,7 @@ use sqlx::Row;
 use anyhow::{anyhow, Context, Result};
 use async_trait::async_trait;
 
-use crate::database_migrator::{DatabaseMigrator, MigrationTransaction};
+use crate::migrator::{DatabaseMigrator, MigrationTransaction};
 
 pub struct Postgres {
     pub db_url: String,
@@ -89,10 +89,14 @@ impl DatabaseMigrator for Postgres {
 
         let mut conn = PgConnection::connect(base_url).await?;
 
-        sqlx::query(&format!("CREATE DATABASE {}", db_name))
-            .execute(&mut conn)
-            .await
-            .with_context(|| format!("Failed to create database: {}", db_name))?;
+        // quote database name (quotes in the name are escaped with additional quotes)
+        sqlx::query(&format!(
+            "CREATE DATABASE \"{}\"",
+            db_name.replace('"', "\"\"")
+        ))
+        .execute(&mut conn)
+        .await
+        .with_context(|| format!("Failed to create database: {}", db_name))?;
 
         Ok(())
     }
@@ -104,10 +108,13 @@ impl DatabaseMigrator for Postgres {
 
         let mut conn = PgConnection::connect(base_url).await?;
 
-        sqlx::query(&format!("DROP DATABASE {}", db_name))
-            .execute(&mut conn)
-            .await
-            .with_context(|| format!("Failed to drop database: {}", db_name))?;
+        sqlx::query(&format!(
+            "DROP DATABASE \"{}\"",
+            db_name.replace('"', "\"\"")
+        ))
+        .execute(&mut conn)
+        .await
+        .with_context(|| format!("Failed to drop database: {}", db_name))?;
 
         Ok(())
     }
diff --git a/cargo-sqlx/src/sqlite.rs b/sqlx-cli/src/migrator/sqlite.rs
similarity index 98%
rename from cargo-sqlx/src/sqlite.rs
rename to sqlx-cli/src/migrator/sqlite.rs
index 0cbd623d02..d226b48a6b 100644
--- a/cargo-sqlx/src/sqlite.rs
+++ b/sqlx-cli/src/migrator/sqlite.rs
@@ -9,7 +9,7 @@ use sqlx::SqliteConnection;
 use anyhow::{anyhow, Context, Result};
 use async_trait::async_trait;
 
-use crate::database_migrator::{DatabaseMigrator, MigrationTransaction};
+use crate::migrator::{DatabaseMigrator, MigrationTransaction};
 
 pub struct Sqlite {
     db_url: String,
diff --git a/sqlx-cli/src/prepare.rs b/sqlx-cli/src/prepare.rs
new file mode 100644
index 0000000000..f1e491a743
--- /dev/null
+++ b/sqlx-cli/src/prepare.rs
@@ -0,0 +1,149 @@
+use anyhow::{anyhow, bail, Context};
+use std::process::Command;
+use std::{env, fs};
+
+use cargo_metadata::MetadataCommand;
+use std::collections::BTreeMap;
+use std::fs::File;
+
+use std::time::SystemTime;
+use url::Url;
+
+type QueryData = BTreeMap<String, serde_json::Value>;
+type JsonObject = serde_json::Map<String, serde_json::Value>;
+
+pub fn run(cargo_args: Vec<String>) -> anyhow::Result<()> {
+    #[derive(serde::Serialize)]
+    struct DataFile {
+        db: &'static str,
+        #[serde(flatten)]
+        data: QueryData,
+    }
+
+    let db_kind = get_db_kind()?;
+    let data = run_prepare_step(cargo_args)?;
+
+    serde_json::to_writer_pretty(
+        File::create("sqlx-data.json").context("failed to create/open `sqlx-data.json`")?,
+        &DataFile { db: db_kind, data },
+    )
+    .context("failed to write to `sqlx-data.json`")?;
+
+    println!(
+        "query data written to `sqlx-data.json` in the current directory; \
+         please check this into version control"
+    );
+
+    Ok(())
+}
+
+pub fn check(cargo_args: Vec<String>) -> anyhow::Result<()> {
+    let db_kind = get_db_kind()?;
+    let data = run_prepare_step(cargo_args)?;
+
+    let data_file = fs::read("sqlx-data.json").context(
+        "failed to open `sqlx-data.json`; you may need to run `cargo sqlx prepare` first",
+    )?;
+
+    let mut saved_data: QueryData = serde_json::from_slice(&data_file)?;
+
+    let expected_db = saved_data
+        .remove("db")
+        .context("expected key `db` in data file")?;
+
+    let expected_db = expected_db
+        .as_str()
+        .context("expected key `db` to be a string")?;
+
+    if db_kind != expected_db {
+        bail!(
+            "saved prepare data is for {}, not {} (inferred from `DATABASE_URL`)",
+            expected_db,
+            db_kind
+        )
+    }
+
+    if data != saved_data {
+        bail!("`cargo sqlx prepare` needs to be rerun")
+    }
+
+    Ok(())
+}
+
+fn run_prepare_step(cargo_args: Vec<String>) -> anyhow::Result<QueryData> {
+    // path to the Cargo executable
+    let cargo = env::var("CARGO")
+        .context("`prepare` subcommand may only be invoked as `cargo sqlx prepare`")?;
+
+    let check_status = Command::new(&cargo)
+        .arg("rustc")
+        .args(cargo_args)
+        .arg("--")
+        .arg("--emit")
+        .arg("dep-info,metadata")
+        // set an always-changing cfg so we can consistently trigger recompile
+        .arg("--cfg")
+        .arg(format!(
+            "__sqlx_recompile_trigger=\"{}\"",
+            SystemTime::UNIX_EPOCH.elapsed()?.as_millis()
+        ))
+        .status()?;
+
+    if !check_status.success() {
+        bail!("`cargo rustc` failed with status: {}", check_status);
+    }
+
+    let metadata = MetadataCommand::new()
+        .cargo_path(cargo)
+        .exec()
+        .context("failed to execute `cargo metadata`")?;
+
+    let pattern = metadata.target_directory.join("sqlx/query-*.json");
+
+    let mut data = BTreeMap::new();
+
+    for path in glob::glob(
+        pattern
+            .to_str()
+            .context("CARGO_TARGET_DIR not valid UTF-8")?,
+    )? {
+        let path = path?;
+        let contents = fs::read(&*path)?;
+        let mut query_data: JsonObject = serde_json::from_slice(&contents)?;
+
+        // we lift the `hash` key to the outer map
+        let hash = query_data
+            .remove("hash")
+            .context("expected key `hash` in query data")?;
+
+        if let serde_json::Value::String(hash) = hash {
+            data.insert(hash, serde_json::Value::Object(query_data));
+        } else {
+            bail!(
+                "expected key `hash` in query data to be string, was {:?} instead; file: {}",
+                hash,
+                path.display()
+            )
+        }
+
+        // lazily remove the file, we don't care too much if we can't
+        let _ = fs::remove_file(&path);
+    }
+
+    Ok(data)
+}
+
+fn get_db_kind() -> anyhow::Result<&'static str> {
+    let db_url = dotenv::var("DATABASE_URL")
+        .map_err(|_| anyhow!("`DATABASE_URL` must be set to use the `prepare` subcommand"))?;
+
+    let db_url = Url::parse(&db_url)?;
+
+    // these should match the values of `DatabaseExt::NAME` in `sqlx-macros`
+    match db_url.scheme() {
+        "postgres" | "postgresql" => Ok("PostgreSQL"),
+        "mysql" | "mariadb" => Ok("MySQL/MariaDB"),
+        "sqlite" => Ok("SQLite"),
+        _ => bail!("unexpected scheme in database URL: {}", db_url.scheme()),
+    }
+}
diff --git a/sqlx-core/Cargo.toml b/sqlx-core/Cargo.toml
index 7ffdd0ba85..e3a976f0c5 100644
--- a/sqlx-core/Cargo.toml
+++ b/sqlx-core/Cargo.toml
@@ -32,6 +32,9 @@ runtime-tokio = [ "async-native-tls/runtime-tokio", "tokio" ]
 # intended for internal benchmarking, do not use
 bench = []
 
+# support offline/decoupled building (enables serialization of `Describe`)
+offline = ["serde"]
+
 [dependencies]
 async-native-tls = { version = "0.3.2", default-features = false, optional = true }
 async-std = { version = "1.5.0", features = [ "unstable" ], optional = true }
diff --git a/sqlx-core/src/describe.rs b/sqlx-core/src/describe.rs
index 2a7c2294c6..d66e4b4cc3 100644
--- a/sqlx-core/src/describe.rs
+++ b/sqlx-core/src/describe.rs
@@ -7,6 +7,14 @@ use crate::database::Database;
 /// The return type of [`Executor::describe`].
 ///
 /// [`Executor::describe`]: crate::executor::Executor::describe
+#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(
+    feature = "offline",
+    serde(bound(
+        serialize = "DB::TypeInfo: serde::Serialize, Column<DB>: serde::Serialize",
+        deserialize = "DB::TypeInfo: serde::de::DeserializeOwned, Column<DB>: serde::de::DeserializeOwned"
+    ))
+)]
 #[non_exhaustive]
 pub struct Describe<DB>
 where
@@ -35,6 +43,14 @@ where
 }
 
 /// A single column of a result set.
+#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + feature = "offline", + serde(bound( + serialize = "DB::TableId: serde::Serialize, DB::TypeInfo: serde::Serialize", + deserialize = "DB::TableId: serde::de::DeserializeOwned, DB::TypeInfo: serde::de::DeserializeOwned" + )) +)] #[non_exhaustive] pub struct Column where diff --git a/sqlx-core/src/mysql/protocol/type.rs b/sqlx-core/src/mysql/protocol/type.rs index 70f64789dd..5b92a951de 100644 --- a/sqlx-core/src/mysql/protocol/type.rs +++ b/sqlx-core/src/mysql/protocol/type.rs @@ -1,6 +1,7 @@ // https://dev.mysql.com/doc/dev/mysql-server/8.0.12/binary__log__types_8h.html // https://mariadb.com/kb/en/library/resultset/#field-types #[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct TypeId(pub u8); // https://github.com/google/mysql/blob/c01fc2134d439282a21a2ddf687566e198ddee28/include/mysql_com.h#L429 diff --git a/sqlx-core/src/mysql/type_info.rs b/sqlx-core/src/mysql/type_info.rs index 9c86619a2a..10c8ba0c43 100644 --- a/sqlx-core/src/mysql/type_info.rs +++ b/sqlx-core/src/mysql/type_info.rs @@ -4,6 +4,7 @@ use crate::mysql::protocol::{ColumnDefinition, FieldFlags, TypeId}; use crate::types::TypeInfo; #[derive(Clone, Debug, Default)] +#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct MySqlTypeInfo { pub(crate) id: TypeId, pub(crate) is_unsigned: bool, diff --git a/sqlx-core/src/postgres/protocol/type_id.rs b/sqlx-core/src/postgres/protocol/type_id.rs index 6da8151187..0a9035e50e 100644 --- a/sqlx-core/src/postgres/protocol/type_id.rs +++ b/sqlx-core/src/postgres/protocol/type_id.rs @@ -2,6 +2,7 @@ use crate::postgres::types::try_resolve_type_name; use std::fmt::{self, Display}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct TypeId(pub(crate) u32); // DEVELOPER PRO TIP: find builtin type OIDs easily by grepping this file @@ -90,6 +91,16 @@ impl TypeId { pub(crate) const RECORD: TypeId = TypeId(2249); pub(crate) const ARRAY_RECORD: TypeId = TypeId(2287); + + // Custom types + pub(crate) const NODE_TYPE: TypeId = TypeId(17086); + pub(crate) const CARD_RARITY: TypeId = TypeId(17098); + pub(crate) const CARD_CATEGORY: TypeId = TypeId(17112); + pub(crate) const MODE_TYPE: TypeId = TypeId(17120); + pub(crate) const BATTLE_TYPE: TypeId = TypeId(17144); + pub(crate) const BATTLE_ICON: TypeId = TypeId(17156); + pub(crate) const CHARACTER_TRAIT: TypeId = TypeId(17168); + pub(crate) const FACTION: TypeId = TypeId(17194); } impl Display for TypeId { diff --git a/sqlx-core/src/postgres/type_info.rs b/sqlx-core/src/postgres/type_info.rs index e953ae8270..afd344794f 100644 --- a/sqlx-core/src/postgres/type_info.rs +++ b/sqlx-core/src/postgres/type_info.rs @@ -9,6 +9,7 @@ use std::sync::Arc; /// Type information for a Postgres SQL type. #[derive(Debug, Clone)] +#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct PgTypeInfo { pub(crate) id: Option, pub(crate) name: SharedStr, @@ -33,6 +34,22 @@ impl PgTypeInfo { } } + #[doc(hidden)] + pub fn get_custom_type(&self) -> Option<&'static str> { + match self.id? 
{ + TypeId::NODE_TYPE => Some("NodeType"), + TypeId::CARD_RARITY => Some("CardRarity"), + TypeId::CARD_CATEGORY => Some("CardCategory"), + TypeId::MODE_TYPE => Some("ModeType"), + TypeId::BATTLE_TYPE => Some("BattleType"), + TypeId::BATTLE_ICON => Some("BattleIcon"), + TypeId::CHARACTER_TRAIT => Some("CharacterTrait"), + TypeId::FACTION => Some("Faction"), + + _ => None, + } + } + #[doc(hidden)] pub fn type_feature_gate(&self) -> Option<&'static str> { match self.id? { @@ -186,8 +203,38 @@ impl From for SharedStr { } } +impl From for String { + fn from(s: SharedStr) -> Self { + String::from(&*s) + } +} + impl fmt::Display for SharedStr { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.pad(self) } } + +// manual impls because otherwise things get a little screwy with lifetimes +#[cfg(feature = "offline")] +impl<'de> serde::Deserialize<'de> for SharedStr { + fn deserialize(deserializer: D) -> Result>::Error> + where + D: serde::Deserializer<'de>, + { + Ok(String::deserialize(deserializer)?.into()) + } +} + +#[cfg(feature = "offline")] +impl serde::Serialize for SharedStr { + fn serialize( + &self, + serializer: S, + ) -> Result<::Ok, ::Error> + where + S: serde::Serializer, + { + serializer.serialize_str(&self) + } +} diff --git a/sqlx-core/src/postgres/types/mod.rs b/sqlx-core/src/postgres/types/mod.rs index 9a7def682f..ef036bf965 100644 --- a/sqlx-core/src/postgres/types/mod.rs +++ b/sqlx-core/src/postgres/types/mod.rs @@ -252,6 +252,15 @@ pub(crate) fn try_resolve_type_name(oid: u32) -> Option<&'static str> { TypeId::RECORD => "RECORD", TypeId::ARRAY_RECORD => "RECORD[]", + TypeId::NODE_TYPE => "NODE_TYPE", + TypeId::CARD_RARITY => "CARD_RARITY", + TypeId::CARD_CATEGORY => "CARD_CATEGORY", + TypeId::MODE_TYPE => "MODE_TYPE", + TypeId::BATTLE_TYPE => "BATTLE_TYPE", + TypeId::BATTLE_ICON => "BATTLE_ICON", + TypeId::CHARACTER_TRAIT => "CHARACTER_TRAIT", + TypeId::FACTION => "FACTION", + _ => { return None; } diff --git a/sqlx-core/src/sqlite/type_info.rs b/sqlx-core/src/sqlite/type_info.rs index e77dbb7f53..43088ab7cd 100644 --- a/sqlx-core/src/sqlite/type_info.rs +++ b/sqlx-core/src/sqlite/type_info.rs @@ -4,6 +4,7 @@ use crate::types::TypeInfo; // https://www.sqlite.org/c3ref/c_blob.html #[derive(Debug, PartialEq, Clone, Copy)] +#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub(crate) enum SqliteType { Integer = 1, Float = 2, @@ -16,6 +17,7 @@ pub(crate) enum SqliteType { // https://www.sqlite.org/datatype3.html#type_affinity #[derive(Debug, PartialEq, Clone, Copy)] +#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub(crate) enum SqliteTypeAffinity { Text, Numeric, @@ -25,6 +27,7 @@ pub(crate) enum SqliteTypeAffinity { } #[derive(Debug, Clone)] +#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct SqliteTypeInfo { pub(crate) r#type: SqliteType, pub(crate) affinity: Option, diff --git a/sqlx-core/src/url.rs b/sqlx-core/src/url.rs index ace27fdc2c..8015b9e1f7 100644 --- a/sqlx-core/src/url.rs +++ b/sqlx-core/src/url.rs @@ -28,6 +28,14 @@ impl<'s> TryFrom<&'s String> for Url { } } +impl TryFrom for Url { + type Error = url::ParseError; + + fn try_from(value: url::Url) -> Result { + Ok(Url(value)) + } +} + impl Url { #[allow(dead_code)] pub(crate) fn as_str(&self) -> &str { diff --git a/sqlx-macros/Cargo.toml b/sqlx-macros/Cargo.toml index 14a4355fb9..65939f2102 100644 --- a/sqlx-macros/Cargo.toml +++ b/sqlx-macros/Cargo.toml @@ -18,32 +18,38 @@ proc-macro = true [features] default 
= [ "runtime-async-std" ] -runtime-async-std = [ "sqlx/runtime-async-std", "async-std" ] -runtime-tokio = [ "sqlx/runtime-tokio", "tokio", "lazy_static" ] +runtime-async-std = [ "sqlx-core/runtime-async-std", "async-std" ] +runtime-tokio = [ "sqlx-core/runtime-tokio", "tokio", "once_cell" ] + +# offline building support +offline = ["sqlx-core/offline", "serde", "serde_json", "hex", "sha2"] # database -mysql = [ "sqlx/mysql" ] -postgres = [ "sqlx/postgres" ] -sqlite = [ "sqlx/sqlite" ] +mysql = [ "sqlx-core/mysql" ] +postgres = [ "sqlx-core/postgres" ] +sqlite = [ "sqlx-core/sqlite" ] # type -bigdecimal = [ "sqlx/bigdecimal" ] -chrono = [ "sqlx/chrono" ] -time = [ "sqlx/time" ] -ipnetwork = [ "sqlx/ipnetwork" ] -uuid = [ "sqlx/uuid" ] -json = [ "sqlx/json", "serde_json" ] +bigdecimal = [ "sqlx-core/bigdecimal" ] +chrono = [ "sqlx-core/chrono" ] +time = [ "sqlx-core/time" ] +ipnetwork = [ "sqlx-core/ipnetwork" ] +uuid = [ "sqlx-core/uuid" ] +json = [ "sqlx-core/json", "serde_json" ] [dependencies] async-std = { version = "1.5.0", default-features = false, optional = true } tokio = { version = "0.2.13", default-features = false, features = [ "rt-threaded" ], optional = true } dotenv = { version = "0.15.0", default-features = false } futures = { version = "0.3.4", default-features = false, features = [ "executor" ] } +hex = { version = "0.4.2", optional = true } heck = "0.3" proc-macro2 = { version = "1.0.9", default-features = false } -sqlx = { version = "0.3.5", default-features = false, path = "../sqlx-core", package = "sqlx-core" } -serde_json = { version = "1.0", features = [ "raw_value" ], optional = true } +sqlx-core = { version = "0.3.5", default-features = false, path = "../sqlx-core" } +serde = { version = "1.0", optional = true } +serde_json = { version = "1.0", features = [ "preserve_order" ], optional = true } +sha2 = { version = "0.8.1", optional = true } syn = { version = "1.0.16", default-features = false, features = [ "full" ] } quote = { version = "1.0.2", default-features = false } url = { version = "2.1.1", default-features = false } -lazy_static = { version = "1.4.0", optional = true } +once_cell = { version = "1.3", features = ["std"], optional = true } diff --git a/sqlx-macros/src/database/mod.rs b/sqlx-macros/src/database/mod.rs index 10d56c60db..e87b142040 100644 --- a/sqlx-macros/src/database/mod.rs +++ b/sqlx-macros/src/database/mod.rs @@ -1,4 +1,4 @@ -use sqlx::database::Database; +use sqlx_core::database::Database; #[derive(PartialEq, Eq)] #[allow(dead_code)] @@ -10,6 +10,7 @@ pub enum ParamChecking { pub trait DatabaseExt: Database { const DATABASE_PATH: &'static str; const ROW_PATH: &'static str; + const NAME: &'static str; const PARAM_CHECKING: ParamChecking; @@ -25,6 +26,8 @@ pub trait DatabaseExt: Database { fn return_type_for_id(id: &Self::TypeInfo) -> Option<&'static str>; + fn get_custom_type(id: &Self::TypeInfo) -> Option<&'static str>; + fn get_feature_gate(info: &Self::TypeInfo) -> Option<&'static str>; } @@ -34,23 +37,26 @@ macro_rules! impl_database_ext { $($(#[$meta:meta])? $ty:ty $(| $input:ty)?),*$(,)? 
}, ParamChecking::$param_checking:ident, - feature-types: $name:ident => $get_gate:expr, - row = $row:path + get-custom-type: $ty_info1:ident => $get_custom_type:expr, + feature-types: $ty_info2:ident => $get_gate:expr, + row = $row:path, + name = $db_name:literal ) => { impl $crate::database::DatabaseExt for $database { const DATABASE_PATH: &'static str = stringify!($database); const ROW_PATH: &'static str = stringify!($row); const PARAM_CHECKING: $crate::database::ParamChecking = $crate::database::ParamChecking::$param_checking; + const NAME: &'static str = $db_name; fn param_type_for_id(info: &Self::TypeInfo) -> Option<&'static str> { match () { $( $(#[$meta])? - _ if <$ty as sqlx::types::Type<$database>>::type_info() == *info => Some(input_ty!($ty $(, $input)?)), + _ if <$ty as sqlx_core::types::Type<$database>>::type_info() == *info => Some(input_ty!($ty $(, $input)?)), )* $( $(#[$meta])? - _ if sqlx::types::TypeInfo::compatible(&<$ty as sqlx::types::Type<$database>>::type_info(), &info) => Some(input_ty!($ty $(, $input)?)), + _ if sqlx_core::types::TypeInfo::compatible(&<$ty as sqlx_core::types::Type<$database>>::type_info(), &info) => Some(input_ty!($ty $(, $input)?)), )* _ => None } @@ -60,17 +66,21 @@ macro_rules! impl_database_ext { match () { $( $(#[$meta])? - _ if <$ty as sqlx::types::Type<$database>>::type_info() == *info => return Some(stringify!($ty)), + _ if <$ty as sqlx_core::types::Type<$database>>::type_info() == *info => return Some(stringify!($ty)), )* $( $(#[$meta])? - _ if sqlx::types::TypeInfo::compatible(&<$ty as sqlx::types::Type<$database>>::type_info(), &info) => return Some(stringify!($ty)), + _ if sqlx_core::types::TypeInfo::compatible(&<$ty as sqlx_core::types::Type<$database>>::type_info(), &info) => return Some(stringify!($ty)), )* - _ => None + _ => info.get_custom_type() } } - fn get_feature_gate($name: &Self::TypeInfo) -> Option<&'static str> { + fn get_custom_type($ty_info1: &Self::TypeInfo) -> Option<&'static str> { + $get_custom_type + } + + fn get_feature_gate($ty_info2: &Self::TypeInfo) -> Option<&'static str> { $get_gate } } diff --git a/sqlx-macros/src/database/mysql.rs b/sqlx-macros/src/database/mysql.rs index dee86a2c2b..a02948294a 100644 --- a/sqlx-macros/src/database/mysql.rs +++ b/sqlx-macros/src/database/mysql.rs @@ -1,3 +1,5 @@ +use sqlx_core as sqlx; + impl_database_ext! { sqlx::mysql::MySql { u8, @@ -46,5 +48,6 @@ impl_database_ext! { }, ParamChecking::Weak, feature-types: info => info.type_feature_gate(), - row = sqlx::mysql::MySqlRow + row = sqlx::mysql::MySqlRow, + name = "MySQL/MariaDB" } diff --git a/sqlx-macros/src/database/postgres.rs b/sqlx-macros/src/database/postgres.rs index 452ad27dbc..29a113af5d 100644 --- a/sqlx-macros/src/database/postgres.rs +++ b/sqlx-macros/src/database/postgres.rs @@ -1,3 +1,5 @@ +use sqlx_core as sqlx; + impl_database_ext! { sqlx::postgres::Postgres { bool, @@ -96,6 +98,8 @@ impl_database_ext! { }, ParamChecking::Strong, + get-custom-type: info => info.get_custom_type(), feature-types: info => info.type_feature_gate(), - row = sqlx::postgres::PgRow + row = sqlx::postgres::PgRow, + name = "PostgreSQL" } diff --git a/sqlx-macros/src/database/sqlite.rs b/sqlx-macros/src/database/sqlite.rs index c7ee2bf9d2..685b47f51f 100644 --- a/sqlx-macros/src/database/sqlite.rs +++ b/sqlx-macros/src/database/sqlite.rs @@ -1,3 +1,5 @@ +use sqlx_core as sqlx; + impl_database_ext! { sqlx::sqlite::Sqlite { bool, @@ -10,5 +12,6 @@ impl_database_ext! 
{ }, ParamChecking::Weak, feature-types: _info => None, - row = sqlx::sqlite::SqliteRow + row = sqlx::sqlite::SqliteRow, + name = "SQLite" } diff --git a/sqlx-macros/src/lib.rs b/sqlx-macros/src/lib.rs index beac1dd875..a303e56551 100644 --- a/sqlx-macros/src/lib.rs +++ b/sqlx-macros/src/lib.rs @@ -1,5 +1,5 @@ #![cfg_attr( - not(any(feature = "postgres", feature = "mysql")), + not(any(feature = "postgres", feature = "mysql", feature = "offline")), allow(dead_code, unused_macros, unused_imports) )] extern crate proc_macro; @@ -8,41 +8,15 @@ use proc_macro::TokenStream; use quote::quote; -#[cfg(feature = "runtime-async-std")] -use async_std::task::block_on; - -use std::path::PathBuf; - -use url::Url; - type Error = Box; type Result = std::result::Result; mod database; mod derives; -mod query_macros; +mod query; mod runtime; -use query_macros::*; - -#[cfg(feature = "runtime-tokio")] -lazy_static::lazy_static! { - static ref BASIC_RUNTIME: tokio::runtime::Runtime = { - tokio::runtime::Builder::new() - .threaded_scheduler() - .enable_io() - .enable_time() - .build() - .expect("failed to build tokio runtime") - }; -} - -#[cfg(feature = "runtime-tokio")] -fn block_on(future: F) -> F::Output { - BASIC_RUNTIME.enter(|| futures::executor::block_on(future)) -} - fn macro_result(tokens: proc_macro2::TokenStream) -> TokenStream { quote!( macro_rules! macro_result { @@ -52,141 +26,21 @@ fn macro_result(tokens: proc_macro2::TokenStream) -> TokenStream { .into() } -macro_rules! async_macro ( - ($db:ident, $input:ident: $ty:ty => $expr:expr) => {{ - let $input = match syn::parse::<$ty>($input) { - Ok(input) => input, - Err(e) => return macro_result(e.to_compile_error()), - }; - - let res: Result = block_on(async { - use sqlx::connection::Connect; - - // If a .env file exists at CARGO_MANIFEST_DIR, load environment variables from this, - // otherwise fallback to default dotenv behaviour. - if let Ok(dir) = std::env::var("CARGO_MANIFEST_DIR") { - let env_path = PathBuf::from(dir).join(".env"); - if env_path.exists() { - dotenv::from_path(&env_path) - .map_err(|e| format!("failed to load environment from {:?}, {}", env_path, e))? 
- } } - - let db_url = Url::parse(&dotenv::var("DATABASE_URL").map_err(|_| "DATABASE_URL not set")?)?; - - match db_url.scheme() { - #[cfg(feature = "sqlite")] - "sqlite" => { - let $db = sqlx::sqlite::SqliteConnection::connect(db_url.as_str()) - .await - .map_err(|e| format!("failed to connect to database: {}", e))?; - - $expr.await - } - #[cfg(not(feature = "sqlite"))] - "sqlite" => Err(format!( - "DATABASE_URL {} has the scheme of a SQLite database but the `sqlite` \ - feature of sqlx was not enabled", - db_url - ).into()), - #[cfg(feature = "postgres")] - "postgresql" | "postgres" => { - let $db = sqlx::postgres::PgConnection::connect(db_url.as_str()) - .await - .map_err(|e| format!("failed to connect to database: {}", e))?; - - $expr.await - } - #[cfg(not(feature = "postgres"))] - "postgresql" | "postgres" => Err(format!( - "DATABASE_URL {} has the scheme of a Postgres database but the `postgres` \ - feature of sqlx was not enabled", - db_url - ).into()), - #[cfg(feature = "mysql")] - "mysql" | "mariadb" => { - let $db = sqlx::mysql::MySqlConnection::connect(db_url.as_str()) - .await - .map_err(|e| format!("failed to connect to database: {}", e))?; - - $expr.await - } - #[cfg(not(feature = "mysql"))] - "mysql" | "mariadb" => Err(format!( - "DATABASE_URL {} has the scheme of a MySQL/MariaDB database but the `mysql` \ - feature of sqlx was not enabled", - db_url - ).into()), - scheme => Err(format!("unexpected scheme {:?} in DATABASE_URL {}", scheme, db_url).into()), - } - }); +#[proc_macro] +pub fn expand_query(input: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(input as query::QueryMacroInput); - match res { - Ok(ts) => ts.into(), - Err(e) => { - if let Some(parse_err) = e.downcast_ref::<syn::Error>() { - macro_result(parse_err.to_compile_error()) - } else { - let msg = e.to_string(); - macro_result(quote!(compile_error!(#msg))) - } + match query::expand_input(input) { + Ok(ts) => ts.into(), + Err(e) => { + if let Some(parse_err) = e.downcast_ref::<syn::Error>() { + macro_result(parse_err.to_compile_error()) + } else { + let msg = e.to_string(); + macro_result(quote!(compile_error!(#msg))) } } - }} -); - -#[proc_macro] -#[allow(unused_variables)] -pub fn query(input: TokenStream) -> TokenStream { - #[allow(unused_variables)] - async_macro!(db, input: QueryMacroInput => expand_query(input, db, true)) } - -#[proc_macro] -#[allow(unused_variables)] -pub fn query_unchecked(input: TokenStream) -> TokenStream { - #[allow(unused_variables)] - async_macro!(db, input: QueryMacroInput => expand_query(input, db, false)) -} - -#[proc_macro] -#[allow(unused_variables)] -pub fn query_file(input: TokenStream) -> TokenStream { - #[allow(unused_variables)] - async_macro!(db, input: QueryMacroInput => expand_query_file(input, db, true)) -} - -#[proc_macro] -#[allow(unused_variables)] -pub fn query_file_unchecked(input: TokenStream) -> TokenStream { - #[allow(unused_variables)] - async_macro!(db, input: QueryMacroInput => expand_query_file(input, db, false)) -} - -#[proc_macro] -#[allow(unused_variables)] -pub fn query_as(input: TokenStream) -> TokenStream { - #[allow(unused_variables)] - async_macro!(db, input: QueryAsMacroInput => expand_query_as(input, db, true)) -} - -#[proc_macro] -#[allow(unused_variables)] -pub fn query_file_as(input: TokenStream) -> TokenStream { - async_macro!(db, input: QueryAsMacroInput => expand_query_file_as(input, db, true)) -} - -#[proc_macro] -#[allow(unused_variables)] -pub fn query_as_unchecked(input: TokenStream) -> TokenStream { - #[allow(unused_variables)] -
async_macro!(db, input: QueryAsMacroInput => expand_query_as(input, db, false)) -} - -#[proc_macro] -#[allow(unused_variables)] -pub fn query_file_as_unchecked(input: TokenStream) -> TokenStream { - async_macro!(db, input: QueryAsMacroInput => expand_query_file_as(input, db, false)) + } } #[proc_macro_derive(Encode, attributes(sqlx))] diff --git a/sqlx-macros/src/query_macros/args.rs b/sqlx-macros/src/query/args.rs similarity index 95% rename from sqlx-macros/src/query_macros/args.rs rename to sqlx-macros/src/query/args.rs index 85e6d9aa39..37f80801c1 100644 --- a/sqlx-macros/src/query_macros/args.rs +++ b/sqlx-macros/src/query/args.rs @@ -3,17 +3,16 @@ use syn::spanned::Spanned; use syn::Expr; use quote::{quote, quote_spanned, ToTokens}; -use sqlx::describe::Describe; +use sqlx_core::describe::Describe; use crate::database::{DatabaseExt, ParamChecking}; -use crate::query_macros::QueryMacroInput; +use crate::query::QueryMacroInput; /// Returns a tokenstream which typechecks the arguments passed to the macro /// and binds them to `DB::Arguments` with the ident `query_args`. pub fn quote_args<DB: DatabaseExt>( input: &QueryMacroInput, describe: &Describe<DB>, - checked: bool, ) -> crate::Result<TokenStream> { let db_path = DB::db_path(); @@ -25,7 +24,7 @@ pub fn quote_args<DB: DatabaseExt>( let arg_name = &input.arg_names; - let args_check = if checked && DB::PARAM_CHECKING == ParamChecking::Strong { + let args_check = if input.checked && DB::PARAM_CHECKING == ParamChecking::Strong { describe .param_types .iter() diff --git a/sqlx-macros/src/query/data.rs b/sqlx-macros/src/query/data.rs new file mode 100644 index 0000000000..25077155e6 --- /dev/null +++ b/sqlx-macros/src/query/data.rs @@ -0,0 +1,186 @@ +use sqlx_core::database::Database; +use sqlx_core::describe::Describe; +use sqlx_core::executor::Executor; + +#[cfg_attr(feature = "offline", derive(serde::Deserialize, serde::Serialize))] +#[cfg_attr( + feature = "offline", + serde(bound( + serialize = "Describe<DB>: serde::Serialize", + deserialize = "Describe<DB>: serde::de::DeserializeOwned" + )) +)] +pub struct QueryData<DB: Database> { + #[allow(dead_code)] + pub(super) query: String, + pub(super) describe: Describe<DB>, + #[cfg(feature = "offline")] + pub(super) hash: String, +} + +impl<DB: Database> QueryData<DB> { + pub async fn from_db( + conn: &mut impl Executor<Database = DB>, + query: &str, + ) -> crate::Result<Self> { + Ok(QueryData { + query: query.into(), + describe: conn.describe(query).await?, + #[cfg(feature = "offline")] + hash: offline::hash_string(query), + }) + } } + +#[cfg(feature = "offline")] +pub mod offline { + use super::QueryData; + use std::fs::File; + + use std::fmt::{self, Formatter}; + + use crate::database::DatabaseExt; + use proc_macro2::Span; + use serde::de::{Deserializer, IgnoredAny, MapAccess, Visitor}; + use sqlx_core::describe::Describe; + use std::path::Path; + + #[derive(serde::Deserialize)] + pub struct DynQueryData { + #[serde(skip)] + pub db_name: String, + pub query: String, + pub describe: serde_json::Value, + #[serde(skip)] + pub hash: String, + } + + impl DynQueryData { + /// Find and deserialize the data table for this query from a shared `sqlx-data.json` + /// file. The expected structure is a JSON map keyed by the SHA-256 hash of queries in hex.
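+    /// As a rough sketch (hash shortened, values purely illustrative), the expected layout is:
+    ///
+    /// ```json
+    /// {
+    ///     "db": "PostgreSQL",
+    ///     "0a1b2c...": {
+    ///         "query": "SELECT * FROM todos",
+    ///         "describe": { "param_types": [], "result_columns": [] }
+    ///     }
+    /// }
+    /// ```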
+ pub fn from_data_file(path: impl AsRef<Path>, query: &str) -> crate::Result<Self> { + serde_json::Deserializer::from_reader( + File::open(path.as_ref()).map_err(|e| { + format!("failed to open path {}: {}", path.as_ref().display(), e) + })?, + ) + .deserialize_map(DataFileVisitor { + query, + hash: hash_string(query), + }) + .map_err(Into::into) + } + } + + impl<DB: DatabaseExt> QueryData<DB> + where + Describe<DB>: serde::Serialize + serde::de::DeserializeOwned, + { + pub fn from_dyn_data(dyn_data: DynQueryData) -> crate::Result<Self> { + assert!(!dyn_data.db_name.is_empty()); + assert!(!dyn_data.hash.is_empty()); + + if DB::NAME == dyn_data.db_name { + let describe: Describe<DB> = serde_json::from_value(dyn_data.describe)?; + Ok(QueryData { + query: dyn_data.query, + describe, + hash: dyn_data.hash, + }) + } else { + Err(format!( + "expected query data for {}, got data for {}", + DB::NAME, + dyn_data.db_name + ) + .into()) + } + } + + pub fn save_in(&self, dir: impl AsRef<Path>, input_span: Span) -> crate::Result<()> { + // we save under the hash of the span representation because that should be unique + // per invocation + let path = dir.as_ref().join(format!( + "query-{}.json", + hash_string(&format!("{:?}", input_span)) + )); + + serde_json::to_writer_pretty( + File::create(&path) + .map_err(|e| format!("failed to open path {}: {}", path.display(), e))?, + self, + ) + .map_err(Into::into) + } + } + + pub fn hash_string(query: &str) -> String { + // picked `sha2` because it's already in the dependency tree for both MySQL and Postgres + use sha2::{Digest, Sha256}; + + hex::encode(Sha256::digest(query.as_bytes())) + } + + // lazily deserializes only the `QueryData` for the query we're looking for + struct DataFileVisitor<'a> { + query: &'a str, + hash: String, + } + + impl<'de> Visitor<'de> for DataFileVisitor<'_> { + type Value = DynQueryData; + + fn expecting(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "expected map key {:?} or \"db\"", self.hash) + } + + fn visit_map<A>(self, mut map: A) -> Result<Self::Value, <A as MapAccess<'de>>::Error> + where + A: MapAccess<'de>, + { + let mut db_name: Option<String> = None; + + let query_data = loop { + // unfortunately we can't avoid this copy because deserializing from `io::Read` + // doesn't support deserializing borrowed values + let key = map.next_key::<String>()?.ok_or_else(|| { + serde::de::Error::custom(format_args!( + "failed to find data for query {}", + self.hash )) + })?; + + // lazily deserialize the query data only + if key == "db" { + db_name = Some(map.next_value::<String>()?); + } else if key == self.hash { + let db_name = db_name.ok_or_else(|| { + serde::de::Error::custom("expected \"db\" key before query hash keys") + })?; + + let mut query_data: DynQueryData = map.next_value()?; + + if query_data.query == self.query { + query_data.db_name = db_name; + query_data.hash = self.hash.clone(); + break query_data; + } else { + return Err(serde::de::Error::custom(format_args!( + "hash collision for stored queries:\n{:?}\n{:?}", + self.query, query_data.query + ))); + }; + } else { + // we don't care about entries that don't match our hash + let _ = map.next_value::<IgnoredAny>()?; + } + }; + + // Serde expects us to consume the whole map; fortunately they've got a convenient + // type to let us do just that + while let Some(_) = map.next_entry::<IgnoredAny, IgnoredAny>()?
{} + + Ok(query_data) + } + } +} diff --git a/sqlx-macros/src/query/input.rs b/sqlx-macros/src/query/input.rs new file mode 100644 index 0000000000..0757f1dcfb --- /dev/null +++ b/sqlx-macros/src/query/input.rs @@ -0,0 +1,150 @@ +use std::env; +use std::fs; + +use proc_macro2::{Ident, Span}; +use quote::format_ident; +use syn::parse::{Parse, ParseStream}; +use syn::{Expr, LitBool, LitStr}; +use syn::{ExprArray, Type}; + +/// Macro input shared by `query!()` and `query_file!()` +pub struct QueryMacroInput { + pub(super) src: String, + #[cfg_attr(not(feature = "offline"), allow(dead_code))] + pub(super) src_span: Span, + + pub(super) record_type: RecordType, + + // `arg0 .. argN` for N arguments + pub(super) arg_names: Vec<Ident>, + pub(super) arg_exprs: Vec<Expr>, + + pub(super) checked: bool, +} + +enum QuerySrc { + String(String), + File(String), +} + +pub enum RecordType { + Given(Type), + Generated, +} + +impl Parse for QueryMacroInput { + fn parse(input: ParseStream) -> syn::Result<Self> { + let mut query_src: Option<(QuerySrc, Span)> = None; + let mut args: Option<Vec<Expr>> = None; + let mut record_type = RecordType::Generated; + let mut checked = true; + + let mut expect_comma = false; + + while !input.is_empty() { + if expect_comma { + let _ = input.parse::<syn::token::Comma>()?; + } + + let key: Ident = input.parse()?; + + let _ = input.parse::<syn::token::Eq>()?; + + if key == "source" { + let lit_str = input.parse::<LitStr>()?; + query_src = Some((QuerySrc::String(lit_str.value()), lit_str.span())); + } else if key == "source_file" { + let lit_str = input.parse::<LitStr>()?; + query_src = Some((QuerySrc::File(lit_str.value()), lit_str.span())); + } else if key == "args" { + let exprs = input.parse::<ExprArray>()?; + args = Some(exprs.elems.into_iter().collect()) + } else if key == "record" { + record_type = RecordType::Given(input.parse()?); + } else if key == "checked" { + let lit_bool = input.parse::<LitBool>()?; + checked = lit_bool.value; + } else { + let message = format!("unexpected input key: {}", key); + return Err(syn::Error::new_spanned(key, message)); + } + + expect_comma = true; + } + + let (src, src_span) = + query_src.ok_or_else(|| input.error("expected `source` or `source_file` key"))?; + + let arg_exprs = args.unwrap_or_default(); + let arg_names = (0..arg_exprs.len()) + .map(|i| format_ident!("arg{}", i)) + .collect(); + + Ok(QueryMacroInput { + src: src.resolve(src_span)?, + src_span, + record_type, + arg_names, + arg_exprs, + checked, + }) + } +} + +impl QuerySrc { + /// If the query source is a file, read it to a string. Otherwise return the query string.
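+    /// For example (file name illustrative): `source = "SELECT 1"` resolves to the string
+    /// itself, while `source_file = "queries/list_todos.sql"` resolves to the contents of
+    /// that file, which `read_file_src` below reads relative to `CARGO_MANIFEST_DIR`.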
+ fn resolve(self, source_span: Span) -> syn::Result<String> { + match self { + QuerySrc::String(string) => Ok(string), + QuerySrc::File(file) => read_file_src(&file, source_span), + } + } +} + +fn read_file_src(source: &str, source_span: Span) -> syn::Result<String> { + use std::path::Path; + + let path = Path::new(source); + + if path.is_absolute() { + return Err(syn::Error::new( + source_span, + "absolute paths will only work on the current machine", + )); + } + + // requires `proc_macro::SourceFile::path()` to be stable + // https://github.com/rust-lang/rust/issues/54725 + if path.is_relative() + && !path + .parent() + .map_or(false, |parent| !parent.as_os_str().is_empty()) + { + return Err(syn::Error::new( + source_span, + "paths relative to the current file's directory are not currently supported", + )); + } + + let base_dir = env::var("CARGO_MANIFEST_DIR").map_err(|_| { + syn::Error::new( + source_span, + "CARGO_MANIFEST_DIR is not set; please use Cargo to build", + ) + })?; + + let base_dir_path = Path::new(&base_dir); + + let file_path = base_dir_path.join(path); + + fs::read_to_string(&file_path).map_err(|e| { + syn::Error::new( + source_span, + format!( + "failed to read query file at {}: {}", + file_path.display(), + e + ), + ) + }) +} diff --git a/sqlx-macros/src/query/mod.rs b/sqlx-macros/src/query/mod.rs new file mode 100644 index 0000000000..67fc681c32 --- /dev/null +++ b/sqlx-macros/src/query/mod.rs @@ -0,0 +1,245 @@ +use std::borrow::Cow; +use std::env; + +use proc_macro2::{Span, TokenStream}; +use syn::Type; +use url::Url; + +pub use input::QueryMacroInput; +use quote::{format_ident, quote}; +use sqlx_core::connection::Connect; +use sqlx_core::database::Database; +use sqlx_core::describe::Describe; + +use crate::database::DatabaseExt; +use crate::query::data::QueryData; +use crate::query::input::RecordType; +use crate::runtime::block_on; + +// pub use query::expand_query; + +mod args; +mod data; +mod input; +mod output; +// mod query; + +pub fn expand_input(input: QueryMacroInput) -> crate::Result<TokenStream> { + let manifest_dir = + env::var("CARGO_MANIFEST_DIR").map_err(|_| "`CARGO_MANIFEST_DIR` must be set")?; + + // If a .env file exists at CARGO_MANIFEST_DIR, load environment variables from this, + // otherwise fall back to the default dotenv behaviour. + let env_path = std::path::Path::new(&manifest_dir).join(".env"); + if env_path.exists() { + dotenv::from_path(&env_path) + .map_err(|e| format!("failed to load environment from {:?}, {}", env_path, e))?
+ } + + // if `dotenv` wasn't initialized by the above, `dotenv::var()` will lazily do it here + match dotenv::var("DATABASE_URL").ok() { + Some(db_url) => expand_from_db(input, &db_url), + #[cfg(feature = "offline")] + None => { + let data_file_path = std::path::Path::new(&manifest_dir).join("sqlx-data.json"); + + if data_file_path.exists() { + expand_from_file(input, data_file_path) + } else { + Err( + "`DATABASE_URL` must be set, or `cargo sqlx prepare` must have been run \ + and sqlx-data.json must exist, to use query macros" + .into(), + ) + } + } + #[cfg(not(feature = "offline"))] + None => Err("`DATABASE_URL` must be set to use query macros".into()), + } +} + +fn expand_from_db(input: QueryMacroInput, db_url: &str) -> crate::Result<TokenStream> { + let db_url = Url::parse(db_url)?; + match db_url.scheme() { + #[cfg(feature = "postgres")] + "postgres" | "postgresql" => { + let data = block_on(async { + let mut conn = sqlx_core::postgres::PgConnection::connect(db_url).await?; + QueryData::from_db(&mut conn, &input.src).await + })?; + + expand_with_data(input, data) + }, + #[cfg(not(feature = "postgres"))] + "postgres" | "postgresql" => Err(format!("database URL has the scheme of a PostgreSQL database but the `postgres` feature is not enabled").into()), + #[cfg(feature = "mysql")] + "mysql" | "mariadb" => { + let data = block_on(async { + let mut conn = sqlx_core::mysql::MySqlConnection::connect(db_url).await?; + QueryData::from_db(&mut conn, &input.src).await + })?; + + expand_with_data(input, data) + }, + #[cfg(not(feature = "mysql"))] + "mysql" | "mariadb" => Err(format!("database URL has the scheme of a MySQL/MariaDB database but the `mysql` feature is not enabled").into()), + #[cfg(feature = "sqlite")] + "sqlite" => { + let data = block_on(async { + let mut conn = sqlx_core::sqlite::SqliteConnection::connect(db_url).await?; + QueryData::from_db(&mut conn, &input.src).await + })?; + + expand_with_data(input, data) + }, + #[cfg(not(feature = "sqlite"))] + "sqlite" => Err(format!("database URL has the scheme of a SQLite database but the `sqlite` feature is not enabled").into()), + scheme => Err(format!("unknown database URL scheme {:?}", scheme).into()) + } +} + +#[cfg(feature = "offline")] +pub fn expand_from_file( + input: QueryMacroInput, + file: std::path::PathBuf, +) -> crate::Result<TokenStream> { + use data::offline::DynQueryData; + + let query_data = DynQueryData::from_data_file(file, &input.src)?; + assert!(!query_data.db_name.is_empty()); + + match &*query_data.db_name { + #[cfg(feature = "postgres")] + sqlx_core::postgres::Postgres::NAME => expand_with_data( + input, + QueryData::<sqlx_core::postgres::Postgres>::from_dyn_data(query_data)?, + ), + #[cfg(feature = "mysql")] + sqlx_core::mysql::MySql::NAME => expand_with_data( + input, + QueryData::<sqlx_core::mysql::MySql>::from_dyn_data(query_data)?, + ), + #[cfg(feature = "sqlite")] + sqlx_core::sqlite::Sqlite::NAME => expand_with_data( + input, + QueryData::<sqlx_core::sqlite::Sqlite>::from_dyn_data(query_data)?, + ), + _ => Err(format!( + "found query data for {} but the feature for that database was not enabled", + query_data.db_name + ) + .into()), + } +} + +// marker trait for `Describe` that lets us conditionally require it to be `Serialize + Deserialize` +#[cfg(feature = "offline")] +trait DescribeExt: serde::Serialize + serde::de::DeserializeOwned {} + +#[cfg(feature = "offline")] +impl<DB: Database> DescribeExt for Describe<DB> where + Describe<DB>: serde::Serialize + serde::de::DeserializeOwned +{ +} + +#[cfg(not(feature = "offline"))] +trait DescribeExt {} + +#[cfg(not(feature = "offline"))] +impl<DB: Database> DescribeExt for Describe<DB> {} + +fn expand_with_data<DB: DatabaseExt>( +
input: QueryMacroInput, + data: QueryData<DB>, +) -> crate::Result<TokenStream> +where + Describe<DB>: DescribeExt, +{ + // validate at the minimum that our args match the query's input parameters + if input.arg_names.len() != data.describe.param_types.len() { + return Err(syn::Error::new( + Span::call_site(), + format!( + "expected {} parameters, got {}", + data.describe.param_types.len(), + input.arg_names.len() + ), + ) + .into()); + } + + let args_tokens = args::quote_args(&input, &data.describe)?; + + let query_args = format_ident!("query_args"); + + let output = if data.describe.result_columns.is_empty() { + let db_path = DB::db_path(); + let sql = &input.src; + + quote! { + sqlx::query::<#db_path>(#sql).bind_all(#query_args) + } + } else { + let columns = output::columns_to_rust::<DB>(&data.describe)?; + + let (out_ty, mut record_tokens) = match input.record_type { + RecordType::Generated => { + let record_name: Type = syn::parse_str("Record").unwrap(); + + let record_fields = columns.iter().map( + |&output::RustColumn { + ref ident, + ref type_, + }| quote!(#ident: #type_,), + ); + + let record_tokens = quote! { + #[derive(Debug)] + struct #record_name { + #(#record_fields)* + } + }; + + (Cow::Owned(record_name), record_tokens) + } + RecordType::Given(ref out_ty) => (Cow::Borrowed(out_ty), quote!()), + }; + + record_tokens.extend(output::quote_query_as::<DB>( + &input, + &out_ty, + &query_args, + &columns, + )); + + record_tokens + }; + + let arg_names = &input.arg_names; + + let ret_tokens = quote! { + macro_rules! macro_result { + (#($#arg_names:expr),*) => {{ + use sqlx::arguments::Arguments as _; + + #args_tokens + + #output + }} + } + }; + + #[cfg(feature = "offline")] + { + let mut save_dir = std::path::PathBuf::from( + env::var("CARGO_TARGET_DIR").unwrap_or_else(|_| "target/".into()), + ); + + save_dir.push("sqlx"); + + std::fs::create_dir_all(&save_dir)?; + data.save_in(save_dir, input.src_span)?; + } + + Ok(ret_tokens) +} diff --git a/sqlx-macros/src/query_macros/output.rs b/sqlx-macros/src/query/output.rs similarity index 96% rename from sqlx-macros/src/query_macros/output.rs rename to sqlx-macros/src/query/output.rs index 45cbca2c3e..9631a731ab 100644 --- a/sqlx-macros/src/query_macros/output.rs +++ b/sqlx-macros/src/query/output.rs @@ -1,11 +1,12 @@ use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use syn::Path; +use syn::Type; -use sqlx::describe::Describe; +use sqlx_core::describe::Describe; use crate::database::DatabaseExt; +use crate::query::QueryMacroInput; use std::fmt::{self, Display, Formatter}; pub struct RustColumn { @@ -98,11 +99,10 @@ pub fn columns_to_rust<DB: DatabaseExt>(describe: &Describe<DB>) -> crate::Resul } pub fn quote_query_as<DB: DatabaseExt>( - sql: &str, - out_ty: &Path, + input: &QueryMacroInput, + out_ty: &Type, bind_args: &Ident, columns: &[RustColumn], - checked: bool, ) -> TokenStream { let instantiations = columns.iter().enumerate().map( |( @@ -116,7 +116,7 @@ pub fn quote_query_as<DB: DatabaseExt>( // For "checked" queries, the macro checks these at compile time and using "try_get" // would also perform pointless runtime checks - if checked { + if input.checked { quote!( #ident: row.try_get_unchecked::<#type_, _>(#i).try_unwrap_optional()? ) } else { quote!( #ident: row.try_get_unchecked(#i)? ) @@ -126,6 +126,7 @@ pub fn quote_query_as<DB: DatabaseExt>( let db_path = DB::db_path(); let row_path = DB::row_path(); + let sql = &input.src; quote!
{ sqlx::query::<#db_path>(#sql).bind_all(#bind_args).try_map(|row: #row_path| { diff --git a/sqlx-macros/src/query_macros/input.rs b/sqlx-macros/src/query_macros/input.rs deleted file mode 100644 index c9601f3c78..0000000000 --- a/sqlx-macros/src/query_macros/input.rs +++ /dev/null @@ -1,214 +0,0 @@ -use std::env; - -use proc_macro2::{Ident, Span}; - -use quote::{format_ident, ToTokens}; -use syn::parse::{Parse, ParseStream}; -use syn::punctuated::Punctuated; -use syn::spanned::Spanned; -use syn::token::Group; -use syn::{Expr, ExprLit, ExprPath, Lit}; -use syn::{ExprGroup, Token}; - -use sqlx::connection::Connection; -use sqlx::describe::Describe; - -use crate::runtime::fs; - -/// Macro input shared by `query!()` and `query_file!()` -pub struct QueryMacroInput { - pub(super) source: String, - pub(super) source_span: Span, - // `arg0 .. argN` for N arguments - pub(super) arg_names: Vec<Ident>, - pub(super) arg_exprs: Vec<Expr>, -} - -impl QueryMacroInput { - fn from_exprs(input: ParseStream, mut args: impl Iterator<Item = Expr>) -> syn::Result<Self> { - fn lit_err<T>(span: Span, unexpected: Expr) -> syn::Result<T> { - Err(syn::Error::new( - span, - format!( - "expected string literal, got {}", - unexpected.to_token_stream() - ), - )) - } - - let (source, source_span) = match args.next() { - Some(Expr::Lit(ExprLit { - lit: Lit::Str(sql), .. - })) => (sql.value(), sql.span()), - Some(Expr::Group(ExprGroup { - expr, - group_token: Group { span }, - .. - })) => { - // this duplication with the above is necessary because `expr` is `Box<Expr>` here - // which we can't directly pattern-match without `box_patterns` - match *expr { - Expr::Lit(ExprLit { - lit: Lit::Str(sql), .. - }) => (sql.value(), span), - other_expr => return lit_err(span, other_expr), - } - } - Some(other_expr) => return lit_err(other_expr.span(), other_expr), - None => return Err(input.error("expected SQL string literal")), - }; - - let arg_exprs: Vec<_> = args.collect(); - let arg_names = (0..arg_exprs.len()) - .map(|i| format_ident!("arg{}", i)) - .collect(); - - Ok(Self { - source, - source_span, - arg_exprs, - arg_names, - }) - } - - pub async fn expand_file_src(self) -> syn::Result<Self> { - let source = read_file_src(&self.source, self.source_span).await?; - - Ok(Self { source, ..self }) - } - - /// Run a parse/describe on the query described by this input and validate that it matches the - /// passed number of args - pub async fn describe_validate<C: Connection>( - &self, - conn: &mut C, - ) -> crate::Result<Describe<C::Database>> { - let describe = conn - .describe(&*self.source) - .await - .map_err(|e| syn::Error::new(self.source_span, e))?; - - if self.arg_names.len() != describe.param_types.len() { - return Err(syn::Error::new( - Span::call_site(), - format!( - "expected {} parameters, got {}", - describe.param_types.len(), - self.arg_names.len() - ), - ) - .into()); - } - - Ok(describe) - } -} - -impl Parse for QueryMacroInput { - fn parse(input: ParseStream) -> syn::Result<Self> { - let args = Punctuated::<Expr, Token![,]>::parse_terminated(input)?.into_iter(); - - Self::from_exprs(input, args) - } -} - -/// Macro input shared by `query_as!()` and `query_file_as!()` -pub struct QueryAsMacroInput { - pub(super) as_ty: ExprPath, - pub(super) query_input: QueryMacroInput, -} - -impl QueryAsMacroInput { - pub async fn expand_file_src(self) -> syn::Result<Self> { - Ok(Self { - query_input: self.query_input.expand_file_src().await?, - ..self - }) - } -} - -impl Parse for QueryAsMacroInput { - fn parse(input: ParseStream) -> syn::Result<Self> { - fn path_err<T>(span: Span, unexpected: Expr) -> syn::Result<T> { - Err(syn::Error::new( - span, -
format!( - "expected path to a type, got {}", - unexpected.to_token_stream() - ), - )) - } - - let mut args = Punctuated::::parse_terminated(input)?.into_iter(); - - let as_ty = match args.next() { - Some(Expr::Path(path)) => path, - Some(Expr::Group(ExprGroup { - expr, - group_token: Group { span }, - .. - })) => { - // this duplication with the above is necessary because `expr` is `Box` here - // which we can't directly pattern-match without `box_patterns` - match *expr { - Expr::Path(path) => path, - other_expr => return path_err(span, other_expr), - } - } - Some(other_expr) => return path_err(other_expr.span(), other_expr), - None => return Err(input.error("expected path to SQL file")), - }; - - Ok(QueryAsMacroInput { - as_ty, - query_input: QueryMacroInput::from_exprs(input, args)?, - }) - } -} - -async fn read_file_src(source: &str, source_span: Span) -> syn::Result { - use std::path::Path; - - let path = Path::new(source); - - if path.is_absolute() { - return Err(syn::Error::new( - source_span, - "absolute paths will only work on the current machine", - )); - } - - // requires `proc_macro::SourceFile::path()` to be stable - // https://github.com/rust-lang/rust/issues/54725 - if path.is_relative() - && !path - .parent() - .map_or(false, |parent| !parent.as_os_str().is_empty()) - { - return Err(syn::Error::new( - source_span, - "paths relative to the current file's directory are not currently supported", - )); - } - - let base_dir = env::var("CARGO_MANIFEST_DIR").map_err(|_| { - syn::Error::new( - source_span, - "CARGO_MANIFEST_DIR is not set; please use Cargo to build", - ) - })?; - - let base_dir_path = Path::new(&base_dir); - - let file_path = base_dir_path.join(path); - - fs::read_to_string(&file_path).await.map_err(|e| { - syn::Error::new( - source_span, - format!( - "failed to read query file at {}: {}", - file_path.display(), - e - ), - ) - }) -} diff --git a/sqlx-macros/src/query_macros/mod.rs b/sqlx-macros/src/query_macros/mod.rs deleted file mode 100644 index e2eeca7bb2..0000000000 --- a/sqlx-macros/src/query_macros/mod.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::fmt::Display; - -use proc_macro2::TokenStream; -use quote::{format_ident, quote}; - -pub use input::{QueryAsMacroInput, QueryMacroInput}; -pub use query::expand_query; - -use crate::database::DatabaseExt; - -use sqlx::connection::Connection; -use sqlx::database::Database; - -mod args; -mod input; -mod output; -mod query; - -pub async fn expand_query_file( - input: QueryMacroInput, - conn: C, - checked: bool, -) -> crate::Result -where - C::Database: DatabaseExt + Sized, - ::TypeInfo: Display, -{ - expand_query(input.expand_file_src().await?, conn, checked).await -} - -pub async fn expand_query_as( - input: QueryAsMacroInput, - mut conn: C, - checked: bool, -) -> crate::Result -where - C::Database: DatabaseExt + Sized, - ::TypeInfo: Display, -{ - let describe = input.query_input.describe_validate(&mut conn).await?; - - if describe.result_columns.is_empty() { - return Err(syn::Error::new( - input.query_input.source_span, - "query must output at least one column", - ) - .into()); - } - - let args_tokens = args::quote_args(&input.query_input, &describe, checked)?; - - let query_args = format_ident!("query_args"); - - let columns = output::columns_to_rust(&describe)?; - let output = output::quote_query_as::( - &input.query_input.source, - &input.as_ty.path, - &query_args, - &columns, - checked, - ); - - let arg_names = &input.query_input.arg_names; - - Ok(quote! { - macro_rules! 
macro_result { - (#($#arg_names:expr),*) => {{ - use sqlx::arguments::Arguments as _; - - #args_tokens - - #output - }} - } - }) -} - -pub async fn expand_query_file_as<C: Connection>( - input: QueryAsMacroInput, - conn: C, - checked: bool, -) -> crate::Result<TokenStream> -where - C::Database: DatabaseExt + Sized, - <C::Database as Database>::TypeInfo: Display, -{ - expand_query_as(input.expand_file_src().await?, conn, checked).await -} diff --git a/sqlx-macros/src/query_macros/query.rs b/sqlx-macros/src/query_macros/query.rs deleted file mode 100644 index 5400406720..0000000000 --- a/sqlx-macros/src/query_macros/query.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::fmt::Display; - -use proc_macro2::Span; -use proc_macro2::TokenStream; -use syn::{Ident, Path}; - -use quote::{format_ident, quote}; -use sqlx::{connection::Connection, database::Database}; - -use super::{args, output, QueryMacroInput}; -use crate::database::DatabaseExt; - -/// Given an input like `query!("SELECT * FROM accounts WHERE account_id > ?", account_id)`, -/// expand to an anonymous record -pub async fn expand_query<C: Connection>( - input: QueryMacroInput, - mut conn: C, - checked: bool, -) -> crate::Result<TokenStream> -where - C::Database: DatabaseExt + Sized, - <C::Database as Database>::TypeInfo: Display, -{ - let describe = input.describe_validate(&mut conn).await?; - let sql = &input.source; - - let args = args::quote_args(&input, &describe, checked)?; - - let arg_names = &input.arg_names; - let db_path = <C::Database as DatabaseExt>::db_path(); - - if describe.result_columns.is_empty() { - return Ok(quote! { - macro_rules! macro_result { - (#($#arg_names:expr),*) => {{ - use sqlx::arguments::Arguments as _; - - #args - - sqlx::query::<#db_path>(#sql).bind_all(query_args) - } - }} - }); - } - - let columns = output::columns_to_rust(&describe)?; - - let record_type: Path = Ident::new("Record", Span::call_site()).into(); - - let record_fields = columns - .iter() - .map( - |&output::RustColumn { - ref ident, - ref type_, - }| quote!(#ident: #type_,), - ) - .collect::<TokenStream>(); - - let query_args = format_ident!("query_args"); - let output = output::quote_query_as::<C::Database>( - sql, - &record_type, - &query_args, - if checked { &columns } else { &[] }, - checked, - ); - - Ok(quote! { - macro_rules!
macro_result { - (#($#arg_names:expr),*) => {{ - use sqlx::arguments::Arguments as _; - - #[derive(Debug)] - struct #record_type { - #record_fields - } - - #args - - #output - } - }} - }) -} diff --git a/sqlx-macros/src/runtime.rs b/sqlx-macros/src/runtime.rs index b79060e5ff..2ad420df71 100644 --- a/sqlx-macros/src/runtime.rs +++ b/sqlx-macros/src/runtime.rs @@ -5,7 +5,23 @@ compile_error!("one of 'runtime-async-std' or 'runtime-tokio' features must be e compile_error!("only one of 'runtime-async-std' or 'runtime-tokio' features must be enabled"); #[cfg(feature = "runtime-async-std")] -pub(crate) use async_std::fs; +pub(crate) use async_std::task::block_on; #[cfg(feature = "runtime-tokio")] -pub(crate) use tokio::fs; +pub fn block_on<F: std::future::Future>(future: F) -> F::Output { + use once_cell::sync::Lazy; + use tokio::runtime::{self, Runtime}; + + // lazily initialize a global runtime once for multiple invocations of the macros + static RUNTIME: Lazy<Runtime> = Lazy::new(|| { + runtime::Builder::new() + // `.basic_scheduler()` requires calling `Runtime::block_on()` which needs mutability + .threaded_scheduler() + .enable_io() + .enable_time() + .build() + .expect("failed to initialize Tokio runtime") + }); + + RUNTIME.enter(|| futures::executor::block_on(future)) +} diff --git a/src/macros.rs b/src/macros.rs index ade4105ba2..c367400026 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -28,6 +28,25 @@ /// # fn main() {} /// ``` /// +/// ## Requirements +/// * The `DATABASE_URL` environment variable must be set at build-time to point to a database +/// server with the schema that the query string will be checked against. All variants of `query!()` +/// use [dotenv] so this can be in a `.env` file instead. +/// +/// * Or, `sqlx-data.json` must exist at the workspace root. See [Offline Mode](#offline-mode) +/// below. +/// +/// * The query must be a string literal or else it cannot be introspected (and thus cannot +/// be dynamic or the result of another macro). +/// +/// * The `QueryAs` instance will be bound to the same database type as `query!()` was compiled +/// against (e.g. you cannot build against a Postgres database and then run the query against +/// a MySQL database). +/// +/// * The scheme of the database URL (e.g. `postgres://` or `mysql://`) will be used to +/// determine the database type. +/// +/// [dotenv]: https://crates.io/crates/dotenv /// ## Query Arguments /// Like `println!()` and the other formatting macros, you can add bind parameters to your SQL /// and this macro will typecheck passed arguments and error on missing ones: @@ -94,22 +113,26 @@ /// /// To override the nullability of an output column, use [query_as!]. /// -/// ## Requirements -/// * The `DATABASE_URL` environment variable must be set at build-time to point to a database -/// server with the schema that the query string will be checked against. (All variants of -/// `query!()` use [dotenv] so this can be in a `.env` file instead.) +/// ### Offline Mode (requires the `offline` feature) +/// The macros can be configured to not require a live database connection for compilation, +/// but this requires a couple of extra steps (sketched below): /// -/// * The query must be a string literal or else it cannot be introspected (and thus cannot -/// be dynamic or the result of another macro). +/// * Run `cargo install sqlx-cli`. +/// * In your project with `DATABASE_URL` set (or in a `.env` file) and the database server running, +/// run `cargo sqlx prepare`. +/// * Check the generated `sqlx-data.json` file into version control. +/// * Don't have `DATABASE_URL` set during compilation.
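+///
+/// A typical first-time setup might look like this (connection URL and database name are
+/// illustrative):
+///
+/// ```shell
+/// $ cargo install sqlx-cli
+/// $ export DATABASE_URL=postgres://postgres:postgres@localhost/todos
+/// $ cargo sqlx prepare
+/// $ git add sqlx-data.json
+/// ```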
/// -/// * The `QueryAs` instance will be bound to the same database type as `query!()` was compiled -/// against (e.g. you cannot build against a Postgres database and then run the query against -/// a MySQL database). +/// Your project can now be built without a database connection (you must omit `DATABASE_URL` or +/// else it will still try to connect). To update the generated file, simply run `cargo sqlx prepare` +/// again. /// -/// * The schema of the database URL (e.g. `postgres://` or `mysql://`) will be used to -/// determine the database type. +/// To ensure that your `sqlx-data.json` file is kept up-to-date, both with the queries in your +/// project and your database schema itself, run +/// `cargo install sqlx-cli && cargo sqlx prepare --check` in your Continuous Integration script. +/// +/// See [the README for `sqlx-cli`](https://crates.io/crates/sqlx-cli) for more information. /// -/// [dotenv]: https://crates.io/crates/dotenv /// ## See Also /// * [query_as!] if you want to use a struct you can name, /// * [query_file!] if you want to define the SQL query out-of-line, /// * [query_file_as!] if you want both of the above. @@ -122,14 +145,14 @@ macro_rules! query ( ($query:literal) => ({ #[macro_use] mod _macro_result { - $crate::sqlx_macros::query!($query); + $crate::sqlx_macros::expand_query!(source = $query); } macro_result!() }); ($query:literal, $($args:expr),*$(,)?) => ({ #[macro_use] mod _macro_result { - $crate::sqlx_macros::query!($query, $($args),*); + $crate::sqlx_macros::expand_query!(source = $query, args = [$($args),*]); } macro_result!($($args),*) }) @@ -140,19 +163,17 @@ macro_rules! query ( #[macro_export] #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] macro_rules! query_unchecked ( - // by emitting a macro definition from our proc-macro containing the result tokens, - // we no longer have a need for `proc-macro-hack` ($query:literal) => ({ #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_unchecked!($query); + $crate::sqlx_macros::expand_query!(source = $query, checked = false); } macro_result!() }); ($query:literal, $($args:expr),*$(,)?) => ({ #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_unchecked!($query, $($args),*); + $crate::sqlx_macros::expand_query!(source = $query, args = [$($args),*], checked = false); } macro_result!($($args),*) }) @@ -203,17 +224,17 @@ macro_rules! query_unchecked ( #[macro_export] #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] macro_rules! query_file ( - ($query:literal) => (#[allow(dead_code)]{ + ($path:literal) => (#[allow(dead_code)]{ #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_file!($query); + $crate::sqlx_macros::expand_query!(source_file = $path); } macro_result!() }); - ($query:literal, $($args:expr),*$(,)?) => (#[allow(dead_code)]{ + ($path:literal, $($args:expr),*$(,)?) => (#[allow(dead_code)]{ #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_file!($query, $($args),*); + $crate::sqlx_macros::expand_query!(source_file = $path, args = [$($args),*]); } macro_result!($($args),*) }) @@ -224,17 +245,17 @@ macro_rules! query_file ( #[macro_export] #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] macro_rules! query_file_unchecked ( - ($query:literal) => (#[allow(dead_code)]{ + ($path:literal) => (#[allow(dead_code)]{ #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_file_unchecked!($query); + $crate::sqlx_macros::expand_query!(source_file = $path, checked = false); } macro_result!() }); - ($query:literal, $($args:expr),*$(,)?)
=> (#[allow(dead_code)]{ + ($path:literal, $($args:expr),*$(,)?) => (#[allow(dead_code)]{ #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_file_unchecked!($query, $($args),*); + $crate::sqlx_macros::expand_query!(source_file = $path, args = [$($args),*], checked = false); } macro_result!($($args),*) }) @@ -298,14 +319,14 @@ macro_rules! query_as ( ($out_struct:path, $query:literal) => (#[allow(dead_code)] { #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_as!($out_struct, $query); + $crate::sqlx_macros::expand_query!(record = $out_struct, source = $query); } macro_result!() }); ($out_struct:path, $query:literal, $($args:expr),*$(,)?) => (#[allow(dead_code)] { #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_as!($out_struct, $query, $($args),*); + $crate::sqlx_macros::expand_query!(record = $out_struct, source = $query, args = [$($args),*]); } macro_result!($($args),*) }) @@ -347,17 +368,17 @@ macro_rules! query_as ( #[macro_export] #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] macro_rules! query_file_as ( - ($out_struct:path, $query:literal) => (#[allow(dead_code)] { + ($out_struct:path, $path:literal) => (#[allow(dead_code)] { #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_file_as!($out_struct, $query); + $crate::sqlx_macros::expand_query!(record = $out_struct, source_file = $path); } macro_result!() }); - ($out_struct:path, $query:literal, $($args:tt),*$(,)?) => (#[allow(dead_code)] { + ($out_struct:path, $path:literal, $($args:tt),*$(,)?) => (#[allow(dead_code)] { #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_file_as!($out_struct, $query, $($args),*); + $crate::sqlx_macros::expand_query!(record = $out_struct, source_file = $path, args = [$($args),*]); } macro_result!($($args),*) }) @@ -371,7 +392,7 @@ macro_rules! query_as_unchecked ( ($out_struct:path, $query:literal) => (#[allow(dead_code)] { #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_as_unchecked!($out_struct, $query); + $crate::sqlx_macros::expand_query!(record = $out_struct, source = $query, checked = false); } macro_result!() }); @@ -379,7 +400,7 @@ macro_rules! query_as_unchecked ( ($out_struct:path, $query:literal, $($args:expr),*$(,)?) => (#[allow(dead_code)] { #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_as_unchecked!($out_struct, $query, $($args),*); + $crate::sqlx_macros::expand_query!(record = $out_struct, source = $query, args = [$($args),*], checked = false); } macro_result!($($args),*) }) @@ -391,18 +412,18 @@ macro_rules! query_as_unchecked ( #[macro_export] #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] macro_rules! query_file_as_unchecked ( - ($out_struct:path, $query:literal) => (#[allow(dead_code)] { + ($out_struct:path, $path:literal) => (#[allow(dead_code)] { #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_file_as_unchecked!($out_struct, $query); + $crate::sqlx_macros::expand_query!(record = $out_struct, source_file = $path, checked = false); } macro_result!() }); - ($out_struct:path, $query:literal, $($args:tt),*$(,)?) => (#[allow(dead_code)] { + ($out_struct:path, $path:literal, $($args:tt),*$(,)?) => (#[allow(dead_code)] { #[macro_use] mod _macro_result { - $crate::sqlx_macros::query_file_as_unchecked!($out_struct, $query, $($args),*); + $crate::sqlx_macros::expand_query!(record = $out_struct, source_file = $path, args = [$($args),*], checked = false); } macro_result!($($args),*) })