diff --git a/.gitignore b/.gitignore index 34b9a8e48..1085c15ed 100644 --- a/.gitignore +++ b/.gitignore @@ -24,3 +24,5 @@ node_modules/ **/dist/ .claude-session-id + +squawk/ diff --git a/.sqlx/query-d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b.json b/.sqlx/query-332fd0e123d2e7cc1e9abdd6c8db4718faaa1005845c8f8f14c5c78e76a258eb.json similarity index 56% rename from .sqlx/query-d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b.json rename to .sqlx/query-332fd0e123d2e7cc1e9abdd6c8db4718faaa1005845c8f8f14c5c78e76a258eb.json index d1766e309..d5e72d910 100644 --- a/.sqlx/query-d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b.json +++ b/.sqlx/query-332fd0e123d2e7cc1e9abdd6c8db4718faaa1005845c8f8f14c5c78e76a258eb.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "select\n version(),\n current_setting('server_version_num') :: int8 AS version_num,\n (\n select\n count(*) :: int8 AS active_connections\n FROM\n pg_stat_activity\n ) AS active_connections,\n current_setting('max_connections') :: int8 AS max_connections;", + "query": "select\n version(),\n current_setting('server_version_num') :: int8 AS version_num,\n current_setting('server_version_num') :: int8 / 10000 AS major_version,\n (\n select\n count(*) :: int8 AS active_connections\n FROM\n pg_stat_activity\n ) AS active_connections,\n current_setting('max_connections') :: int8 AS max_connections;\n", "describe": { "columns": [ { @@ -15,11 +15,16 @@ }, { "ordinal": 2, - "name": "active_connections", + "name": "major_version", "type_info": "Int8" }, { "ordinal": 3, + "name": "active_connections", + "type_info": "Int8" + }, + { + "ordinal": 4, "name": "max_connections", "type_info": "Int8" } @@ -31,8 +36,9 @@ null, null, null, + null, null ] }, - "hash": "d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b" + "hash": "332fd0e123d2e7cc1e9abdd6c8db4718faaa1005845c8f8f14c5c78e76a258eb" } diff --git a/Cargo.lock b/Cargo.lock index 386a1c52a..66c2a19f5 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2758,7 +2758,9 @@ dependencies = [ "pgt_console", "pgt_diagnostics", "pgt_query", + "pgt_query_ext", "pgt_schema_cache", + "pgt_statement_splitter", "pgt_test_macros", "pgt_text_size", "serde", diff --git a/agentic/port_squawk_rules.md b/agentic/port_squawk_rules.md new file mode 100644 index 000000000..5d5fb0147 --- /dev/null +++ b/agentic/port_squawk_rules.md @@ -0,0 +1,106 @@ +The goal is to port all missing rules from Squawk to our analyser. + +Our analyser lives in the `pgt_analyser` crate. There is a `CONTRIBUTING.md` guide in that crate which explains how to add new rules. Please also read existing rules to see how it all works. + +Then, I want you to check the rules in the squawk project which I copied here for convenience. The rules are in `squawk/linter/src/rules`. The implementation should be very similar to what we have, and porting them should be straightforward. Here are a few things to watch out for though: + +- although both libraries are using `libpg_query` to parse the SQL, the bindings can be different. Ours is in the `pgt_query` crate if you need a reference. The `protobuf.rs` file contains the full thing. 
+- the context for each rule is different, but you can get the same information out of it: +```rust +pub struct RuleContext<'a, R: Rule> { + // the ast of the target statement + stmt: &'a pgt_query::NodeEnum, + // options for that specific rule + options: &'a R::Options, + // the schema cache - also includes the postgres version + schema_cache: Option<&'a SchemaCache>, + // the file context which contains other statements in that file in case you need them + file_context: &'a AnalysedFileContext, +} + + +pub struct AnalysedFileContext<'a> { + // all statements in this file + pub stmts: &'a Vec, + + pos: usize, +} + +impl<'a> AnalysedFileContext<'a> { + pub fn new(stmts: &'a Vec) -> Self { + Self { stmts, pos: 0 } + } + + // all statements before the currently analysed one + pub fn previous_stmts(&self) -> &[pgt_query::NodeEnum] { + &self.stmts[0..self.pos] + } + + // total count of statements in this file + pub fn stmt_count(&self) -> usize { + self.stmts.len() + } +} +``` + +In squawk, you will see: +```rust + // all statements of that file -> our analyser goes statement by statement but has access to the files content via `file_context` + tree: &[RawStmt], + // the postgres version -> we store it in the schema cache + _pg_version: Option, + // for us, this is always true + _assume_in_transaction: bool, + +``` + +Please always write idiomatic code! +Only add comments to explain WHY the code is doing something. DO NOT write comments to explain WHAT the code is doing. + +If you learn something new that might help in porting all the rules, please update this document. + +LEARNINGS: +- Use `cargo clippy` to check your code after writing it +- The `just new-lintrule` command expects severity to be "info", "warn", or "error" (not "warning") +- RuleDiagnostic methods: `detail(span, msg)` takes two parameters, `note(msg)` takes only one parameter +- To check Postgres version: access `ctx.schema_cache().is_some_and(|sc| sc.version.major_version)` which gives e.g. 
17 +- NEVER skip anything, or use a subset of something. ALWAYS do the full thing. For example, copy the entire non-volatile functions list from Squawk, not just a subset. +- If you are missing features from our rule context to be able to properly implement a rule, DO NOT DO IT. Instead, add that rule to the NEEDS FEATURES list below. The node enum is generated from the same source as it is in squawk, so they have feature parity. +- Remember to run `just gen-lint` after creating a new rule to generate all necessary files + +Please update the list below with the rules that we need to migrate, and the ones that are already migrated. Keep the list up-to-date. + +NEEDS FEATURES: + +TODO: + +DONE: +- adding_field_with_default ✓ (ported from Squawk) +- adding_foreign_key_constraint ✓ (ported from Squawk) +- adding_not_null_field ✓ (ported from Squawk) +- adding_primary_key_constraint ✓ (ported from Squawk) +- adding_required_field (already exists in pgt_analyser) +- ban_char_field ✓ (ported from Squawk) +- ban_concurrent_index_creation_in_transaction ✓ (ported from Squawk) +- ban_drop_column (already exists in pgt_analyser) +- changing_column_type ✓ (ported from Squawk) +- constraint_missing_not_valid ✓ (ported from Squawk) +- ban_drop_database (already exists in pgt_analyser, as ban_drop_database in squawk) +- ban_drop_not_null (already exists in pgt_analyser) +- ban_drop_table (already exists in pgt_analyser) +- prefer_big_int ✓ (ported from Squawk) +- prefer_bigint_over_int ✓ (ported from Squawk) +- prefer_bigint_over_smallint ✓ (ported from Squawk) +- prefer_identity ✓ (ported from Squawk) +- prefer_jsonb ✓ (new rule added) +- prefer_text_field ✓ (ported from Squawk) +- prefer_timestamptz ✓ (ported from Squawk) +- disallow_unique_constraint ✓ (ported from Squawk) +- renaming_column ✓ (ported from Squawk) +- renaming_table ✓ (ported from Squawk) +- require_concurrent_index_creation ✓ (ported from Squawk) +- require_concurrent_index_deletion ✓ (ported from Squawk) +- 
transaction_nesting ✓ (ported from Squawk) +- prefer_robust_stmts ✓ (ported from Squawk - simplified version) + + diff --git a/crates/pgt_analyse/src/analysed_file_context.rs b/crates/pgt_analyse/src/analysed_file_context.rs index 82dc40711..8bf21d99c 100644 --- a/crates/pgt_analyse/src/analysed_file_context.rs +++ b/crates/pgt_analyse/src/analysed_file_context.rs @@ -1,7 +1,23 @@ -#[derive(Default)] -pub struct AnalysedFileContext {} +pub struct AnalysedFileContext<'a> { + pub stmts: &'a Vec, -impl AnalysedFileContext { - #[allow(unused)] - pub fn update_from(&mut self, stmt_root: &pgt_query::NodeEnum) {} + pos: usize, +} + +impl<'a> AnalysedFileContext<'a> { + pub fn new(stmts: &'a Vec) -> Self { + Self { stmts, pos: 0 } + } + + pub fn previous_stmts(&self) -> &[pgt_query::NodeEnum] { + &self.stmts[0..self.pos] + } + + pub fn stmt_count(&self) -> usize { + self.stmts.len() + } + + pub fn next(&mut self) { + self.pos += 1; + } } diff --git a/crates/pgt_analyse/src/context.rs b/crates/pgt_analyse/src/context.rs index ddd5d28d5..17f47365a 100644 --- a/crates/pgt_analyse/src/context.rs +++ b/crates/pgt_analyse/src/context.rs @@ -10,7 +10,7 @@ pub struct RuleContext<'a, R: Rule> { stmt: &'a pgt_query::NodeEnum, options: &'a R::Options, schema_cache: Option<&'a SchemaCache>, - file_context: &'a AnalysedFileContext, + file_context: &'a AnalysedFileContext<'a>, } impl<'a, R> RuleContext<'a, R> diff --git a/crates/pgt_analyse/src/registry.rs b/crates/pgt_analyse/src/registry.rs index 45d2c2026..8da24dbc8 100644 --- a/crates/pgt_analyse/src/registry.rs +++ b/crates/pgt_analyse/src/registry.rs @@ -159,7 +159,7 @@ impl RuleRegistry { pub struct RegistryRuleParams<'a> { pub root: &'a pgt_query::NodeEnum, pub options: &'a AnalyserOptions, - pub analysed_file_context: &'a AnalysedFileContext, + pub analysed_file_context: &'a AnalysedFileContext<'a>, pub schema_cache: Option<&'a pgt_schema_cache::SchemaCache>, } diff --git a/crates/pgt_analyse/src/rule.rs 
b/crates/pgt_analyse/src/rule.rs index 1760ce971..40f87ce02 100644 --- a/crates/pgt_analyse/src/rule.rs +++ b/crates/pgt_analyse/src/rule.rs @@ -291,6 +291,8 @@ impl RuleDiagnostic { pub enum RuleSource { /// Rules from [Squawk](https://squawkhq.com) Squawk(&'static str), + /// Rules from [Eugene](https://github.com/kaaveland/eugene) + Eugene(&'static str), } impl PartialEq for RuleSource { @@ -303,6 +305,7 @@ impl std::fmt::Display for RuleSource { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Squawk(_) => write!(f, "Squawk"), + Self::Eugene(_) => write!(f, "Eugene"), } } } @@ -325,18 +328,23 @@ impl RuleSource { pub fn as_rule_name(&self) -> &'static str { match self { Self::Squawk(rule_name) => rule_name, + Self::Eugene(rule_name) => rule_name, } } pub fn to_namespaced_rule_name(&self) -> String { match self { Self::Squawk(rule_name) => format!("squawk/{rule_name}"), + Self::Eugene(rule_name) => format!("eugene/{rule_name}"), } } pub fn to_rule_url(&self) -> String { match self { Self::Squawk(rule_name) => format!("https://squawkhq.com/docs/{rule_name}"), + Self::Eugene(rule_name) => { + format!("https://kaveland.no/eugene/hints/{rule_name}/index.html") + } } } diff --git a/crates/pgt_analyser/Cargo.toml b/crates/pgt_analyser/Cargo.toml index 0cf7a3342..b00c9939c 100644 --- a/crates/pgt_analyser/Cargo.toml +++ b/crates/pgt_analyser/Cargo.toml @@ -16,12 +16,14 @@ pgt_analyse = { workspace = true } pgt_console = { workspace = true } pgt_diagnostics = { workspace = true } pgt_query = { workspace = true } +pgt_query_ext = { workspace = true } pgt_schema_cache = { workspace = true } pgt_text_size = { workspace = true } serde = { workspace = true } [dev-dependencies] -insta = { version = "1.42.1" } -pgt_diagnostics = { workspace = true } -pgt_test_macros = { workspace = true } -termcolor = { workspace = true } +insta = { version = "1.42.1" } +pgt_diagnostics = { workspace = true } +pgt_statement_splitter = { workspace = true } 
+pgt_test_macros = { workspace = true } +termcolor = { workspace = true } diff --git a/crates/pgt_analyser/src/lib.rs b/crates/pgt_analyser/src/lib.rs index ccdc04208..f559090ef 100644 --- a/crates/pgt_analyser/src/lib.rs +++ b/crates/pgt_analyser/src/lib.rs @@ -62,25 +62,29 @@ impl<'a> Analyser<'a> { pub fn run(&self, params: AnalyserParams) -> Vec { let mut diagnostics = vec![]; - let mut file_context = AnalysedFileContext::default(); + let roots: Vec = params.stmts.iter().map(|s| s.root.clone()).collect(); + let mut file_context = AnalysedFileContext::new(&roots); + + for (i, stmt) in params.stmts.into_iter().enumerate() { + let stmt_diagnostics: Vec<_> = { + let rule_params = RegistryRuleParams { + root: &roots[i], + options: self.options, + analysed_file_context: &file_context, + schema_cache: params.schema_cache, + }; - for stmt in params.stmts { - let rule_params = RegistryRuleParams { - root: &stmt.root, - options: self.options, - analysed_file_context: &file_context, - schema_cache: params.schema_cache, - }; - - diagnostics.extend( self.registry .rules .iter() .flat_map(|rule| (rule.run)(&rule_params)) - .map(|r| r.span(stmt.range)), - ); + .map(|r| r.span(stmt.range)) + .collect() + }; // end immutable borrow + + diagnostics.extend(stmt_diagnostics); - file_context.update_from(&stmt.root); + file_context.next(); } diagnostics diff --git a/crates/pgt_analyser/src/lint/safety.rs b/crates/pgt_analyser/src/lint/safety.rs index a2b72fceb..0ad0f2c56 100644 --- a/crates/pgt_analyser/src/lint/safety.rs +++ b/crates/pgt_analyser/src/lint/safety.rs @@ -1,10 +1,32 @@ //! 
Generated file, do not edit by hand, see `xtask/codegen` use pgt_analyse::declare_lint_group; +pub mod adding_field_with_default; +pub mod adding_foreign_key_constraint; +pub mod adding_not_null_field; +pub mod adding_primary_key_constraint; pub mod adding_required_field; +pub mod ban_char_field; +pub mod ban_concurrent_index_creation_in_transaction; pub mod ban_drop_column; pub mod ban_drop_database; pub mod ban_drop_not_null; pub mod ban_drop_table; pub mod ban_truncate_cascade; -declare_lint_group! { pub Safety { name : "safety" , rules : [self :: adding_required_field :: AddingRequiredField , self :: ban_drop_column :: BanDropColumn , self :: ban_drop_database :: BanDropDatabase , self :: ban_drop_not_null :: BanDropNotNull , self :: ban_drop_table :: BanDropTable , self :: ban_truncate_cascade :: BanTruncateCascade ,] } } +pub mod changing_column_type; +pub mod constraint_missing_not_valid; +pub mod disallow_unique_constraint; +pub mod prefer_big_int; +pub mod prefer_bigint_over_int; +pub mod prefer_bigint_over_smallint; +pub mod prefer_identity; +pub mod prefer_jsonb; +pub mod prefer_robust_stmts; +pub mod prefer_text_field; +pub mod prefer_timestamptz; +pub mod renaming_column; +pub mod renaming_table; +pub mod require_concurrent_index_creation; +pub mod require_concurrent_index_deletion; +pub mod transaction_nesting; +declare_lint_group! 
{ pub Safety { name : "safety" , rules : [self :: adding_field_with_default :: AddingFieldWithDefault , self :: adding_foreign_key_constraint :: AddingForeignKeyConstraint , self :: adding_not_null_field :: AddingNotNullField , self :: adding_primary_key_constraint :: AddingPrimaryKeyConstraint , self :: adding_required_field :: AddingRequiredField , self :: ban_char_field :: BanCharField , self :: ban_concurrent_index_creation_in_transaction :: BanConcurrentIndexCreationInTransaction , self :: ban_drop_column :: BanDropColumn , self :: ban_drop_database :: BanDropDatabase , self :: ban_drop_not_null :: BanDropNotNull , self :: ban_drop_table :: BanDropTable , self :: ban_truncate_cascade :: BanTruncateCascade , self :: changing_column_type :: ChangingColumnType , self :: constraint_missing_not_valid :: ConstraintMissingNotValid , self :: disallow_unique_constraint :: DisallowUniqueConstraint , self :: prefer_big_int :: PreferBigInt , self :: prefer_bigint_over_int :: PreferBigintOverInt , self :: prefer_bigint_over_smallint :: PreferBigintOverSmallint , self :: prefer_identity :: PreferIdentity , self :: prefer_jsonb :: PreferJsonb , self :: prefer_robust_stmts :: PreferRobustStmts , self :: prefer_text_field :: PreferTextField , self :: prefer_timestamptz :: PreferTimestamptz , self :: renaming_column :: RenamingColumn , self :: renaming_table :: RenamingTable , self :: require_concurrent_index_creation :: RequireConcurrentIndexCreation , self :: require_concurrent_index_deletion :: RequireConcurrentIndexDeletion , self :: transaction_nesting :: TransactionNesting ,] } } diff --git a/crates/pgt_analyser/src/lint/safety/adding_field_with_default.rs b/crates/pgt_analyser/src/lint/safety/adding_field_with_default.rs new file mode 100644 index 000000000..7782689f6 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/adding_field_with_default.rs @@ -0,0 +1,188 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use 
pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Adding a column with a DEFAULT value may lead to a table rewrite while holding an ACCESS EXCLUSIVE lock. + /// + /// In PostgreSQL versions before 11, adding a column with a DEFAULT value causes a full table rewrite, + /// which holds an ACCESS EXCLUSIVE lock on the table and blocks all reads and writes. + /// + /// In PostgreSQL 11+, this behavior was optimized for non-volatile defaults. However: + /// - Volatile default values (like random() or custom functions) still cause table rewrites + /// - Generated columns (GENERATED ALWAYS AS) always require table rewrites + /// - Non-volatile defaults are safe in PostgreSQL 11+ + /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE "core_recipe" ADD COLUMN "foo" integer DEFAULT 10; + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// ALTER TABLE "core_recipe" ADD COLUMN "foo" integer; + /// ALTER TABLE "core_recipe" ALTER COLUMN "foo" SET DEFAULT 10; + /// -- Then backfill and add NOT NULL constraint if needed + /// ``` + /// + pub AddingFieldWithDefault { + version: "next", + name: "addingFieldWithDefault", + severity: Severity::Warning, + recommended: true, + sources: &[RuleSource::Squawk("adding-field-with-default")], + } +} + +impl Rule for AddingFieldWithDefault { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + // Check PostgreSQL version - in 11+, non-volatile defaults are safe + let pg_version = ctx.schema_cache().and_then(|sc| sc.version.major_version); + + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtAddColumn { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + let has_default = 
col_def.constraints.iter().any(|constraint| { + if let Some(pgt_query::NodeEnum::Constraint(c)) = &constraint.node { + c.contype() == pgt_query::protobuf::ConstrType::ConstrDefault + } else { + false + } + }); + + let has_generated = col_def.constraints.iter().any(|constraint| { + if let Some(pgt_query::NodeEnum::Constraint(c)) = &constraint.node { + c.contype() == pgt_query::protobuf::ConstrType::ConstrGenerated + } else { + false + } + }); + + if has_generated { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Adding a generated column requires a table rewrite." + }, + ) + .detail(None, "This operation requires an ACCESS EXCLUSIVE lock and rewrites the entire table.") + .note("Add the column as nullable, backfill existing rows, and add a trigger to update the column on write instead."), + ); + } else if has_default { + // For PG 11+, check if the default is volatile + if pg_version.is_some_and(|v| v >= 11) { + // Check if default is non-volatile + let is_safe_default = col_def.constraints.iter().any(|constraint| { + if let Some(pgt_query::NodeEnum::Constraint(c)) = &constraint.node { + if c.contype() == pgt_query::protobuf::ConstrType::ConstrDefault { + if let Some(raw_expr) = &c.raw_expr { + return is_safe_default_expr(&raw_expr.node.as_ref().map(|n| Box::new(n.clone())), ctx.schema_cache()); + } + } + } + false + }); + + if !is_safe_default { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Adding a column with a volatile default value causes a table rewrite." + }, + ) + .detail(None, "Even in PostgreSQL 11+, volatile default values require a full table rewrite.") + .note("Add the column without a default, then set the default in a separate statement."), + ); + } + } else { + // Pre PG 11, all defaults cause rewrites + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Adding a column with a DEFAULT value causes a table rewrite." 
+ }, + ) + .detail(None, "This operation requires an ACCESS EXCLUSIVE lock and rewrites the entire table.") + .note("Add the column without a default, then set the default in a separate statement."), + ); + } + } + } + } + } + } + } + + diagnostics + } +} + +fn is_safe_default_expr( + expr: &Option>, + schema_cache: Option<&pgt_schema_cache::SchemaCache>, +) -> bool { + match expr { + Some(node) => match node.as_ref() { + // Constants are always safe + pgt_query::NodeEnum::AConst(_) => true, + // Type casts of constants are safe + pgt_query::NodeEnum::TypeCast(tc) => is_safe_default_expr( + &tc.arg.as_ref().and_then(|a| a.node.clone()).map(Box::new), + schema_cache, + ), + // function calls might be safe if they are non-volatile and have no args + pgt_query::NodeEnum::FuncCall(fc) => { + // must have no args + if !fc.args.is_empty() { + return false; + } + + let Some(sc) = schema_cache else { + return false; + }; + + let Some((schema, name)) = pgt_query_ext::utils::parse_name(&fc.funcname) else { + return false; + }; + + // check if function is a non-volatile function + sc.functions.iter().any(|f| { + // no args only + if !f.args.args.is_empty() { + return false; + } + + // must be non-volatile + if f.behavior == pgt_schema_cache::Behavior::Volatile { + return false; + } + + // check name and schema (if given) + f.name.eq_ignore_ascii_case(&name) + && schema.as_ref().is_none_or(|s| &f.schema == s) + }) + } + // everything else is potentially unsafe + _ => false, + }, + None => false, + } +} diff --git a/crates/pgt_analyser/src/lint/safety/adding_foreign_key_constraint.rs b/crates/pgt_analyser/src/lint/safety/adding_foreign_key_constraint.rs new file mode 100644 index 000000000..81fecca56 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/adding_foreign_key_constraint.rs @@ -0,0 +1,127 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! 
{ + /// Adding a foreign key constraint requires a table scan and a SHARE ROW EXCLUSIVE lock on both tables, which blocks writes. + /// + /// Adding a foreign key constraint to an existing table can cause downtime by locking both tables while + /// verifying the constraint. PostgreSQL needs to check that all existing values in the referencing + /// column exist in the referenced table. + /// + /// Instead, add the constraint as NOT VALID in one transaction, then VALIDATE it in another transaction. + /// This approach only takes a SHARE UPDATE EXCLUSIVE lock when validating, allowing concurrent writes. + /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE "email" ADD CONSTRAINT "fk_user" FOREIGN KEY ("user_id") REFERENCES "user" ("id"); + /// ``` + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE "emails" ADD COLUMN "user_id" INT REFERENCES "user" ("id"); + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// -- First add the constraint as NOT VALID + /// ALTER TABLE "email" ADD CONSTRAINT "fk_user" FOREIGN KEY ("user_id") REFERENCES "user" ("id") NOT VALID; + /// -- Then validate it in a separate transaction + /// ALTER TABLE "email" VALIDATE CONSTRAINT "fk_user"; + /// ``` + /// + pub AddingForeignKeyConstraint { + version: "next", + name: "addingForeignKeyConstraint", + severity: Severity::Warning, + recommended: true, + sources: &[RuleSource::Squawk("adding-foreign-key-constraint")], + } +} + +impl Rule for AddingForeignKeyConstraint { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + match cmd.subtype() { + pgt_query::protobuf::AlterTableType::AtAddConstraint => { + if let Some(pgt_query::NodeEnum::Constraint(constraint)) = + cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + if let 
Some(diagnostic) = + check_foreign_key_constraint(constraint, false) + { + diagnostics.push(diagnostic); + } + } + } + pgt_query::protobuf::AlterTableType::AtAddColumn => { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + // check constraints on the column + for constraint in &col_def.constraints { + if let Some(pgt_query::NodeEnum::Constraint(constr)) = + &constraint.node + { + if let Some(diagnostic) = + check_foreign_key_constraint(constr, true) + { + diagnostics.push(diagnostic); + } + } + } + } + } + _ => {} + } + } + } + } + + diagnostics + } +} + +fn check_foreign_key_constraint( + constraint: &pgt_query::protobuf::Constraint, + is_column_constraint: bool, +) -> Option { + // Only check foreign key constraints + if constraint.contype() != pgt_query::protobuf::ConstrType::ConstrForeign { + return None; + } + + // NOT VALID constraints are safe + if constraint.skip_validation { + return None; + } + + let (message, detail, note) = if is_column_constraint { + ( + "Adding a column with a foreign key constraint requires a table scan and locks.", + "Using REFERENCES when adding a column will block writes while verifying the constraint.", + "Add the column without the constraint first, then add the constraint as NOT VALID and VALIDATE it separately.", + ) + } else { + ( + "Adding a foreign key constraint requires a table scan and locks on both tables.", + "This will block writes to both the referencing and referenced tables while PostgreSQL verifies the constraint.", + "Add the constraint as NOT VALID first, then VALIDATE it in a separate transaction.", + ) + }; + + Some( + RuleDiagnostic::new(rule_category!(), None, markup! 
{ {message} }) + .detail(None, detail) + .note(note), + ) +} diff --git a/crates/pgt_analyser/src/lint/safety/adding_not_null_field.rs b/crates/pgt_analyser/src/lint/safety/adding_not_null_field.rs new file mode 100644 index 000000000..aa442088e --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/adding_not_null_field.rs @@ -0,0 +1,77 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Setting a column NOT NULL blocks reads while the table is scanned. + /// + /// In PostgreSQL versions before 11, adding a NOT NULL constraint to an existing column requires + /// a full table scan to verify that all existing rows satisfy the constraint. This operation + /// takes an ACCESS EXCLUSIVE lock, blocking all reads and writes. + /// + /// In PostgreSQL 11+, this operation is much faster as it can skip the full table scan for + /// newly added columns with default values. + /// + /// Instead of using SET NOT NULL, consider using a CHECK constraint with NOT VALID, then + /// validating it in a separate transaction. This allows reads and writes to continue. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE "core_recipe" ALTER COLUMN "foo" SET NOT NULL; + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// -- First add a CHECK constraint as NOT VALID + /// ALTER TABLE "core_recipe" ADD CONSTRAINT foo_not_null CHECK (foo IS NOT NULL) NOT VALID; + /// -- Then validate it in a separate transaction + /// ALTER TABLE "core_recipe" VALIDATE CONSTRAINT foo_not_null; + /// ``` + /// + pub AddingNotNullField { + version: "next", + name: "addingNotNullField", + severity: Severity::Warning, + recommended: true, + sources: &[RuleSource::Squawk("adding-not-null-field")], + } +} + +impl Rule for AddingNotNullField { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + // In Postgres 11+, this is less of a concern + if ctx + .schema_cache() + .is_some_and(|sc| sc.version.major_version.is_some_and(|v| v >= 11)) + { + return diagnostics; + } + + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtSetNotNull { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Setting a column NOT NULL blocks reads while the table is scanned." 
+ }, + ).detail(None, "This operation requires an ACCESS EXCLUSIVE lock and a full table scan to verify all rows.") + .note("Use a CHECK constraint with NOT VALID instead, then validate it in a separate transaction.")); + } + } + } + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/adding_primary_key_constraint.rs b/crates/pgt_analyser/src/lint/safety/adding_primary_key_constraint.rs new file mode 100644 index 000000000..a4758a29f --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/adding_primary_key_constraint.rs @@ -0,0 +1,112 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Adding a primary key constraint results in locks and table rewrites. + /// + /// When you add a PRIMARY KEY constraint, PostgreSQL needs to scan the entire table + /// to verify uniqueness and build the underlying index. This requires an ACCESS EXCLUSIVE + /// lock which blocks all reads and writes. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE users ADD PRIMARY KEY (id); + /// ``` + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE items ADD COLUMN id SERIAL PRIMARY KEY; + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// -- First, create a unique index concurrently + /// CREATE UNIQUE INDEX CONCURRENTLY items_pk ON items (id); + /// -- Then add the primary key using the index + /// ALTER TABLE items ADD CONSTRAINT items_pk PRIMARY KEY USING INDEX items_pk; + /// ``` + /// + pub AddingPrimaryKeyConstraint { + version: "next", + name: "addingPrimaryKeyConstraint", + severity: Severity::Warning, + recommended: true, + sources: &[RuleSource::Squawk("adding-serial-primary-key-field")], + } +} + +impl Rule for AddingPrimaryKeyConstraint { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + match cmd.subtype() { + // Check for ADD CONSTRAINT PRIMARY KEY + pgt_query::protobuf::AlterTableType::AtAddConstraint => { + if let Some(pgt_query::NodeEnum::Constraint(constraint)) = + cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + if let Some(diagnostic) = + check_for_primary_key_constraint(constraint) + { + diagnostics.push(diagnostic); + } + } + } + // Check for ADD COLUMN with PRIMARY KEY + pgt_query::protobuf::AlterTableType::AtAddColumn => { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + for constraint in &col_def.constraints { + if let Some(pgt_query::NodeEnum::Constraint(constr)) = + &constraint.node + { + if let Some(diagnostic) = + check_for_primary_key_constraint(constr) + { + diagnostics.push(diagnostic); + } + } + } + } + } + _ => {} + } + } + } + } + + diagnostics + } +} + +fn check_for_primary_key_constraint( + constraint: 
&pgt_query::protobuf::Constraint, +) -> Option { + if constraint.contype() == pgt_query::protobuf::ConstrType::ConstrPrimary + && constraint.indexname.is_empty() + { + Some( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Adding a PRIMARY KEY constraint results in locks and table rewrites." + }, + ) + .detail(None, "Adding a PRIMARY KEY constraint requires an ACCESS EXCLUSIVE lock which blocks reads.") + .note("Add the PRIMARY KEY constraint USING an index."), + ) + } else { + None + } +} diff --git a/crates/pgt_analyser/src/lint/safety/ban_char_field.rs b/crates/pgt_analyser/src/lint/safety/ban_char_field.rs new file mode 100644 index 000000000..12e113b4a --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/ban_char_field.rs @@ -0,0 +1,104 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Using CHAR(n) or CHARACTER(n) types is discouraged. + /// + /// CHAR types are fixed-length and padded with spaces, which can lead to unexpected behavior + /// when comparing strings or concatenating values. They also waste storage space when values + /// are shorter than the declared length. + /// + /// Use VARCHAR or TEXT instead for variable-length character data. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE "core_bar" ( + /// "id" serial NOT NULL PRIMARY KEY, + /// "alpha" char(100) NOT NULL + /// ); + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE TABLE "core_bar" ( + /// "id" serial NOT NULL PRIMARY KEY, + /// "alpha" varchar(100) NOT NULL + /// ); + /// ``` + /// + pub BanCharField { + version: "next", + name: "banCharField", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("ban-char-field")], + } +} + +impl Rule for BanCharField { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::CreateStmt(stmt) = &ctx.stmt() { + for table_elt in &stmt.table_elts { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = &table_elt.node { + if let Some(diagnostic) = check_column_for_char_type(col_def) { + diagnostics.push(diagnostic); + } + } + } + } + + // Also check ALTER TABLE ADD COLUMN statements + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtAddColumn { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + if let Some(diagnostic) = check_column_for_char_type(col_def) { + diagnostics.push(diagnostic); + } + } + } + } + } + } + + diagnostics + } +} + +fn check_column_for_char_type(col_def: &pgt_query::protobuf::ColumnDef) -> Option { + if let Some(type_name) = &col_def.type_name { + for name_node in &type_name.names { + if let Some(pgt_query::NodeEnum::String(name)) = &name_node.node { + // Check for "bpchar" (internal name for CHAR type) + // or "char" or "character" + let type_str = name.sval.to_lowercase(); + if type_str == "bpchar" || type_str == "char" || type_str == "character" { + return Some( + 
RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "CHAR type is discouraged due to space padding behavior." + }, + ) + .detail(None, "CHAR types are fixed-length and padded with spaces, which can lead to unexpected behavior.") + .note("Use VARCHAR or TEXT instead for variable-length character data."), + ); + } + } + } + } + None +} diff --git a/crates/pgt_analyser/src/lint/safety/ban_concurrent_index_creation_in_transaction.rs b/crates/pgt_analyser/src/lint/safety/ban_concurrent_index_creation_in_transaction.rs new file mode 100644 index 000000000..565a7487f --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/ban_concurrent_index_creation_in_transaction.rs @@ -0,0 +1,53 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Concurrent index creation is not allowed within a transaction. + /// + /// `CREATE INDEX CONCURRENTLY` cannot be used within a transaction block. This will cause an error in Postgres. + /// + /// Migration tools usually run each migration in a transaction, so using `CREATE INDEX CONCURRENTLY` will fail in such tools. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE INDEX CONCURRENTLY "field_name_idx" ON "table_name" ("field_name"); + /// ``` + /// + pub BanConcurrentIndexCreationInTransaction { + version: "next", + name: "banConcurrentIndexCreationInTransaction", + severity: Severity::Error, + recommended: true, + sources: &[RuleSource::Squawk("ban-concurrent-index-creation-in-transaction")], + } +} + +impl Rule for BanConcurrentIndexCreationInTransaction { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + // check if the current statement is CREATE INDEX CONCURRENTLY and there is at least one + // other statement in the same context (indicating a transaction block) + // + // since our analyser assumes we're always in a transaction context, we always flag concurrent indexes + if let pgt_query::NodeEnum::IndexStmt(stmt) = ctx.stmt() { + if stmt.concurrent && ctx.file_context().stmt_count() > 1 { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "CREATE INDEX CONCURRENTLY cannot be used inside a transaction block." + } + ).detail(None, "Run CREATE INDEX CONCURRENTLY outside of a transaction. Migration tools usually run in transactions, so you may need to run this statement in its own migration or manually.")); + } + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/changing_column_type.rs b/crates/pgt_analyser/src/lint/safety/changing_column_type.rs new file mode 100644 index 000000000..d6d00559f --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/changing_column_type.rs @@ -0,0 +1,55 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Changing a column type may break existing clients. 
+ /// + /// Changing a column's data type requires an exclusive lock on the table while the entire table is rewritten. + /// This can take a long time for large tables and will block reads and writes. + /// + /// Instead of changing the type directly, consider creating a new column with the desired type, + /// migrating the data, and then dropping the old column. + /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE "core_recipe" ALTER COLUMN "edits" TYPE text USING "edits"::text; + /// ``` + /// + pub ChangingColumnType { + version: "next", + name: "changingColumnType", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("changing-column-type")], + } +} + +impl Rule for ChangingColumnType { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = ctx.stmt() { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtAlterColumnType { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Changing a column type requires a table rewrite and blocks reads and writes." + } + ).detail(None, "Consider creating a new column with the desired type, migrating data, and then dropping the old column.")); + } + } + } + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/constraint_missing_not_valid.rs b/crates/pgt_analyser/src/lint/safety/constraint_missing_not_valid.rs new file mode 100644 index 000000000..18a75e8c6 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/constraint_missing_not_valid.rs @@ -0,0 +1,90 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! 
{ + /// Adding constraints without NOT VALID blocks all reads and writes. + /// + /// When adding a CHECK or FOREIGN KEY constraint, PostgreSQL must validate all existing rows, + /// which requires a full table scan. This blocks reads and writes for the duration. + /// + /// Instead, add the constraint with NOT VALID first, then VALIDATE CONSTRAINT in a separate + /// transaction. This allows reads and writes to continue while validation happens. + /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address); + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) NOT VALID; + /// ``` + /// + pub ConstraintMissingNotValid { + version: "next", + name: "constraintMissingNotValid", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("constraint-missing-not-valid")], + } +} + +impl Rule for ConstraintMissingNotValid { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + let pgt_query::NodeEnum::AlterTableStmt(stmt) = ctx.stmt() else { + return diagnostics; + }; + + for cmd in &stmt.cmds { + let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node else { + continue; + }; + + let Some(pgt_query::NodeEnum::Constraint(constraint)) = + cmd.def.as_ref().and_then(|d| d.node.as_ref()) + else { + continue; + }; + + if let Some(diagnostic) = check_constraint_needs_not_valid(constraint) { + diagnostics.push(diagnostic); + } + } + + diagnostics + } +} + +fn check_constraint_needs_not_valid( + constraint: &pgt_query::protobuf::Constraint, +) -> Option { + // Skip if the constraint has NOT VALID + if !constraint.initially_valid { + return None; + } + + // Only warn for CHECK and FOREIGN KEY constraints + match constraint.contype() { + 
pgt_query::protobuf::ConstrType::ConstrCheck + | pgt_query::protobuf::ConstrType::ConstrForeign => Some( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Adding a constraint without NOT VALID will block reads and writes while validating existing rows." + } + ) + .detail(None, "Add the constraint as NOT VALID in one transaction, then run VALIDATE CONSTRAINT in a separate transaction.") + ), + _ => None, + } +} diff --git a/crates/pgt_analyser/src/lint/safety/disallow_unique_constraint.rs b/crates/pgt_analyser/src/lint/safety/disallow_unique_constraint.rs new file mode 100644 index 000000000..d72016f34 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/disallow_unique_constraint.rs @@ -0,0 +1,129 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Disallow adding a UNIQUE constraint without using an existing index. + /// + /// Adding a UNIQUE constraint requires an ACCESS EXCLUSIVE lock, which blocks all reads and + /// writes to the table. Instead, create a unique index concurrently and then add the + /// constraint using that index. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE table_name ADD CONSTRAINT field_name_constraint UNIQUE (field_name); + /// ``` + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE foo ADD COLUMN bar text UNIQUE; + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE UNIQUE INDEX CONCURRENTLY dist_id_temp_idx ON distributors (dist_id); + /// ALTER TABLE distributors DROP CONSTRAINT distributors_pkey, + /// ADD CONSTRAINT distributors_pkey PRIMARY KEY USING INDEX dist_id_temp_idx; + /// ``` + /// + pub DisallowUniqueConstraint { + version: "next", + name: "disallowUniqueConstraint", + severity: Severity::Error, + recommended: false, + sources: &[RuleSource::Squawk("disallow-unique-constraint")], + } +} + +impl Rule for DisallowUniqueConstraint { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { + // Check if this table was created in the same transaction + let table_name = stmt.relation.as_ref().map(|r| &r.relname); + + // Look for tables created in previous statements of this file + let table_created_in_transaction = if let Some(table_name) = table_name { + ctx.file_context().previous_stmts().iter().any(|prev_stmt| { + if let pgt_query::NodeEnum::CreateStmt(create) = prev_stmt { + create + .relation + .as_ref() + .is_some_and(|r| &r.relname == table_name) + } else { + false + } + }) + } else { + false + }; + + if !table_created_in_transaction { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + match cmd.subtype() { + pgt_query::protobuf::AlterTableType::AtAddConstraint => { + if let Some(pgt_query::NodeEnum::Constraint(constraint)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + // Check if it's a unique constraint without an existing index + if constraint.contype() + == pgt_query::protobuf::ConstrType::ConstrUnique + && 
constraint.indexname.is_empty() + { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Adding a UNIQUE constraint requires an ACCESS EXCLUSIVE lock." + }, + ) + .note("Create a unique index CONCURRENTLY and then add the constraint using that index."), + ); + } + } + } + pgt_query::protobuf::AlterTableType::AtAddColumn => { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + // Check for inline unique constraints + for constraint in &col_def.constraints { + if let Some(pgt_query::NodeEnum::Constraint(constr)) = + &constraint.node + { + if constr.contype() + == pgt_query::protobuf::ConstrType::ConstrUnique + { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Adding a UNIQUE constraint requires an ACCESS EXCLUSIVE lock." + }, + ) + .note("Create a unique index CONCURRENTLY and then add the constraint using that index."), + ); + } + } + } + } + } + _ => {} + } + } + } + } + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/prefer_big_int.rs b/crates/pgt_analyser/src/lint/safety/prefer_big_int.rs new file mode 100644 index 000000000..b44cb72fb --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/prefer_big_int.rs @@ -0,0 +1,127 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Prefer BIGINT over smaller integer types. + /// + /// Using smaller integer types like SMALLINT, INTEGER, or their aliases can lead to overflow + /// issues as your application grows. BIGINT provides a much larger range and helps avoid + /// future migration issues when values exceed the limits of smaller types. + /// + /// The storage difference between INTEGER (4 bytes) and BIGINT (8 bytes) is minimal on + /// modern systems, while the cost of migrating to a larger type later can be significant. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE users ( + /// id integer + /// ); + /// ``` + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE users ( + /// id serial + /// ); + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE TABLE users ( + /// id bigint + /// ); + /// ``` + /// + /// ```sql + /// CREATE TABLE users ( + /// id bigserial + /// ); + /// ``` + /// + pub PreferBigInt { + version: "next", + name: "preferBigInt", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("prefer-big-int")], + } +} + +impl Rule for PreferBigInt { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + match &ctx.stmt() { + pgt_query::NodeEnum::CreateStmt(stmt) => { + for table_elt in &stmt.table_elts { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = &table_elt.node { + check_column_def(&mut diagnostics, col_def); + } + } + } + pgt_query::NodeEnum::AlterTableStmt(stmt) => { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtAddColumn + || cmd.subtype() + == pgt_query::protobuf::AlterTableType::AtAlterColumnType + { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + check_column_def(&mut diagnostics, col_def); + } + } + } + } + } + _ => {} + } + + diagnostics + } +} + +fn check_column_def( + diagnostics: &mut Vec, + col_def: &pgt_query::protobuf::ColumnDef, +) { + if let Some(type_name) = &col_def.type_name { + for name_node in &type_name.names { + if let Some(pgt_query::NodeEnum::String(name)) = &name_node.node { + let type_name_lower = name.sval.to_lowercase(); + let is_small_int = matches!( + type_name_lower.as_str(), + "smallint" + | "integer" + | "int2" + | "int4" + | "serial" + | "serial2" + | "serial4" + | "smallserial" + ); + + if is_small_int { + 
diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Using smaller integer types can lead to overflow issues." + }, + ) + .detail(None, format!("The '{}' type has a limited range that may be exceeded as your data grows.", name.sval)) + .note("Consider using BIGINT for integer columns to avoid future migration issues."), + ); + } + } + } + } +} diff --git a/crates/pgt_analyser/src/lint/safety/prefer_bigint_over_int.rs b/crates/pgt_analyser/src/lint/safety/prefer_bigint_over_int.rs new file mode 100644 index 000000000..755b8f9b8 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/prefer_bigint_over_int.rs @@ -0,0 +1,131 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Prefer BIGINT over INT/INTEGER types. + /// + /// Using INTEGER (INT4) can lead to overflow issues, especially for ID columns. + /// While SMALLINT might be acceptable for certain use cases with known small ranges, + /// INTEGER often becomes a limiting factor as applications grow. + /// + /// The storage difference between INTEGER (4 bytes) and BIGINT (8 bytes) is minimal, + /// but the cost of migrating when you hit the 2.1 billion limit can be significant. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE users ( + /// id integer + /// ); + /// ``` + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE users ( + /// id serial + /// ); + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE TABLE users ( + /// id bigint + /// ); + /// ``` + /// + /// ```sql + /// CREATE TABLE users ( + /// id bigserial + /// ); + /// ``` + /// + /// ```sql + /// CREATE TABLE users ( + /// id smallint + /// ); + /// ``` + /// + pub PreferBigintOverInt { + version: "next", + name: "preferBigintOverInt", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("prefer-bigint-over-int")], + } +} + +impl Rule for PreferBigintOverInt { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + match &ctx.stmt() { + pgt_query::NodeEnum::CreateStmt(stmt) => { + for table_elt in &stmt.table_elts { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = &table_elt.node { + check_column_def(&mut diagnostics, col_def); + } + } + } + pgt_query::NodeEnum::AlterTableStmt(stmt) => { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtAddColumn + || cmd.subtype() + == pgt_query::protobuf::AlterTableType::AtAlterColumnType + { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + check_column_def(&mut diagnostics, col_def); + } + } + } + } + } + _ => {} + } + + diagnostics + } +} + +fn check_column_def( + diagnostics: &mut Vec, + col_def: &pgt_query::protobuf::ColumnDef, +) { + let Some(type_name) = &col_def.type_name else { + return; + }; + + for name_node in &type_name.names { + let Some(pgt_query::NodeEnum::String(name)) = &name_node.node else { + continue; + }; + + let type_name_lower = name.sval.to_lowercase(); + // Only check for INT4/INTEGER types, not 
SMALLINT + if !matches!( + type_name_lower.as_str(), + "integer" | "int4" | "serial" | "serial4" + ) { + continue; + } + + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "INTEGER type may lead to overflow issues." + }, + ) + .detail(None, "INTEGER has a maximum value of 2,147,483,647 which can be exceeded by ID columns and counters.") + .note("Consider using BIGINT instead for better future-proofing."), + ); + } +} diff --git a/crates/pgt_analyser/src/lint/safety/prefer_bigint_over_smallint.rs b/crates/pgt_analyser/src/lint/safety/prefer_bigint_over_smallint.rs new file mode 100644 index 000000000..cffcc5b74 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/prefer_bigint_over_smallint.rs @@ -0,0 +1,124 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Prefer BIGINT over SMALLINT types. + /// + /// SMALLINT has a very limited range (-32,768 to 32,767) that is easily exceeded. + /// Even for values that seem small initially, using SMALLINT can lead to problems + /// as your application grows. + /// + /// The storage savings of SMALLINT (2 bytes) vs BIGINT (8 bytes) are negligible + /// on modern systems, while the cost of migrating when you exceed the limit is high. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE users ( + /// age smallint + /// ); + /// ``` + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE products ( + /// quantity smallserial + /// ); + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE TABLE users ( + /// age integer + /// ); + /// ``` + /// + /// ```sql + /// CREATE TABLE products ( + /// quantity bigint + /// ); + /// ``` + /// + pub PreferBigintOverSmallint { + version: "next", + name: "preferBigintOverSmallint", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("prefer-bigint-over-smallint")], + } +} + +impl Rule for PreferBigintOverSmallint { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + match &ctx.stmt() { + pgt_query::NodeEnum::CreateStmt(stmt) => { + for table_elt in &stmt.table_elts { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = &table_elt.node { + check_column_def(&mut diagnostics, col_def); + } + } + } + pgt_query::NodeEnum::AlterTableStmt(stmt) => { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtAddColumn + || cmd.subtype() + == pgt_query::protobuf::AlterTableType::AtAlterColumnType + { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + check_column_def(&mut diagnostics, col_def); + } + } + } + } + } + _ => {} + } + + diagnostics + } +} + +fn check_column_def( + diagnostics: &mut Vec, + col_def: &pgt_query::protobuf::ColumnDef, +) { + let Some(type_name) = &col_def.type_name else { + return; + }; + + for name_node in &type_name.names { + let Some(pgt_query::NodeEnum::String(name)) = &name_node.node else { + continue; + }; + + let type_name_lower = name.sval.to_lowercase(); + if !matches!( + type_name_lower.as_str(), + "smallint" | "int2" | "smallserial" | 
"serial2" + ) { + continue; + } + + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "SMALLINT has a very limited range that is easily exceeded." + }, + ) + .detail(None, "SMALLINT can only store values from -32,768 to 32,767. This range is often insufficient.") + .note("Consider using INTEGER or BIGINT for better range and future-proofing."), + ); + } +} diff --git a/crates/pgt_analyser/src/lint/safety/prefer_identity.rs b/crates/pgt_analyser/src/lint/safety/prefer_identity.rs new file mode 100644 index 000000000..4abf4c4d1 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/prefer_identity.rs @@ -0,0 +1,125 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Prefer using IDENTITY columns over serial columns. + /// + /// SERIAL types (serial, serial2, serial4, serial8, smallserial, bigserial) use sequences behind + /// the scenes but with some limitations. IDENTITY columns provide better control over sequence + /// behavior and are part of the SQL standard. + /// + /// IDENTITY columns offer clearer ownership semantics - the sequence is directly tied to the column + /// and will be automatically dropped when the column or table is dropped. They also provide better + /// control through GENERATED ALWAYS (prevents manual inserts) or GENERATED BY DEFAULT options. 
+    ///
+    /// ## Examples
+    ///
+    /// ### Invalid
+    ///
+    /// ```sql,expect_diagnostic
+    /// create table users (
+    ///     id serial
+    /// );
+    /// ```
+    ///
+    /// ```sql,expect_diagnostic
+    /// create table users (
+    ///     id bigserial
+    /// );
+    /// ```
+    ///
+    /// ### Valid
+    ///
+    /// ```sql
+    /// create table users (
+    ///     id bigint generated by default as identity primary key
+    /// );
+    /// ```
+    ///
+    /// ```sql
+    /// create table users (
+    ///     id bigint generated always as identity primary key
+    /// );
+    /// ```
+    ///
+    pub PreferIdentity {
+        version: "next",
+        name: "preferIdentity",
+        severity: Severity::Warning,
+        recommended: false,
+        sources: &[RuleSource::Squawk("prefer-identity")],
+    }
+}
+
+impl Rule for PreferIdentity {
+    type Options = ();
+
+    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
+        let mut diagnostics = Vec::new();
+
+        match ctx.stmt() {
+            pgt_query::NodeEnum::CreateStmt(stmt) => {
+                for table_elt in &stmt.table_elts {
+                    if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = &table_elt.node {
+                        check_column_def(&mut diagnostics, col_def);
+                    }
+                }
+            }
+            pgt_query::NodeEnum::AlterTableStmt(stmt) => {
+                for cmd in &stmt.cmds {
+                    if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node {
+                        if matches!(
+                            cmd.subtype(),
+                            pgt_query::protobuf::AlterTableType::AtAddColumn
+                                | pgt_query::protobuf::AlterTableType::AtAlterColumnType
+                        ) {
+                            if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) =
+                                &cmd.def.as_ref().and_then(|d| d.node.as_ref())
+                            {
+                                check_column_def(&mut diagnostics, col_def);
+                            }
+                        }
+                    }
+                }
+            }
+            _ => {}
+        }
+
+        diagnostics
+    }
+}
+
+fn check_column_def(
+    diagnostics: &mut Vec<RuleDiagnostic>,
+    col_def: &pgt_query::protobuf::ColumnDef,
+) {
+    let Some(type_name) = &col_def.type_name else {
+        return;
+    };
+
+    for name_node in &type_name.names {
+        let Some(pgt_query::NodeEnum::String(name)) = &name_node.node else {
+            continue;
+        };
+
+        if !matches!(
+            name.sval.to_lowercase().as_str(),
+            "serial" | "serial2" | "serial4" | "serial8" | "smallserial" | "bigserial"
+        ) {
+            
continue; + } + + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Prefer IDENTITY columns over SERIAL types." + }, + ) + .detail(None, format!("Column uses '{}' type which has limitations compared to IDENTITY columns.", name.sval)) + .note("Use 'bigint GENERATED BY DEFAULT AS IDENTITY' or 'bigint GENERATED ALWAYS AS IDENTITY' instead."), + ); + } +} diff --git a/crates/pgt_analyser/src/lint/safety/prefer_jsonb.rs b/crates/pgt_analyser/src/lint/safety/prefer_jsonb.rs new file mode 100644 index 000000000..152795b6c --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/prefer_jsonb.rs @@ -0,0 +1,130 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Prefer JSONB over JSON types. + /// + /// JSONB is the binary JSON data type in PostgreSQL that is more efficient for most use cases. + /// Unlike JSON, JSONB stores data in a decomposed binary format which provides several advantages: + /// - Significantly faster query performance for operations like indexing and searching + /// - Support for indexing (GIN indexes) + /// - Duplicate keys are automatically removed + /// - Keys are sorted + /// + /// The only reasons to use JSON instead of JSONB are: + /// - You need to preserve exact input formatting (whitespace, key order) + /// - You need to preserve duplicate keys + /// - You have very specific performance requirements where JSON's lack of parsing overhead matters + /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE users ( + /// data json + /// ); + /// ``` + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE users ADD COLUMN metadata json; + /// ``` + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE users ALTER COLUMN data TYPE json; + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE TABLE users ( + /// data jsonb + /// ); + /// 
``` + /// + /// ```sql + /// ALTER TABLE users ADD COLUMN metadata jsonb; + /// ``` + /// + /// ```sql + /// ALTER TABLE users ALTER COLUMN data TYPE jsonb; + /// ``` + /// + pub PreferJsonb { + version: "next", + name: "preferJsonb", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Eugene("E3")], + } +} + +impl Rule for PreferJsonb { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + match &ctx.stmt() { + pgt_query::NodeEnum::CreateStmt(stmt) => { + for table_elt in &stmt.table_elts { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = &table_elt.node { + check_column_def(&mut diagnostics, col_def); + } + } + } + pgt_query::NodeEnum::AlterTableStmt(stmt) => { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtAddColumn + || cmd.subtype() + == pgt_query::protobuf::AlterTableType::AtAlterColumnType + { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + check_column_def(&mut diagnostics, col_def); + } + } + } + } + } + _ => {} + } + + diagnostics + } +} + +fn check_column_def( + diagnostics: &mut Vec, + col_def: &pgt_query::protobuf::ColumnDef, +) { + let Some(type_name) = &col_def.type_name else { + return; + }; + + for name_node in &type_name.names { + let Some(pgt_query::NodeEnum::String(name)) = &name_node.node else { + continue; + }; + + let type_name_lower = name.sval.to_lowercase(); + if type_name_lower != "json" { + continue; + } + + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Prefer JSONB over JSON for better performance and functionality." + }, + ) + .detail(None, "JSON stores exact text representation while JSONB stores parsed binary format. 
JSONB is faster for queries, supports indexing, and removes duplicate keys.") + .note("Consider using JSONB instead unless you specifically need to preserve formatting or duplicate keys."), + ); + } +} diff --git a/crates/pgt_analyser/src/lint/safety/prefer_robust_stmts.rs b/crates/pgt_analyser/src/lint/safety/prefer_robust_stmts.rs new file mode 100644 index 000000000..3f0b66f0f --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/prefer_robust_stmts.rs @@ -0,0 +1,106 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Prefer statements with guards for robustness in migrations. + /// + /// When running migrations outside of transactions (e.g., CREATE INDEX CONCURRENTLY), + /// statements should be made robust by using guards like IF NOT EXISTS or IF EXISTS. + /// This allows migrations to be safely re-run if they fail partway through. + /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE INDEX CONCURRENTLY users_email_idx ON users (email); + /// ``` + /// + /// ```sql,expect_diagnostic + /// DROP INDEX CONCURRENTLY users_email_idx; + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE INDEX CONCURRENTLY IF NOT EXISTS users_email_idx ON users (email); + /// ``` + /// + /// ```sql + /// DROP INDEX CONCURRENTLY IF EXISTS users_email_idx; + /// ``` + /// + pub PreferRobustStmts { + version: "next", + name: "preferRobustStmts", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("prefer-robust-stmts")], + } +} + +impl Rule for PreferRobustStmts { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + // Since we assume we're always in a transaction, we only check for + // statements that explicitly run outside transactions + match &ctx.stmt() { + pgt_query::NodeEnum::IndexStmt(stmt) => { + // Concurrent 
index creation runs outside transaction + if stmt.concurrent { + // Check for unnamed index + if stmt.idxname.is_empty() { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Concurrent index should have an explicit name." + }, + ).detail(None, "Use an explicit name for a concurrently created index to make migrations more robust.")); + } + // Check for IF NOT EXISTS + if !stmt.if_not_exists { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Concurrent index creation should use IF NOT EXISTS." + }, + ) + .detail( + None, + "Add IF NOT EXISTS to make the migration re-runnable if it fails.", + ), + ); + } + } + } + pgt_query::NodeEnum::DropStmt(stmt) => { + // Concurrent drop runs outside transaction + if stmt.concurrent && !stmt.missing_ok { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Concurrent drop should use IF EXISTS." + }, + ) + .detail( + None, + "Add IF EXISTS to make the migration re-runnable if it fails.", + ), + ); + } + } + _ => {} + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/prefer_text_field.rs b/crates/pgt_analyser/src/lint/safety/prefer_text_field.rs new file mode 100644 index 000000000..46f5bc757 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/prefer_text_field.rs @@ -0,0 +1,108 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Prefer using TEXT over VARCHAR(n) types. + /// + /// Changing the size of a VARCHAR field requires an ACCESS EXCLUSIVE lock, which blocks all + /// reads and writes to the table. It's easier to update a check constraint on a TEXT field + /// than a VARCHAR() size since the check constraint can use NOT VALID with a separate + /// VALIDATE call. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE "core_bar" ( + /// "id" serial NOT NULL PRIMARY KEY, + /// "alpha" varchar(100) NOT NULL + /// ); + /// ``` + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE "core_bar" ALTER COLUMN "kind" TYPE varchar(1000) USING "kind"::varchar(1000); + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE TABLE "core_bar" ( + /// "id" serial NOT NULL PRIMARY KEY, + /// "bravo" text NOT NULL + /// ); + /// ALTER TABLE "core_bar" ADD CONSTRAINT "text_size" CHECK (LENGTH("bravo") <= 100); + /// ``` + /// + pub PreferTextField { + version: "next", + name: "preferTextField", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("prefer-text-field")], + } +} + +impl Rule for PreferTextField { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + match &ctx.stmt() { + pgt_query::NodeEnum::CreateStmt(stmt) => { + for table_elt in &stmt.table_elts { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = &table_elt.node { + check_column_def(&mut diagnostics, col_def); + } + } + } + pgt_query::NodeEnum::AlterTableStmt(stmt) => { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + match cmd.subtype() { + pgt_query::protobuf::AlterTableType::AtAddColumn + | pgt_query::protobuf::AlterTableType::AtAlterColumnType => { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + check_column_def(&mut diagnostics, col_def); + } + } + _ => {} + } + } + } + } + _ => {} + } + + diagnostics + } +} + +fn check_column_def( + diagnostics: &mut Vec, + col_def: &pgt_query::protobuf::ColumnDef, +) { + if let Some(type_name) = &col_def.type_name { + for name_node in &type_name.names { + if let Some(pgt_query::NodeEnum::String(name)) = &name_node.node { + // Check if it's varchar with a size limit + if 
name.sval.to_lowercase() == "varchar" && !type_name.typmods.is_empty() { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Changing the size of a varchar field requires an ACCESS EXCLUSIVE lock." + }, + ) + .note("Use a text field with a check constraint."), + ); + } + } + } + } +} diff --git a/crates/pgt_analyser/src/lint/safety/prefer_timestamptz.rs b/crates/pgt_analyser/src/lint/safety/prefer_timestamptz.rs new file mode 100644 index 000000000..fb34e61c4 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/prefer_timestamptz.rs @@ -0,0 +1,122 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Prefer TIMESTAMPTZ over TIMESTAMP types. + /// + /// Using TIMESTAMP WITHOUT TIME ZONE can lead to issues when dealing with time zones. + /// TIMESTAMPTZ (TIMESTAMP WITH TIME ZONE) stores timestamps with time zone information, + /// making it safer for applications that handle multiple time zones or need to track + /// when events occurred in absolute time. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE app.users ( + /// created_ts timestamp + /// ); + /// ``` + /// + /// ```sql,expect_diagnostic + /// CREATE TABLE app.accounts ( + /// created_ts timestamp without time zone + /// ); + /// ``` + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE app.users ALTER COLUMN created_ts TYPE timestamp; + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE TABLE app.users ( + /// created_ts timestamptz + /// ); + /// ``` + /// + /// ```sql + /// CREATE TABLE app.accounts ( + /// created_ts timestamp with time zone + /// ); + /// ``` + /// + /// ```sql + /// ALTER TABLE app.users ALTER COLUMN created_ts TYPE timestamptz; + /// ``` + /// + pub PreferTimestamptz { + version: "next", + name: "preferTimestamptz", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("prefer-timestamptz")], + } +} + +impl Rule for PreferTimestamptz { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + match &ctx.stmt() { + pgt_query::NodeEnum::CreateStmt(stmt) => { + for table_elt in &stmt.table_elts { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = &table_elt.node { + check_column_def(&mut diagnostics, col_def); + } + } + } + pgt_query::NodeEnum::AlterTableStmt(stmt) => { + for cmd in &stmt.cmds { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + match cmd.subtype() { + pgt_query::protobuf::AlterTableType::AtAddColumn + | pgt_query::protobuf::AlterTableType::AtAlterColumnType => { + if let Some(pgt_query::NodeEnum::ColumnDef(col_def)) = + &cmd.def.as_ref().and_then(|d| d.node.as_ref()) + { + check_column_def(&mut diagnostics, col_def); + } + } + _ => {} + } + } + } + } + _ => {} + } + + diagnostics + } +} + +fn check_column_def( + diagnostics: &mut Vec, + col_def: &pgt_query::protobuf::ColumnDef, +) { + if let Some(type_name) = &col_def.type_name { + if let Some(last_name) = 
type_name.names.last() { + if let Some(pgt_query::NodeEnum::String(name)) = &last_name.node { + // Check for "timestamp" (without timezone) + if name.sval.to_lowercase() == "timestamp" { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Prefer TIMESTAMPTZ over TIMESTAMP for better timezone handling." + }, + ) + .detail(None, "TIMESTAMP WITHOUT TIME ZONE can lead to issues when dealing with time zones.") + .note("Use TIMESTAMPTZ (TIMESTAMP WITH TIME ZONE) instead."), + ); + } + } + } + } +} diff --git a/crates/pgt_analyser/src/lint/safety/renaming_column.rs b/crates/pgt_analyser/src/lint/safety/renaming_column.rs new file mode 100644 index 000000000..8ab079e37 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/renaming_column.rs @@ -0,0 +1,49 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Renaming columns may break existing queries and application code. + /// + /// Renaming a column that is being used by an existing application or query can cause unexpected downtime. + /// Consider creating a new column instead and migrating the data, then dropping the old column after ensuring + /// no dependencies exist. + /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE users RENAME COLUMN email TO email_address; + /// ``` + /// + pub RenamingColumn { + version: "next", + name: "renamingColumn", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("renaming-column")], + } +} + +impl Rule for RenamingColumn { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::RenameStmt(stmt) = &ctx.stmt() { + if stmt.rename_type() == pgt_query::protobuf::ObjectType::ObjectColumn { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! 
{ + "Renaming a column may break existing clients." + }, + ).detail(None, "Consider creating a new column with the desired name and migrating data instead.")); + } + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/renaming_table.rs b/crates/pgt_analyser/src/lint/safety/renaming_table.rs new file mode 100644 index 000000000..072ef3dd8 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/renaming_table.rs @@ -0,0 +1,49 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Renaming tables may break existing queries and application code. + /// + /// Renaming a table that is being referenced by existing applications, views, functions, or foreign keys + /// can cause unexpected downtime. Consider creating a view with the old table name pointing to the new table, + /// or carefully coordinate the rename with application deployments. + /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// ALTER TABLE users RENAME TO app_users; + /// ``` + /// + pub RenamingTable { + version: "next", + name: "renamingTable", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("renaming-table")], + } +} + +impl Rule for RenamingTable { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::RenameStmt(stmt) = &ctx.stmt() { + if stmt.rename_type() == pgt_query::protobuf::ObjectType::ObjectTable { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Renaming a table may break existing clients." 
+ }, + ).detail(None, "Consider creating a view with the old table name instead, or coordinate the rename carefully with application deployments.")); + } + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/require_concurrent_index_creation.rs b/crates/pgt_analyser/src/lint/safety/require_concurrent_index_creation.rs new file mode 100644 index 000000000..80a78a439 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/require_concurrent_index_creation.rs @@ -0,0 +1,92 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Creating indexes non-concurrently can lock the table for writes. + /// + /// When creating an index on an existing table, using CREATE INDEX without CONCURRENTLY will lock the table + /// against writes for the duration of the index build. This can cause downtime in production systems. + /// Use CREATE INDEX CONCURRENTLY to build the index without blocking concurrent operations. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// CREATE INDEX users_email_idx ON users (email); + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// CREATE INDEX CONCURRENTLY users_email_idx ON users (email); + /// ``` + /// + pub RequireConcurrentIndexCreation { + version: "next", + name: "requireConcurrentIndexCreation", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("require-concurrent-index-creation")], + } +} + +impl Rule for RequireConcurrentIndexCreation { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + let pgt_query::NodeEnum::IndexStmt(stmt) = &ctx.stmt() else { + return diagnostics; + }; + + // Concurrent indexes are safe + if stmt.concurrent { + return diagnostics; + } + + // Check if this table was created in the same transaction/file + let table_name = stmt + .relation + .as_ref() + .map(|r| r.relname.as_str()) + .unwrap_or(""); + + // Skip if table name is empty or table was created in the same file + if table_name.is_empty() || is_table_created_in_file(ctx.file_context(), table_name) { + return diagnostics; + } + + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Creating an index non-concurrently blocks writes to the table." 
+ }, + ) + .detail(None, "Use CREATE INDEX CONCURRENTLY to avoid blocking concurrent operations on the table.") + ); + + diagnostics + } +} + +fn is_table_created_in_file( + file_context: &pgt_analyse::AnalysedFileContext, + table_name: &str, +) -> bool { + // Check all statements in the file to see if this table was created + for stmt in file_context.stmts { + if let pgt_query::NodeEnum::CreateStmt(create_stmt) = stmt { + if let Some(relation) = &create_stmt.relation { + if relation.relname == table_name { + return true; + } + } + } + } + false +} diff --git a/crates/pgt_analyser/src/lint/safety/require_concurrent_index_deletion.rs b/crates/pgt_analyser/src/lint/safety/require_concurrent_index_deletion.rs new file mode 100644 index 000000000..63f70de72 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/require_concurrent_index_deletion.rs @@ -0,0 +1,57 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Dropping indexes non-concurrently can lock the table for reads. + /// + /// When dropping an index, using DROP INDEX without CONCURRENTLY will lock the table + /// preventing reads and writes for the duration of the drop. This can cause downtime in production systems. + /// Use DROP INDEX CONCURRENTLY to drop the index without blocking concurrent operations. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// DROP INDEX IF EXISTS users_email_idx; + /// ``` + /// + /// ### Valid + /// + /// ```sql + /// DROP INDEX CONCURRENTLY IF EXISTS users_email_idx; + /// ``` + /// + pub RequireConcurrentIndexDeletion { + version: "next", + name: "requireConcurrentIndexDeletion", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("require-concurrent-index-deletion")], + } +} + +impl Rule for RequireConcurrentIndexDeletion { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::DropStmt(stmt) = &ctx.stmt() { + if !stmt.concurrent + && stmt.remove_type() == pgt_query::protobuf::ObjectType::ObjectIndex + { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Dropping an index non-concurrently blocks reads and writes to the table." + }, + ).detail(None, "Use DROP INDEX CONCURRENTLY to avoid blocking concurrent operations on the table.")); + } + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/transaction_nesting.rs b/crates/pgt_analyser/src/lint/safety/transaction_nesting.rs new file mode 100644 index 000000000..209d47314 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/transaction_nesting.rs @@ -0,0 +1,101 @@ +use pgt_analyse::{ + AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule, +}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Detects problematic transaction nesting that could lead to unexpected behavior. + /// + /// Transaction nesting issues occur when trying to start a transaction within an existing transaction, + /// or trying to commit/rollback when not in a transaction. This can lead to unexpected behavior + /// or errors in database migrations. 
+ /// + /// ## Examples + /// + /// ### Invalid + /// + /// ```sql,expect_diagnostic + /// BEGIN; + /// -- Migration tools already manage transactions + /// SELECT 1; + /// ``` + /// + /// ```sql,expect_diagnostic + /// SELECT 1; + /// COMMIT; -- No transaction to commit + /// ``` + /// + pub TransactionNesting { + version: "next", + name: "transactionNesting", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("transaction-nesting")], + } +} + +impl Rule for TransactionNesting { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query::NodeEnum::TransactionStmt(stmt) = &ctx.stmt() { + match stmt.kind() { + pgt_query::protobuf::TransactionStmtKind::TransStmtBegin + | pgt_query::protobuf::TransactionStmtKind::TransStmtStart => { + // Check if there's already a BEGIN in previous statements + if has_transaction_start_before(ctx.file_context()) { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Nested transaction detected." + }, + ).detail(None, "Starting a transaction when already in a transaction can cause issues.")); + } + // Always warn about BEGIN/START since we assume we're in a transaction + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Transaction already managed by migration tool." + }, + ).detail(None, "Migration tools manage transactions automatically. Remove explicit transaction control.") + .note("Put migration statements in separate files to have them be in separate transactions.")); + } + pgt_query::protobuf::TransactionStmtKind::TransStmtCommit + | pgt_query::protobuf::TransactionStmtKind::TransStmtRollback => { + // Always warn about COMMIT/ROLLBACK since we assume we're in a transaction + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Attempting to end transaction managed by migration tool." 
+ }, + ).detail(None, "Migration tools manage transactions automatically. Remove explicit transaction control.") + .note("Put migration statements in separate files to have them be in separate transactions.")); + } + _ => {} + } + } + + diagnostics + } +} + +fn has_transaction_start_before(file_context: &AnalysedFileContext) -> bool { + for stmt in file_context.previous_stmts() { + if let pgt_query::NodeEnum::TransactionStmt(tx_stmt) = stmt { + match tx_stmt.kind() { + pgt_query::protobuf::TransactionStmtKind::TransStmtBegin + | pgt_query::protobuf::TransactionStmtKind::TransStmtStart => return true, + pgt_query::protobuf::TransactionStmtKind::TransStmtCommit + | pgt_query::protobuf::TransactionStmtKind::TransStmtRollback => return false, + _ => {} + } + } + } + false +} diff --git a/crates/pgt_analyser/src/options.rs b/crates/pgt_analyser/src/options.rs index d893b84f4..693e20b4c 100644 --- a/crates/pgt_analyser/src/options.rs +++ b/crates/pgt_analyser/src/options.rs @@ -1,8 +1,16 @@ //! 
Generated file, do not edit by hand, see `xtask/codegen` use crate::lint; +pub type AddingFieldWithDefault = + ::Options; +pub type AddingForeignKeyConstraint = < lint :: safety :: adding_foreign_key_constraint :: AddingForeignKeyConstraint as pgt_analyse :: Rule > :: Options ; +pub type AddingNotNullField = + ::Options; +pub type AddingPrimaryKeyConstraint = < lint :: safety :: adding_primary_key_constraint :: AddingPrimaryKeyConstraint as pgt_analyse :: Rule > :: Options ; pub type AddingRequiredField = ::Options; +pub type BanCharField = ::Options; +pub type BanConcurrentIndexCreationInTransaction = < lint :: safety :: ban_concurrent_index_creation_in_transaction :: BanConcurrentIndexCreationInTransaction as pgt_analyse :: Rule > :: Options ; pub type BanDropColumn = ::Options; pub type BanDropDatabase = @@ -12,3 +20,28 @@ pub type BanDropNotNull = pub type BanDropTable = ::Options; pub type BanTruncateCascade = ::Options; +pub type ChangingColumnType = + ::Options; +pub type ConstraintMissingNotValid = < lint :: safety :: constraint_missing_not_valid :: ConstraintMissingNotValid as pgt_analyse :: Rule > :: Options ; +pub type DisallowUniqueConstraint = < lint :: safety :: disallow_unique_constraint :: DisallowUniqueConstraint as pgt_analyse :: Rule > :: Options ; +pub type PreferBigInt = ::Options; +pub type PreferBigintOverInt = + ::Options; +pub type PreferBigintOverSmallint = < lint :: safety :: prefer_bigint_over_smallint :: PreferBigintOverSmallint as pgt_analyse :: Rule > :: Options ; +pub type PreferIdentity = + ::Options; +pub type PreferJsonb = ::Options; +pub type PreferRobustStmts = + ::Options; +pub type PreferTextField = + ::Options; +pub type PreferTimestamptz = + ::Options; +pub type RenamingColumn = + ::Options; +pub type RenamingTable = + ::Options; +pub type RequireConcurrentIndexCreation = < lint :: safety :: require_concurrent_index_creation :: RequireConcurrentIndexCreation as pgt_analyse :: Rule > :: Options ; +pub type 
RequireConcurrentIndexDeletion = < lint :: safety :: require_concurrent_index_deletion :: RequireConcurrentIndexDeletion as pgt_analyse :: Rule > :: Options ; +pub type TransactionNesting = + ::Options; diff --git a/crates/pgt_analyser/tests/rules_tests.rs b/crates/pgt_analyser/tests/rules_tests.rs index d8e5b0ef1..5334246b8 100644 --- a/crates/pgt_analyser/tests/rules_tests.rs +++ b/crates/pgt_analyser/tests/rules_tests.rs @@ -1,5 +1,5 @@ use core::slice; -use std::{fmt::Write, fs::read_to_string, path::Path}; +use std::{collections::HashMap, fmt::Write, fs::read_to_string, path::Path}; use pgt_analyse::{AnalyserOptions, AnalysisFilter, RuleDiagnostic, RuleFilter}; use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig, AnalyserParams}; @@ -25,20 +25,30 @@ fn rule_test(full_path: &'static str, _: &str, _: &str) { let query = read_to_string(full_path).unwrap_or_else(|_| panic!("Failed to read file: {} ", full_path)); - let ast = pgt_query::parse(&query).expect("failed to parse SQL"); let options = AnalyserOptions::default(); let analyser = Analyser::new(AnalyserConfig { options: &options, filter, }); - let stmt = AnalysableStatement { - root: ast.into_root().expect("Failed to convert AST to root node"), - range: pgt_text_size::TextRange::new(0.into(), u32::try_from(query.len()).unwrap().into()), - }; + let split = pgt_statement_splitter::split(&query); + + let stmts = split + .ranges + .iter() + .map(|r| { + let text = &query[*r]; + let ast = pgt_query::parse(text).expect("failed to parse SQL"); + + AnalysableStatement { + root: ast.into_root().expect("Failed to convert AST to root node"), + range: *r, + } + }) + .collect::>(); let results = analyser.run(AnalyserParams { - stmts: vec![stmt], + stmts, schema_cache: None, }); @@ -89,29 +99,45 @@ fn write_snapshot(snapshot: &mut String, query: &str, diagnostics: &[RuleDiagnos enum Expectation { NoDiagnostics, - AnyDiagnostics, - OnlyOne(String), + Diagnostics(Vec<(String, usize)>), } impl Expectation { fn 
from_file(content: &str) -> Self { + let mut multiple_of: HashMap<&str, i32> = HashMap::new(); for line in content.lines() { if line.contains("expect_no_diagnostics") { + if !multiple_of.is_empty() { + panic!( + "Cannot use both `expect_no_diagnostics` and `expect_` in the same test" + ); + } return Self::NoDiagnostics; } - if line.contains("expect_only_") { + if line.contains("expect_") && !line.contains("expect_no_diagnostics") { let kind = line .splitn(3, "_") .last() - .expect("Use pattern: `-- expect_only_`") + .expect("Use pattern: `-- expect_`") .trim(); - return Self::OnlyOne(kind.into()); + *multiple_of.entry(kind).or_insert(0) += 1; } } - Self::AnyDiagnostics + if !multiple_of.is_empty() { + return Self::Diagnostics( + multiple_of + .into_iter() + .map(|(k, v)| (k.into(), v as usize)) + .collect(), + ); + } + + panic!( + "No expectation found in the test file. Use `-- expect_no_diagnostics` or `-- expect_`" + ); } fn assert(&self, diagnostics: &[RuleDiagnostic]) { @@ -121,20 +147,21 @@ impl Expectation { panic!("This test should not have any diagnostics."); } } - Self::OnlyOne(category) => { - let found_kinds = diagnostics - .iter() - .map(|d| d.get_category_name()) - .collect::>() - .join(", "); - - if diagnostics.len() != 1 || diagnostics[0].get_category_name() != category { - panic!( - "This test should only have one diagnostic of kind: {category}\nReceived: {found_kinds}" - ); + Self::Diagnostics(expected) => { + let mut counts: HashMap<&str, usize> = HashMap::new(); + for diag in diagnostics { + *counts.entry(diag.get_category_name()).or_insert(0) += 1; + } + + for (kind, expected_count) in expected { + let actual_count = counts.get(kind.as_str()).copied().unwrap_or(0); + if actual_count != *expected_count { + panic!( + "Expected {expected_count} diagnostics of kind `{kind}`, but found {actual_count}." 
+ ); + } } } - Self::AnyDiagnostics => {} } } } diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/basic.sql b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/basic.sql new file mode 100644 index 000000000..f0ce47475 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/addingFieldWithDefault +ALTER TABLE "core_recipe" ADD COLUMN "foo" integer DEFAULT 10; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/basic.sql.snap new file mode 100644 index 000000000..e44d81243 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/basic.sql.snap @@ -0,0 +1,19 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/addingFieldWithDefault +ALTER TABLE "core_recipe" ADD COLUMN "foo" integer DEFAULT 10; +``` + +# Diagnostics +lint/safety/addingFieldWithDefault ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a column with a DEFAULT value causes a table rewrite. + + i This operation requires an ACCESS EXCLUSIVE lock and rewrites the entire table. + + i Add the column without a default, then set the default in a separate statement. 
diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/generated_column.sql b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/generated_column.sql new file mode 100644 index 000000000..108dba055 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/generated_column.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/addingFieldWithDefault +ALTER TABLE users ADD COLUMN full_name text GENERATED ALWAYS AS (first_name || ' ' || last_name) STORED; diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/generated_column.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/generated_column.sql.snap new file mode 100644 index 000000000..672f049eb --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/generated_column.sql.snap @@ -0,0 +1,20 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/addingFieldWithDefault +ALTER TABLE users ADD COLUMN full_name text GENERATED ALWAYS AS (first_name || ' ' || last_name) STORED; + +``` + +# Diagnostics +lint/safety/addingFieldWithDefault ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a generated column requires a table rewrite. + + i This operation requires an ACCESS EXCLUSIVE lock and rewrites the entire table. + + i Add the column as nullable, backfill existing rows, and add a trigger to update the column on write instead. 
diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/no_default.sql b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/no_default.sql new file mode 100644 index 000000000..f79c4b884 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/no_default.sql @@ -0,0 +1,3 @@ +-- Test adding column without default (should be safe) +-- expect_no_diagnostics +ALTER TABLE users ADD COLUMN email text; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/no_default.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/no_default.sql.snap new file mode 100644 index 000000000..edce67ffb --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/no_default.sql.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- Test adding column without default (should be safe) +-- expect_no_diagnostics +ALTER TABLE users ADD COLUMN email text; +``` diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/non_volatile_default.sql b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/non_volatile_default.sql new file mode 100644 index 000000000..f09c0738e --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/non_volatile_default.sql @@ -0,0 +1,3 @@ +-- Test non-volatile default values (should be safe in PG 11+, but we are passing no PG version info in the tests) +-- expect_lint/safety/addingFieldWithDefault +ALTER TABLE users ADD COLUMN status text DEFAULT 'active'; diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/non_volatile_default.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/non_volatile_default.sql.snap new file mode 100644 index 000000000..bed0cd3c7 --- /dev/null +++ 
b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/non_volatile_default.sql.snap @@ -0,0 +1,21 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- Test non-volatile default values (should be safe in PG 11+, but we are passing no PG version info in the tests) +-- expect_lint/safety/addingFieldWithDefault +ALTER TABLE users ADD COLUMN status text DEFAULT 'active'; + +``` + +# Diagnostics +lint/safety/addingFieldWithDefault ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a column with a DEFAULT value causes a table rewrite. + + i This operation requires an ACCESS EXCLUSIVE lock and rewrites the entire table. + + i Add the column without a default, then set the default in a separate statement. diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/volatile_default.sql b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/volatile_default.sql new file mode 100644 index 000000000..f8b8a12c8 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/volatile_default.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/addingFieldWithDefault +ALTER TABLE users ADD COLUMN created_at timestamp DEFAULT now(); diff --git a/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/volatile_default.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/volatile_default.sql.snap new file mode 100644 index 000000000..b01006efa --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingFieldWithDefault/volatile_default.sql.snap @@ -0,0 +1,20 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/addingFieldWithDefault +ALTER TABLE users ADD COLUMN created_at timestamp DEFAULT now(); + +``` + +# Diagnostics +lint/safety/addingFieldWithDefault ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × 
Adding a column with a DEFAULT value causes a table rewrite. + + i This operation requires an ACCESS EXCLUSIVE lock and rewrites the entire table. + + i Add the column without a default, then set the default in a separate statement. diff --git a/crates/pgt_analyser/tests/specs/safety/addingForeignKeyConstraint/basic.sql b/crates/pgt_analyser/tests/specs/safety/addingForeignKeyConstraint/basic.sql new file mode 100644 index 000000000..012107f85 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingForeignKeyConstraint/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/addingForeignKeyConstraint +ALTER TABLE "email" ADD CONSTRAINT "fk_user" FOREIGN KEY ("user_id") REFERENCES "user" ("id"); diff --git a/crates/pgt_analyser/tests/specs/safety/addingForeignKeyConstraint/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingForeignKeyConstraint/basic.sql.snap new file mode 100644 index 000000000..3570851b8 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingForeignKeyConstraint/basic.sql.snap @@ -0,0 +1,20 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/addingForeignKeyConstraint +ALTER TABLE "email" ADD CONSTRAINT "fk_user" FOREIGN KEY ("user_id") REFERENCES "user" ("id"); + +``` + +# Diagnostics +lint/safety/addingForeignKeyConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a foreign key constraint requires a table scan and locks on both tables. + + i This will block writes to both the referencing and referenced tables while PostgreSQL verifies the constraint. + + i Add the constraint as NOT VALID first, then VALIDATE it in a separate transaction. 
diff --git a/crates/pgt_analyser/tests/specs/safety/addingNotNullField/basic.sql b/crates/pgt_analyser/tests/specs/safety/addingNotNullField/basic.sql new file mode 100644 index 000000000..e7abd37fe --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingNotNullField/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/addingNotNullField +ALTER TABLE "core_recipe" ALTER COLUMN "foo" SET NOT NULL; diff --git a/crates/pgt_analyser/tests/specs/safety/addingNotNullField/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingNotNullField/basic.sql.snap new file mode 100644 index 000000000..7d27d88bd --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingNotNullField/basic.sql.snap @@ -0,0 +1,20 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/addingNotNullField +ALTER TABLE "core_recipe" ALTER COLUMN "foo" SET NOT NULL; + +``` + +# Diagnostics +lint/safety/addingNotNullField ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Setting a column NOT NULL blocks reads while the table is scanned. + + i This operation requires an ACCESS EXCLUSIVE lock and a full table scan to verify all rows. + + i Use a CHECK constraint with NOT VALID instead, then validate it in a separate transaction. 
diff --git a/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/basic.sql b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/basic.sql new file mode 100644 index 000000000..bd85a464b --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/addingPrimaryKeyConstraint +ALTER TABLE users ADD PRIMARY KEY (id); \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/basic.sql.snap new file mode 100644 index 000000000..c5c19999e --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/basic.sql.snap @@ -0,0 +1,19 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/addingPrimaryKeyConstraint +ALTER TABLE users ADD PRIMARY KEY (id); +``` + +# Diagnostics +lint/safety/addingPrimaryKeyConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a PRIMARY KEY constraint results in locks and table rewrites. + + i Adding a PRIMARY KEY constraint requires an ACCESS EXCLUSIVE lock which blocks reads. + + i Add the PRIMARY KEY constraint USING an index. 
diff --git a/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/serial_column.sql b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/serial_column.sql new file mode 100644 index 000000000..f48c44554 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/serial_column.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/addingPrimaryKeyConstraint +ALTER TABLE items ADD COLUMN id SERIAL PRIMARY KEY; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/serial_column.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/serial_column.sql.snap new file mode 100644 index 000000000..85fcb5f9b --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/serial_column.sql.snap @@ -0,0 +1,19 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/addingPrimaryKeyConstraint +ALTER TABLE items ADD COLUMN id SERIAL PRIMARY KEY; +``` + +# Diagnostics +lint/safety/addingPrimaryKeyConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a PRIMARY KEY constraint results in locks and table rewrites. + + i Adding a PRIMARY KEY constraint requires an ACCESS EXCLUSIVE lock which blocks reads. + + i Add the PRIMARY KEY constraint USING an index. 
diff --git a/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/using_index.sql b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/using_index.sql new file mode 100644 index 000000000..ba5ad6d1d --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/using_index.sql @@ -0,0 +1,3 @@ +-- expect_no_diagnostics +-- This should not trigger the rule - using an existing index +ALTER TABLE items ADD CONSTRAINT items_pk PRIMARY KEY USING INDEX items_pk; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/using_index.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/using_index.sql.snap new file mode 100644 index 000000000..447fcc0f0 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/addingPrimaryKeyConstraint/using_index.sql.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_no_diagnostics +-- This should not trigger the rule - using an existing index +ALTER TABLE items ADD CONSTRAINT items_pk PRIMARY KEY USING INDEX items_pk; +``` diff --git a/crates/pgt_analyser/tests/specs/safety/addingRequiredField/basic.sql b/crates/pgt_analyser/tests/specs/safety/addingRequiredField/basic.sql index 836c295c1..8b152db93 100644 --- a/crates/pgt_analyser/tests/specs/safety/addingRequiredField/basic.sql +++ b/crates/pgt_analyser/tests/specs/safety/addingRequiredField/basic.sql @@ -1,3 +1,3 @@ --- expect_only_lint/safety/addingRequiredField +-- expect_lint/safety/addingRequiredField alter table test add column c int not null; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/addingRequiredField/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/addingRequiredField/basic.sql.snap index 559dbf53c..13811f8d2 100644 --- a/crates/pgt_analyser/tests/specs/safety/addingRequiredField/basic.sql.snap +++ 
b/crates/pgt_analyser/tests/specs/safety/addingRequiredField/basic.sql.snap @@ -1,10 +1,11 @@ --- source: crates/pgt_analyser/tests/rules_tests.rs expression: snapshot +snapshot_kind: text --- # Input ``` --- expect_only_lint/safety/addingRequiredField +-- expect_lint/safety/addingRequiredField alter table test add column c int not null; ``` diff --git a/crates/pgt_analyser/tests/specs/safety/banCharField/alter_table.sql b/crates/pgt_analyser/tests/specs/safety/banCharField/alter_table.sql new file mode 100644 index 000000000..375e95d01 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banCharField/alter_table.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/banCharField +ALTER TABLE users ADD COLUMN code character(10); diff --git a/crates/pgt_analyser/tests/specs/safety/banCharField/alter_table.sql.snap b/crates/pgt_analyser/tests/specs/safety/banCharField/alter_table.sql.snap new file mode 100644 index 000000000..f4de4939b --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banCharField/alter_table.sql.snap @@ -0,0 +1,20 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/banCharField +ALTER TABLE users ADD COLUMN code character(10); + +``` + +# Diagnostics +lint/safety/banCharField ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × CHAR type is discouraged due to space padding behavior. + + i CHAR types are fixed-length and padded with spaces, which can lead to unexpected behavior. + + i Use VARCHAR or TEXT instead for variable-length character data. 
diff --git a/crates/pgt_analyser/tests/specs/safety/banCharField/basic.sql b/crates/pgt_analyser/tests/specs/safety/banCharField/basic.sql new file mode 100644 index 000000000..596db5eda --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banCharField/basic.sql @@ -0,0 +1,5 @@ +-- expect_lint/safety/banCharField +CREATE TABLE "core_bar" ( + "id" serial NOT NULL PRIMARY KEY, + "alpha" char(100) NOT NULL +); \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banCharField/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banCharField/basic.sql.snap new file mode 100644 index 000000000..f32d84c8d --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banCharField/basic.sql.snap @@ -0,0 +1,22 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/banCharField +CREATE TABLE "core_bar" ( + "id" serial NOT NULL PRIMARY KEY, + "alpha" char(100) NOT NULL +); +``` + +# Diagnostics +lint/safety/banCharField ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × CHAR type is discouraged due to space padding behavior. + + i CHAR types are fixed-length and padded with spaces, which can lead to unexpected behavior. + + i Use VARCHAR or TEXT instead for variable-length character data. 
diff --git a/crates/pgt_analyser/tests/specs/safety/banCharField/bpchar.sql b/crates/pgt_analyser/tests/specs/safety/banCharField/bpchar.sql new file mode 100644 index 000000000..689ea0ddd --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banCharField/bpchar.sql @@ -0,0 +1,4 @@ +-- expect_lint/safety/banCharField +CREATE TABLE test ( + code bpchar(5) +); diff --git a/crates/pgt_analyser/tests/specs/safety/banCharField/bpchar.sql.snap b/crates/pgt_analyser/tests/specs/safety/banCharField/bpchar.sql.snap new file mode 100644 index 000000000..064962820 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banCharField/bpchar.sql.snap @@ -0,0 +1,22 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/banCharField +CREATE TABLE test ( + code bpchar(5) +); + +``` + +# Diagnostics +lint/safety/banCharField ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × CHAR type is discouraged due to space padding behavior. + + i CHAR types are fixed-length and padded with spaces, which can lead to unexpected behavior. + + i Use VARCHAR or TEXT instead for variable-length character data. 
diff --git a/crates/pgt_analyser/tests/specs/safety/banCharField/varchar_valid.sql b/crates/pgt_analyser/tests/specs/safety/banCharField/varchar_valid.sql new file mode 100644 index 000000000..27f1d9b01 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banCharField/varchar_valid.sql @@ -0,0 +1,5 @@ +-- expect_no_diagnostics +CREATE TABLE "core_bar" ( + "id" serial NOT NULL PRIMARY KEY, + "alpha" varchar(100) NOT NULL +); diff --git a/crates/pgt_analyser/tests/specs/safety/banCharField/varchar_valid.sql.snap b/crates/pgt_analyser/tests/specs/safety/banCharField/varchar_valid.sql.snap new file mode 100644 index 000000000..5a6aaeb76 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banCharField/varchar_valid.sql.snap @@ -0,0 +1,14 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_no_diagnostics +CREATE TABLE "core_bar" ( + "id" serial NOT NULL PRIMARY KEY, + "alpha" varchar(100) NOT NULL +); + +``` diff --git a/crates/pgt_analyser/tests/specs/safety/banConcurrentIndexCreationInTransaction/basic.sql b/crates/pgt_analyser/tests/specs/safety/banConcurrentIndexCreationInTransaction/basic.sql new file mode 100644 index 000000000..eeeddf954 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banConcurrentIndexCreationInTransaction/basic.sql @@ -0,0 +1,3 @@ +-- expect_lint/safety/banConcurrentIndexCreationInTransaction +CREATE INDEX CONCURRENTLY "field_name_idx" ON "table_name" ("field_name"); +SELECT 1; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banConcurrentIndexCreationInTransaction/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banConcurrentIndexCreationInTransaction/basic.sql.snap new file mode 100644 index 000000000..5bb028b10 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banConcurrentIndexCreationInTransaction/basic.sql.snap @@ -0,0 +1,18 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs 
+expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/banConcurrentIndexCreationInTransaction +CREATE INDEX CONCURRENTLY "field_name_idx" ON "table_name" ("field_name"); +SELECT 1; +``` + +# Diagnostics +lint/safety/banConcurrentIndexCreationInTransaction ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × CREATE INDEX CONCURRENTLY cannot be used inside a transaction block. + + i Run CREATE INDEX CONCURRENTLY outside of a transaction. Migration tools usually run in transactions, so you may need to run this statement in its own migration or manually. diff --git a/crates/pgt_analyser/tests/specs/safety/banDropColumn/basic.sql b/crates/pgt_analyser/tests/specs/safety/banDropColumn/basic.sql index 16d3b4769..6bff86142 100644 --- a/crates/pgt_analyser/tests/specs/safety/banDropColumn/basic.sql +++ b/crates/pgt_analyser/tests/specs/safety/banDropColumn/basic.sql @@ -1,3 +1,3 @@ --- expect_only_lint/safety/banDropColumn +-- expect_lint/safety/banDropColumn alter table test drop column id; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banDropColumn/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banDropColumn/basic.sql.snap index 3fd80e190..9c6d2192d 100644 --- a/crates/pgt_analyser/tests/specs/safety/banDropColumn/basic.sql.snap +++ b/crates/pgt_analyser/tests/specs/safety/banDropColumn/basic.sql.snap @@ -1,10 +1,11 @@ --- source: crates/pgt_analyser/tests/rules_tests.rs expression: snapshot +snapshot_kind: text --- # Input ``` --- expect_only_lint/safety/banDropColumn +-- expect_lint/safety/banDropColumn alter table test drop column id; ``` diff --git a/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql b/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql index 0dc016524..8dc725c3e 100644 --- a/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql +++ b/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql @@ -1,2 +1,2 @@ --- 
expect_only_lint/safety/banDropDatabase +-- expect_lint/safety/banDropDatabase drop database all_users; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql.snap index 90e35820c..f08e4c756 100644 --- a/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql.snap +++ b/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql.snap @@ -1,10 +1,11 @@ --- source: crates/pgt_analyser/tests/rules_tests.rs expression: snapshot +snapshot_kind: text --- # Input ``` --- expect_only_lint/safety/banDropDatabase +-- expect_lint/safety/banDropDatabase drop database all_users; ``` diff --git a/crates/pgt_analyser/tests/specs/safety/banDropNotNull/basic.sql b/crates/pgt_analyser/tests/specs/safety/banDropNotNull/basic.sql index 1e1fc8796..0e0481466 100644 --- a/crates/pgt_analyser/tests/specs/safety/banDropNotNull/basic.sql +++ b/crates/pgt_analyser/tests/specs/safety/banDropNotNull/basic.sql @@ -1,4 +1,4 @@ --- expect_only_lint/safety/banDropNotNull +-- expect_lint/safety/banDropNotNull alter table users alter column id drop not null; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banDropNotNull/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banDropNotNull/basic.sql.snap index e5d552678..842280d1b 100644 --- a/crates/pgt_analyser/tests/specs/safety/banDropNotNull/basic.sql.snap +++ b/crates/pgt_analyser/tests/specs/safety/banDropNotNull/basic.sql.snap @@ -1,10 +1,11 @@ --- source: crates/pgt_analyser/tests/rules_tests.rs expression: snapshot +snapshot_kind: text --- # Input ``` --- expect_only_lint/safety/banDropNotNull +-- expect_lint/safety/banDropNotNull alter table users alter column id drop not null; diff --git a/crates/pgt_analyser/tests/specs/safety/banDropTable/basic.sql b/crates/pgt_analyser/tests/specs/safety/banDropTable/basic.sql index 16f6fd624..f3554e176 100644 --- 
a/crates/pgt_analyser/tests/specs/safety/banDropTable/basic.sql +++ b/crates/pgt_analyser/tests/specs/safety/banDropTable/basic.sql @@ -1,2 +1,2 @@ --- expect_only_lint/safety/banDropTable +-- expect_lint/safety/banDropTable drop table test; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banDropTable/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banDropTable/basic.sql.snap index 481b12234..779c2d8e5 100644 --- a/crates/pgt_analyser/tests/specs/safety/banDropTable/basic.sql.snap +++ b/crates/pgt_analyser/tests/specs/safety/banDropTable/basic.sql.snap @@ -1,10 +1,11 @@ --- source: crates/pgt_analyser/tests/rules_tests.rs expression: snapshot +snapshot_kind: text --- # Input ``` --- expect_only_lint/safety/banDropTable +-- expect_lint/safety/banDropTable drop table test; ``` diff --git a/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql b/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql index d17fed13b..68bbba294 100644 --- a/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql +++ b/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql @@ -1,2 +1,2 @@ --- expect_only_lint/safety/banTruncateCascade +-- expect_lint/safety/banTruncateCascade truncate a cascade; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql.snap index d214b978a..deef44849 100644 --- a/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql.snap +++ b/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql.snap @@ -1,10 +1,11 @@ --- source: crates/pgt_analyser/tests/rules_tests.rs expression: snapshot +snapshot_kind: text --- # Input ``` --- expect_only_lint/safety/banTruncateCascade +-- expect_lint/safety/banTruncateCascade truncate a cascade; ``` diff --git a/crates/pgt_analyser/tests/specs/safety/changingColumnType/basic.sql 
b/crates/pgt_analyser/tests/specs/safety/changingColumnType/basic.sql new file mode 100644 index 000000000..5e5c2fc38 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/changingColumnType/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/changingColumnType +ALTER TABLE "core_recipe" ALTER COLUMN "edits" TYPE text USING "edits"::text; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/changingColumnType/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/changingColumnType/basic.sql.snap new file mode 100644 index 000000000..5739621a7 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/changingColumnType/basic.sql.snap @@ -0,0 +1,17 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/changingColumnType +ALTER TABLE "core_recipe" ALTER COLUMN "edits" TYPE text USING "edits"::text; +``` + +# Diagnostics +lint/safety/changingColumnType ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Changing a column type requires a table rewrite and blocks reads and writes. + + i Consider creating a new column with the desired type, migrating data, and then dropping the old column. 
diff --git a/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/basic.sql b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/basic.sql new file mode 100644 index 000000000..756bcbae6 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/constraintMissingNotValid +ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address); \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/basic.sql.snap new file mode 100644 index 000000000..639d10d5f --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/basic.sql.snap @@ -0,0 +1,17 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/constraintMissingNotValid +ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address); +``` + +# Diagnostics +lint/safety/constraintMissingNotValid ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a constraint without NOT VALID will block reads and writes while validating existing rows. + + i Add the constraint as NOT VALID in one transaction, then run VALIDATE CONSTRAINT in a separate transaction. 
diff --git a/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/check_constraint.sql b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/check_constraint.sql new file mode 100644 index 000000000..490ccdb30 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/check_constraint.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/constraintMissingNotValid +ALTER TABLE users ADD CONSTRAINT check_age CHECK (age >= 0); diff --git a/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/check_constraint.sql.snap b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/check_constraint.sql.snap new file mode 100644 index 000000000..357b931cc --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/check_constraint.sql.snap @@ -0,0 +1,18 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/constraintMissingNotValid +ALTER TABLE users ADD CONSTRAINT check_age CHECK (age >= 0); + +``` + +# Diagnostics +lint/safety/constraintMissingNotValid ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a constraint without NOT VALID will block reads and writes while validating existing rows. + + i Add the constraint as NOT VALID in one transaction, then run VALIDATE CONSTRAINT in a separate transaction. 
diff --git a/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/with_not_valid.sql b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/with_not_valid.sql new file mode 100644 index 000000000..b5bc40f65 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/with_not_valid.sql @@ -0,0 +1,3 @@ +-- Test constraint with NOT VALID (should be safe) +-- expect_no_diagnostics +ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) NOT VALID; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/with_not_valid.sql.snap b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/with_not_valid.sql.snap new file mode 100644 index 000000000..6d7ed9df4 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/constraintMissingNotValid/with_not_valid.sql.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- Test constraint with NOT VALID (should be safe) +-- expect_no_diagnostics +ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) NOT VALID; +``` diff --git a/crates/pgt_analyser/tests/specs/safety/preferBigInt/basic.sql b/crates/pgt_analyser/tests/specs/safety/preferBigInt/basic.sql new file mode 100644 index 000000000..4b04da62c --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferBigInt/basic.sql @@ -0,0 +1,4 @@ +-- expect_lint/safety/preferBigInt +CREATE TABLE users ( + id integer +); diff --git a/crates/pgt_analyser/tests/specs/safety/preferBigInt/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferBigInt/basic.sql.snap new file mode 100644 index 000000000..bb247c05e --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferBigInt/basic.sql.snap @@ -0,0 +1,22 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot 
+snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferBigInt +CREATE TABLE users ( + id integer +); + +``` + +# Diagnostics +lint/safety/preferBigInt ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Using smaller integer types can lead to overflow issues. + + i The 'int4' type has a limited range that may be exceeded as your data grows. + + i Consider using BIGINT for integer columns to avoid future migration issues. diff --git a/crates/pgt_analyser/tests/specs/safety/preferBigintOverInt/basic.sql b/crates/pgt_analyser/tests/specs/safety/preferBigintOverInt/basic.sql new file mode 100644 index 000000000..03e5e8147 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferBigintOverInt/basic.sql @@ -0,0 +1,4 @@ +-- expect_lint/safety/preferBigintOverInt +CREATE TABLE users ( + id integer +); diff --git a/crates/pgt_analyser/tests/specs/safety/preferBigintOverInt/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferBigintOverInt/basic.sql.snap new file mode 100644 index 000000000..a1a407884 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferBigintOverInt/basic.sql.snap @@ -0,0 +1,22 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferBigintOverInt +CREATE TABLE users ( + id integer +); + +``` + +# Diagnostics +lint/safety/preferBigintOverInt ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × INTEGER type may lead to overflow issues. + + i INTEGER has a maximum value of 2,147,483,647 which can be exceeded by ID columns and counters. + + i Consider using BIGINT instead for better future-proofing. 
diff --git a/crates/pgt_analyser/tests/specs/safety/preferBigintOverSmallint/basic.sql b/crates/pgt_analyser/tests/specs/safety/preferBigintOverSmallint/basic.sql new file mode 100644 index 000000000..2b1605ad3 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferBigintOverSmallint/basic.sql @@ -0,0 +1,4 @@ +-- expect_lint/safety/preferBigintOverSmallint +CREATE TABLE users ( + age smallint +); diff --git a/crates/pgt_analyser/tests/specs/safety/preferBigintOverSmallint/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferBigintOverSmallint/basic.sql.snap new file mode 100644 index 000000000..738fdbe37 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferBigintOverSmallint/basic.sql.snap @@ -0,0 +1,22 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferBigintOverSmallint +CREATE TABLE users ( + age smallint +); + +``` + +# Diagnostics +lint/safety/preferBigintOverSmallint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × SMALLINT has a very limited range that is easily exceeded. + + i SMALLINT can only store values from -32,768 to 32,767. This range is often insufficient. + + i Consider using INTEGER or BIGINT for better range and future-proofing. 
diff --git a/crates/pgt_analyser/tests/specs/safety/preferIdentity/alter_table.sql b/crates/pgt_analyser/tests/specs/safety/preferIdentity/alter_table.sql new file mode 100644 index 000000000..f2f52c4b8 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferIdentity/alter_table.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/preferIdentity +alter table test add column id serial; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/preferIdentity/alter_table.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferIdentity/alter_table.sql.snap new file mode 100644 index 000000000..924731c89 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferIdentity/alter_table.sql.snap @@ -0,0 +1,19 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferIdentity +alter table test add column id serial; +``` + +# Diagnostics +lint/safety/preferIdentity ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Prefer IDENTITY columns over SERIAL types. + + i Column uses 'serial' type which has limitations compared to IDENTITY columns. + + i Use 'bigint GENERATED BY DEFAULT AS IDENTITY' or 'bigint GENERATED ALWAYS AS IDENTITY' instead. 
diff --git a/crates/pgt_analyser/tests/specs/safety/preferIdentity/basic.sql b/crates/pgt_analyser/tests/specs/safety/preferIdentity/basic.sql new file mode 100644 index 000000000..ac2f5fdeb --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferIdentity/basic.sql @@ -0,0 +1,4 @@ +-- expect_lint/safety/preferIdentity +create table users ( + id serial +); \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/preferIdentity/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferIdentity/basic.sql.snap new file mode 100644 index 000000000..adada49e3 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferIdentity/basic.sql.snap @@ -0,0 +1,21 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferIdentity +create table users ( + id serial +); +``` + +# Diagnostics +lint/safety/preferIdentity ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Prefer IDENTITY columns over SERIAL types. + + i Column uses 'serial' type which has limitations compared to IDENTITY columns. + + i Use 'bigint GENERATED BY DEFAULT AS IDENTITY' or 'bigint GENERATED ALWAYS AS IDENTITY' instead. 
diff --git a/crates/pgt_analyser/tests/specs/safety/preferIdentity/bigserial.sql b/crates/pgt_analyser/tests/specs/safety/preferIdentity/bigserial.sql new file mode 100644 index 000000000..5f971cdf6 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferIdentity/bigserial.sql @@ -0,0 +1,4 @@ +-- expect_lint/safety/preferIdentity +create table users ( + id bigserial +); \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/preferIdentity/bigserial.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferIdentity/bigserial.sql.snap new file mode 100644 index 000000000..81eeaeb2c --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferIdentity/bigserial.sql.snap @@ -0,0 +1,21 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferIdentity +create table users ( + id bigserial +); +``` + +# Diagnostics +lint/safety/preferIdentity ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Prefer IDENTITY columns over SERIAL types. + + i Column uses 'bigserial' type which has limitations compared to IDENTITY columns. + + i Use 'bigint GENERATED BY DEFAULT AS IDENTITY' or 'bigint GENERATED ALWAYS AS IDENTITY' instead. 
diff --git a/crates/pgt_analyser/tests/specs/safety/preferIdentity/valid.sql b/crates/pgt_analyser/tests/specs/safety/preferIdentity/valid.sql new file mode 100644 index 000000000..ec478fe43 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferIdentity/valid.sql @@ -0,0 +1,4 @@ +-- expect_no_diagnostics +create table users_valid ( + id bigint generated by default as identity primary key +); diff --git a/crates/pgt_analyser/tests/specs/safety/preferIdentity/valid.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferIdentity/valid.sql.snap new file mode 100644 index 000000000..0b421140c --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferIdentity/valid.sql.snap @@ -0,0 +1,13 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_no_diagnostics +create table users_valid ( + id bigint generated by default as identity primary key +); + +``` diff --git a/crates/pgt_analyser/tests/specs/safety/preferJsonb/basic.sql b/crates/pgt_analyser/tests/specs/safety/preferJsonb/basic.sql new file mode 100644 index 000000000..6297a91c1 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferJsonb/basic.sql @@ -0,0 +1,5 @@ +-- expect_lint/safety/preferJsonb +CREATE TABLE users ( + id integer, + data json +); diff --git a/crates/pgt_analyser/tests/specs/safety/preferJsonb/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferJsonb/basic.sql.snap new file mode 100644 index 000000000..76c43afc5 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferJsonb/basic.sql.snap @@ -0,0 +1,23 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferJsonb +CREATE TABLE users ( + id integer, + data json +); + +``` + +# Diagnostics +lint/safety/preferJsonb ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Prefer JSONB over JSON for better 
performance and functionality. + + i JSON stores exact text representation while JSONB stores parsed binary format. JSONB is faster for queries, supports indexing, and removes duplicate keys. + + i Consider using JSONB instead unless you specifically need to preserve formatting or duplicate keys. diff --git a/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/basic.sql b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/basic.sql new file mode 100644 index 000000000..fd422f32a --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/basic.sql @@ -0,0 +1,3 @@ +-- expect_lint/safety/preferRobustStmts +CREATE INDEX CONCURRENTLY users_email_idx ON users (email); +SELECT 1; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/basic.sql.snap new file mode 100644 index 000000000..5db56846d --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/basic.sql.snap @@ -0,0 +1,18 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferRobustStmts +CREATE INDEX CONCURRENTLY users_email_idx ON users (email); +SELECT 1; +``` + +# Diagnostics +lint/safety/preferRobustStmts ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Concurrent index creation should use IF NOT EXISTS. + + i Add IF NOT EXISTS to make the migration re-runnable if it fails. 
diff --git a/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/drop_without_if_exists.sql b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/drop_without_if_exists.sql new file mode 100644 index 000000000..36d4b1375 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/drop_without_if_exists.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/preferRobustStmts +DROP INDEX CONCURRENTLY users_email_idx; diff --git a/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/drop_without_if_exists.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/drop_without_if_exists.sql.snap new file mode 100644 index 000000000..e03fbf715 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/drop_without_if_exists.sql.snap @@ -0,0 +1,18 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/preferRobustStmts +DROP INDEX CONCURRENTLY users_email_idx; + +``` + +# Diagnostics +lint/safety/preferRobustStmts ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Concurrent drop should use IF EXISTS. + + i Add IF EXISTS to make the migration re-runnable if it fails. 
diff --git a/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/robust_statements.sql b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/robust_statements.sql new file mode 100644 index 000000000..e19139596 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/robust_statements.sql @@ -0,0 +1,4 @@ +-- Test proper robust statements (should be safe) +-- expect_no_diagnostics +CREATE INDEX CONCURRENTLY IF NOT EXISTS users_email_idx ON users (email); +DROP INDEX CONCURRENTLY IF EXISTS old_idx; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/robust_statements.sql.snap b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/robust_statements.sql.snap new file mode 100644 index 000000000..93f454af5 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/preferRobustStmts/robust_statements.sql.snap @@ -0,0 +1,12 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- Test proper robust statements (should be safe) +-- expect_no_diagnostics +CREATE INDEX CONCURRENTLY IF NOT EXISTS users_email_idx ON users (email); +DROP INDEX CONCURRENTLY IF EXISTS old_idx; +``` diff --git a/crates/pgt_analyser/tests/specs/safety/renamingColumn/basic.sql b/crates/pgt_analyser/tests/specs/safety/renamingColumn/basic.sql new file mode 100644 index 000000000..a283d3bc7 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/renamingColumn/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/renamingColumn +ALTER TABLE users RENAME COLUMN name TO full_name; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/renamingColumn/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/renamingColumn/basic.sql.snap new file mode 100644 index 000000000..03fd3f807 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/renamingColumn/basic.sql.snap @@ -0,0 +1,17 @@ +--- +source: 
crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/renamingColumn +ALTER TABLE users RENAME COLUMN name TO full_name; +``` + +# Diagnostics +lint/safety/renamingColumn ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Renaming a column may break existing clients. + + i Consider creating a new column with the desired name and migrating data instead. diff --git a/crates/pgt_analyser/tests/specs/safety/renamingTable/basic.sql b/crates/pgt_analyser/tests/specs/safety/renamingTable/basic.sql new file mode 100644 index 000000000..b3df5c838 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/renamingTable/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/renamingTable +ALTER TABLE users RENAME TO customers; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/renamingTable/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/renamingTable/basic.sql.snap new file mode 100644 index 000000000..09cc0175e --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/renamingTable/basic.sql.snap @@ -0,0 +1,17 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/renamingTable +ALTER TABLE users RENAME TO customers; +``` + +# Diagnostics +lint/safety/renamingTable ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Renaming a table may break existing clients. + + i Consider creating a view with the old table name instead, or coordinate the rename carefully with application deployments. 
diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/basic.sql b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/basic.sql new file mode 100644 index 000000000..7ce423e29 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/requireConcurrentIndexCreation +CREATE INDEX users_email_idx ON users (email); \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/basic.sql.snap new file mode 100644 index 000000000..dad3c364b --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/basic.sql.snap @@ -0,0 +1,17 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/requireConcurrentIndexCreation +CREATE INDEX users_email_idx ON users (email); +``` + +# Diagnostics +lint/safety/requireConcurrentIndexCreation ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Creating an index non-concurrently blocks writes to the table. + + i Use CREATE INDEX CONCURRENTLY to avoid blocking concurrent operations on the table. 
diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/concurrent_valid.sql b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/concurrent_valid.sql new file mode 100644 index 000000000..339f0eebb --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/concurrent_valid.sql @@ -0,0 +1,3 @@ +-- Test concurrent index creation (should be safe) +-- expect_no_diagnostics +CREATE INDEX CONCURRENTLY users_email_idx ON users (email); \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/concurrent_valid.sql.snap b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/concurrent_valid.sql.snap new file mode 100644 index 000000000..7ee0a6f4e --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/concurrent_valid.sql.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- Test concurrent index creation (should be safe) +-- expect_no_diagnostics +CREATE INDEX CONCURRENTLY users_email_idx ON users (email); +``` diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/new_table_valid.sql b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/new_table_valid.sql new file mode 100644 index 000000000..8efaffb4b --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/new_table_valid.sql @@ -0,0 +1,4 @@ +-- Test index on newly created table (should be safe) +-- expect_no_diagnostics +CREATE TABLE users (id serial, email text); +CREATE INDEX users_email_idx ON users (email); \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/new_table_valid.sql.snap b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/new_table_valid.sql.snap new file mode 100644 index 
000000000..0ccdfe351 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexCreation/new_table_valid.sql.snap @@ -0,0 +1,12 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- Test index on newly created table (should be safe) +-- expect_no_diagnostics +CREATE TABLE users (id serial, email text); +CREATE INDEX users_email_idx ON users (email); +``` diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/basic.sql b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/basic.sql new file mode 100644 index 000000000..7332b1e3d --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/requireConcurrentIndexDeletion +DROP INDEX IF EXISTS users_email_idx; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/basic.sql.snap new file mode 100644 index 000000000..4a6267f8f --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/basic.sql.snap @@ -0,0 +1,17 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/requireConcurrentIndexDeletion +DROP INDEX IF EXISTS users_email_idx; +``` + +# Diagnostics +lint/safety/requireConcurrentIndexDeletion ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Dropping an index non-concurrently blocks reads and writes to the table. + + i Use DROP INDEX CONCURRENTLY to avoid blocking concurrent operations on the table. 
diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/concurrent_valid.sql b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/concurrent_valid.sql new file mode 100644 index 000000000..5ad97524a --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/concurrent_valid.sql @@ -0,0 +1,3 @@ +-- Test concurrent index deletion (should be safe) +-- expect_no_diagnostics +DROP INDEX CONCURRENTLY IF EXISTS users_email_idx; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/concurrent_valid.sql.snap b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/concurrent_valid.sql.snap new file mode 100644 index 000000000..77beb832f --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/requireConcurrentIndexDeletion/concurrent_valid.sql.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- Test concurrent index deletion (should be safe) +-- expect_no_diagnostics +DROP INDEX CONCURRENTLY IF EXISTS users_email_idx; +``` diff --git a/crates/pgt_analyser/tests/specs/safety/transactionNesting/basic.sql b/crates/pgt_analyser/tests/specs/safety/transactionNesting/basic.sql new file mode 100644 index 000000000..215917353 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/transactionNesting/basic.sql @@ -0,0 +1,2 @@ +-- expect_lint/safety/transactionNesting +BEGIN; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/transactionNesting/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/transactionNesting/basic.sql.snap new file mode 100644 index 000000000..4cc5e024e --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/transactionNesting/basic.sql.snap @@ -0,0 +1,19 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- 
expect_lint/safety/transactionNesting +BEGIN; +``` + +# Diagnostics +lint/safety/transactionNesting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Transaction already managed by migration tool. + + i Migration tools manage transactions automatically. Remove explicit transaction control. + + i Put migration statements in separate files to have them be in separate transactions. diff --git a/crates/pgt_analyser/tests/specs/safety/transactionNesting/begin_commit_combined.sql b/crates/pgt_analyser/tests/specs/safety/transactionNesting/begin_commit_combined.sql new file mode 100644 index 000000000..55b2148d2 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/transactionNesting/begin_commit_combined.sql @@ -0,0 +1,5 @@ +-- expect_lint/safety/transactionNesting +BEGIN; +SELECT 1; +-- expect_lint/safety/transactionNesting +COMMIT; diff --git a/crates/pgt_analyser/tests/specs/safety/transactionNesting/begin_commit_combined.sql.snap b/crates/pgt_analyser/tests/specs/safety/transactionNesting/begin_commit_combined.sql.snap new file mode 100644 index 000000000..927cdcc4c --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/transactionNesting/begin_commit_combined.sql.snap @@ -0,0 +1,33 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +-- expect_lint/safety/transactionNesting +BEGIN; +SELECT 1; +-- expect_lint/safety/transactionNesting +COMMIT; + +``` + +# Diagnostics +lint/safety/transactionNesting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Transaction already managed by migration tool. + + i Migration tools manage transactions automatically. Remove explicit transaction control. + + i Put migration statements in separate files to have them be in separate transactions. 
+ + + +lint/safety/transactionNesting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Attempting to end transaction managed by migration tool. + + i Migration tools manage transactions automatically. Remove explicit transaction control. + + i Put migration statements in separate files to have them be in separate transactions. diff --git a/crates/pgt_analyser/tests/specs/safety/transactionNesting/commit.sql b/crates/pgt_analyser/tests/specs/safety/transactionNesting/commit.sql new file mode 100644 index 000000000..def30f29a --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/transactionNesting/commit.sql @@ -0,0 +1,3 @@ +SELECT 1; +-- expect_lint/safety/transactionNesting +COMMIT; diff --git a/crates/pgt_analyser/tests/specs/safety/transactionNesting/commit.sql.snap b/crates/pgt_analyser/tests/specs/safety/transactionNesting/commit.sql.snap new file mode 100644 index 000000000..46003d978 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/transactionNesting/commit.sql.snap @@ -0,0 +1,21 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +SELECT 1; +-- expect_lint/safety/transactionNesting +COMMIT; + +``` + +# Diagnostics +lint/safety/transactionNesting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Attempting to end transaction managed by migration tool. + + i Migration tools manage transactions automatically. Remove explicit transaction control. + + i Put migration statements in separate files to have them be in separate transactions. 
diff --git a/crates/pgt_analyser/tests/specs/safety/transactionNesting/rollback.sql b/crates/pgt_analyser/tests/specs/safety/transactionNesting/rollback.sql new file mode 100644 index 000000000..00ee62293 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/transactionNesting/rollback.sql @@ -0,0 +1,3 @@ +SELECT 1; +-- expect_lint/safety/transactionNesting +ROLLBACK; diff --git a/crates/pgt_analyser/tests/specs/safety/transactionNesting/rollback.sql.snap b/crates/pgt_analyser/tests/specs/safety/transactionNesting/rollback.sql.snap new file mode 100644 index 000000000..66d4ed21e --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/transactionNesting/rollback.sql.snap @@ -0,0 +1,21 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +snapshot_kind: text +--- +# Input +``` +SELECT 1; +-- expect_lint/safety/transactionNesting +ROLLBACK; + +``` + +# Diagnostics +lint/safety/transactionNesting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Attempting to end transaction managed by migration tool. + + i Migration tools manage transactions automatically. Remove explicit transaction control. + + i Put migration statements in separate files to have them be in separate transactions. 
diff --git a/crates/pgt_configuration/src/analyser/linter/rules.rs b/crates/pgt_configuration/src/analyser/linter/rules.rs index d45199b07..8b23048fa 100644 --- a/crates/pgt_configuration/src/analyser/linter/rules.rs +++ b/crates/pgt_configuration/src/analyser/linter/rules.rs @@ -141,10 +141,32 @@ pub struct Safety { #[doc = r" It enables ALL rules for this group."] #[serde(skip_serializing_if = "Option::is_none")] pub all: Option, + #[doc = "Adding a column with a DEFAULT value may lead to a table rewrite while holding an ACCESS EXCLUSIVE lock."] + #[serde(skip_serializing_if = "Option::is_none")] + pub adding_field_with_default: + Option>, + #[doc = "Adding a foreign key constraint requires a table scan and a SHARE ROW EXCLUSIVE lock on both tables, which blocks writes."] + #[serde(skip_serializing_if = "Option::is_none")] + pub adding_foreign_key_constraint: + Option>, + #[doc = "Setting a column NOT NULL blocks reads while the table is scanned."] + #[serde(skip_serializing_if = "Option::is_none")] + pub adding_not_null_field: Option>, + #[doc = "Adding a primary key constraint results in locks and table rewrites."] + #[serde(skip_serializing_if = "Option::is_none")] + pub adding_primary_key_constraint: + Option>, #[doc = "Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required."] #[serde(skip_serializing_if = "Option::is_none")] pub adding_required_field: Option>, + #[doc = "Using CHAR(n) or CHARACTER(n) types is discouraged."] + #[serde(skip_serializing_if = "Option::is_none")] + pub ban_char_field: Option>, + #[doc = "Concurrent index creation is not allowed within a transaction."] + #[serde(skip_serializing_if = "Option::is_none")] + pub ban_concurrent_index_creation_in_transaction: + Option>, #[doc = "Dropping a column may break existing clients."] #[serde(skip_serializing_if = "Option::is_none")] pub ban_drop_column: Option>, @@ -160,21 +182,102 @@ pub struct Safety { #[doc = "Using TRUNCATE's 
CASCADE option will truncate any tables that are also foreign-keyed to the specified tables."] #[serde(skip_serializing_if = "Option::is_none")] pub ban_truncate_cascade: Option>, + #[doc = "Changing a column type may break existing clients."] + #[serde(skip_serializing_if = "Option::is_none")] + pub changing_column_type: Option>, + #[doc = "Adding constraints without NOT VALID blocks all reads and writes."] + #[serde(skip_serializing_if = "Option::is_none")] + pub constraint_missing_not_valid: + Option>, + #[doc = "Disallow adding a UNIQUE constraint without using an existing index."] + #[serde(skip_serializing_if = "Option::is_none")] + pub disallow_unique_constraint: + Option>, + #[doc = "Prefer BIGINT over smaller integer types."] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_big_int: Option>, + #[doc = "Prefer BIGINT over INT/INTEGER types."] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_bigint_over_int: + Option>, + #[doc = "Prefer BIGINT over SMALLINT types."] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_bigint_over_smallint: + Option>, + #[doc = "Prefer using IDENTITY columns over serial columns."] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_identity: Option>, + #[doc = "Prefer JSONB over JSON types."] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_jsonb: Option>, + #[doc = "Prefer statements with guards for robustness in migrations."] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_robust_stmts: Option>, + #[doc = "Prefer using TEXT over VARCHAR(n) types."] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_text_field: Option>, + #[doc = "Prefer TIMESTAMPTZ over TIMESTAMP types."] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_timestamptz: Option>, + #[doc = "Renaming columns may break existing queries and application code."] + #[serde(skip_serializing_if = "Option::is_none")] + pub renaming_column: Option>, + 
#[doc = "Renaming tables may break existing queries and application code."] + #[serde(skip_serializing_if = "Option::is_none")] + pub renaming_table: Option>, + #[doc = "Creating indexes non-concurrently can lock the table for writes."] + #[serde(skip_serializing_if = "Option::is_none")] + pub require_concurrent_index_creation: + Option>, + #[doc = "Dropping indexes non-concurrently can lock the table for reads."] + #[serde(skip_serializing_if = "Option::is_none")] + pub require_concurrent_index_deletion: + Option>, + #[doc = "Detects problematic transaction nesting that could lead to unexpected behavior."] + #[serde(skip_serializing_if = "Option::is_none")] + pub transaction_nesting: Option>, } impl Safety { const GROUP_NAME: &'static str = "safety"; pub(crate) const GROUP_RULES: &'static [&'static str] = &[ + "addingFieldWithDefault", + "addingForeignKeyConstraint", + "addingNotNullField", + "addingPrimaryKeyConstraint", "addingRequiredField", + "banCharField", + "banConcurrentIndexCreationInTransaction", "banDropColumn", "banDropDatabase", "banDropNotNull", "banDropTable", "banTruncateCascade", + "changingColumnType", + "constraintMissingNotValid", + "disallowUniqueConstraint", + "preferBigInt", + "preferBigintOverInt", + "preferBigintOverSmallint", + "preferIdentity", + "preferJsonb", + "preferRobustStmts", + "preferTextField", + "preferTimestamptz", + "renamingColumn", + "renamingTable", + "requireConcurrentIndexCreation", + "requireConcurrentIndexDeletion", + "transactionNesting", ]; const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), - RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6]), + RuleFilter::Rule(Self::GROUP_NAME, 
Self::GROUP_RULES[7]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10]), ]; const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), @@ -183,6 +286,28 @@ impl Safety { RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[12]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[13]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[14]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[15]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[16]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[17]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[18]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[19]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[20]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[21]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[22]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[23]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[24]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[25]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[26]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[27]), ]; #[doc = r" Retrieves the recommended rules"] pub(crate) fn is_recommended_true(&self) -> bool { @@ -199,70 +324,290 @@ impl Safety { } pub(crate) fn get_enabled_rules(&self) -> 
FxHashSet> { let mut index_set = FxHashSet::default(); - if let Some(rule) = self.adding_required_field.as_ref() { + if let Some(rule) = self.adding_field_with_default.as_ref() { if rule.is_enabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); } } - if let Some(rule) = self.ban_drop_column.as_ref() { + if let Some(rule) = self.adding_foreign_key_constraint.as_ref() { if rule.is_enabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); } } - if let Some(rule) = self.ban_drop_database.as_ref() { + if let Some(rule) = self.adding_not_null_field.as_ref() { if rule.is_enabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); } } - if let Some(rule) = self.ban_drop_not_null.as_ref() { + if let Some(rule) = self.adding_primary_key_constraint.as_ref() { if rule.is_enabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); } } - if let Some(rule) = self.ban_drop_table.as_ref() { + if let Some(rule) = self.adding_required_field.as_ref() { if rule.is_enabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); } } - if let Some(rule) = self.ban_truncate_cascade.as_ref() { + if let Some(rule) = self.ban_char_field.as_ref() { if rule.is_enabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5])); } } + if let Some(rule) = self.ban_concurrent_index_creation_in_transaction.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6])); + } + } + if let Some(rule) = self.ban_drop_column.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7])); + } + } + if let Some(rule) = self.ban_drop_database.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8])); + } + } + if let Some(rule) = self.ban_drop_not_null.as_ref() { + if rule.is_enabled() { + 
index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9])); + } + } + if let Some(rule) = self.ban_drop_table.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10])); + } + } + if let Some(rule) = self.ban_truncate_cascade.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11])); + } + } + if let Some(rule) = self.changing_column_type.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[12])); + } + } + if let Some(rule) = self.constraint_missing_not_valid.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[13])); + } + } + if let Some(rule) = self.disallow_unique_constraint.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[14])); + } + } + if let Some(rule) = self.prefer_big_int.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[15])); + } + } + if let Some(rule) = self.prefer_bigint_over_int.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[16])); + } + } + if let Some(rule) = self.prefer_bigint_over_smallint.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[17])); + } + } + if let Some(rule) = self.prefer_identity.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[18])); + } + } + if let Some(rule) = self.prefer_jsonb.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[19])); + } + } + if let Some(rule) = self.prefer_robust_stmts.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[20])); + } + } + if let Some(rule) = 
self.prefer_text_field.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[21])); + } + } + if let Some(rule) = self.prefer_timestamptz.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[22])); + } + } + if let Some(rule) = self.renaming_column.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[23])); + } + } + if let Some(rule) = self.renaming_table.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[24])); + } + } + if let Some(rule) = self.require_concurrent_index_creation.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[25])); + } + } + if let Some(rule) = self.require_concurrent_index_deletion.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[26])); + } + } + if let Some(rule) = self.transaction_nesting.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[27])); + } + } index_set } pub(crate) fn get_disabled_rules(&self) -> FxHashSet> { let mut index_set = FxHashSet::default(); - if let Some(rule) = self.adding_required_field.as_ref() { + if let Some(rule) = self.adding_field_with_default.as_ref() { if rule.is_disabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); } } - if let Some(rule) = self.ban_drop_column.as_ref() { + if let Some(rule) = self.adding_foreign_key_constraint.as_ref() { if rule.is_disabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); } } - if let Some(rule) = self.ban_drop_database.as_ref() { + if let Some(rule) = self.adding_not_null_field.as_ref() { if rule.is_disabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); } } - if let Some(rule) = 
self.ban_drop_not_null.as_ref() { + if let Some(rule) = self.adding_primary_key_constraint.as_ref() { if rule.is_disabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); } } - if let Some(rule) = self.ban_drop_table.as_ref() { + if let Some(rule) = self.adding_required_field.as_ref() { if rule.is_disabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); } } - if let Some(rule) = self.ban_truncate_cascade.as_ref() { + if let Some(rule) = self.ban_char_field.as_ref() { if rule.is_disabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5])); } } + if let Some(rule) = self.ban_concurrent_index_creation_in_transaction.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6])); + } + } + if let Some(rule) = self.ban_drop_column.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7])); + } + } + if let Some(rule) = self.ban_drop_database.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8])); + } + } + if let Some(rule) = self.ban_drop_not_null.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9])); + } + } + if let Some(rule) = self.ban_drop_table.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10])); + } + } + if let Some(rule) = self.ban_truncate_cascade.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11])); + } + } + if let Some(rule) = self.changing_column_type.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[12])); + } + } + if let Some(rule) = self.constraint_missing_not_valid.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, 
Self::GROUP_RULES[13])); + } + } + if let Some(rule) = self.disallow_unique_constraint.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[14])); + } + } + if let Some(rule) = self.prefer_big_int.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[15])); + } + } + if let Some(rule) = self.prefer_bigint_over_int.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[16])); + } + } + if let Some(rule) = self.prefer_bigint_over_smallint.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[17])); + } + } + if let Some(rule) = self.prefer_identity.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[18])); + } + } + if let Some(rule) = self.prefer_jsonb.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[19])); + } + } + if let Some(rule) = self.prefer_robust_stmts.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[20])); + } + } + if let Some(rule) = self.prefer_text_field.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[21])); + } + } + if let Some(rule) = self.prefer_timestamptz.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[22])); + } + } + if let Some(rule) = self.renaming_column.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[23])); + } + } + if let Some(rule) = self.renaming_table.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[24])); + } + } + if let Some(rule) = self.require_concurrent_index_creation.as_ref() { + if rule.is_disabled() 
{ + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[25])); + } + } + if let Some(rule) = self.require_concurrent_index_deletion.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[26])); + } + } + if let Some(rule) = self.transaction_nesting.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[27])); + } + } index_set } #[doc = r" Checks if, given a rule name, matches one of the rules contained in this category"] @@ -292,12 +637,34 @@ impl Safety { } pub(crate) fn severity(rule_name: &str) -> Severity { match rule_name { + "addingFieldWithDefault" => Severity::Warning, + "addingForeignKeyConstraint" => Severity::Warning, + "addingNotNullField" => Severity::Warning, + "addingPrimaryKeyConstraint" => Severity::Warning, "addingRequiredField" => Severity::Error, + "banCharField" => Severity::Warning, + "banConcurrentIndexCreationInTransaction" => Severity::Error, "banDropColumn" => Severity::Warning, "banDropDatabase" => Severity::Warning, "banDropNotNull" => Severity::Warning, "banDropTable" => Severity::Warning, "banTruncateCascade" => Severity::Error, + "changingColumnType" => Severity::Warning, + "constraintMissingNotValid" => Severity::Warning, + "disallowUniqueConstraint" => Severity::Error, + "preferBigInt" => Severity::Warning, + "preferBigintOverInt" => Severity::Warning, + "preferBigintOverSmallint" => Severity::Warning, + "preferIdentity" => Severity::Warning, + "preferJsonb" => Severity::Warning, + "preferRobustStmts" => Severity::Warning, + "preferTextField" => Severity::Warning, + "preferTimestamptz" => Severity::Warning, + "renamingColumn" => Severity::Warning, + "renamingTable" => Severity::Warning, + "requireConcurrentIndexCreation" => Severity::Warning, + "requireConcurrentIndexDeletion" => Severity::Warning, + "transactionNesting" => Severity::Warning, _ => unreachable!(), } } @@ -306,10 +673,34 @@ impl Safety { 
rule_name: &str, ) -> Option<(RulePlainConfiguration, Option)> { match rule_name { + "addingFieldWithDefault" => self + .adding_field_with_default + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "addingForeignKeyConstraint" => self + .adding_foreign_key_constraint + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "addingNotNullField" => self + .adding_not_null_field + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "addingPrimaryKeyConstraint" => self + .adding_primary_key_constraint + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), "addingRequiredField" => self .adding_required_field .as_ref() .map(|conf| (conf.level(), conf.get_options())), + "banCharField" => self + .ban_char_field + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "banConcurrentIndexCreationInTransaction" => self + .ban_concurrent_index_creation_in_transaction + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), "banDropColumn" => self .ban_drop_column .as_ref() @@ -330,6 +721,70 @@ impl Safety { .ban_truncate_cascade .as_ref() .map(|conf| (conf.level(), conf.get_options())), + "changingColumnType" => self + .changing_column_type + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "constraintMissingNotValid" => self + .constraint_missing_not_valid + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "disallowUniqueConstraint" => self + .disallow_unique_constraint + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "preferBigInt" => self + .prefer_big_int + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "preferBigintOverInt" => self + .prefer_bigint_over_int + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "preferBigintOverSmallint" => self + .prefer_bigint_over_smallint + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "preferIdentity" => self + .prefer_identity + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + 
"preferJsonb" => self + .prefer_jsonb + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "preferRobustStmts" => self + .prefer_robust_stmts + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "preferTextField" => self + .prefer_text_field + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "preferTimestamptz" => self + .prefer_timestamptz + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "renamingColumn" => self + .renaming_column + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "renamingTable" => self + .renaming_table + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "requireConcurrentIndexCreation" => self + .require_concurrent_index_creation + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "requireConcurrentIndexDeletion" => self + .require_concurrent_index_deletion + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "transactionNesting" => self + .transaction_nesting + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), _ => None, } } diff --git a/crates/pgt_diagnostics_categories/src/categories.rs b/crates/pgt_diagnostics_categories/src/categories.rs index 14df90b9e..416416ef9 100644 --- a/crates/pgt_diagnostics_categories/src/categories.rs +++ b/crates/pgt_diagnostics_categories/src/categories.rs @@ -13,12 +13,34 @@ // must be between `define_categories! {\n` and `\n ;\n`. define_categories! 
{ + "lint/safety/addingFieldWithDefault": "https://pgtools.dev/latest/rules/adding-field-with-default", + "lint/safety/addingForeignKeyConstraint": "https://pgtools.dev/latest/rules/adding-foreign-key-constraint", + "lint/safety/addingNotNullField": "https://pgtools.dev/latest/rules/adding-not-null-field", + "lint/safety/addingPrimaryKeyConstraint": "https://pgtools.dev/latest/rules/adding-primary-key-constraint", "lint/safety/addingRequiredField": "https://pgtools.dev/latest/rules/adding-required-field", + "lint/safety/banCharField": "https://pgtools.dev/latest/rules/ban-char-field", + "lint/safety/banConcurrentIndexCreationInTransaction": "https://pgtools.dev/latest/rules/ban-concurrent-index-creation-in-transaction", "lint/safety/banDropColumn": "https://pgtools.dev/latest/rules/ban-drop-column", "lint/safety/banDropDatabase": "https://pgtools.dev/latest/rules/ban-drop-database", "lint/safety/banDropNotNull": "https://pgtools.dev/latest/rules/ban-drop-not-null", "lint/safety/banDropTable": "https://pgtools.dev/latest/rules/ban-drop-table", "lint/safety/banTruncateCascade": "https://pgtools.dev/latest/rules/ban-truncate-cascade", + "lint/safety/changingColumnType": "https://pgtools.dev/latest/rules/changing-column-type", + "lint/safety/constraintMissingNotValid": "https://pgtools.dev/latest/rules/constraint-missing-not-valid", + "lint/safety/disallowUniqueConstraint": "https://pgtools.dev/latest/rules/disallow-unique-constraint", + "lint/safety/preferBigInt": "https://pgtools.dev/latest/rules/prefer-big-int", + "lint/safety/preferBigintOverInt": "https://pgtools.dev/latest/rules/prefer-bigint-over-int", + "lint/safety/preferBigintOverSmallint": "https://pgtools.dev/latest/rules/prefer-bigint-over-smallint", + "lint/safety/preferIdentity": "https://pgtools.dev/latest/rules/prefer-identity", + "lint/safety/preferJsonb": "https://pgtools.dev/latest/rules/prefer-jsonb", + "lint/safety/preferRobustStmts": "https://pgtools.dev/latest/rules/prefer-robust-stmts", + 
"lint/safety/preferTextField": "https://pgtools.dev/latest/rules/prefer-text-field", + "lint/safety/preferTimestamptz": "https://pgtools.dev/latest/rules/prefer-timestamptz", + "lint/safety/renamingColumn": "https://pgtools.dev/latest/rules/renaming-column", + "lint/safety/renamingTable": "https://pgtools.dev/latest/rules/renaming-table", + "lint/safety/requireConcurrentIndexCreation": "https://pgtools.dev/latest/rules/require-concurrent-index-creation", + "lint/safety/requireConcurrentIndexDeletion": "https://pgtools.dev/latest/rules/require-concurrent-index-deletion", + "lint/safety/transactionNesting": "https://pgtools.dev/latest/rules/transaction-nesting", // end lint rules ; // General categories diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs index 63953590b..1f972395b 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -1660,7 +1660,20 @@ ALTER TABLE ONLY "public"."campaign_contact_list" loop { match receiver.next().await { Some(ServerNotification::PublishDiagnostics(msg)) => { - if !msg.diagnostics.is_empty() { + if msg + .diagnostics + .iter() + .filter(|d| { + d.code.as_ref().is_none_or(|c| match c { + lsp::NumberOrString::Number(_) => true, + lsp::NumberOrString::String(s) => { + s != "lint/safety/addingForeignKeyConstraint" + } + }) + }) + .count() + > 0 + { return true; } } diff --git a/crates/pgt_schema_cache/src/queries/versions.sql b/crates/pgt_schema_cache/src/queries/versions.sql index c756e9c57..898f223be 100644 --- a/crates/pgt_schema_cache/src/queries/versions.sql +++ b/crates/pgt_schema_cache/src/queries/versions.sql @@ -1,10 +1,11 @@ select version(), current_setting('server_version_num') :: int8 AS version_num, + current_setting('server_version_num') :: int8 / 10000 AS major_version, ( select count(*) :: int8 AS active_connections FROM pg_stat_activity ) AS active_connections, - current_setting('max_connections') :: int8 AS max_connections; \ No newline at end of file + 
current_setting('max_connections') :: int8 AS max_connections; diff --git a/crates/pgt_schema_cache/src/schema_cache.rs b/crates/pgt_schema_cache/src/schema_cache.rs index 6fbdfcdc3..227b49883 100644 --- a/crates/pgt_schema_cache/src/schema_cache.rs +++ b/crates/pgt_schema_cache/src/schema_cache.rs @@ -15,7 +15,7 @@ pub struct SchemaCache { pub tables: Vec, pub functions: Vec, pub types: Vec, - pub versions: Vec, + pub version: Version, pub columns: Vec, pub policies: Vec, pub extensions: Vec, @@ -49,12 +49,17 @@ impl SchemaCache { Extension::load(pool), )?; + let version = versions + .into_iter() + .next() + .expect("Expected at least one version row"); + Ok(SchemaCache { schemas, tables, functions, types, - versions, + version, columns, policies, triggers, diff --git a/crates/pgt_schema_cache/src/versions.rs b/crates/pgt_schema_cache/src/versions.rs index a4769c55a..d4c212995 100644 --- a/crates/pgt_schema_cache/src/versions.rs +++ b/crates/pgt_schema_cache/src/versions.rs @@ -6,6 +6,7 @@ use crate::schema_cache::SchemaCacheItem; pub struct Version { pub version: Option, pub version_num: Option, + pub major_version: Option, pub active_connections: Option, pub max_connections: Option, } diff --git a/docs/reference/rule_sources.md b/docs/reference/rule_sources.md index 087555aa6..d8aea6fc6 100644 --- a/docs/reference/rule_sources.md +++ b/docs/reference/rule_sources.md @@ -3,12 +3,37 @@ Many rules are inspired by or directly ported from other tools. 
This page lists ## Exclusive rules _No exclusive rules available._ ## Rules from other sources +### Eugene +| Eugene Rule Name | Rule Name | +| ---- | ---- | +| [E3](https://kaveland.no/eugene/hints/E3/index.html) |[preferJsonb](../rules/prefer-jsonb) | ### Squawk | Squawk Rule Name | Rule Name | | ---- | ---- | +| [adding-field-with-default](https://squawkhq.com/docs/adding-field-with-default) |[addingFieldWithDefault](../rules/adding-field-with-default) | +| [adding-foreign-key-constraint](https://squawkhq.com/docs/adding-foreign-key-constraint) |[addingForeignKeyConstraint](../rules/adding-foreign-key-constraint) | +| [adding-not-null-field](https://squawkhq.com/docs/adding-not-null-field) |[addingNotNullField](../rules/adding-not-null-field) | | [adding-required-field](https://squawkhq.com/docs/adding-required-field) |[addingRequiredField](../rules/adding-required-field) | +| [adding-serial-primary-key-field](https://squawkhq.com/docs/adding-serial-primary-key-field) |[addingPrimaryKeyConstraint](../rules/adding-primary-key-constraint) | +| [ban-char-field](https://squawkhq.com/docs/ban-char-field) |[banCharField](../rules/ban-char-field) | +| [ban-concurrent-index-creation-in-transaction](https://squawkhq.com/docs/ban-concurrent-index-creation-in-transaction) |[banConcurrentIndexCreationInTransaction](../rules/ban-concurrent-index-creation-in-transaction) | | [ban-drop-column](https://squawkhq.com/docs/ban-drop-column) |[banDropColumn](../rules/ban-drop-column) | | [ban-drop-database](https://squawkhq.com/docs/ban-drop-database) |[banDropDatabase](../rules/ban-drop-database) | | [ban-drop-not-null](https://squawkhq.com/docs/ban-drop-not-null) |[banDropNotNull](../rules/ban-drop-not-null) | | [ban-drop-table](https://squawkhq.com/docs/ban-drop-table) |[banDropTable](../rules/ban-drop-table) | | [ban-truncate-cascade](https://squawkhq.com/docs/ban-truncate-cascade) |[banTruncateCascade](../rules/ban-truncate-cascade) | +| 
[changing-column-type](https://squawkhq.com/docs/changing-column-type) |[changingColumnType](../rules/changing-column-type) | +| [constraint-missing-not-valid](https://squawkhq.com/docs/constraint-missing-not-valid) |[constraintMissingNotValid](../rules/constraint-missing-not-valid) | +| [disallow-unique-constraint](https://squawkhq.com/docs/disallow-unique-constraint) |[disallowUniqueConstraint](../rules/disallow-unique-constraint) | +| [prefer-big-int](https://squawkhq.com/docs/prefer-big-int) |[preferBigInt](../rules/prefer-big-int) | +| [prefer-bigint-over-int](https://squawkhq.com/docs/prefer-bigint-over-int) |[preferBigintOverInt](../rules/prefer-bigint-over-int) | +| [prefer-bigint-over-smallint](https://squawkhq.com/docs/prefer-bigint-over-smallint) |[preferBigintOverSmallint](../rules/prefer-bigint-over-smallint) | +| [prefer-identity](https://squawkhq.com/docs/prefer-identity) |[preferIdentity](../rules/prefer-identity) | +| [prefer-robust-stmts](https://squawkhq.com/docs/prefer-robust-stmts) |[preferRobustStmts](../rules/prefer-robust-stmts) | +| [prefer-text-field](https://squawkhq.com/docs/prefer-text-field) |[preferTextField](../rules/prefer-text-field) | +| [prefer-timestamptz](https://squawkhq.com/docs/prefer-timestamptz) |[preferTimestamptz](../rules/prefer-timestamptz) | +| [renaming-column](https://squawkhq.com/docs/renaming-column) |[renamingColumn](../rules/renaming-column) | +| [renaming-table](https://squawkhq.com/docs/renaming-table) |[renamingTable](../rules/renaming-table) | +| [require-concurrent-index-creation](https://squawkhq.com/docs/require-concurrent-index-creation) |[requireConcurrentIndexCreation](../rules/require-concurrent-index-creation) | +| [require-concurrent-index-deletion](https://squawkhq.com/docs/require-concurrent-index-deletion) |[requireConcurrentIndexDeletion](../rules/require-concurrent-index-deletion) | +| [transaction-nesting](https://squawkhq.com/docs/transaction-nesting) 
|[transactionNesting](../rules/transaction-nesting) | diff --git a/docs/reference/rules.md b/docs/reference/rules.md index 2ff8361b7..6a946ff62 100644 --- a/docs/reference/rules.md +++ b/docs/reference/rules.md @@ -12,12 +12,34 @@ Rules that detect potential safety issues in your code. | Rule name | Description | Properties | | --- | --- | --- | +| [addingFieldWithDefault](./adding-field-with-default) | Adding a column with a DEFAULT value may lead to a table rewrite while holding an ACCESS EXCLUSIVE lock. | ✅ | +| [addingForeignKeyConstraint](./adding-foreign-key-constraint) | Adding a foreign key constraint requires a table scan and a SHARE ROW EXCLUSIVE lock on both tables, which blocks writes. | ✅ | +| [addingNotNullField](./adding-not-null-field) | Setting a column NOT NULL blocks reads while the table is scanned. | ✅ | +| [addingPrimaryKeyConstraint](./adding-primary-key-constraint) | Adding a primary key constraint results in locks and table rewrites. | ✅ | | [addingRequiredField](./adding-required-field) | Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required. | | +| [banCharField](./ban-char-field) | Using CHAR(n) or CHARACTER(n) types is discouraged. | | +| [banConcurrentIndexCreationInTransaction](./ban-concurrent-index-creation-in-transaction) | Concurrent index creation is not allowed within a transaction. | ✅ | | [banDropColumn](./ban-drop-column) | Dropping a column may break existing clients. | ✅ | | [banDropDatabase](./ban-drop-database) | Dropping a database may break existing clients (and everything else, really). | | | [banDropNotNull](./ban-drop-not-null) | Dropping a NOT NULL constraint may break existing clients. | ✅ | | [banDropTable](./ban-drop-table) | Dropping a table may break existing clients. | ✅ | | [banTruncateCascade](./ban-truncate-cascade) | Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables. 
| | +| [changingColumnType](./changing-column-type) | Changing a column type may break existing clients. | | +| [constraintMissingNotValid](./constraint-missing-not-valid) | Adding constraints without NOT VALID blocks all reads and writes. | | +| [disallowUniqueConstraint](./disallow-unique-constraint) | Disallow adding a UNIQUE constraint without using an existing index. | | +| [preferBigInt](./prefer-big-int) | Prefer BIGINT over smaller integer types. | | +| [preferBigintOverInt](./prefer-bigint-over-int) | Prefer BIGINT over INT/INTEGER types. | | +| [preferBigintOverSmallint](./prefer-bigint-over-smallint) | Prefer BIGINT over SMALLINT types. | | +| [preferIdentity](./prefer-identity) | Prefer using IDENTITY columns over serial columns. | | +| [preferJsonb](./prefer-jsonb) | Prefer JSONB over JSON types. | | +| [preferRobustStmts](./prefer-robust-stmts) | Prefer statements with guards for robustness in migrations. | | +| [preferTextField](./prefer-text-field) | Prefer using TEXT over VARCHAR(n) types. | | +| [preferTimestamptz](./prefer-timestamptz) | Prefer TIMESTAMPTZ over TIMESTAMP types. | | +| [renamingColumn](./renaming-column) | Renaming columns may break existing queries and application code. | | +| [renamingTable](./renaming-table) | Renaming tables may break existing queries and application code. | | +| [requireConcurrentIndexCreation](./require-concurrent-index-creation) | Creating indexes non-concurrently can lock the table for writes. | | +| [requireConcurrentIndexDeletion](./require-concurrent-index-deletion) | Dropping indexes non-concurrently can lock the table for reads. | | +| [transactionNesting](./transaction-nesting) | Detects problematic transaction nesting that could lead to unexpected behavior. 
| | [//]: # (END RULES_INDEX) diff --git a/docs/reference/rules/adding-field-with-default.md b/docs/reference/rules/adding-field-with-default.md new file mode 100644 index 000000000..644ac91fc --- /dev/null +++ b/docs/reference/rules/adding-field-with-default.md @@ -0,0 +1,69 @@ +# addingFieldWithDefault +**Diagnostic Category: `lint/safety/addingFieldWithDefault`** + +**Since**: `vnext` + +> [!NOTE] +> This rule is recommended. A diagnostic error will appear when linting your code. + +**Sources**: +- Inspired from: squawk/adding-field-with-default + +## Description +Adding a column with a DEFAULT value may lead to a table rewrite while holding an ACCESS EXCLUSIVE lock. + +In PostgreSQL versions before 11, adding a column with a DEFAULT value causes a full table rewrite, +which holds an ACCESS EXCLUSIVE lock on the table and blocks all reads and writes. + +In PostgreSQL 11+, this behavior was optimized for non-volatile defaults. However: + +- Volatile default values (like random() or custom functions) still cause table rewrites +- Generated columns (GENERATED ALWAYS AS) always require table rewrites +- Non-volatile defaults are safe in PostgreSQL 11+ + +## Examples + +### Invalid + +```sql +ALTER TABLE "core_recipe" ADD COLUMN "foo" integer DEFAULT 10; +``` + +```sh +code-block.sql:1:1 lint/safety/addingFieldWithDefault ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Adding a column with a DEFAULT value causes a table rewrite. + + > 1 │ ALTER TABLE "core_recipe" ADD COLUMN "foo" integer DEFAULT 10; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i This operation requires an ACCESS EXCLUSIVE lock and rewrites the entire table. + + i Add the column without a default, then set the default in a separate statement. 
+ + +``` + +### Valid + +```sql +ALTER TABLE "core_recipe" ADD COLUMN "foo" integer; +ALTER TABLE "core_recipe" ALTER COLUMN "foo" SET DEFAULT 10; +-- Then backfill and add NOT NULL constraint if needed +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "addingFieldWithDefault": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/adding-foreign-key-constraint.md b/docs/reference/rules/adding-foreign-key-constraint.md new file mode 100644 index 000000000..19971ae3e --- /dev/null +++ b/docs/reference/rules/adding-foreign-key-constraint.md @@ -0,0 +1,88 @@ +# addingForeignKeyConstraint +**Diagnostic Category: `lint/safety/addingForeignKeyConstraint`** + +**Since**: `vnext` + +> [!NOTE] +> This rule is recommended. A diagnostic error will appear when linting your code. + +**Sources**: +- Inspired from: squawk/adding-foreign-key-constraint + +## Description +Adding a foreign key constraint requires a table scan and a SHARE ROW EXCLUSIVE lock on both tables, which blocks writes. + +Adding a foreign key constraint to an existing table can cause downtime by locking both tables while +verifying the constraint. PostgreSQL needs to check that all existing values in the referencing +column exist in the referenced table. + +Instead, add the constraint as NOT VALID in one transaction, then VALIDATE it in another transaction. +This approach only takes a SHARE UPDATE EXCLUSIVE lock when validating, allowing concurrent writes. + +## Examples + +### Invalid + +```sql +ALTER TABLE "email" ADD CONSTRAINT "fk_user" FOREIGN KEY ("user_id") REFERENCES "user" ("id"); +``` + +```sh +code-block.sql:1:1 lint/safety/addingForeignKeyConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Adding a foreign key constraint requires a table scan and locks on both tables. 
+ + > 1 │ ALTER TABLE "email" ADD CONSTRAINT "fk_user" FOREIGN KEY ("user_id") REFERENCES "user" ("id"); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i This will block writes to both the referencing and referenced tables while PostgreSQL verifies the constraint. + + i Add the constraint as NOT VALID first, then VALIDATE it in a separate transaction. + + +``` + +```sql +ALTER TABLE "emails" ADD COLUMN "user_id" INT REFERENCES "user" ("id"); +``` + +```sh +code-block.sql:1:1 lint/safety/addingForeignKeyConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Adding a column with a foreign key constraint requires a table scan and locks. + + > 1 │ ALTER TABLE "emails" ADD COLUMN "user_id" INT REFERENCES "user" ("id"); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Using REFERENCES when adding a column will block writes while verifying the constraint. + + i Add the column without the constraint first, then add the constraint as NOT VALID and VALIDATE it separately. + + +``` + +### Valid + +```sql +-- First add the constraint as NOT VALID +ALTER TABLE "email" ADD CONSTRAINT "fk_user" FOREIGN KEY ("user_id") REFERENCES "user" ("id") NOT VALID; +-- Then validate it in a separate transaction +ALTER TABLE "email" VALIDATE CONSTRAINT "fk_user"; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "addingForeignKeyConstraint": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/adding-not-null-field.md b/docs/reference/rules/adding-not-null-field.md new file mode 100644 index 000000000..f4da097c9 --- /dev/null +++ b/docs/reference/rules/adding-not-null-field.md @@ -0,0 +1,71 @@ +# addingNotNullField +**Diagnostic Category: `lint/safety/addingNotNullField`** + +**Since**: `vnext` + +> [!NOTE] +> This rule is recommended. A diagnostic error will appear when linting your code. 
+ +**Sources**: +- Inspired from: squawk/adding-not-null-field + +## Description +Setting a column NOT NULL blocks reads while the table is scanned. + +In PostgreSQL versions before 11, adding a NOT NULL constraint to an existing column requires +a full table scan to verify that all existing rows satisfy the constraint. This operation +takes an ACCESS EXCLUSIVE lock, blocking all reads and writes. + +In PostgreSQL 11+, this operation is much faster as it can skip the full table scan for +newly added columns with default values. + +Instead of using SET NOT NULL, consider using a CHECK constraint with NOT VALID, then +validating it in a separate transaction. This allows reads and writes to continue. + +## Examples + +### Invalid + +```sql +ALTER TABLE "core_recipe" ALTER COLUMN "foo" SET NOT NULL; +``` + +```sh +code-block.sql:1:1 lint/safety/addingNotNullField ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Setting a column NOT NULL blocks reads while the table is scanned. + + > 1 │ ALTER TABLE "core_recipe" ALTER COLUMN "foo" SET NOT NULL; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i This operation requires an ACCESS EXCLUSIVE lock and a full table scan to verify all rows. + + i Use a CHECK constraint with NOT VALID instead, then validate it in a separate transaction. 
+ + +``` + +### Valid + +```sql +-- First add a CHECK constraint as NOT VALID +ALTER TABLE "core_recipe" ADD CONSTRAINT foo_not_null CHECK (foo IS NOT NULL) NOT VALID; +-- Then validate it in a separate transaction +ALTER TABLE "core_recipe" VALIDATE CONSTRAINT foo_not_null; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "addingNotNullField": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/adding-primary-key-constraint.md b/docs/reference/rules/adding-primary-key-constraint.md new file mode 100644 index 000000000..644788807 --- /dev/null +++ b/docs/reference/rules/adding-primary-key-constraint.md @@ -0,0 +1,85 @@ +# addingPrimaryKeyConstraint +**Diagnostic Category: `lint/safety/addingPrimaryKeyConstraint`** + +**Since**: `vnext` + +> [!NOTE] +> This rule is recommended. A diagnostic error will appear when linting your code. + +**Sources**: +- Inspired from: squawk/adding-serial-primary-key-field + +## Description +Adding a primary key constraint results in locks and table rewrites. + +When you add a PRIMARY KEY constraint, PostgreSQL needs to scan the entire table +to verify uniqueness and build the underlying index. This requires an ACCESS EXCLUSIVE +lock which blocks all reads and writes. + +## Examples + +### Invalid + +```sql +ALTER TABLE users ADD PRIMARY KEY (id); +``` + +```sh +code-block.sql:1:1 lint/safety/addingPrimaryKeyConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Adding a PRIMARY KEY constraint results in locks and table rewrites. + + > 1 │ ALTER TABLE users ADD PRIMARY KEY (id); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Adding a PRIMARY KEY constraint requires an ACCESS EXCLUSIVE lock which blocks reads. + + i Add the PRIMARY KEY constraint USING an index. + + +``` + +```sql +ALTER TABLE items ADD COLUMN id SERIAL PRIMARY KEY; +``` + +```sh +code-block.sql:1:1 lint/safety/addingPrimaryKeyConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! 
Adding a PRIMARY KEY constraint results in locks and table rewrites. + + > 1 │ ALTER TABLE items ADD COLUMN id SERIAL PRIMARY KEY; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Adding a PRIMARY KEY constraint requires an ACCESS EXCLUSIVE lock which blocks reads. + + i Add the PRIMARY KEY constraint USING an index. + + +``` + +### Valid + +```sql +-- First, create a unique index concurrently +CREATE UNIQUE INDEX CONCURRENTLY items_pk ON items (id); +-- Then add the primary key using the index +ALTER TABLE items ADD CONSTRAINT items_pk PRIMARY KEY USING INDEX items_pk; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "addingPrimaryKeyConstraint": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/ban-char-field.md b/docs/reference/rules/ban-char-field.md new file mode 100644 index 000000000..c9be7d962 --- /dev/null +++ b/docs/reference/rules/ban-char-field.md @@ -0,0 +1,72 @@ +# banCharField +**Diagnostic Category: `lint/safety/banCharField`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/ban-char-field + +## Description +Using CHAR(n) or CHARACTER(n) types is discouraged. + +CHAR types are fixed-length and padded with spaces, which can lead to unexpected behavior +when comparing strings or concatenating values. They also waste storage space when values +are shorter than the declared length. + +Use VARCHAR or TEXT instead for variable-length character data. + +## Examples + +### Invalid + +```sql +CREATE TABLE "core_bar" ( + "id" serial NOT NULL PRIMARY KEY, + "alpha" char(100) NOT NULL +); +``` + +```sh +code-block.sql:1:1 lint/safety/banCharField ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! CHAR type is discouraged due to space padding behavior. 
+ + > 1 │ CREATE TABLE "core_bar" ( + │ ^^^^^^^^^^^^^^^^^^^^^^^^^ + > 2 │ "id" serial NOT NULL PRIMARY KEY, + > 3 │ "alpha" char(100) NOT NULL + > 4 │ ); + │ ^^ + 5 │ + + i CHAR types are fixed-length and padded with spaces, which can lead to unexpected behavior. + + i Use VARCHAR or TEXT instead for variable-length character data. + + +``` + +### Valid + +```sql +CREATE TABLE "core_bar" ( + "id" serial NOT NULL PRIMARY KEY, + "alpha" varchar(100) NOT NULL +); +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "banCharField": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/ban-concurrent-index-creation-in-transaction.md b/docs/reference/rules/ban-concurrent-index-creation-in-transaction.md new file mode 100644 index 000000000..8e7e10c34 --- /dev/null +++ b/docs/reference/rules/ban-concurrent-index-creation-in-transaction.md @@ -0,0 +1,43 @@ +# banConcurrentIndexCreationInTransaction +**Diagnostic Category: `lint/safety/banConcurrentIndexCreationInTransaction`** + +**Since**: `vnext` + +> [!NOTE] +> This rule is recommended. A diagnostic error will appear when linting your code. + +**Sources**: +- Inspired from: squawk/ban-concurrent-index-creation-in-transaction + +## Description +Concurrent index creation is not allowed within a transaction. + +`CREATE INDEX CONCURRENTLY` cannot be used within a transaction block. This will cause an error in Postgres. + +Migration tools usually run each migration in a transaction, so using `CREATE INDEX CONCURRENTLY` will fail in such tools. 
+ +## Examples + +### Invalid + +```sql +CREATE INDEX CONCURRENTLY "field_name_idx" ON "table_name" ("field_name"); +``` + +```sh +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "banConcurrentIndexCreationInTransaction": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/changing-column-type.md b/docs/reference/rules/changing-column-type.md new file mode 100644 index 000000000..ac5d2cd84 --- /dev/null +++ b/docs/reference/rules/changing-column-type.md @@ -0,0 +1,54 @@ +# changingColumnType +**Diagnostic Category: `lint/safety/changingColumnType`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/changing-column-type + +## Description +Changing a column type may break existing clients. + +Changing a column's data type requires an exclusive lock on the table while the entire table is rewritten. +This can take a long time for large tables and will block reads and writes. + +Instead of changing the type directly, consider creating a new column with the desired type, +migrating the data, and then dropping the old column. + +## Examples + +### Invalid + +```sql +ALTER TABLE "core_recipe" ALTER COLUMN "edits" TYPE text USING "edits"::text; +``` + +```sh +code-block.sql:1:1 lint/safety/changingColumnType ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Changing a column type requires a table rewrite and blocks reads and writes. + + > 1 │ ALTER TABLE "core_recipe" ALTER COLUMN "edits" TYPE text USING "edits"::text; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Consider creating a new column with the desired type, migrating data, and then dropping the old column. 
+ + +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "changingColumnType": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/constraint-missing-not-valid.md b/docs/reference/rules/constraint-missing-not-valid.md new file mode 100644 index 000000000..a22ed85a2 --- /dev/null +++ b/docs/reference/rules/constraint-missing-not-valid.md @@ -0,0 +1,60 @@ +# constraintMissingNotValid +**Diagnostic Category: `lint/safety/constraintMissingNotValid`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/constraint-missing-not-valid + +## Description +Adding constraints without NOT VALID blocks all reads and writes. + +When adding a CHECK or FOREIGN KEY constraint, PostgreSQL must validate all existing rows, +which requires a full table scan. This blocks reads and writes for the duration. + +Instead, add the constraint with NOT VALID first, then VALIDATE CONSTRAINT in a separate +transaction. This allows reads and writes to continue while validation happens. + +## Examples + +### Invalid + +```sql +ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address); +``` + +```sh +code-block.sql:1:1 lint/safety/constraintMissingNotValid ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Adding a constraint without NOT VALID will block reads and writes while validating existing rows. + + > 1 │ ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Add the constraint as NOT VALID in one transaction, then run VALIDATE CONSTRAINT in a separate transaction. 
+ + +``` + +### Valid + +```sql +ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) NOT VALID; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "constraintMissingNotValid": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/disallow-unique-constraint.md b/docs/reference/rules/disallow-unique-constraint.md new file mode 100644 index 000000000..acf81bf4f --- /dev/null +++ b/docs/reference/rules/disallow-unique-constraint.md @@ -0,0 +1,78 @@ +# disallowUniqueConstraint +**Diagnostic Category: `lint/safety/disallowUniqueConstraint`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/disallow-unique-constraint + +## Description +Disallow adding a UNIQUE constraint without using an existing index. + +Adding a UNIQUE constraint requires an ACCESS EXCLUSIVE lock, which blocks all reads and +writes to the table. Instead, create a unique index concurrently and then add the +constraint using that index. + +## Examples + +### Invalid + +```sql +ALTER TABLE table_name ADD CONSTRAINT field_name_constraint UNIQUE (field_name); +``` + +```sh +code-block.sql:1:1 lint/safety/disallowUniqueConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a UNIQUE constraint requires an ACCESS EXCLUSIVE lock. + + > 1 │ ALTER TABLE table_name ADD CONSTRAINT field_name_constraint UNIQUE (field_name); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Create a unique index CONCURRENTLY and then add the constraint using that index. + + +``` + +```sql +ALTER TABLE foo ADD COLUMN bar text UNIQUE; +``` + +```sh +code-block.sql:1:1 lint/safety/disallowUniqueConstraint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Adding a UNIQUE constraint requires an ACCESS EXCLUSIVE lock. 
+ + > 1 │ ALTER TABLE foo ADD COLUMN bar text UNIQUE; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Create a unique index CONCURRENTLY and then add the constraint using that index. + + +``` + +### Valid + +```sql +CREATE UNIQUE INDEX CONCURRENTLY dist_id_temp_idx ON distributors (dist_id); +ALTER TABLE distributors DROP CONSTRAINT distributors_pkey, +ADD CONSTRAINT distributors_pkey PRIMARY KEY USING INDEX dist_id_temp_idx; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "disallowUniqueConstraint": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/prefer-big-int.md b/docs/reference/rules/prefer-big-int.md new file mode 100644 index 000000000..238808d0f --- /dev/null +++ b/docs/reference/rules/prefer-big-int.md @@ -0,0 +1,101 @@ +# preferBigInt +**Diagnostic Category: `lint/safety/preferBigInt`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/prefer-big-int + +## Description +Prefer BIGINT over smaller integer types. + +Using smaller integer types like SMALLINT, INTEGER, or their aliases can lead to overflow +issues as your application grows. BIGINT provides a much larger range and helps avoid +future migration issues when values exceed the limits of smaller types. + +The storage difference between INTEGER (4 bytes) and BIGINT (8 bytes) is minimal on +modern systems, while the cost of migrating to a larger type later can be significant. + +## Examples + +### Invalid + +```sql +CREATE TABLE users ( + id integer +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferBigInt ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Using smaller integer types can lead to overflow issues. + + > 1 │ CREATE TABLE users ( + │ ^^^^^^^^^^^^^^^^^^^^ + > 2 │ id integer + > 3 │ ); + │ ^^ + 4 │ + + i The 'int4' type has a limited range that may be exceeded as your data grows. + + i Consider using BIGINT for integer columns to avoid future migration issues. 
+ + +``` + +```sql +CREATE TABLE users ( + id serial +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferBigInt ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Using smaller integer types can lead to overflow issues. + + > 1 │ CREATE TABLE users ( + │ ^^^^^^^^^^^^^^^^^^^^ + > 2 │ id serial + > 3 │ ); + │ ^^ + 4 │ + + i The 'serial' type has a limited range that may be exceeded as your data grows. + + i Consider using BIGINT for integer columns to avoid future migration issues. + + +``` + +### Valid + +```sql +CREATE TABLE users ( + id bigint +); +``` + +```sql +CREATE TABLE users ( + id bigserial +); +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "preferBigInt": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/prefer-bigint-over-int.md b/docs/reference/rules/prefer-bigint-over-int.md new file mode 100644 index 000000000..88c45972a --- /dev/null +++ b/docs/reference/rules/prefer-bigint-over-int.md @@ -0,0 +1,107 @@ +# preferBigintOverInt +**Diagnostic Category: `lint/safety/preferBigintOverInt`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/prefer-bigint-over-int + +## Description +Prefer BIGINT over INT/INTEGER types. + +Using INTEGER (INT4) can lead to overflow issues, especially for ID columns. +While SMALLINT might be acceptable for certain use cases with known small ranges, +INTEGER often becomes a limiting factor as applications grow. + +The storage difference between INTEGER (4 bytes) and BIGINT (8 bytes) is minimal, +but the cost of migrating when you hit the 2.1 billion limit can be significant. + +## Examples + +### Invalid + +```sql +CREATE TABLE users ( + id integer +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferBigintOverInt ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! INTEGER type may lead to overflow issues. 
+ + > 1 │ CREATE TABLE users ( + │ ^^^^^^^^^^^^^^^^^^^^ + > 2 │ id integer + > 3 │ ); + │ ^^ + 4 │ + + i INTEGER has a maximum value of 2,147,483,647 which can be exceeded by ID columns and counters. + + i Consider using BIGINT instead for better future-proofing. + + +``` + +```sql +CREATE TABLE users ( + id serial +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferBigintOverInt ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! INTEGER type may lead to overflow issues. + + > 1 │ CREATE TABLE users ( + │ ^^^^^^^^^^^^^^^^^^^^ + > 2 │ id serial + > 3 │ ); + │ ^^ + 4 │ + + i INTEGER has a maximum value of 2,147,483,647 which can be exceeded by ID columns and counters. + + i Consider using BIGINT instead for better future-proofing. + + +``` + +### Valid + +```sql +CREATE TABLE users ( + id bigint +); +``` + +```sql +CREATE TABLE users ( + id bigserial +); +``` + +```sql +CREATE TABLE users ( + id smallint +); +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "preferBigintOverInt": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/prefer-bigint-over-smallint.md b/docs/reference/rules/prefer-bigint-over-smallint.md new file mode 100644 index 000000000..4688874b4 --- /dev/null +++ b/docs/reference/rules/prefer-bigint-over-smallint.md @@ -0,0 +1,101 @@ +# preferBigintOverSmallint +**Diagnostic Category: `lint/safety/preferBigintOverSmallint`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/prefer-bigint-over-smallint + +## Description +Prefer BIGINT over SMALLINT types. + +SMALLINT has a very limited range (-32,768 to 32,767) that is easily exceeded. +Even for values that seem small initially, using SMALLINT can lead to problems +as your application grows. + +The storage savings of SMALLINT (2 bytes) vs BIGINT (8 bytes) are negligible +on modern systems, while the cost of migrating when you exceed the limit is high. 
+ +## Examples + +### Invalid + +```sql +CREATE TABLE users ( + age smallint +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferBigintOverSmallint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! SMALLINT has a very limited range that is easily exceeded. + + > 1 │ CREATE TABLE users ( + │ ^^^^^^^^^^^^^^^^^^^^ + > 2 │ age smallint + > 3 │ ); + │ ^^ + 4 │ + + i SMALLINT can only store values from -32,768 to 32,767. This range is often insufficient. + + i Consider using INTEGER or BIGINT for better range and future-proofing. + + +``` + +```sql +CREATE TABLE products ( + quantity smallserial +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferBigintOverSmallint ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! SMALLINT has a very limited range that is easily exceeded. + + > 1 │ CREATE TABLE products ( + │ ^^^^^^^^^^^^^^^^^^^^^^^ + > 2 │ quantity smallserial + > 3 │ ); + │ ^^ + 4 │ + + i SMALLINT can only store values from -32,768 to 32,767. This range is often insufficient. + + i Consider using INTEGER or BIGINT for better range and future-proofing. + + +``` + +### Valid + +```sql +CREATE TABLE users ( + age integer +); +``` + +```sql +CREATE TABLE products ( + quantity bigint +); +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "preferBigintOverSmallint": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/prefer-identity.md b/docs/reference/rules/prefer-identity.md new file mode 100644 index 000000000..956a8fcb4 --- /dev/null +++ b/docs/reference/rules/prefer-identity.md @@ -0,0 +1,102 @@ +# preferIdentity +**Diagnostic Category: `lint/safety/preferIdentity`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/prefer-identity + +## Description +Prefer using IDENTITY columns over serial columns. + +SERIAL types (serial, serial2, serial4, serial8, smallserial, bigserial) use sequences behind +the scenes but with some limitations. 
IDENTITY columns provide better control over sequence +behavior and are part of the SQL standard. + +IDENTITY columns offer clearer ownership semantics - the sequence is directly tied to the column +and will be automatically dropped when the column or table is dropped. They also provide better +control through GENERATED ALWAYS (prevents manual inserts) or GENERATED BY DEFAULT options. + +## Examples + +### Invalid + +```sql +create table users ( + id serial +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferIdentity ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Prefer IDENTITY columns over SERIAL types. + + > 1 │ create table users ( + │ ^^^^^^^^^^^^^^^^^^^^ + > 2 │ id serial + > 3 │ ); + │ ^^ + 4 │ + + i Column uses 'serial' type which has limitations compared to IDENTITY columns. + + i Use 'bigint GENERATED BY DEFAULT AS IDENTITY' or 'bigint GENERATED ALWAYS AS IDENTITY' instead. + + +``` + +```sql +create table users ( + id bigserial +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferIdentity ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Prefer IDENTITY columns over SERIAL types. + + > 1 │ create table users ( + │ ^^^^^^^^^^^^^^^^^^^^ + > 2 │ id bigserial + > 3 │ ); + │ ^^ + 4 │ + + i Column uses 'bigserial' type which has limitations compared to IDENTITY columns. + + i Use 'bigint GENERATED BY DEFAULT AS IDENTITY' or 'bigint GENERATED ALWAYS AS IDENTITY' instead. 
+ + +``` + +### Valid + +```sql +create table users ( + id bigint generated by default as identity primary key +); +``` + +```sql +create table users ( + id bigint generated always as identity primary key +); +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "preferIdentity": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/prefer-jsonb.md b/docs/reference/rules/prefer-jsonb.md new file mode 100644 index 000000000..7a7bda21c --- /dev/null +++ b/docs/reference/rules/prefer-jsonb.md @@ -0,0 +1,125 @@ +# preferJsonb +**Diagnostic Category: `lint/safety/preferJsonb`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: eugene/E3 + +## Description +Prefer JSONB over JSON types. + +JSONB is the binary JSON data type in PostgreSQL that is more efficient for most use cases. +Unlike JSON, JSONB stores data in a decomposed binary format which provides several advantages: + +- Significantly faster query performance for operations like indexing and searching +- Support for indexing (GIN indexes) +- Duplicate keys are automatically removed +- Keys are sorted + +The only reasons to use JSON instead of JSONB are: + +- You need to preserve exact input formatting (whitespace, key order) +- You need to preserve duplicate keys +- You have very specific performance requirements where JSON's lack of parsing overhead matters + +## Examples + +### Invalid + +```sql +CREATE TABLE users ( + data json +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferJsonb ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Prefer JSONB over JSON for better performance and functionality. + + > 1 │ CREATE TABLE users ( + │ ^^^^^^^^^^^^^^^^^^^^ + > 2 │ data json + > 3 │ ); + │ ^^ + 4 │ + + i JSON stores exact text representation while JSONB stores parsed binary format. JSONB is faster for queries, supports indexing, and removes duplicate keys. 
+ + i Consider using JSONB instead unless you specifically need to preserve formatting or duplicate keys. + + +``` + +```sql +ALTER TABLE users ADD COLUMN metadata json; +``` + +```sh +code-block.sql:1:1 lint/safety/preferJsonb ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Prefer JSONB over JSON for better performance and functionality. + + > 1 │ ALTER TABLE users ADD COLUMN metadata json; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i JSON stores exact text representation while JSONB stores parsed binary format. JSONB is faster for queries, supports indexing, and removes duplicate keys. + + i Consider using JSONB instead unless you specifically need to preserve formatting or duplicate keys. + + +``` + +```sql +ALTER TABLE users ALTER COLUMN data TYPE json; +``` + +```sh +code-block.sql:1:1 lint/safety/preferJsonb ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Prefer JSONB over JSON for better performance and functionality. + + > 1 │ ALTER TABLE users ALTER COLUMN data TYPE json; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i JSON stores exact text representation while JSONB stores parsed binary format. JSONB is faster for queries, supports indexing, and removes duplicate keys. + + i Consider using JSONB instead unless you specifically need to preserve formatting or duplicate keys. 
+ + +``` + +### Valid + +```sql +CREATE TABLE users ( + data jsonb +); +``` + +```sql +ALTER TABLE users ADD COLUMN metadata jsonb; +``` + +```sql +ALTER TABLE users ALTER COLUMN data TYPE jsonb; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "preferJsonb": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/prefer-robust-stmts.md b/docs/reference/rules/prefer-robust-stmts.md new file mode 100644 index 000000000..059176fd4 --- /dev/null +++ b/docs/reference/rules/prefer-robust-stmts.md @@ -0,0 +1,80 @@ +# preferRobustStmts +**Diagnostic Category: `lint/safety/preferRobustStmts`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/prefer-robust-stmts + +## Description +Prefer statements with guards for robustness in migrations. + +When running migrations outside of transactions (e.g., CREATE INDEX CONCURRENTLY), +statements should be made robust by using guards like IF NOT EXISTS or IF EXISTS. +This allows migrations to be safely re-run if they fail partway through. + +## Examples + +### Invalid + +```sql +CREATE INDEX CONCURRENTLY users_email_idx ON users (email); +``` + +```sh +code-block.sql:1:1 lint/safety/preferRobustStmts ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Concurrent index creation should use IF NOT EXISTS. + + > 1 │ CREATE INDEX CONCURRENTLY users_email_idx ON users (email); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Add IF NOT EXISTS to make the migration re-runnable if it fails. + + +``` + +```sql +DROP INDEX CONCURRENTLY users_email_idx; +``` + +```sh +code-block.sql:1:1 lint/safety/preferRobustStmts ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Concurrent drop should use IF EXISTS. + + > 1 │ DROP INDEX CONCURRENTLY users_email_idx; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Add IF EXISTS to make the migration re-runnable if it fails. 
+ + +``` + +### Valid + +```sql +CREATE INDEX CONCURRENTLY IF NOT EXISTS users_email_idx ON users (email); +``` + +```sql +DROP INDEX CONCURRENTLY IF EXISTS users_email_idx; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "preferRobustStmts": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/prefer-text-field.md b/docs/reference/rules/prefer-text-field.md new file mode 100644 index 000000000..7f8831580 --- /dev/null +++ b/docs/reference/rules/prefer-text-field.md @@ -0,0 +1,88 @@ +# preferTextField +**Diagnostic Category: `lint/safety/preferTextField`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/prefer-text-field + +## Description +Prefer using TEXT over VARCHAR(n) types. + +Changing the size of a VARCHAR field requires an ACCESS EXCLUSIVE lock, which blocks all +reads and writes to the table. It's easier to update a check constraint on a TEXT field +than a VARCHAR() size since the check constraint can use NOT VALID with a separate +VALIDATE call. + +## Examples + +### Invalid + +```sql +CREATE TABLE "core_bar" ( + "id" serial NOT NULL PRIMARY KEY, + "alpha" varchar(100) NOT NULL +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferTextField ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Changing the size of a varchar field requires an ACCESS EXCLUSIVE lock. + + > 1 │ CREATE TABLE "core_bar" ( + │ ^^^^^^^^^^^^^^^^^^^^^^^^^ + > 2 │ "id" serial NOT NULL PRIMARY KEY, + > 3 │ "alpha" varchar(100) NOT NULL + > 4 │ ); + │ ^^ + 5 │ + + i Use a text field with a check constraint. + + +``` + +```sql +ALTER TABLE "core_bar" ALTER COLUMN "kind" TYPE varchar(1000) USING "kind"::varchar(1000); +``` + +```sh +code-block.sql:1:1 lint/safety/preferTextField ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Changing the size of a varchar field requires an ACCESS EXCLUSIVE lock. 
+ + > 1 │ ALTER TABLE "core_bar" ALTER COLUMN "kind" TYPE varchar(1000) USING "kind"::varchar(1000); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Use a text field with a check constraint. + + +``` + +### Valid + +```sql +CREATE TABLE "core_bar" ( + "id" serial NOT NULL PRIMARY KEY, + "bravo" text NOT NULL +); +ALTER TABLE "core_bar" ADD CONSTRAINT "text_size" CHECK (LENGTH("bravo") <= 100); +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "preferTextField": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/prefer-timestamptz.md b/docs/reference/rules/prefer-timestamptz.md new file mode 100644 index 000000000..b53962342 --- /dev/null +++ b/docs/reference/rules/prefer-timestamptz.md @@ -0,0 +1,123 @@ +# preferTimestamptz +**Diagnostic Category: `lint/safety/preferTimestamptz`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/prefer-timestamptz + +## Description +Prefer TIMESTAMPTZ over TIMESTAMP types. + +Using TIMESTAMP WITHOUT TIME ZONE can lead to issues when dealing with time zones. +TIMESTAMPTZ (TIMESTAMP WITH TIME ZONE) stores timestamps with time zone information, +making it safer for applications that handle multiple time zones or need to track +when events occurred in absolute time. + +## Examples + +### Invalid + +```sql +CREATE TABLE app.users ( + created_ts timestamp +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferTimestamptz ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Prefer TIMESTAMPTZ over TIMESTAMP for better timezone handling. + + > 1 │ CREATE TABLE app.users ( + │ ^^^^^^^^^^^^^^^^^^^^^^^^ + > 2 │ created_ts timestamp + > 3 │ ); + │ ^^ + 4 │ + + i TIMESTAMP WITHOUT TIME ZONE can lead to issues when dealing with time zones. + + i Use TIMESTAMPTZ (TIMESTAMP WITH TIME ZONE) instead. 
+ + +``` + +```sql +CREATE TABLE app.accounts ( + created_ts timestamp without time zone +); +``` + +```sh +code-block.sql:1:1 lint/safety/preferTimestamptz ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Prefer TIMESTAMPTZ over TIMESTAMP for better timezone handling. + + > 1 │ CREATE TABLE app.accounts ( + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + > 2 │ created_ts timestamp without time zone + > 3 │ ); + │ ^^ + 4 │ + + i TIMESTAMP WITHOUT TIME ZONE can lead to issues when dealing with time zones. + + i Use TIMESTAMPTZ (TIMESTAMP WITH TIME ZONE) instead. + + +``` + +```sql +ALTER TABLE app.users ALTER COLUMN created_ts TYPE timestamp; +``` + +```sh +code-block.sql:1:1 lint/safety/preferTimestamptz ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Prefer TIMESTAMPTZ over TIMESTAMP for better timezone handling. + + > 1 │ ALTER TABLE app.users ALTER COLUMN created_ts TYPE timestamp; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i TIMESTAMP WITHOUT TIME ZONE can lead to issues when dealing with time zones. + + i Use TIMESTAMPTZ (TIMESTAMP WITH TIME ZONE) instead. + + +``` + +### Valid + +```sql +CREATE TABLE app.users ( + created_ts timestamptz +); +``` + +```sql +CREATE TABLE app.accounts ( + created_ts timestamp with time zone +); +``` + +```sql +ALTER TABLE app.users ALTER COLUMN created_ts TYPE timestamptz; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "preferTimestamptz": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/renaming-column.md b/docs/reference/rules/renaming-column.md new file mode 100644 index 000000000..a43661f54 --- /dev/null +++ b/docs/reference/rules/renaming-column.md @@ -0,0 +1,52 @@ +# renamingColumn +**Diagnostic Category: `lint/safety/renamingColumn`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/renaming-column + +## Description +Renaming columns may break existing queries and application code. 
+ +Renaming a column that is being used by an existing application or query can cause unexpected downtime. +Consider creating a new column instead and migrating the data, then dropping the old column after ensuring +no dependencies exist. + +## Examples + +### Invalid + +```sql +ALTER TABLE users RENAME COLUMN email TO email_address; +``` + +```sh +code-block.sql:1:1 lint/safety/renamingColumn ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Renaming a column may break existing clients. + + > 1 │ ALTER TABLE users RENAME COLUMN email TO email_address; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Consider creating a new column with the desired name and migrating data instead. + + +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "renamingColumn": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/renaming-table.md b/docs/reference/rules/renaming-table.md new file mode 100644 index 000000000..44c03116b --- /dev/null +++ b/docs/reference/rules/renaming-table.md @@ -0,0 +1,52 @@ +# renamingTable +**Diagnostic Category: `lint/safety/renamingTable`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/renaming-table + +## Description +Renaming tables may break existing queries and application code. + +Renaming a table that is being referenced by existing applications, views, functions, or foreign keys +can cause unexpected downtime. Consider creating a view with the old table name pointing to the new table, +or carefully coordinate the rename with application deployments. + +## Examples + +### Invalid + +```sql +ALTER TABLE users RENAME TO app_users; +``` + +```sh +code-block.sql:1:1 lint/safety/renamingTable ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Renaming a table may break existing clients. 
+ + > 1 │ ALTER TABLE users RENAME TO app_users; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Consider creating a view with the old table name instead, or coordinate the rename carefully with application deployments. + + +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "renamingTable": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/require-concurrent-index-creation.md b/docs/reference/rules/require-concurrent-index-creation.md new file mode 100644 index 000000000..aaacb50fd --- /dev/null +++ b/docs/reference/rules/require-concurrent-index-creation.md @@ -0,0 +1,58 @@ +# requireConcurrentIndexCreation +**Diagnostic Category: `lint/safety/requireConcurrentIndexCreation`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/require-concurrent-index-creation + +## Description +Creating indexes non-concurrently can lock the table for writes. + +When creating an index on an existing table, using CREATE INDEX without CONCURRENTLY will lock the table +against writes for the duration of the index build. This can cause downtime in production systems. +Use CREATE INDEX CONCURRENTLY to build the index without blocking concurrent operations. + +## Examples + +### Invalid + +```sql +CREATE INDEX users_email_idx ON users (email); +``` + +```sh +code-block.sql:1:1 lint/safety/requireConcurrentIndexCreation ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Creating an index non-concurrently blocks writes to the table. + + > 1 │ CREATE INDEX users_email_idx ON users (email); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Use CREATE INDEX CONCURRENTLY to avoid blocking concurrent operations on the table. 
+ + +``` + +### Valid + +```sql +CREATE INDEX CONCURRENTLY users_email_idx ON users (email); +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "requireConcurrentIndexCreation": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/require-concurrent-index-deletion.md b/docs/reference/rules/require-concurrent-index-deletion.md new file mode 100644 index 000000000..c919177f1 --- /dev/null +++ b/docs/reference/rules/require-concurrent-index-deletion.md @@ -0,0 +1,58 @@ +# requireConcurrentIndexDeletion +**Diagnostic Category: `lint/safety/requireConcurrentIndexDeletion`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/require-concurrent-index-deletion + +## Description +Dropping indexes non-concurrently can lock the table for reads. + +When dropping an index, using DROP INDEX without CONCURRENTLY will lock the table +preventing reads and writes for the duration of the drop. This can cause downtime in production systems. +Use DROP INDEX CONCURRENTLY to drop the index without blocking concurrent operations. + +## Examples + +### Invalid + +```sql +DROP INDEX IF EXISTS users_email_idx; +``` + +```sh +code-block.sql:1:1 lint/safety/requireConcurrentIndexDeletion ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Dropping an index non-concurrently blocks reads and writes to the table. + + > 1 │ DROP INDEX IF EXISTS users_email_idx; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + + i Use DROP INDEX CONCURRENTLY to avoid blocking concurrent operations on the table. 
+ + +``` + +### Valid + +```sql +DROP INDEX CONCURRENTLY IF EXISTS users_email_idx; +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "requireConcurrentIndexDeletion": "error" + } + } + } +} + +``` diff --git a/docs/reference/rules/transaction-nesting.md b/docs/reference/rules/transaction-nesting.md new file mode 100644 index 000000000..7f3b25ab0 --- /dev/null +++ b/docs/reference/rules/transaction-nesting.md @@ -0,0 +1,79 @@ +# transactionNesting +**Diagnostic Category: `lint/safety/transactionNesting`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/transaction-nesting + +## Description +Detects problematic transaction nesting that could lead to unexpected behavior. + +Transaction nesting issues occur when trying to start a transaction within an existing transaction, +or trying to commit/rollback when not in a transaction. This can lead to unexpected behavior +or errors in database migrations. + +## Examples + +### Invalid + +```sql +BEGIN; +-- Migration tools already manage transactions +SELECT 1; +``` + +```sh +code-block.sql:1:1 lint/safety/transactionNesting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Transaction already managed by migration tool. + + > 1 │ BEGIN; + │ ^^^^^^ + 2 │ -- Migration tools already manage transactions + 3 │ SELECT 1; + + i Migration tools manage transactions automatically. Remove explicit transaction control. + + i Put migration statements in separate files to have them be in separate transactions. + + +``` + +```sql +SELECT 1; +COMMIT; -- No transaction to commit +``` + +```sh +code-block.sql:2:1 lint/safety/transactionNesting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + ! Attempting to end transaction managed by migration tool. + + 1 │ SELECT 1; + > 2 │ COMMIT; -- No transaction to commit + │ ^^^^^^^ + 3 │ + + i Migration tools manage transactions automatically. Remove explicit transaction control. 
+ + i Put migration statements in separate files to have them be in separate transactions. + + +``` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "transactionNesting": "error" + } + } + } +} + +``` diff --git a/docs/schema.json b/docs/schema.json index bf9482acc..2d40cf9ed 100644 --- a/docs/schema.json +++ b/docs/schema.json @@ -356,6 +356,50 @@ "description": "A list of rules that belong to this group", "type": "object", "properties": { + "addingFieldWithDefault": { + "description": "Adding a column with a DEFAULT value may lead to a table rewrite while holding an ACCESS EXCLUSIVE lock.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "addingForeignKeyConstraint": { + "description": "Adding a foreign key constraint requires a table scan and a SHARE ROW EXCLUSIVE lock on both tables, which blocks writes.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "addingNotNullField": { + "description": "Setting a column NOT NULL blocks reads while the table is scanned.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "addingPrimaryKeyConstraint": { + "description": "Adding a primary key constraint results in locks and table rewrites.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, "addingRequiredField": { "description": "Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required.", "anyOf": [ @@ -374,6 +418,28 @@ "null" ] }, + "banCharField": { + "description": "Using CHAR(n) or CHARACTER(n) types is discouraged.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "banConcurrentIndexCreationInTransaction": { + "description": "Concurrent index creation is not allowed within a transaction.", + "anyOf": [ + { + "$ref": 
"#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, "banDropColumn": { "description": "Dropping a column may break existing clients.", "anyOf": [ @@ -429,12 +495,188 @@ } ] }, + "changingColumnType": { + "description": "Changing a column type may break existing clients.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "constraintMissingNotValid": { + "description": "Adding constraints without NOT VALID blocks all reads and writes.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "disallowUniqueConstraint": { + "description": "Disallow adding a UNIQUE constraint without using an existing index.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "preferBigInt": { + "description": "Prefer BIGINT over smaller integer types.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "preferBigintOverInt": { + "description": "Prefer BIGINT over INT/INTEGER types.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "preferBigintOverSmallint": { + "description": "Prefer BIGINT over SMALLINT types.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "preferIdentity": { + "description": "Prefer using IDENTITY columns over serial columns.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "preferJsonb": { + "description": "Prefer JSONB over JSON types.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "preferRobustStmts": { + "description": "Prefer statements with guards for robustness in migrations.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "preferTextField": { + "description": 
"Prefer using TEXT over VARCHAR(n) types.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "preferTimestamptz": { + "description": "Prefer TIMESTAMPTZ over TIMESTAMP types.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, "recommended": { "description": "It enables the recommended rules for this group", "type": [ "boolean", "null" ] + }, + "renamingColumn": { + "description": "Renaming columns may break existing queries and application code.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "renamingTable": { + "description": "Renaming tables may break existing queries and application code.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "requireConcurrentIndexCreation": { + "description": "Creating indexes non-concurrently can lock the table for writes.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "requireConcurrentIndexDeletion": { + "description": "Dropping indexes non-concurrently can lock the table for reads.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "transactionNesting": { + "description": "Detects problematic transaction nesting that could lead to unexpected behavior.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] } }, "additionalProperties": false diff --git a/justfile b/justfile index 04412b4c9..d625b69da 100644 --- a/justfile +++ b/justfile @@ -153,3 +153,24 @@ quick-modify: # just show-logs | bunyan show-logs: tail -f $(ls $PGT_LOG_PATH/server.log.* | sort -t- -k2,2 -k3,3 -k4,4 | tail -n 1) + +# Run a claude agent with the given agentic prompt file. +# Commented out by default to avoid accidental usage that may incur costs. 
+# agentic name: +# unset ANTHROPIC_API_KEY && claude --dangerously-skip-permissions -p "please read agentic/{{name}}.md and follow the instructions closely" +# +# agentic-loop name: +# #!/usr/bin/env bash +# echo "Starting agentic loop until error..." +# iteration=1 +# while true; do +# echo "$(date): Starting iteration $iteration..." +# if just agentic {{name}}; then +# echo "$(date): Iteration $iteration completed successfully!" +# iteration=$((iteration + 1)) +# else +# echo "$(date): Iteration $iteration failed - stopping loop" +# break +# fi +# done + diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts index 6f7b36202..51a4f5663 100644 --- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts @@ -63,12 +63,34 @@ export interface Advices { advices: Advice[]; } export type Category = + | "lint/safety/addingFieldWithDefault" + | "lint/safety/addingForeignKeyConstraint" + | "lint/safety/addingNotNullField" + | "lint/safety/addingPrimaryKeyConstraint" | "lint/safety/addingRequiredField" + | "lint/safety/banCharField" + | "lint/safety/banConcurrentIndexCreationInTransaction" | "lint/safety/banDropColumn" | "lint/safety/banDropDatabase" | "lint/safety/banDropNotNull" | "lint/safety/banDropTable" | "lint/safety/banTruncateCascade" + | "lint/safety/changingColumnType" + | "lint/safety/constraintMissingNotValid" + | "lint/safety/disallowUniqueConstraint" + | "lint/safety/preferBigInt" + | "lint/safety/preferBigintOverInt" + | "lint/safety/preferBigintOverSmallint" + | "lint/safety/preferIdentity" + | "lint/safety/preferJsonb" + | "lint/safety/preferRobustStmts" + | "lint/safety/preferTextField" + | "lint/safety/preferTimestamptz" + | "lint/safety/renamingColumn" + | "lint/safety/renamingTable" + | "lint/safety/requireConcurrentIndexCreation" + | "lint/safety/requireConcurrentIndexDeletion" + | "lint/safety/transactionNesting" 
| "stdin" | "check" | "configuration" @@ -413,6 +435,22 @@ export type VcsClientKind = "git"; * A list of rules that belong to this group */ export interface Safety { + /** + * Adding a column with a DEFAULT value may lead to a table rewrite while holding an ACCESS EXCLUSIVE lock. + */ + addingFieldWithDefault?: RuleConfiguration_for_Null; + /** + * Adding a foreign key constraint requires a table scan and a SHARE ROW EXCLUSIVE lock on both tables, which blocks writes. + */ + addingForeignKeyConstraint?: RuleConfiguration_for_Null; + /** + * Setting a column NOT NULL blocks reads while the table is scanned. + */ + addingNotNullField?: RuleConfiguration_for_Null; + /** + * Adding a primary key constraint results in locks and table rewrites. + */ + addingPrimaryKeyConstraint?: RuleConfiguration_for_Null; /** * Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required. */ @@ -421,6 +459,14 @@ export interface Safety { * It enables ALL rules for this group. */ all?: boolean; + /** + * Using CHAR(n) or CHARACTER(n) types is discouraged. + */ + banCharField?: RuleConfiguration_for_Null; + /** + * Concurrent index creation is not allowed within a transaction. + */ + banConcurrentIndexCreationInTransaction?: RuleConfiguration_for_Null; /** * Dropping a column may break existing clients. */ @@ -441,10 +487,74 @@ export interface Safety { * Using TRUNCATE's CASCADE option will truncate any tables that are also foreign-keyed to the specified tables. */ banTruncateCascade?: RuleConfiguration_for_Null; + /** + * Changing a column type may break existing clients. + */ + changingColumnType?: RuleConfiguration_for_Null; + /** + * Adding constraints without NOT VALID blocks all reads and writes. + */ + constraintMissingNotValid?: RuleConfiguration_for_Null; + /** + * Disallow adding a UNIQUE constraint without using an existing index. 
+ */ + disallowUniqueConstraint?: RuleConfiguration_for_Null; + /** + * Prefer BIGINT over smaller integer types. + */ + preferBigInt?: RuleConfiguration_for_Null; + /** + * Prefer BIGINT over INT/INTEGER types. + */ + preferBigintOverInt?: RuleConfiguration_for_Null; + /** + * Prefer BIGINT over SMALLINT types. + */ + preferBigintOverSmallint?: RuleConfiguration_for_Null; + /** + * Prefer using IDENTITY columns over serial columns. + */ + preferIdentity?: RuleConfiguration_for_Null; + /** + * Prefer JSONB over JSON types. + */ + preferJsonb?: RuleConfiguration_for_Null; + /** + * Prefer statements with guards for robustness in migrations. + */ + preferRobustStmts?: RuleConfiguration_for_Null; + /** + * Prefer using TEXT over VARCHAR(n) types. + */ + preferTextField?: RuleConfiguration_for_Null; + /** + * Prefer TIMESTAMPTZ over TIMESTAMP types. + */ + preferTimestamptz?: RuleConfiguration_for_Null; /** * It enables the recommended rules for this group */ recommended?: boolean; + /** + * Renaming columns may break existing queries and application code. + */ + renamingColumn?: RuleConfiguration_for_Null; + /** + * Renaming tables may break existing queries and application code. + */ + renamingTable?: RuleConfiguration_for_Null; + /** + * Creating indexes non-concurrently can lock the table for writes. + */ + requireConcurrentIndexCreation?: RuleConfiguration_for_Null; + /** + * Dropping indexes non-concurrently can lock the table for reads. + */ + requireConcurrentIndexDeletion?: RuleConfiguration_for_Null; + /** + * Detects problematic transaction nesting that could lead to unexpected behavior. 
+ */ + transactionNesting?: RuleConfiguration_for_Null; } export type RuleConfiguration_for_Null = | RulePlainConfiguration diff --git a/xtask/codegen/src/generate_new_analyser_rule.rs b/xtask/codegen/src/generate_new_analyser_rule.rs index 4c4bcc696..514886a71 100644 --- a/xtask/codegen/src/generate_new_analyser_rule.rs +++ b/xtask/codegen/src/generate_new_analyser_rule.rs @@ -41,7 +41,7 @@ fn generate_rule_template( format!( r#"use pgt_analyse::{{ - AnalysedFileContext, context::RuleContext, {macro_name}, Rule, RuleDiagnostic, + AnalysedFileContext, context::RuleContext, {macro_name}, Rule, RuleDiagnostic, }}; use pgt_console::markup; use pgt_diagnostics::Severity; @@ -79,11 +79,7 @@ use pgt_schema_cache::SchemaCache; impl Rule for {rule_name_upper_camel} {{ type Options = (); - fn run( - ctx: &RuleContext - _file_context: &AnalysedFileContext, - _schema_cache: Option<&SchemaCache>, - ) -> Vec {{ + fn run(ctx: &RuleContext) -> Vec {{ Vec::new() }} }}