Commit a01a9f1: improve code style 2
1 parent: 64fa4d2

5 files changed, +146 −111 lines

crates/pgt_analyser/src/lint/safety/constraint_missing_not_valid.rs

Lines changed: 38 additions & 26 deletions

@@ -40,35 +40,47 @@ impl Rule for ConstraintMissingNotValid {
     fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
         let mut diagnostics = Vec::new();
 
-        if let pgt_query::NodeEnum::AlterTableStmt(stmt) = ctx.stmt() {
-            for cmd in &stmt.cmds {
-                if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node {
-                    // Check if we're adding a constraint
-                    if let Some(pgt_query::NodeEnum::Constraint(constraint)) =
-                        cmd.def.as_ref().and_then(|d| d.node.as_ref())
-                    {
-                        // Skip if the constraint has NOT VALID
-                        if constraint.initially_valid {
-                            // Only warn for CHECK and FOREIGN KEY constraints
-                            match constraint.contype() {
-                                pgt_query::protobuf::ConstrType::ConstrCheck
-                                | pgt_query::protobuf::ConstrType::ConstrForeign => {
-                                    diagnostics.push(RuleDiagnostic::new(
-                                        rule_category!(),
-                                        None,
-                                        markup! {
-                                            "Adding a constraint without NOT VALID will block reads and writes while validating existing rows."
-                                        }
-                                    ).detail(None, "Add the constraint as NOT VALID in one transaction, then run VALIDATE CONSTRAINT in a separate transaction."));
-                                }
-                                _ => {}
-                            }
-                        }
-                    }
-                }
+        let pgt_query::NodeEnum::AlterTableStmt(stmt) = ctx.stmt() else {
+            return diagnostics;
+        };
+
+        for cmd in &stmt.cmds {
+            let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node else {
+                continue;
+            };
+
+            let Some(pgt_query::NodeEnum::Constraint(constraint)) = cmd.def.as_ref().and_then(|d| d.node.as_ref()) else {
+                continue;
+            };
+
+            if let Some(diagnostic) = check_constraint_needs_not_valid(constraint) {
+                diagnostics.push(diagnostic);
             }
         }
 
         diagnostics
     }
 }
+
+fn check_constraint_needs_not_valid(constraint: &pgt_query::protobuf::Constraint) -> Option<RuleDiagnostic> {
+    // Skip if the constraint has NOT VALID
+    if !constraint.initially_valid {
+        return None;
+    }
+
+    // Only warn for CHECK and FOREIGN KEY constraints
+    match constraint.contype() {
+        pgt_query::protobuf::ConstrType::ConstrCheck
+        | pgt_query::protobuf::ConstrType::ConstrForeign => Some(
+            RuleDiagnostic::new(
+                rule_category!(),
+                None,
+                markup! {
+                    "Adding a constraint without NOT VALID will block reads and writes while validating existing rows."
+                }
+            )
+            .detail(None, "Add the constraint as NOT VALID in one transaction, then run VALIDATE CONSTRAINT in a separate transaction.")
+        ),
+        _ => None,
+    }
+}
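
The change above flattens the nested `if let` chain with `let ... else` guards and moves the per-constraint decision into a helper returning `Option<RuleDiagnostic>`. As a rough illustration of that shape, here is a minimal, self-contained sketch using hypothetical stand-in types rather than the crate's actual `pgt_query` or `RuleDiagnostic` API:

// Hypothetical stand-ins for the analyser's AST and diagnostic types.
#[allow(dead_code)] // the Other arm only exists to make the guard meaningful
enum Node {
    AlterTable { constraints: Vec<Constraint> },
    Other,
}

struct Constraint {
    initially_valid: bool,
    is_check_or_foreign_key: bool,
}

#[derive(Debug)]
struct Diagnostic(&'static str);

fn run(stmt: &Node) -> Vec<Diagnostic> {
    let mut diagnostics = Vec::new();

    // Guard clause: anything other than ALTER TABLE is not our concern.
    let Node::AlterTable { constraints } = stmt else {
        return diagnostics;
    };

    for constraint in constraints {
        // The per-item decision lives in a helper returning Option, keeping the loop flat.
        if let Some(diagnostic) = check_constraint(constraint) {
            diagnostics.push(diagnostic);
        }
    }

    diagnostics
}

fn check_constraint(constraint: &Constraint) -> Option<Diagnostic> {
    // Constraints added as NOT VALID don't block while validating existing rows.
    if !constraint.initially_valid {
        return None;
    }
    // Only CHECK and FOREIGN KEY constraints are worth warning about.
    constraint
        .is_check_or_foreign_key
        .then(|| Diagnostic("add the constraint as NOT VALID, then VALIDATE CONSTRAINT separately"))
}

fn main() {
    let stmt = Node::AlterTable {
        constraints: vec![Constraint { initially_valid: true, is_check_or_foreign_key: true }],
    };
    println!("{:?}", run(&stmt));
}

The early returns keep every happy-path statement at one indentation level, which is the point of the refactor.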

crates/pgt_analyser/src/lint/safety/prefer_bigint_over_int.rs

Lines changed: 27 additions & 23 deletions

@@ -98,30 +98,34 @@ fn check_column_def(
     diagnostics: &mut Vec<RuleDiagnostic>,
     col_def: &pgt_query::protobuf::ColumnDef,
 ) {
-    if let Some(type_name) = &col_def.type_name {
-        for name_node in &type_name.names {
-            if let Some(pgt_query::NodeEnum::String(name)) = &name_node.node {
-                let type_name_lower = name.sval.to_lowercase();
-                // Only check for INT4/INTEGER types, not SMALLINT
-                let is_int4 = matches!(
-                    type_name_lower.as_str(),
-                    "integer" | "int4" | "serial" | "serial4"
-                );
+    let Some(type_name) = &col_def.type_name else {
+        return;
+    };
 
-                if is_int4 {
-                    diagnostics.push(
-                        RuleDiagnostic::new(
-                            rule_category!(),
-                            None,
-                            markup! {
-                                "INTEGER type may lead to overflow issues."
-                            },
-                        )
-                        .detail(None, "INTEGER has a maximum value of 2,147,483,647 which can be exceeded by ID columns and counters.")
-                        .note("Consider using BIGINT instead for better future-proofing."),
-                    );
-                }
-            }
+    for name_node in &type_name.names {
+        let Some(pgt_query::NodeEnum::String(name)) = &name_node.node else {
+            continue;
+        };
+
+        let type_name_lower = name.sval.to_lowercase();
+        // Only check for INT4/INTEGER types, not SMALLINT
+        if !matches!(
+            type_name_lower.as_str(),
+            "integer" | "int4" | "serial" | "serial4"
+        ) {
+            continue;
         }
+
+        diagnostics.push(
+            RuleDiagnostic::new(
+                rule_category!(),
+                None,
+                markup! {
+                    "INTEGER type may lead to overflow issues."
+                },
+            )
+            .detail(None, "INTEGER has a maximum value of 2,147,483,647 which can be exceeded by ID columns and counters.")
+            .note("Consider using BIGINT instead for better future-proofing."),
+        );
     }
 }
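
The three column-type rules in this commit get the same treatment: inside the loop, `let ... else { continue; }` replaces the nested `if let`, and the `matches!` test is inverted into a `continue` guard so the diagnostic push ends up at the top level of the loop body. A compact sketch of that loop shape, with made-up names standing in for the real `ColumnDef` plumbing:

// Hypothetical stand-ins for the column-definition types used by the rules.
struct ColumnDef {
    type_names: Vec<Option<String>>,
}

fn check_column_def(warnings: &mut Vec<String>, col_def: &ColumnDef) {
    for name in &col_def.type_names {
        // Guard: skip entries that carry no type name at all.
        let Some(name) = name else {
            continue;
        };

        // Guard: only 32-bit integer spellings are of interest here.
        if !matches!(name.to_lowercase().as_str(), "integer" | "int4" | "serial" | "serial4") {
            continue;
        }

        warnings.push(format!("column type '{name}' may overflow; consider BIGINT"));
    }
}

fn main() {
    let col = ColumnDef {
        type_names: vec![Some("integer".into()), None, Some("text".into())],
    };
    let mut warnings = Vec::new();
    check_column_def(&mut warnings, &col);
    println!("{warnings:?}");
}

Inverting the condition into a guard is what lets the rules drop a level of nesting without changing which columns they flag.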

crates/pgt_analyser/src/lint/safety/prefer_bigint_over_smallint.rs

Lines changed: 26 additions & 22 deletions

@@ -92,29 +92,33 @@ fn check_column_def(
     diagnostics: &mut Vec<RuleDiagnostic>,
     col_def: &pgt_query::protobuf::ColumnDef,
 ) {
-    if let Some(type_name) = &col_def.type_name {
-        for name_node in &type_name.names {
-            if let Some(pgt_query::NodeEnum::String(name)) = &name_node.node {
-                let type_name_lower = name.sval.to_lowercase();
-                let is_smallint = matches!(
-                    type_name_lower.as_str(),
-                    "smallint" | "int2" | "smallserial" | "serial2"
-                );
+    let Some(type_name) = &col_def.type_name else {
+        return;
+    };
 
-                if is_smallint {
-                    diagnostics.push(
-                        RuleDiagnostic::new(
-                            rule_category!(),
-                            None,
-                            markup! {
-                                "SMALLINT has a very limited range that is easily exceeded."
-                            },
-                        )
-                        .detail(None, "SMALLINT can only store values from -32,768 to 32,767. This range is often insufficient.")
-                        .note("Consider using INTEGER or BIGINT for better range and future-proofing."),
-                    );
-                }
-            }
+    for name_node in &type_name.names {
+        let Some(pgt_query::NodeEnum::String(name)) = &name_node.node else {
+            continue;
+        };
+
+        let type_name_lower = name.sval.to_lowercase();
+        if !matches!(
+            type_name_lower.as_str(),
+            "smallint" | "int2" | "smallserial" | "serial2"
+        ) {
+            continue;
        }
+
+        diagnostics.push(
+            RuleDiagnostic::new(
+                rule_category!(),
+                None,
+                markup! {
+                    "SMALLINT has a very limited range that is easily exceeded."
+                },
+            )
+            .detail(None, "SMALLINT can only store values from -32,768 to 32,767. This range is often insufficient.")
+            .note("Consider using INTEGER or BIGINT for better range and future-proofing."),
+        );
     }
 }

crates/pgt_analyser/src/lint/safety/prefer_identity.rs

Lines changed: 26 additions & 20 deletions

@@ -94,26 +94,32 @@ fn check_column_def(
     diagnostics: &mut Vec<RuleDiagnostic>,
     col_def: &pgt_query::protobuf::ColumnDef,
 ) {
-    if let Some(type_name) = &col_def.type_name {
-        for name_node in &type_name.names {
-            if let Some(pgt_query::NodeEnum::String(name)) = &name_node.node {
-                if matches!(
-                    name.sval.as_str(),
-                    "serial" | "serial2" | "serial4" | "serial8" | "smallserial" | "bigserial"
-                ) {
-                    diagnostics.push(
-                        RuleDiagnostic::new(
-                            rule_category!(),
-                            None,
-                            markup! {
-                                "Prefer IDENTITY columns over SERIAL types."
-                            },
-                        )
-                        .detail(None, format!("Column uses '{}' type which has limitations compared to IDENTITY columns.", name.sval))
-                        .note("Use 'bigint GENERATED BY DEFAULT AS IDENTITY' or 'bigint GENERATED ALWAYS AS IDENTITY' instead."),
-                    );
-                }
-            }
+    let Some(type_name) = &col_def.type_name else {
+        return;
+    };
+
+    for name_node in &type_name.names {
+        let Some(pgt_query::NodeEnum::String(name)) = &name_node.node else {
+            continue;
+        };
+
+        if !matches!(
+            name.sval.as_str(),
+            "serial" | "serial2" | "serial4" | "serial8" | "smallserial" | "bigserial"
+        ) {
+            continue;
         }
+
+        diagnostics.push(
+            RuleDiagnostic::new(
+                rule_category!(),
+                None,
+                markup! {
+                    "Prefer IDENTITY columns over SERIAL types."
+                },
+            )
+            .detail(None, format!("Column uses '{}' type which has limitations compared to IDENTITY columns.", name.sval))
+            .note("Use 'bigint GENERATED BY DEFAULT AS IDENTITY' or 'bigint GENERATED ALWAYS AS IDENTITY' instead."),
+        );
    }
 }

crates/pgt_analyser/src/lint/safety/require_concurrent_index_creation.rs

Lines changed: 29 additions & 20 deletions

@@ -38,29 +38,38 @@ impl Rule for RequireConcurrentIndexCreation {
     fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
         let mut diagnostics = Vec::new();
 
-        if let pgt_query::NodeEnum::IndexStmt(stmt) = &ctx.stmt() {
-            if !stmt.concurrent {
-                // Check if this table was created in the same transaction/file
-                let table_name = stmt
-                    .relation
-                    .as_ref()
-                    .map(|r| r.relname.as_str())
-                    .unwrap_or("");
+        let pgt_query::NodeEnum::IndexStmt(stmt) = &ctx.stmt() else {
+            return diagnostics;
+        };
 
-                if !table_name.is_empty()
-                    && !is_table_created_in_file(ctx.file_context(), table_name)
-                {
-                    diagnostics.push(RuleDiagnostic::new(
-                        rule_category!(),
-                        None,
-                        markup! {
-                            "Creating an index non-concurrently blocks writes to the table."
-                        },
-                    ).detail(None, "Use CREATE INDEX CONCURRENTLY to avoid blocking concurrent operations on the table."));
-                }
-            }
+        // Concurrent indexes are safe
+        if stmt.concurrent {
+            return diagnostics;
+        }
+
+        // Check if this table was created in the same transaction/file
+        let table_name = stmt
+            .relation
+            .as_ref()
+            .map(|r| r.relname.as_str())
+            .unwrap_or("");
+
+        // Skip if table name is empty or table was created in the same file
+        if table_name.is_empty() || is_table_created_in_file(ctx.file_context(), table_name) {
+            return diagnostics;
         }
 
+        diagnostics.push(
+            RuleDiagnostic::new(
+                rule_category!(),
+                None,
+                markup! {
+                    "Creating an index non-concurrently blocks writes to the table."
+                },
+            )
+            .detail(None, "Use CREATE INDEX CONCURRENTLY to avoid blocking concurrent operations on the table.")
+        );
+
         diagnostics
     }
 }
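
The index rule applies the same idea without a loop: each nested condition becomes a guard that returns the still-empty diagnostics vector, so only the fully qualified case reaches the push. A rough sketch of that control flow with hypothetical stand-in types (the real rule consults `ctx.file_context()` and `is_table_created_in_file`, simplified here to a slice of table names):

// Hypothetical stand-in for the index statement handled by the rule.
struct IndexStmt {
    concurrent: bool,
    table_name: Option<String>,
}

fn run(stmt: &IndexStmt, tables_created_in_file: &[&str]) -> Vec<String> {
    let mut diagnostics = Vec::new();

    // Concurrent index creation does not block writes, nothing to report.
    if stmt.concurrent {
        return diagnostics;
    }

    let table_name = stmt.table_name.as_deref().unwrap_or("");

    // Skip unnamed tables and tables created in the same file/transaction.
    if table_name.is_empty() || tables_created_in_file.contains(&table_name) {
        return diagnostics;
    }

    diagnostics.push(format!(
        "creating an index on '{table_name}' non-concurrently blocks writes; use CREATE INDEX CONCURRENTLY"
    ));
    diagnostics
}

fn main() {
    let stmt = IndexStmt { concurrent: false, table_name: Some("users".into()) };
    println!("{:?}", run(&stmt, &["orders"]));
}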
