chore: Fix fresh clippy warnings
mcheshkov committed Sep 3, 2024
1 parent 6c18867 commit a593b14
Showing 14 changed files with 49 additions and 70 deletions.
5 changes: 2 additions & 3 deletions src/ast/mod.rs
@@ -140,7 +140,7 @@ impl fmt::Display for Ident {
                 }
                 f.write_char(q)
             }
-            Some(q) if q == '[' => write!(f, "[{}]", self.value),
+            Some('[') => write!(f, "[{}]", self.value),
             None => f.write_str(&self.value),
             _ => panic!("unexpected quote style"),
         }
@@ -1841,7 +1841,7 @@ impl fmt::Display for Statement {
             Statement::SetVariable { key_values } => {
                 f.write_str("SET ")?;
 
-                if let Some(key_value) = key_values.get(0) {
+                if let Some(key_value) = key_values.first() {
                     if key_value.hivevar {
                         let values: Vec<String> = key_value
                             .value
@@ -2938,7 +2938,6 @@ impl fmt::Display for CopyLegacyCsvOption {
     }
 }
 
-///
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum MergeClause {
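Two of the rewrites above set the pattern for the rest of the commit: a match guard that only compares the bound value against a literal can match the literal directly, and `get(0)` has a dedicated `first()` accessor. A standalone sketch of both idioms (the lint names `clippy::redundant_guards` and `clippy::get_first` are inferred from the patterns, not stated in the commit):

// Hypothetical helper, not part of the crate; it only illustrates the two idioms.
fn describe(quote: Option<char>, parts: &[String]) -> String {
    // Match the literal directly instead of `Some(q) if q == '['`.
    let bracketed = matches!(quote, Some('['));
    // Prefer `first()` over `get(0)` for the leading element.
    let head = parts.first().cloned().unwrap_or_default();
    format!("bracketed={bracketed}, head={head}")
}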
7 changes: 2 additions & 5 deletions src/dialect/ansi.rs
@@ -17,13 +17,10 @@ pub struct AnsiDialect {}
 
 impl Dialect for AnsiDialect {
     fn is_identifier_start(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch) || ('A'..='Z').contains(&ch)
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase()
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ('0'..='9').contains(&ch)
-            || ch == '_'
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch.is_ascii_digit() || ch == '_'
     }
 }
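The same rewrite repeats in every dialect file below: open-coded ASCII range checks become the standard `char::is_ascii_*` helpers. A minimal equivalence sketch (attributing this to `clippy::manual_is_ascii_check` is an assumption):

// Both forms return the same result for every `char`.
fn is_identifier_start_old(ch: char) -> bool {
    ('a'..='z').contains(&ch) || ('A'..='Z').contains(&ch)
}

fn is_identifier_start_new(ch: char) -> bool {
    ch.is_ascii_lowercase() || ch.is_ascii_uppercase()
}

#[test]
fn ascii_helpers_agree() {
    for ch in '\0'..=char::MAX {
        assert_eq!(is_identifier_start_old(ch), is_identifier_start_new(ch));
    }
}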
4 changes: 2 additions & 2 deletions src/dialect/clickhouse.rs
@@ -18,10 +18,10 @@ pub struct ClickHouseDialect {}
 impl Dialect for ClickHouseDialect {
     fn is_identifier_start(&self, ch: char) -> bool {
         // See https://clickhouse.com/docs/en/sql-reference/syntax/#syntax-identifiers
-        ('a'..='z').contains(&ch) || ('A'..='Z').contains(&ch) || ch == '_'
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch == '_'
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        self.is_identifier_start(ch) || ('0'..='9').contains(&ch)
+        self.is_identifier_start(ch) || ch.is_ascii_digit()
     }
 }
12 changes: 4 additions & 8 deletions src/dialect/generic.rs
@@ -17,17 +17,13 @@ pub struct GenericDialect
 
 impl Dialect for GenericDialect {
     fn is_identifier_start(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ch == '_'
-            || ch == '#'
-            || ch == '@'
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch == '_' || ch == '#' || ch == '@'
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ('0'..='9').contains(&ch)
+        ch.is_ascii_lowercase()
+            || ch.is_ascii_uppercase()
+            || ch.is_ascii_digit()
             || ch == '@'
             || ch == '$'
             || ch == '#'
11 changes: 4 additions & 7 deletions src/dialect/hive.rs
@@ -21,16 +21,13 @@ impl Dialect for HiveDialect {
     }
 
     fn is_identifier_start(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ('0'..='9').contains(&ch)
-            || ch == '$'
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch.is_ascii_digit() || ch == '$'
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ('0'..='9').contains(&ch)
+        ch.is_ascii_lowercase()
+            || ch.is_ascii_uppercase()
+            || ch.is_ascii_digit()
             || ch == '_'
             || ch == '$'
             || ch == '{'
12 changes: 4 additions & 8 deletions src/dialect/mssql.rs
@@ -23,17 +23,13 @@ impl Dialect for MsSqlDialect {
     fn is_identifier_start(&self, ch: char) -> bool {
         // See https://docs.microsoft.com/en-us/sql/relational-databases/databases/database-identifiers?view=sql-server-2017#rules-for-regular-identifiers
         // We don't support non-latin "letters" currently.
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ch == '_'
-            || ch == '#'
-            || ch == '@'
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch == '_' || ch == '#' || ch == '@'
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ('0'..='9').contains(&ch)
+        ch.is_ascii_lowercase()
+            || ch.is_ascii_uppercase()
+            || ch.is_ascii_digit()
             || ch == '@'
             || ch == '$'
             || ch == '#'
6 changes: 3 additions & 3 deletions src/dialect/mysql.rs
@@ -20,16 +20,16 @@ impl Dialect for MySqlDialect {
         // See https://dev.mysql.com/doc/refman/8.0/en/identifiers.html.
         // We don't yet support identifiers beginning with numbers, as that
         // makes it hard to distinguish numeric literals.
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
+        ch.is_ascii_lowercase()
+            || ch.is_ascii_uppercase()
             || ch == '_'
             || ch == '$'
             || ch == '@'
             || ('\u{0080}'..='\u{ffff}').contains(&ch)
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        self.is_identifier_start(ch) || ('0'..='9').contains(&ch)
+        self.is_identifier_start(ch) || ch.is_ascii_digit()
     }
 
     fn is_delimited_identifier_start(&self, ch: char) -> bool {
8 changes: 4 additions & 4 deletions src/dialect/postgresql.rs
@@ -20,13 +20,13 @@ impl Dialect for PostgreSqlDialect {
         // See https://www.postgresql.org/docs/11/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
         // We don't yet support identifiers beginning with "letters with
         // diacritical marks and non-Latin letters"
-        ('a'..='z').contains(&ch) || ('A'..='Z').contains(&ch) || ch == '_'
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch == '_'
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ('0'..='9').contains(&ch)
+        ch.is_ascii_lowercase()
+            || ch.is_ascii_uppercase()
+            || ch.is_ascii_digit()
             || ch == '$'
             || ch == '_'
     }
8 changes: 4 additions & 4 deletions src/dialect/snowflake.rs
@@ -18,13 +18,13 @@ pub struct SnowflakeDialect
 impl Dialect for SnowflakeDialect {
     // see https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html
     fn is_identifier_start(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch) || ('A'..='Z').contains(&ch) || ch == '_'
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch == '_'
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
-            || ('0'..='9').contains(&ch)
+        ch.is_ascii_lowercase()
+            || ch.is_ascii_uppercase()
+            || ch.is_ascii_digit()
             || ch == '$'
            || ch == '_'
     }
6 changes: 3 additions & 3 deletions src/dialect/sqlite.rs
@@ -25,14 +25,14 @@ impl Dialect for SQLiteDialect {
 
     fn is_identifier_start(&self, ch: char) -> bool {
         // See https://www.sqlite.org/draft/tokenreq.html
-        ('a'..='z').contains(&ch)
-            || ('A'..='Z').contains(&ch)
+        ch.is_ascii_lowercase()
+            || ch.is_ascii_uppercase()
             || ch == '_'
             || ch == '$'
             || ('\u{007f}'..='\u{ffff}').contains(&ch)
     }
 
     fn is_identifier_part(&self, ch: char) -> bool {
-        self.is_identifier_start(ch) || ('0'..='9').contains(&ch)
+        self.is_identifier_start(ch) || ch.is_ascii_digit()
     }
 }
14 changes: 6 additions & 8 deletions src/parser.rs
@@ -2955,7 +2955,11 @@ impl<'a> Parser<'a> {
     /// Parse a literal string
     pub fn parse_literal_string(&mut self) -> Result<String, ParserError> {
         match self.next_token() {
-            Token::Word(Word { value, keyword, .. }) if keyword == Keyword::NoKeyword => Ok(value),
+            Token::Word(Word {
+                value,
+                keyword: Keyword::NoKeyword,
+                ..
+            }) => Ok(value),
             Token::SingleQuotedString(s) => Ok(s),
             Token::EscapedStringLiteral(s) if dialect_of!(self is PostgreSqlDialect | GenericDialect) => {
                 Ok(s)
@@ -3880,13 +3884,7 @@ impl<'a> Parser<'a> {
             None
         };
         let object_name = match db_name {
-            Some(db_name) => ObjectName(
-                db_name
-                    .0
-                    .into_iter()
-                    .chain(table_name.0.into_iter())
-                    .collect(),
-            ),
+            Some(db_name) => ObjectName(db_name.0.into_iter().chain(table_name.0).collect()),
             None => table_name,
         };
         let filter = self.parse_show_statement_filter()?;
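Both parser.rs hunks, and the tests/sqlparser_postgres.rs change at the end of the commit, drop conversions into an iterator where the receiver already satisfies the bound: `Iterator::chain` accepts any `IntoIterator`, and a range is already an `Iterator`. Presumably the `clippy::useless_conversion` lint (an assumption). A minimal sketch:

// `chain` accepts `IntoIterator`, so the inner `.into_iter()` adds nothing.
fn join(db: Vec<String>, table: Vec<String>) -> Vec<String> {
    // old: db.into_iter().chain(table.into_iter()).collect()
    db.into_iter().chain(table).collect()
}

// A range is already an `Iterator`, so calling `.into_iter()` on it is redundant.
fn squares() -> Vec<i64> {
    // old: (0..=10).into_iter().map(|s| s * s).collect()
    (0..=10).map(|s| s * s).collect()
}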
17 changes: 7 additions & 10 deletions src/tokenizer.rs
@@ -229,7 +229,7 @@ impl Token {
         Token::Word(Word {
             value: word.to_string(),
             quote_style,
-            keyword: if quote_style == None {
+            keyword: if quote_style.is_none() {
                 let keyword = ALL_KEYWORDS.binary_search(&word_uppercase.as_str());
                 keyword.map_or(Keyword::NoKeyword, |x| ALL_KEYWORDS_INDEX[x])
             } else {
@@ -354,8 +354,8 @@ impl<'a> Tokenizer<'a> {
             }
 
             Token::Whitespace(Whitespace::Tab) => self.col += 4,
-            Token::Word(w) if w.quote_style == None => self.col += w.value.len() as u64,
-            Token::Word(w) if w.quote_style != None => self.col += w.value.len() as u64 + 2,
+            Token::Word(w) if w.quote_style.is_none() => self.col += w.value.len() as u64,
+            Token::Word(w) if w.quote_style.is_some() => self.col += w.value.len() as u64 + 2,
             Token::Number(s, _) => self.col += s.len() as u64,
             Token::SingleQuotedString(s) => self.col += s.len() as u64,
             Token::Placeholder(s) => self.col += s.len() as u64,
@@ -457,7 +457,7 @@ impl<'a> Tokenizer<'a> {
                 chars.next(); // consume the first char
                 let s = self.tokenize_word(ch, chars);
 
-                if s.chars().all(|x| ('0'..='9').contains(&x) || x == '.') {
+                if s.chars().all(|x| x.is_ascii_digit() || x == '.') {
                     let mut s = peeking_take_while(&mut s.chars().peekable(), |ch| {
                         matches!(ch, '0'..='9' | '.')
                     });
@@ -495,15 +495,12 @@ impl<'a> Tokenizer<'a> {
             }
             // numbers and period
             '0'..='9' | '.' => {
-                let mut s = peeking_take_while(chars, |ch| matches!(ch, '0'..='9'));
+                let mut s = peeking_take_while(chars, |ch| ch.is_ascii_digit());
 
                 // match binary literal that starts with 0x
                 if s == "0" && chars.peek() == Some(&'x') {
                     chars.next();
-                    let s2 = peeking_take_while(
-                        chars,
-                        |ch| matches!(ch, '0'..='9' | 'A'..='F' | 'a'..='f'),
-                    );
+                    let s2 = peeking_take_while(chars, |ch| ch.is_ascii_hexdigit());
                     return Ok(Some(Token::HexStringLiteral(s2)));
                 }
 
@@ -512,7 +509,7 @@ impl<'a> Tokenizer<'a> {
                     s.push('.');
                     chars.next();
                 }
-                s += &peeking_take_while(chars, |ch| matches!(ch, '0'..='9'));
+                s += &peeking_take_while(chars, |ch| ch.is_ascii_digit());
 
                 // No number -> Token::Period
                 if s == "." {
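The tokenizer changes replace `Option` comparisons against `None` with `is_none()`/`is_some()` and fold hand-written digit and hex-digit ranges into the `char` helpers. A small sketch of the same idioms (the lint names `clippy::partialeq_to_none` and `clippy::manual_is_ascii_check` are assumptions):

// `is_some()` / `is_none()` instead of `!= None` / `== None`.
fn display_width(value: &str, quote_style: Option<char>) -> u64 {
    if quote_style.is_some() {
        value.len() as u64 + 2 // account for the two quote characters
    } else {
        value.len() as u64
    }
}

// `is_ascii_hexdigit()` instead of matching '0'..='9' | 'A'..='F' | 'a'..='f'.
fn is_hex_literal_body(s: &str) -> bool {
    !s.is_empty() && s.chars().all(|ch| ch.is_ascii_hexdigit())
}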
8 changes: 4 additions & 4 deletions tests/sqlparser_mysql.rs
@@ -198,7 +198,7 @@ fn parse_use() {
 fn parse_show_create() {
     let obj_name = ObjectName(vec![Ident::new("myident")]);
 
-    for obj_type in &vec![
+    for obj_type in &[
         ShowCreateObject::Table,
         ShowCreateObject::Trigger,
         ShowCreateObject::Event,
@@ -257,12 +257,12 @@ fn parse_set_variables() {
     assert_eq!(
         stmt,
         Statement::SetVariable {
-            key_values: vec![SetVariableKeyValue {
+            key_values: [SetVariableKeyValue {
                 local: true,
                 hivevar: false,
                 key: ObjectName(vec![Ident::new("autocommit")]),
                 value: vec![value],
-            },]
+            }]
             .to_vec()
         }
     );
@@ -503,7 +503,7 @@ fn parse_escaped_string() {
     let sql = r#"SELECT 'I''m fine'"#;
 
     let projection = mysql().verified_only_select(sql).projection;
-    let item = projection.get(0).unwrap();
+    let item = projection.first().unwrap();
 
     match &item {
         SelectItem::UnnamedExpr(Expr::Value(value)) => {
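In the MySQL tests, borrowing a `vec![...]` temporary just to iterate over it allocates for nothing; a plain array literal behaves the same. Likely `clippy::useless_vec` (again an assumption). A throwaway example:

// Hypothetical example, not from the test suite.
fn print_show_create_statements() {
    // old: for obj_type in &vec!["TABLE", "TRIGGER", "EVENT"] { ... }
    for obj_type in &["TABLE", "TRIGGER", "EVENT"] {
        println!("SHOW CREATE {obj_type} myident");
    }
}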
1 change: 0 additions & 1 deletion tests/sqlparser_postgres.rs
@@ -1224,7 +1224,6 @@ fn parse_pg_regex_match_ops() {
 fn parse_array_index_expr() {
     #[cfg(feature = "bigdecimal")]
     let num: Vec<Expr> = (0..=10)
-        .into_iter()
         .map(|s| Expr::Value(Value::Number(bigdecimal::BigDecimal::from(s), false)))
         .collect();
     #[cfg(not(feature = "bigdecimal"))]
