@@ -24,7 +24,7 @@ import org.apache.spark.sql.types._
import scala.util.matching.Regex

/**
- * ClickHouseDialects
+ * ClickHouse SQL dialect
 */
object ClickHouseDialect extends JdbcDialect with Logging {

@@ -42,7 +42,7 @@ object ClickHouseDialect extends JdbcDialect with Logging {

  /**
   * Inferred schema always nullable.
-  * see [[JDBCRDD.resolveTable(JDBCOptions)]]
+  * see [[JDBCRDD.resolveTable]]
   */
  override def getCatalystType(sqlType: Int,
                               typeName: String,
@@ -61,8 +61,10 @@ object ClickHouseDialect extends JdbcDialect with Logging {
    }
  }

-  // Spark use a widening conversion both ways.
-  // see https://github.com/apache/spark/pull/26301#discussion_r347725332
+  /**
+   * Spark uses a widening conversion both ways; see details at
+   * [[https://github.com/apache/spark/pull/26301#discussion_r347725332]]
+   */
  private[jdbc] def toCatalystType(typeName: String,
                                   precision: Int,
                                   scale: Int): Option[(Boolean, DataType)] = {
@@ -95,8 +97,10 @@ object ClickHouseDialect extends JdbcDialect with Logging {
    case _ => (false, maybeNullableTypeName)
  }

-  // NOT recommend auto create ClickHouse table by Spark JDBC, the reason is it's hard to handle nullable because
-  // ClickHouse use `T` to represent ANSI SQL `T NOT NULL` and `Nullable(T)` to represent ANSI SQL `T NULL`,
+  /**
+   * Auto-creating ClickHouse tables through Spark JDBC is NOT recommended, because nullability is hard to handle:
+   * ClickHouse uses `T` to represent ANSI SQL `T NOT NULL` and `Nullable(T)` to represent ANSI SQL `T NULL`.
+   */
  override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
    case StringType => Some(JdbcType("String", Types.VARCHAR))
    // ClickHouse doesn't have the concept of encodings. Strings can contain an arbitrary set of bytes,
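
The `Nullable(T)` convention described in the new doc comments is the crux of both `toCatalystType` and `getJDBCType`. As a rough, hypothetical sketch (not this patch's actual implementation; the pattern and helper names are made up), unwrapping a ClickHouse type name into a nullability flag plus base type could look like:

import scala.util.matching.Regex

// Hypothetical helper: split a ClickHouse type name into (nullable, base type name).
// ClickHouse spells ANSI `T NULL` as `Nullable(T)` and `T NOT NULL` as plain `T`.
val nullableTypePattern: Regex = """^Nullable\((.+)\)$""".r

def unwrapNullable(typeName: String): (Boolean, String) = typeName match {
  case nullableTypePattern(baseType) => (true, baseType)  // e.g. "Nullable(Int32)" -> (true, "Int32")
  case _                             => (false, typeName) // e.g. "Int32"           -> (false, "Int32")
}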
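
For context on the "inferred schema always nullable" note above `getCatalystType`, here is a hedged usage sketch. It assumes the dialect object is visible to user code and registered through Spark's standard `JdbcDialects` API (the connector may already do this internally); the JDBC URL and table name are placeholders.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.jdbc.JdbcDialects

val spark = SparkSession.builder().appName("clickhouse-dialect-demo").getOrCreate()

// Register the dialect so Spark applies its ClickHouse type mappings to JDBC reads.
JdbcDialects.registerDialect(ClickHouseDialect)

val df = spark.read
  .format("jdbc")
  .option("url", "jdbc:clickhouse://localhost:8123/default") // placeholder URL
  .option("dbtable", "events")                               // placeholder table
  .load()

// Per the doc comment above, the inferred schema marks every column nullable,
// for both `T` and `Nullable(T)` ClickHouse columns.
df.printSchema()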