Commit 85b9010

Author: Sumedh Wale (committed)
Switch JDBC driver to log4j 2.17.1 from log4j 1.x
- removed the hard-coded compile-time dependency on log4j 1.x from the entire SnappyData code base (the underlying Spark still depends on log4j 1.x)
- removed code dependencies on log4j and use slf4j instead
- updated the store link, which removes the log4j 1.x dependency and instead supports both 2.x and 1.x
- JDBC driver packaging now includes log4j 2.x using a special "shadowInclude" configuration that includes the dependencies only in the shadow jar
- also explicitly exclude log4j 1.x resources and the Log4j1Configurator implementation from the JDBC driver's shadow jar to completely eliminate all traces of log4j 1.x
- code that required explicit log4j references (e.g. to change the log level temporarily) now uses the methods from ClientSharedUtils, which has adapters for both log4j 2.x and 1.x
- ensure log4j initialization from the store layer rather than the default Spark route
- updated version to 1.3.0.1 from 1.3.0
- core/cluster and other components still continue to use log4j 1.x due to the underlying Spark
- increased gradle max-memory to 3G from 2G
- updated license header templates to year 2022 from 2021

Fixes for some test failures:

- allow JAVA_HOME to be inherited across VMs, including the independent Spark/SnappyData clusters launched by the tests, by writing it to spark-env.sh; this allows running the SnappyData test suites with JAVA_HOME set to Java 8 while the system JDK can be different
- added explicit "bind-address=localhost" in many tests that were failing because the automatically determined bind address was unusable (e.g. one picked from a docker/VM/VPN interface)
- fixed URL for the cassandra download in CassandraSnappyDUnitTest
1 parent: 05ebab4

File tree

41 files changed: +204, -192 lines


aqp

Submodule aqp updated 1 file

build.gradle

Lines changed: 7 additions & 4 deletions
@@ -70,7 +70,7 @@ allprojects {
   apply plugin: "build-time-tracker"

   group = 'io.snappydata'
-  version = '1.3.0'
+  version = '1.3.0.1'

   // apply compiler options
   tasks.withType(JavaCompile) {
@@ -109,6 +109,7 @@ allprojects {
     sparkJobServerVersion = '0.6.2.12'
     snappySparkMetricsLibVersion = '2.0.0.1'
     log4jVersion = '1.2.17'
+    log4j2Version = '2.17.1'
     slf4jVersion = '1.7.30'
     junitVersion = '4.12'
     mockitoVersion = '1.10.19'
@@ -732,6 +733,11 @@ subprojects {
       extendsFrom testCompile
       description 'a dependency that exposes test artifacts'
     }
+    shadowInclude {
+      description = 'a dependency that is included only in the shadow jar'
+      canBeResolved = true
+      canBeConsumed = false
+    }
     /*
     all {
       resolutionStrategy {
@@ -782,10 +788,7 @@ subprojects {
   }

   dependencies {
-    compile 'log4j:log4j:' + log4jVersion
     compile 'org.slf4j:slf4j-api:' + slf4jVersion
-    compile 'org.slf4j:slf4j-log4j12:' + slf4jVersion
-
     testCompile "junit:junit:${junitVersion}"
   }
 }

cluster/bin/snappy

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ elif [ -z "$SNAPPY_NO_QUICK_LAUNCH" -a $# -ge 2 \
   fi
 fi

-JARS="`echo "${SPARK_HOME}"/jars/snappydata-launcher* "${SPARK_HOME}"/jars/gemfire-shared* "${SPARK_HOME}"/jars/jna-4.* | sed 's/ /:/g'`"
+JARS="`echo "${SPARK_HOME}"/jars/snappydata-launcher* "${SPARK_HOME}"/jars/gemfire-shared* "${SPARK_HOME}"/jars/log4j-* "${SPARK_HOME}"/jars/jna-4.* | sed 's/ /:/g'`"
 exec $RUNNER $JAVA_ARGS -Xverify:none -cp "$JARS" io.snappydata.tools.QuickLauncher "$@" $HOSTNAME_FOR_CLIENTS $IMPLICIT_AWS_CLIENT_BIND_ADDRESS
 IMPLICIT_CLIENT_BIND_ADDRESS=
 EXPLICIT_CLIENT_BIND_ADDRESS=

cluster/conf/log4j.properties.template

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017-2019 TIBCO Software Inc. All rights reserved.
+# Copyright (c) 2017-2022 TIBCO Software Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you
 # may not use this file except in compliance with the License. You

cluster/src/dunit/scala/io/snappydata/cluster/ClusterManagerTestBase.scala

Lines changed: 2 additions & 2 deletions
@@ -133,7 +133,7 @@ abstract class ClusterManagerTestBase(s: String)
     val sysProps = this.sysProps
     DistributedTestBase.invokeInLocator(new SerializableRunnable() {
       override def run(): Unit = {
-        // force initialize test-log4j.properties to enable override by SnappyData
+        // force initialize console-log4j(2).properties to enable override by SnappyData
         getStaticLogWriter.info("[SnappyData] starting locator...")
         ClusterManagerTestBase.setSystemProperties(sysProps)
         val loc: Locator = ServiceManager.getLocatorInstance
@@ -153,7 +153,7 @@ abstract class ClusterManagerTestBase(s: String)
     val nodeProps = bootProps
     val startNode = new SerializableRunnable() {
       override def run(): Unit = {
-        // force initialize test-log4j.properties to enable override by SnappyData
+        // force initialize console-log4j(2).properties to enable override by SnappyData
         getStaticLogWriter.info("[SnappyData] starting server...")
         ClusterManagerTestBase.setSystemProperties(sysProps)
         val node = ServiceManager.currentFabricServiceInstance

cluster/src/main/scala/io/snappydata/remote/interpreter/SnappyInterpreterExecute.scala

Lines changed: 2 additions & 2 deletions
@@ -32,7 +32,7 @@ import com.pivotal.gemfirexd.internal.snappy.InterpreterExecute
 import com.pivotal.gemfirexd.{Attribute, Constants}
 import io.snappydata.Constant
 import io.snappydata.gemxd.SnappySessionPerConnection
-import org.apache.log4j.Logger
+import org.slf4j.LoggerFactory

 import org.apache.spark.Logging
 import org.apache.spark.sql.execution.columnar.ExternalStoreUtils
@@ -279,7 +279,7 @@ object SnappyInterpreterExecute {

 object PermissionChecker {

-  private val logger = Logger.getLogger(
+  private[this] val logger = LoggerFactory.getLogger(
     "io.snappydata.remote.interpreter.SnappyInterpreterExecute")

   def isAllowed(key: String, currentUser: String, tableSchema: String): Boolean = {

cluster/src/main/scala/org/apache/spark/sql/hive/thriftserver/SnappyHiveThriftServer2.scala

Lines changed: 8 additions & 9 deletions
@@ -19,9 +19,9 @@ package org.apache.spark.sql.hive.thriftserver

 import java.net.InetAddress

+import com.gemstone.gemfire.internal.shared.ClientSharedUtils
 import org.apache.hadoop.hive.ql.metadata.Hive
 import org.apache.hive.service.cli.thrift.ThriftCLIService
-import org.apache.log4j.{Level, LogManager}

 import org.apache.spark.Logging
 import org.apache.spark.sql.hive.client.HiveClientImpl
@@ -55,12 +55,11 @@ object SnappyHiveThriftServer2 extends Logging {
     // full conf used is from the internal hive client from SnappySharedState.

     // avoid meta-store init warnings
-    val rootLogger = LogManager.getRootLogger
-    val metaLogger = LogManager.getLogger("org.apache.hadoop.hive.metastore.MetaStoreDirectSql")
-    val currentRootLevel = rootLogger.getLevel
-    val currentMetaLevel = metaLogger.getLevel
-    rootLogger.setLevel(Level.ERROR)
-    metaLogger.setLevel(Level.ERROR)
+    val metaLogger = "org.apache.hadoop.hive.metastore.MetaStoreDirectSql"
+    val currentRootLevel = ClientSharedUtils.getLog4jLevel(null)
+    val currentMetaLevel = ClientSharedUtils.getLog4jLevel(metaLogger)
+    ClientSharedUtils.setLog4jLevel(null, "ERROR")
+    ClientSharedUtils.setLog4jLevel(metaLogger, "ERROR")
     val externalCatalog = SnappyHiveExternalCatalog.getExistingInstance
     val hiveConf = try {
       val executionHive = HiveUtils.newClientForExecution(conf,
@@ -87,8 +86,8 @@ object SnappyHiveThriftServer2 extends Logging {
         conf
       }
     } finally {
-      rootLogger.setLevel(currentRootLevel)
-      metaLogger.setLevel(currentMetaLevel)
+      ClientSharedUtils.setLog4jLevel(null, currentRootLevel)
+      ClientSharedUtils.setLog4jLevel(metaLogger, currentMetaLevel)
     }

     val server = new HiveThriftServer2(SparkSQLEnv.sqlContext)
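The hunks above replace direct log4j 1.x calls with the ClientSharedUtils level adapters mentioned in the commit message. For illustration only, here is a minimal Scala sketch of how such an adapter could dispatch between log4j 2.x and 1.x at runtime; the object and method names are hypothetical and the String-based signature is an assumption, not the actual ClientSharedUtils code:

// Hypothetical sketch, not the real ClientSharedUtils implementation.
object Log4jLevelAdapter {

  // Use log4j 2.x when its core Configurator is on the classpath, else fall back to 1.x.
  private val hasLog4j2: Boolean =
    try { Class.forName("org.apache.logging.log4j.core.config.Configurator"); true }
    catch { case _: ClassNotFoundException => false }

  // Set the level of the named logger; a null name means the root logger.
  def setLevel(loggerName: String, level: String): Unit = {
    if (hasLog4j2) {
      import org.apache.logging.log4j.Level
      import org.apache.logging.log4j.core.config.Configurator
      val l = Level.toLevel(level)
      if (loggerName eq null) Configurator.setRootLevel(l)
      else Configurator.setLevel(loggerName, l)
    } else {
      import org.apache.log4j.{Level, LogManager}
      val logger =
        if (loggerName eq null) LogManager.getRootLogger else LogManager.getLogger(loggerName)
      logger.setLevel(Level.toLevel(level))
    }
  }
}

Call sites then follow the save/override/restore pattern visible in the diff: read the current level, set ERROR around the noisy initialization, and restore the saved level in a finally block.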

core/build.gradle

Lines changed: 2 additions & 0 deletions
@@ -30,10 +30,12 @@ dependencies {
   compileOnly 'org.scala-lang:scala-library:' + scalaVersion
   compileOnly 'org.scala-lang:scala-reflect:' + scalaVersion

+  compile 'log4j:log4j:' + log4jVersion
   compile 'org.slf4j:slf4j-api:' + slf4jVersion
   compile 'org.slf4j:slf4j-log4j12:' + slf4jVersion
   compile 'org.slf4j:jcl-over-slf4j:' + slf4jVersion
   compile 'org.slf4j:jul-to-slf4j:' + slf4jVersion
+
   compile group: 'org.codehaus.janino', name: 'janino', version: janinoVersion
   compile("org.apache.thrift:libthrift:${thriftVersion}") {
     exclude(group: 'org.slf4j', module: 'slf4j-api')

core/src/dunit/scala/io/snappydata/cluster/CassandraSnappyDUnitTest.scala

Lines changed: 1 addition & 1 deletion
@@ -112,7 +112,7 @@ class CassandraSnappyDUnitTest(val s: String)
     }
     if (downloadFiles) {
       val cassandraTarball = s"apache-cassandra-$cassandraVersion-bin.tar.gz"
-      s"curl -OL http://www-us.apache.org/dist/cassandra/$cassandraVersion/$cassandraTarball".!!
+      s"curl -OL http://www.apache.org/dist/cassandra/$cassandraVersion/$cassandraTarball".!!
       ("curl -OL https://repo1.maven.org/maven2/com/datastax/spark/" +
         s"spark-cassandra-connector_2.11/$cassandraConnVersion/$cassandraConnectorJar").!!
       ("tar xf " + cassandraTarball).!!

core/src/dunit/scala/io/snappydata/cluster/ClusterUtils.scala

Lines changed: 8 additions & 34 deletions
@@ -17,18 +17,15 @@

 package io.snappydata.cluster

-import java.io.{BufferedWriter, FileWriter, PrintWriter}
 import java.nio.file.{Files, Path, Paths, StandardCopyOption}
 import java.util.concurrent.atomic.AtomicBoolean

 import scala.collection.JavaConverters._
-import scala.io.{Codec, Source}
 import scala.sys.process._
-import scala.util.control.NonFatal

+import io.snappydata.SnappyTestRunner
 import io.snappydata.test.dunit.VM
 import io.snappydata.test.util.TestException
-import io.snappydata.util.TestUtils
 import org.apache.commons.io.FileUtils

 import org.apache.spark.Logging
@@ -135,7 +132,7 @@ object ClusterUtils extends Serializable with Logging {

   def getEnvironmentVariable(name: String): String = {
     val value = System.getenv(name)
-    if (name eq null) {
+    if (value eq null) {
       throw new TestException(s"Environment variable $name is not defined")
     }
     value
@@ -144,39 +141,16 @@
   def getSparkClusterDirectory(productDir: String): String =
     getClusterDirectory(Paths.get(productDir), isSnappy = false)

-  def writeToFile(str: String, filePath: String, append: Boolean): Unit = {
-    val fileWriter = new FileWriter(filePath, append)
-    val bufferedWriter = new BufferedWriter(fileWriter)
-    val pw = new PrintWriter(bufferedWriter)
-    try {
-      pw.write(str)
-      pw.flush()
-    } finally {
-      pw.close()
-    }
-    // wait until file becomes available (e.g. running on NFS)
-    var matched = append
-    while (!matched) {
-      Thread.sleep(100)
-      try {
-        val source = Source.fromFile(filePath)(Codec.UTF8)
-        val lines = try {
-          source.mkString
-        } finally {
-          source.close()
-        }
-        matched = lines == str
-      } catch {
-        case NonFatal(_) =>
-      }
-    }
-  }
+  def writeToFile(str: String, filePath: String, append: Boolean): Unit =
+    SnappyTestRunner.writeToFile(str, filePath, append)

   def startSparkCluster(clusterDir: String): String = {
     logInfo(s"Starting spark cluster in $clusterDir/work")
     writeToFile(
-      s"\nSPARK_WORKER_CORES=${TestUtils.defaultCores * 2}",
-      s"$clusterDir/conf/spark-env.sh", append = true)
+      s"""
+         |JAVA_HOME=${SnappyTestRunner.javaHome}
+         |SPARK_WORKER_CORES=${SnappyTestRunner.defaultCores * 2}
+         |""".stripMargin, s"$clusterDir/conf/spark-env.sh", append = true)
     val output = s"$clusterDir/sbin/start-all.sh".!!
     logInfo(output)
     output

core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitTestBase.scala

Lines changed: 2 additions & 2 deletions
@@ -372,8 +372,8 @@ trait SplitClusterDUnitTestObject extends Logging {
   * Returns the SnappyContext for external(connector) Spark cluster connected to
   * SnappyData cluster
   */
-  def getSnappyContextForConnector(locatorClientPort: Int, props: Properties = null):
-  SnappyContext = {
+  def getSnappyContextForConnector(locatorClientPort: Int,
+      props: Properties = null): SnappyContext = {
     val hostName = InetAddress.getLocalHost.getHostName
     // val connectionURL = "jdbc:snappydata://localhost:" + locatorClientPort + "/"
     val connectionURL = s"localhost:$locatorClientPort"

core/src/main/scala/io/snappydata/sql/catalog/impl/StoreHiveCatalog.scala

Lines changed: 3 additions & 5 deletions
@@ -27,7 +27,7 @@ import scala.util.control.NonFatal
 import com.gemstone.gemfire.cache.RegionDestroyedException
 import com.gemstone.gemfire.internal.LogWriterImpl
 import com.gemstone.gemfire.internal.cache.{ExternalTableMetaData, GemfireCacheHelper, LocalRegion, PolicyTableData}
-import com.gemstone.gemfire.internal.shared.SystemProperties
+import com.gemstone.gemfire.internal.shared.{ClientSharedUtils, SystemProperties}
 import com.pivotal.gemfirexd.Attribute.{PASSWORD_ATTR, USERNAME_ATTR}
 import com.pivotal.gemfirexd.internal.catalog.ExternalCatalog
 import com.pivotal.gemfirexd.internal.engine.Misc
@@ -40,7 +40,6 @@ import io.snappydata.Constant.{SPARK_STORE_PREFIX, STORE_PROPERTY_PREFIX}
 import io.snappydata.sql.catalog.SnappyExternalCatalog.checkSchemaPermission
 import io.snappydata.sql.catalog.{CatalogObjectType, ConnectorExternalCatalog, SnappyExternalCatalog}
 import io.snappydata.thrift._
-import org.apache.log4j.{Level, LogManager}

 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
@@ -237,9 +236,8 @@ class StoreHiveCatalog extends ExternalCatalog with Logging {
         lockTaken = lockService.readLock(hiveClientObject, lockOwner,
           GfxdLockSet.MAX_LOCKWAIT_VAL)
         // reduce log4j level to avoid "function exists" warnings
-        val log4jLogger = LogManager.getRootLogger
-        if (log4jLogger.getEffectiveLevel == Level.WARN) {
-          log4jLogger.setLevel(Level.ERROR)
+        if ("WARN".equalsIgnoreCase(ClientSharedUtils.getLog4jLevel(null))) {
+          ClientSharedUtils.setLog4jLevel(null, "ERROR")
         }
       } else {
         lockTaken = lockService.writeLock(hiveClientObject, lockOwner,

core/src/main/scala/io/snappydata/util/ServiceUtils.scala

Lines changed: 5 additions & 3 deletions
@@ -29,6 +29,7 @@ import _root_.com.gemstone.gemfire.internal.shared.ClientSharedUtils
 import _root_.com.pivotal.gemfirexd.internal.engine.GfxdConstants
 import _root_.com.pivotal.gemfirexd.internal.engine.distributed.utils.GemFireXDUtils
 import io.snappydata.{Constant, Property, ServerManager, SnappyTableStatsProviderService}
+import org.slf4j.{Logger, LoggerFactory}

 import org.apache.spark.memory.MemoryMode
 import org.apache.spark.sql.collection.Utils
@@ -118,10 +119,11 @@ object ServiceUtils {
     }
     // set the log-level from initialized SparkContext's level if set to higher level than default
     if (!properties.containsKey("log-level")) {
-      val level = org.apache.log4j.Logger.getRootLogger.getLevel
-      if ((level ne null) && level.isGreaterOrEqual(org.apache.log4j.Level.WARN)) {
+      // preserve existing sl4j level if it is higher
+      val rootLogger = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME)
+      if (rootLogger.isWarnEnabled()) {
         properties.setProperty("log-level",
-          ClientSharedUtils.convertToJavaLogLevel(level).getName.toLowerCase)
+          ClientSharedUtils.getJavaLogLevel(rootLogger).getName.toLowerCase)
       }
     }
     ServerManager.getServerInstance.start(properties)
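Note on the change above: slf4j's Logger interface does not expose a getLevel() method, so an effective level can only be derived by probing the is*Enabled checks; the ClientSharedUtils.getJavaLogLevel helper used in the diff presumably does something along these lines. A rough sketch under that assumption (the mapping to java.util.logging levels below is illustrative only, not the actual implementation):

import java.util.logging.{Level => JLevel}
import org.slf4j.Logger

object Slf4jLevels {
  // Probe from most to least verbose; the first enabled level is the effective one.
  def effectiveJavaLevel(logger: Logger): JLevel =
    if (logger.isTraceEnabled) JLevel.FINEST
    else if (logger.isDebugEnabled) JLevel.FINE
    else if (logger.isInfoEnabled) JLevel.INFO
    else if (logger.isWarnEnabled) JLevel.WARNING
    else if (logger.isErrorEnabled) JLevel.SEVERE
    else JLevel.OFF
}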

core/src/main/scala/org/apache/spark/sql/execution/columnar/ExternalStoreUtils.scala

Lines changed: 4 additions & 0 deletions
@@ -106,6 +106,10 @@ object ExternalStoreUtils extends Logging {

   registerBuiltinDrivers()

+  def init(): Unit = {
+    // empty method just to ease initialization by registerBuiltinDrivers()
+  }
+
   def registerBuiltinDrivers(): Unit = {
     DriverRegistry.register(Constant.JDBC_EMBEDDED_DRIVER)
     DriverRegistry.register(Constant.JDBC_CLIENT_DRIVER)

core/src/main/scala/org/apache/spark/sql/hive/HiveClientUtil.scala

Lines changed: 1 addition & 2 deletions
@@ -51,8 +51,6 @@ object HiveClientUtil extends Logging {
     ConfVars.HIVEHISTORYFILELOC -> hivePath("query_logs"),
     ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION -> hivePath("operation_logs"))

-  ExternalStoreUtils.registerBuiltinDrivers()
-
   private def hivePath(name: String): String =
     Paths.get(s"$HIVE_TMPDIR/$name").toAbsolutePath.toString

@@ -63,6 +61,7 @@
   */
  def getOrCreateExternalCatalog(sparkContext: SparkContext,
      conf: SparkConf): SnappyHiveExternalCatalog = synchronized {
+    ExternalStoreUtils.init()
    val (dbURL, dbDriver) = resolveMetaStoreDBProps(SnappyContext.getClusterMode(sparkContext))
    val metadataConf = new SnappyHiveConf
    // make a copy of SparkConf since it is to be updated later
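The otherwise-empty ExternalStoreUtils.init() added above exists only to force the ExternalStoreUtils object body, which already calls registerBuiltinDrivers(), to run before the external catalog is created, instead of HiveClientUtil registering the drivers a second time at class-load time. A standalone illustration of this Scala idiom (the names here are made up for the example):

object Drivers {
  // A side effect in the object body runs exactly once, on first reference.
  println("registering JDBC drivers...")

  // No-op whose only purpose is to trigger the one-time initialization above.
  def init(): Unit = {}
}

object Demo extends App {
  Drivers.init()   // first reference: runs the object body and prints once
  Drivers.init()   // later calls: no further side effects
}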

core/src/main/scala/org/apache/spark/sql/hive/SnappyHiveExternalCatalog.scala

Lines changed: 5 additions & 6 deletions
@@ -28,6 +28,7 @@ import scala.concurrent.ExecutionException
 import com.gemstone.gemfire.cache.CacheClosedException
 import com.gemstone.gemfire.internal.LogWriterImpl
 import com.gemstone.gemfire.internal.cache.{LocalRegion, PartitionedRegion}
+import com.gemstone.gemfire.internal.shared.ClientSharedUtils
 import com.google.common.cache.{Cache, CacheBuilder, CacheLoader, LoadingCache}
 import com.pivotal.gemfirexd.Constants
 import com.pivotal.gemfirexd.internal.engine.Misc
@@ -41,7 +42,6 @@ import io.snappydata.sql.catalog.{CatalogObjectType, ConnectorExternalCatalog, R
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException
 import org.apache.hadoop.hive.ql.metadata.Hive
-import org.apache.log4j.{Level, LogManager}

 import org.apache.spark.SparkConf
 import org.apache.spark.jdbc.{ConnectionConf, ConnectionUtil}
@@ -62,7 +62,7 @@ import org.apache.spark.sql.sources.JdbcExtendedUtils
 import org.apache.spark.sql.sources.JdbcExtendedUtils.normalizeSchema
 import org.apache.spark.sql.store.CodeGeneration
 import org.apache.spark.sql.types.LongType
-import org.apache.spark.sql.{AnalysisException, _}
+import org.apache.spark.sql._

 class SnappyHiveExternalCatalog private[hive](val conf: SparkConf,
     val hadoopConf: Configuration, val createTime: Long)
@@ -872,20 +872,19 @@ object SnappyHiveExternalCatalog {
     // Once the initialization is done, restore the logging level.
     val logger = Misc.getI18NLogWriter.asInstanceOf[LogWriterImpl]
     val previousLevel = logger.getLevel
-    val log4jLogger = LogManager.getRootLogger
-    val log4jLevel = log4jLogger.getEffectiveLevel
+    val log4jLevel = ClientSharedUtils.getLog4jLevel(null)
     logger.info("Starting hive meta-store initialization")
     val reduceLog = previousLevel == LogWriterImpl.CONFIG_LEVEL ||
       previousLevel == LogWriterImpl.INFO_LEVEL
     if (reduceLog) {
       logger.setLevel(LogWriterImpl.ERROR_LEVEL)
-      log4jLogger.setLevel(Level.ERROR)
+      ClientSharedUtils.setLog4jLevel(null, "ERROR")
     }
     try {
       instance = new SnappyHiveExternalCatalog(sparkConf, hadoopConf, createTime)
     } finally {
       logger.setLevel(previousLevel)
-      log4jLogger.setLevel(log4jLevel)
+      ClientSharedUtils.setLog4jLevel(null, log4jLevel)
       logger.info("Done hive meta-store initialization")
     }
     instance

core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala

Lines changed: 1 addition & 2 deletions
@@ -397,8 +397,7 @@ trait NativeTableRowLevelSecurityRelation
   def isRowLevelSecurityEnabled: Boolean = {
     val conn = connFactory()
     try {
-      JdbcExtendedUtils.isRowLevelSecurityEnabled(resolvedName,
-        conn, dialect, sqlContext)
+      JdbcExtendedUtils.isRowLevelSecurityEnabled(resolvedName, conn)
     } catch {
       case se: java.sql.SQLException =>
         if (se.getMessage.contains("No suitable driver found")) {
