
Commit 5f5c15d

Sumedh Wale committed

Reduced logging by BlockManager in default log4j.properties

also fixed a putInto hydra test and updated the spark module link

1 parent 324ff3f · commit 5f5c15d

File tree

6 files changed (+21 -12 lines)

cluster/conf/log4j.properties.template

Lines changed: 8 additions & 0 deletions

@@ -99,6 +99,7 @@ log4j.logger.org.apache.spark.scheduler.DAGScheduler=WARN
 log4j.logger.org.apache.spark.scheduler.TaskSetManager=WARN
 log4j.logger.org.apache.spark.scheduler.FairSchedulableBuilder=WARN
 log4j.logger.org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend$DriverEndpoint=WARN
+log4j.logger.org.apache.spark.storage.BlockManager=WARN
 log4j.logger.org.apache.spark.storage.BlockManagerInfo=WARN
 log4j.logger.org.apache.hadoop.hive=WARN
 log4j.logger.org.apache.spark.sql.execution.datasources=WARN
@@ -116,13 +117,20 @@ log4j.logger.org.datanucleus=ERROR
 # Task logger created in SparkEnv
 log4j.logger.org.apache.spark.Task=WARN
 log4j.logger.org.apache.spark.sql.catalyst.parser.CatalystSqlParser=WARN
+# HiveExternalCatalog spits out a warning every time a non-hive table is persisted in meta-store
+log4j.logger.org.apache.spark.sql.hive.SnappyHiveExternalCatalog=ERROR

 # Keep log-level of some classes as INFO even if root level is higher
 log4j.logger.io.snappydata.impl.LeadImpl=INFO
 log4j.logger.io.snappydata.impl.ServerImpl=INFO
 log4j.logger.io.snappydata.impl.LocatorImpl=INFO
 log4j.logger.spray.can.server.HttpListener=INFO

+# Note: all code generation classes that dump using "code" logger should
+# also be listed in ClientSharedUtils.initLog4j for removal in case top-level
+# file has not been set (e.g. common for JDBC clients) else an empty
+# generatedcode.log will be created.
+
 # for generated code of plans
 log4j.logger.org.apache.spark.sql.execution.WholeStageCodegenExec=INFO, code
 log4j.additivity.org.apache.spark.sql.execution.WholeStageCodegenExec=false
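The last two properties work as a pair: WholeStageCodegenExec output goes to the dedicated "code" appender, and turning additivity off keeps the generated code out of the root log. A minimal sketch of the same setup via the log4j 1.x programmatic API; the appender layout here is an assumption, though the note in the template does name generatedcode.log:

import org.apache.log4j.{FileAppender, Level, Logger, PatternLayout}

// Sketch only, not from this commit: route WholeStageCodegenExec output to
// its own file appender and, with additivity off, keep it out of the root log.
object CodeLoggerSetup {
  def main(args: Array[String]): Unit = {
    val logger = Logger.getLogger("org.apache.spark.sql.execution.WholeStageCodegenExec")
    logger.setLevel(Level.INFO)
    // equivalent of "log4j.additivity....WholeStageCodegenExec=false"
    logger.setAdditivity(false)
    // hypothetical layout; file name taken from the template's comment
    logger.addAppender(new FileAppender(
      new PatternLayout("%d %p %c: %m%n"), "generatedcode.log", true))
  }
}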

cluster/src/main/scala/io/snappydata/impl/LeadImpl.scala

Lines changed: 4 additions & 6 deletions

@@ -26,6 +26,7 @@ import scala.collection.JavaConverters._
 import scala.concurrent.ExecutionContext.Implicits.global
 import scala.concurrent.duration.Duration
 import scala.concurrent.{Await, Future}
+
 import akka.actor.ActorSystem
 import com.gemstone.gemfire.CancelException
 import com.gemstone.gemfire.cache.CacheClosedException
@@ -50,15 +51,14 @@ import org.apache.thrift.transport.TTransportException
 import spark.jobserver.JobServer
 import spark.jobserver.auth.{AuthInfo, SnappyAuthenticator, User}
 import spray.routing.authentication.UserPass
+
 import org.apache.spark.sql.collection.{ToolsCallbackInit, Utils}
 import org.apache.spark.sql.execution.SecurityUtils
 import org.apache.spark.sql.hive.thriftserver.SnappyHiveThriftServer2
 import org.apache.spark.sql.{SnappyContext, SnappySession}
 import org.apache.spark.util.LocalDirectoryCleanupUtil
 import org.apache.spark.{Logging, SparkCallbacks, SparkConf, SparkContext, SparkException}

-import scala.collection.mutable.ArrayBuffer
-
 class LeadImpl extends ServerImpl with Lead
     with ProtocolOverrides with Logging {

@@ -336,10 +336,8 @@ class LeadImpl extends ServerImpl with Lead

     // If recovery mode then initialize the recovery service
     if (Misc.getGemFireCache.isSnappyRecoveryMode) {
-      if (enableTableCountInUI.equalsIgnoreCase("true"))
-        RecoveryService.collectViewsAndPrepareCatalog(true)
-      else
-        RecoveryService.collectViewsAndPrepareCatalog(false)
+      RecoveryService.collectViewsAndPrepareCatalog(
+        enableTableCountInUI.equalsIgnoreCase("true"))
     }

     if (jobServerWait) {
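Besides removing an unused ArrayBuffer import and regrouping the others, the LeadImpl change collapses an if/else whose two branches call the same method with opposite boolean literals into a single call that passes the condition itself. A standalone sketch of that pattern, with hypothetical names:

// Sketch of the refactor pattern (hypothetical names, not SnappyData code):
// when both branches call the same method with opposite boolean literals,
// pass the condition's value directly as the argument.
object BooleanArgRefactor {
  def prepareCatalog(countTables: Boolean): Unit =
    println(s"preparing catalog, countTables = $countTables")

  def main(args: Array[String]): Unit = {
    val enableTableCountInUI = "true" // stand-in for the configured property
    // before: if (enableTableCountInUI.equalsIgnoreCase("true"))
    //           prepareCatalog(true) else prepareCatalog(false)
    prepareCatalog(enableTableCountInUI.equalsIgnoreCase("true"))
  }
}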
Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
 -- DROP TABLE IF ALREADY EXISTS --
 DROP TABLE IF EXISTS testL;

-create table testL (id long, data string, data2 decimal) using column options (partition_by 'id', key_columns 'id');
+create table testL (id long, data string, data2 decimal(38,10)) using column options (partition_by 'id', key_columns 'id');
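The schema fix is about decimal precision: in Spark SQL, on which SnappyData builds, a bare DECIMAL column defaults to precision 10 and scale 0, which is presumably too narrow for the values this test writes, so the column is widened explicitly to decimal(38,10). A quick sketch that prints the relevant Spark defaults:

import org.apache.spark.sql.types.DecimalType

// A bare DECIMAL in Spark SQL DDL maps to DecimalType.USER_DEFAULT,
// i.e. precision 10 and scale 0; decimal(38,10) widens it explicitly.
object DecimalDefaults {
  def main(args: Array[String]): Unit = {
    println(DecimalType.USER_DEFAULT)   // DecimalType(10,0)
    println(DecimalType.SYSTEM_DEFAULT) // DecimalType(38,18)
    println(DecimalType(38, 10))        // the type used after this fix
  }
}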

dtests/src/test/scala/io/snappydata/hydra/putInto/ConcPutIntoTest.scala

Lines changed: 6 additions & 3 deletions

@@ -26,10 +26,12 @@ import scala.concurrent.{Await, Future}

 object ConcPutIntoTest {

-  def concPutInto(primaryLocatorHost: String, primaryLocatorPort: String, numThreads: Integer): Any = {
+  def concPutInto(primaryLocatorHost: String, primaryLocatorPort: String,
+      numThreads: Integer): Any = {
     val globalId = new AtomicInteger()
     val doPut = () => Future {
-      val conn = DriverManager.getConnection("jdbc:snappydata://" + primaryLocatorHost + ":" + primaryLocatorPort)
+      val conn = DriverManager.getConnection(
+        "jdbc:snappydata://" + primaryLocatorHost + ":" + primaryLocatorPort)
       val stmt = conn.createStatement()
       val myId = globalId.getAndIncrement()
       val blockSize = 100000L
@@ -60,7 +62,8 @@ object ConcPutIntoTest {
     queryTasks.foreach(Await.result(_, Duration.Inf))
   }

-  def conSelect(primaryLocatorHost: String, primaryLocatorPort: String, numThreads: Integer): Any = {
+  def conSelect(primaryLocatorHost: String, primaryLocatorPort: String,
+      numThreads: Integer): Any = {
     val globalId = new AtomicInteger()
     val doQuery = () => Future {
       val conn = DriverManager.getConnection("jdbc:snappydata://localhost:1527")
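The test's concurrency pattern is worth spelling out: each worker future opens its own JDBC connection, claims a disjoint id block through a shared AtomicInteger, and the driver blocks on all futures with Await.result. A self-contained sketch of that pattern follows; the table name testL matches the schema file above, while the PUT INTO row construction and batch sizes are assumptions, not the test's actual SQL:

import java.sql.DriverManager
import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

// Sketch of the test's shape: N futures, one JDBC connection each,
// disjoint id blocks handed out by a shared AtomicInteger.
object ConcPutIntoSketch {
  def concPutInto(host: String, port: String, numThreads: Int): Unit = {
    val globalId = new AtomicInteger()
    val doPut = () => Future {
      val conn = DriverManager.getConnection(s"jdbc:snappydata://$host:$port")
      try {
        // PUT INTO is SnappyData's upsert DML; values here are illustrative
        val ps = conn.prepareStatement("put into testL values (?, ?, ?)")
        val myId = globalId.getAndIncrement()
        val blockSize = 100000L
        var id = myId * blockSize
        while (id < (myId + 1) * blockSize) {
          ps.setLong(1, id)
          ps.setString(2, s"data_$id")
          ps.setBigDecimal(3, new java.math.BigDecimal(id))
          ps.addBatch()
          if (id % 1000L == 999L) ps.executeBatch() // flush in chunks
          id += 1
        }
        ps.executeBatch()
      } finally conn.close()
    }
    val putTasks = (1 to numThreads).map(_ => doPut())
    putTasks.foreach(Await.result(_, Duration.Inf))
  }
}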
