
Commit ea127bd

Author: Sumedh Wale

Updates for 1.3.1 release
- fixed UDF name lookups to do an exact regex match in the CSV list in the meta-data region, i.e. ",<name>," or "<head><name>,", instead of just searching for <name>; this fixes failures in UserDefinedFunctionsDUnitTest when run in the suite, where a previous test's superset name "intudf_embed" subsumed "intudf" and made the code think that "intudf" had been dropped from the meta-data region (see the sketch just below)
- updated log4j2 to 2.17.2
- changed jetty version to 9.4.43.v20210629
- updated sub-module versions
- rollover the log file on startup in log4j2.properties.template
- fixed custom log4j2.properties getting overridden by log4j2-defaults.properties at start
- fixed log file output shown on startup
- changed the standard output file "start_" to use the actual configured log-file name as its suffix
- updated cluster-util.sh and other utilities to use log4j2.properties instead of log4j.properties
- updated docs and remaining places for log4j2.properties instead of log4j.properties
- updated dependency versions in LICENSE and NOTICE
- fixed sporadic failures in JDBCPreparedStatementDUnitTest and SnappyStorageEvictorSuite
- changed all instances of Process.exitValue() to use a timed waitFor() instead, because the former is broken and may return values like "-1" before process exit (see the sketch after the diffs below)
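As a rough sketch of the first item: the fix treats the comma-separated UDF list in the meta-data region as whole entries instead of doing a substring search, so that "intudf" is not matched inside "intudf_embed". The helper below is hypothetical (not the SnappyData code) and uses a token split rather than the literal ",<name>," / "<head><name>," regex, but the intended semantics are the same:

```scala
// Hypothetical sketch: is `name` present as a complete entry in a CSV list
// such as "intudf_embed,strudf"? A plain csvList.contains(name) would report
// "intudf" as present because it is a substring of "intudf_embed".
def containsFunction(csvList: String, name: String): Boolean =
  csvList.split(",").map(_.trim).contains(name)

// quick check of the failure mode described in the commit message
assert(containsFunction("intudf_embed,strudf", "intudf_embed"))
assert(!containsFunction("intudf_embed,strudf", "intudf"))
```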
1 parent 3594353 commit ea127bd

File tree

28 files changed: +306 additions, −262 deletions

LICENSE

Lines changed: 14 additions & 14 deletions
Large diffs are not rendered by default.

NOTICE

Lines changed: 91 additions & 87 deletions
Large diffs are not rendered by default.

build.gradle

Lines changed: 4 additions & 4 deletions
@@ -108,9 +108,9 @@ allprojects {
   sparkDistName = "spark-${sparkVersion}-bin-hadoop2.7"
   sparkCurrentVersion = '2.4.8'
   sparkCurrentDistName = "spark-${sparkCurrentVersion}-bin-hadoop2.7"
-  sparkJobServerVersion = '0.6.2.12'
+  sparkJobServerVersion = '0.6.2.13'
   snappySparkMetricsLibVersion = '2.0.0.1'
-  log4j2Version = '2.17.1'
+  log4j2Version = '2.17.2'
   slf4jVersion = '1.7.32'
   junitVersion = '4.12'
   mockitoVersion = '1.10.19'
@@ -121,7 +121,7 @@ allprojects {
   sparkXmlVersion = '0.4.1'
   scalatestVersion = '2.2.6'
   py4jVersion = '0.10.7'
-  jettyVersion = '9.4.44.v20210927'
+  jettyVersion = '9.4.43.v20210629'
   guavaVersion = '14.0.1'
   fastutilVersion = '8.5.6'
   kryoVersion = '4.0.1'
@@ -159,7 +159,7 @@ allprojects {
   eclipseCollectionsVersion = '10.4.0'

   pegdownVersion = '1.6.0'
-  snappyStoreVersion = '1.6.6'
+  snappyStoreVersion = '1.6.7'
   snappydataVersion = version
   zeppelinInterpreterVersion = '0.8.2.1'

cluster/bin/snappy

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ elif [ -z "$SNAPPY_NO_QUICK_LAUNCH" -a $# -ge 2 \
   fi
 fi

-JARS="`echo "${SPARK_HOME}"/jars/snappydata-launcher* "${SPARK_HOME}"/jars/gemfire-shared* "${SPARK_HOME}"/jars/log4j-* "${SPARK_HOME}"/jars/jna-4.* | sed 's/ /:/g'`"
+JARS="${SPARK_HOME}/conf:`echo "${SPARK_HOME}"/jars/snappydata-launcher* "${SPARK_HOME}"/jars/gemfire-shared* "${SPARK_HOME}"/jars/log4j-* "${SPARK_HOME}"/jars/jna-4.* | sed 's/ /:/g'`"
 exec $RUNNER $JAVA_ARGS -Xverify:none -cp "$JARS" io.snappydata.tools.QuickLauncher "$@" $HOSTNAME_FOR_CLIENTS $IMPLICIT_AWS_CLIENT_BIND_ADDRESS
 IMPLICIT_CLIENT_BIND_ADDRESS=
 EXPLICIT_CLIENT_BIND_ADDRESS=

cluster/conf/log4j2.properties.template

Lines changed: 2 additions & 0 deletions
@@ -45,6 +45,7 @@ appender.rolling.fileName = snappydata.log
 appender.rolling.filePattern = snappydata.%d{yy-MM-dd}.%i.log.gz
 appender.rolling.append = true
 appender.rolling.policies.type = Policies
+appender.rolling.policies.startup.type = OnStartupTriggeringPolicy
 appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
 appender.rolling.policies.size.size = 100MB
 appender.rolling.strategy.type = DefaultRolloverStrategy
@@ -59,6 +60,7 @@ appender.code.fileName = generatedcode.log
 appender.code.filePattern = generatedcode.%d{yy-MM-dd}.%i.log.gz
 appender.code.append = true
 appender.code.policies.type = Policies
+appender.code.policies.startup.type = OnStartupTriggeringPolicy
 appender.code.policies.size.type = SizeBasedTriggeringPolicy
 appender.code.policies.size.size = 100MB
 appender.code.strategy.type = DefaultRolloverStrategy

cluster/sbin/cluster-util.sh

Lines changed: 4 additions & 4 deletions
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 #
 # Copyright (c) 2017-2019 TIBCO Software Inc. All rights reserved.
 #
@@ -61,7 +61,7 @@ usage() {
   echo -e ' \t ''\t'"If specified, the script doesn't ask for confirmation for execution of the command on each member node."
   echo
   echo -e ' \t '"--copy-conf"
-  echo -e ' \t ''\t'"This is a shortcut command which when specified copies log4j.properties, snappy-env.sh and "
+  echo -e ' \t ''\t'"This is a shortcut command which when specified copies log4j2.properties, snappy-env.sh and "
   echo -e ' \t ''\t'"spark-env.sh configuration files from local machine to all the members."
   echo -e ' \t ''\t'"These files are copied only if a) these are absent in the destination member or b) their content is different. In "
   echo -e ' \t ''\t'"latter case, a backup of the file is taken in conf/backup directory on destination member, before copy."
@@ -165,9 +165,9 @@ START_ALL_TIMESTAMP="$(date +"%Y_%m_%d_%H_%M_%S")"

 function copyConf() {
   for entry in "${SPARK_CONF_DIR}"/*; do
-    if [ -f "$entry" ];then
+    if [ -f "$entry" ]; then
       fileName=$(basename $entry)
-      if [[ $fileName == "log4j.properties" || $fileName == "snappy-env.sh" || $fileName == "spark-env.sh" ]]; then
+      if [[ $fileName == "log4j2.properties" || $fileName == "snappy-env.sh" || $fileName == "spark-env.sh" ]]; then
         if ! ssh $node "test -e $entry"; then #"File does not exist."
           scp ${SPARK_CONF_DIR}/$fileName $node:${SPARK_CONF_DIR}
         else

cluster/src/dunit/scala/io/snappydata/cluster/SnappyMetricsSystemDUnitTest.scala

Lines changed: 1 addition & 1 deletion
@@ -83,7 +83,7 @@ class SnappyMetricsSystemDUnitTest(s: String)
   def collectJsonStats(): mutable.Map[String, AnyRef] = {
     val url = "http://localhost:9090/metrics/json/"
     // val json = scala.io.Source.fromURL(url).mkString
-    val json = s"curl $url".!!
+    val json = s"curl -s $url".!!
     val data = jsonStrToMap(json)
     val rs = data.-("counters", "meters", "histograms", "timers", "version")
     val map = scala.collection.mutable.LinkedHashMap[String, AnyRef]()

cluster/src/dunit/scala/io/snappydata/externalstore/JDBCPreparedStatementDUnitTest.scala

Lines changed: 74 additions & 84 deletions
@@ -18,7 +18,7 @@ package io.snappydata.externalstore

 import java.sql.{PreparedStatement, SQLException}
 import java.util.concurrent.atomic.AtomicInteger
-import java.util.concurrent.{CountDownLatch, CyclicBarrier, Executors, TimeoutException}
+import java.util.concurrent.{CyclicBarrier, Executors, TimeoutException}

 import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}
 import scala.util.Try
@@ -207,11 +207,11 @@ class JDBCPreparedStatementDUnitTest(s: String) extends ClusterManagerTestBase(s
       ps.setString(2, "str" + i)
       ps.addBatch()
       if (i % 10 == 0) {
-        var records = ps.executeBatch()
+        val records = ps.executeBatch()
         records.foreach(r => numRows += r)
       }
     }
-    var records = ps.executeBatch()
+    val records = ps.executeBatch()
     records.foreach(r => numRows += r)
     (1, numRows)
   }
@@ -222,17 +222,16 @@ class JDBCPreparedStatementDUnitTest(s: String) extends ClusterManagerTestBase(s
     val conn = getANetConnection(netPort1)
     val query1 = "update t3 set fs = ? where fs = ?"
     ps = conn.prepareStatement(query1)
-    var fs1 = 1
     for (i <- val1 to val2) {
       ps.setString(1, "temp" + i)
       ps.setString(2, "str" + i)
       ps.addBatch()
       if (i % 10 == 0) {
-        var records = ps.executeBatch()
+        val records = ps.executeBatch()
         records.foreach(r => numRows += r)
       }
     }
-    var records = ps.executeBatch()
+    val records = ps.executeBatch()
     records.foreach(r => numRows += r)
     (1, numRows)
   }
@@ -247,16 +246,16 @@ class JDBCPreparedStatementDUnitTest(s: String) extends ClusterManagerTestBase(s
       ps.setString(1, "temp" + i2)
       ps.addBatch()
       if (i2 % 10 == 0) {
-        var records = ps.executeBatch()
+        val records = ps.executeBatch()
         records.foreach(r => numRows += r)
       }
     }
-    var records = ps.executeBatch()
+    val records = ps.executeBatch()
     records.foreach(r => numRows += r)
     (1, numRows)
   }

-  def testComplexDataTypes() : Unit = {
+  def testComplexDataTypes(): Unit = {
     vm2.invoke(classOf[ClusterManagerTestBase], "startNetServer", netPort1)
     val conn = getANetConnection(netPort1)
     val stmt = conn.createStatement()
@@ -302,42 +301,40 @@ class JDBCPreparedStatementDUnitTest(s: String) extends ClusterManagerTestBase(s
     val stmt = conn.createStatement()
     stmt.execute("drop table if exists t3")
     stmt.execute("create table t3(id integer, fs string) using column options" +
-        "(key_columns 'id', COLUMN_MAX_DELTA_ROWS '7', BUCKETS '2')")
-
-    var thrCount1: Integer = 0
-    var insertedRecords = 0
-    val colThread1 = new Thread(new Runnable {def run() {
-      (1 to 5) foreach (i => {
-        var result = insertRecords(1, 10)
-        thrCount1 += result._1
-        insertedRecords += result._2
-      })
-    }
+        "(COLUMN_MAX_DELTA_ROWS '7', BUCKETS '2')")
+
+    val insertedRecords = new AtomicInteger(0)
+    val colThread1 = new Thread(new Runnable {
+      def run(): Unit = {
+        (1 to 5) foreach { _ =>
+          val result = insertRecords(1, 10)
+          insertedRecords.getAndAdd(result._2)
+        }
+      }
     })
     colThread1.start()

-    var thrCount2: Integer = 0
-    val colThread2 = new Thread(new Runnable {def run() {
-      (1 to 5) foreach (i => {
-        var result = insertRecords(11, 20)
-        thrCount2 += result._1
-        insertedRecords += result._2
-      })
-    }
+    val colThread2 = new Thread(new Runnable {
+      def run(): Unit = {
+        (1 to 5) foreach { _ =>
+          val result = insertRecords(11, 20)
+          insertedRecords.getAndAdd(result._2)
+        }
+      }
     })
     colThread2.start()

     colThread1.join()
     colThread2.join()

+    conn.commit()
     var rscnt = stmt.executeQuery("select count(*) from t3")
     rscnt.next()
     assertEquals(100, rscnt.getInt(1))
-    assertEquals(100, insertedRecords)
+    assertEquals(100, insertedRecords.get())

     val rs = stmt.executeQuery("select * from t3 order by id")

-
     var i = 1
     var cnt = 0

@@ -351,51 +348,47 @@ class JDBCPreparedStatementDUnitTest(s: String) extends ClusterManagerTestBase(s
       cnt = cnt + 1
     }

-    var thrCount3: Integer = 0
-    var updatedRecords = 0
-    val colThread3 = new Thread(new Runnable {def run() {
-      (1 to 5) foreach (i => {
-        var result = updateRecords(1, 20)
-        thrCount3 += result._1
-        updatedRecords += result._2
-      })
-    }
+    val updatedRecords = new AtomicInteger(0)
+    val colThread3 = new Thread(new Runnable {
+      def run(): Unit = {
+        (1 to 5) foreach { _ =>
+          val result = updateRecords(1, 20)
+          updatedRecords.getAndAdd(result._2)
+        }
+      }
     })
     colThread3.start()

-    var thrCount4: Integer = 0
-    val colThread4 = new Thread(new Runnable {def run() {
-      (1 to 5) foreach (i => {
-        var result = updateRecords(11, 20)
-        thrCount4 += result._1
-        updatedRecords += result._2
-      })
-    }
+    val colThread4 = new Thread(new Runnable {
+      def run(): Unit = {
+        (1 to 5) foreach { _ =>
+          val result = updateRecords(11, 20)
+          updatedRecords.getAndAdd(result._2)
+        }
+      }
     })
     colThread4.start()

-    var thrCount5: Integer = 0
-    val colThread5 = new Thread(new Runnable {def run() {
-      (1 to 5) foreach (i => {
-        var result = updateRecords(21, 30)
-        thrCount5 += result._1
-        updatedRecords += result._2
-      })
-    }
+    val colThread5 = new Thread(new Runnable {
+      def run(): Unit = {
+        (1 to 5) foreach { _ =>
+          val result = updateRecords(21, 30)
+          updatedRecords.getAndAdd(result._2)
+        }
+      }
     })
     colThread5.start()

     colThread3.join()
     colThread4.join()
     colThread5.join()

-
     rscnt = stmt.executeQuery("select count(*) from t3")
     rscnt.next()
     assertEquals(100, rscnt.getInt(1))
-    assertEquals(100, updatedRecords)
+    assertEquals(100, updatedRecords.get())

-    var rs1 = stmt.executeQuery("select * from t3 order by id")
+    val rs1 = stmt.executeQuery("select * from t3 order by id")
     var i2 = 1
     cnt = 0
     while (rs1.next()) {
@@ -408,37 +401,34 @@ class JDBCPreparedStatementDUnitTest(s: String) extends ClusterManagerTestBase(s
       cnt = cnt + 1
     }

-    var thrCount6: Integer = 0
     val deletedRecords = new AtomicInteger(0)
-    val colThread6 = new Thread(new Runnable {def run() {
-      (1 to 5) foreach (i => {
-        val result = deleteRecords(1, 20)
-        thrCount6 += result._1
-        deletedRecords.getAndAdd(result._2)
-      })
-    }
+    val colThread6 = new Thread(new Runnable {
+      def run(): Unit = {
+        (1 to 5) foreach { _ =>
+          val result = deleteRecords(1, 20)
+          deletedRecords.getAndAdd(result._2)
+        }
+      }
     })
     colThread6.start()

-    var thrCount7: Integer = 0
-    val colThread7 = new Thread(new Runnable {def run() {
-      (1 to 5) foreach (i => {
-        val result = deleteRecords(11, 20)
-        thrCount7 += result._1
-        deletedRecords.getAndAdd(result._2)
-      })
-    }
+    val colThread7 = new Thread(new Runnable {
+      def run(): Unit = {
+        (1 to 5) foreach { _ =>
+          val result = deleteRecords(11, 20)
+          deletedRecords.getAndAdd(result._2)
+        }
+      }
     })
     colThread7.start()

-    var thrCount8: Integer = 0
-    val colThread8 = new Thread(new Runnable {def run() {
-      (1 to 5) foreach (i => {
-        val result = deleteRecords(21, 30)
-        thrCount8 += result._1
-        deletedRecords.getAndAdd(result._2)
-      })
-    }
+    val colThread8 = new Thread(new Runnable {
+      def run(): Unit = {
+        (1 to 5) foreach { _ =>
+          val result = deleteRecords(21, 30)
+          deletedRecords.getAndAdd(result._2)
+        }
+      }
     })
     colThread8.start()

@@ -449,7 +439,7 @@ class JDBCPreparedStatementDUnitTest(s: String) extends ClusterManagerTestBase(s
     rscnt = stmt.executeQuery("select count(*) from t3")
     rscnt.next()
     assertEquals(0, rscnt.getInt(1))
-    assertEquals(100, deletedRecords.get)
+    assertEquals(100, deletedRecords.get())
   }

   def testQueryCancellation(): Unit = {
@@ -462,7 +452,7 @@ class JDBCPreparedStatementDUnitTest(s: String) extends ClusterManagerTestBase(s
     // significantly long duration.
     stmt.execute(
       s"""create table $table (col1 int, col2 int) using column as
-      |select id as col1, id as col2 from range(10000000)""".stripMargin)
+        |select id as col1, id as col2 from range(10000000)""".stripMargin)
     val barrier = new CyclicBarrier(2)
     try {
       implicit val context: ExecutionContextExecutor =
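The bulk of the change above replaces shared `var` counters (`thrCount1`, `insertedRecords`, ...) that were mutated from several threads with a single `AtomicInteger`, which is what removes the sporadic miscounts mentioned in the commit message. A minimal standalone sketch of that pattern, not taken from the test itself:

```scala
import java.util.concurrent.atomic.AtomicInteger

object AtomicCounterSketch {
  def main(args: Array[String]): Unit = {
    val inserted = new AtomicInteger(0)
    // Two threads each add 50; a shared `var inserted = 0` could lose updates
    // because `inserted += n` is a non-atomic read-modify-write.
    val threads = (1 to 2).map { _ =>
      new Thread(new Runnable {
        def run(): Unit = (1 to 50).foreach(_ => inserted.getAndAdd(1))
      })
    }
    threads.foreach(_.start())
    threads.foreach(_.join())
    assert(inserted.get() == 100)
  }
}
```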

cluster/src/dunit/scala/org/apache/spark/sql/udf/UserDefinedFunctionsDUnitTest.scala

Lines changed: 6 additions & 0 deletions
@@ -25,6 +25,7 @@ import scala.util.{Failure, Success, Try}

 import io.snappydata.cluster.ClusterManagerTestBase
 import io.snappydata.test.dunit.{AvailablePortHelper, DistributedTestBase}
+import org.apache.commons.io.FileUtils

 import org.apache.spark.{SparkUtilsAccess, TestUtils}
 import org.apache.spark.TestUtils.JavaSourceFromString
@@ -36,6 +37,11 @@ case class OrderData(ref: Int, description: String, amount: Long)
 class UserDefinedFunctionsDUnitTest(val s: String)
     extends ClusterManagerTestBase(s) {

+  override def afterClass(): Unit = {
+    super.afterClass()
+    FileUtils.deleteQuietly(SparkUtilsAccess.destDir)
+  }
+
   def testDriverHA(): Unit = {
     // Stop the lead node
     ClusterManagerTestBase.stopAny()
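The last item in the commit message, switching from bare `Process.exitValue()` calls to a timed `waitFor()`, does not appear in any of the diffs rendered above (it presumably sits in the files not shown here). A hypothetical sketch of the pattern being described, not the project's actual helper:

```scala
import java.util.concurrent.TimeUnit

// Wait up to `timeoutSecs` for the process to exit and only then read the
// exit code; reading exitValue() while the process is still running is the
// issue (bogus values such as -1) described in the commit message.
def exitCodeWithin(process: Process, timeoutSecs: Long): Option[Int] =
  if (process.waitFor(timeoutSecs, TimeUnit.SECONDS)) Some(process.exitValue())
  else None // did not terminate within the timeout
```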
