Reduced logging by BlockManager in default log4j.properties
also fixed a putInto hydra test and updated the spark submodule link
sumwale committed Jul 14, 2021
1 parent 324ff3f commit 8cc4798
Showing 6 changed files with 21 additions and 12 deletions.
8 changes: 8 additions & 0 deletions cluster/conf/log4j.properties.template
@@ -99,6 +99,7 @@ log4j.logger.org.apache.spark.scheduler.DAGScheduler=WARN
 log4j.logger.org.apache.spark.scheduler.TaskSetManager=WARN
 log4j.logger.org.apache.spark.scheduler.FairSchedulableBuilder=WARN
 log4j.logger.org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend$DriverEndpoint=WARN
+log4j.logger.org.apache.spark.storage.BlockManager=WARN
 log4j.logger.org.apache.spark.storage.BlockManagerInfo=WARN
 log4j.logger.org.apache.hadoop.hive=WARN
 log4j.logger.org.apache.spark.sql.execution.datasources=WARN
@@ -116,13 +117,20 @@ log4j.logger.org.datanucleus=ERROR
 # Task logger created in SparkEnv
 log4j.logger.org.apache.spark.Task=WARN
 log4j.logger.org.apache.spark.sql.catalyst.parser.CatalystSqlParser=WARN
+# HiveExternalCatalog spits out a warning every time a non-hive table is persisted in meta-store
+log4j.logger.org.apache.spark.sql.hive.SnappyHiveExternalCatalog=ERROR
 
 # Keep log-level of some classes as INFO even if root level is higher
 log4j.logger.io.snappydata.impl.LeadImpl=INFO
 log4j.logger.io.snappydata.impl.ServerImpl=INFO
 log4j.logger.io.snappydata.impl.LocatorImpl=INFO
 log4j.logger.spray.can.server.HttpListener=INFO
+
+# Note: all code generation classes that dump using "code" logger should
+# also be listed in ClientSharedUtils.initLog4j for removal in case top-level
+# file has not been set (e.g. common for JDBC clients) else an empty
+# generatedcode.log will be created.
 
 # for generated code of plans
 log4j.logger.org.apache.spark.sql.execution.WholeStageCodegenExec=INFO, code
 log4j.additivity.org.apache.spark.sql.execution.WholeStageCodegenExec=false
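These template entries only take effect when the logger names match the fully-qualified class names exactly; a typo silently falls back to the root level. A minimal runtime check, assuming log4j 1.2.x on the classpath (the API this template targets):

import org.apache.log4j.Logger

object CheckLogLevels {
  def main(args: Array[String]): Unit = {
    // With the template installed as log4j.properties, this should print WARN.
    val bm = Logger.getLogger("org.apache.spark.storage.BlockManager")
    println(s"BlockManager effective level: ${bm.getEffectiveLevel}")

    // A logger without an explicit entry inherits from its nearest configured ancestor.
    val ms = Logger.getLogger("org.apache.spark.storage.memory.MemoryStore")
    println(s"MemoryStore effective level: ${ms.getEffectiveLevel}")
  }
}
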
10 changes: 4 additions & 6 deletions cluster/src/main/scala/io/snappydata/impl/LeadImpl.scala
@@ -26,6 +26,7 @@ import scala.collection.JavaConverters._
 import scala.concurrent.ExecutionContext.Implicits.global
 import scala.concurrent.duration.Duration
 import scala.concurrent.{Await, Future}
+
 import akka.actor.ActorSystem
 import com.gemstone.gemfire.CancelException
 import com.gemstone.gemfire.cache.CacheClosedException
@@ -50,15 +51,14 @@ import org.apache.thrift.transport.TTransportException
 import spark.jobserver.JobServer
 import spark.jobserver.auth.{AuthInfo, SnappyAuthenticator, User}
 import spray.routing.authentication.UserPass
+
 import org.apache.spark.sql.collection.{ToolsCallbackInit, Utils}
 import org.apache.spark.sql.execution.SecurityUtils
 import org.apache.spark.sql.hive.thriftserver.SnappyHiveThriftServer2
 import org.apache.spark.sql.{SnappyContext, SnappySession}
 import org.apache.spark.util.LocalDirectoryCleanupUtil
 import org.apache.spark.{Logging, SparkCallbacks, SparkConf, SparkContext, SparkException}
 
-import scala.collection.mutable.ArrayBuffer
-
 class LeadImpl extends ServerImpl with Lead
     with ProtocolOverrides with Logging {
 
@@ -336,10 +336,8 @@ class LeadImpl extends ServerImpl with Lead
 
     // If recovery mode then initialize the recovery service
     if (Misc.getGemFireCache.isSnappyRecoveryMode) {
-      if (enableTableCountInUI.equalsIgnoreCase("true"))
-        RecoveryService.collectViewsAndPrepareCatalog(true)
-      else
-        RecoveryService.collectViewsAndPrepareCatalog(false)
+      RecoveryService.collectViewsAndPrepareCatalog(
+        enableTableCountInUI.equalsIgnoreCase("true"))
     }
 
     if (jobServerWait) {
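The recovery-mode change above collapses an if/else that duplicated the call into a single call that passes the flag through. An illustrative sketch, with a hypothetical prepare() standing in for RecoveryService.collectViewsAndPrepareCatalog:

object FlagPassThrough {
  // Hypothetical stand-in for RecoveryService.collectViewsAndPrepareCatalog.
  def prepare(collectTableCounts: Boolean): Unit =
    println(s"preparing recovery catalog, collectTableCounts=$collectTableCounts")

  def main(args: Array[String]): Unit = {
    val enableTableCountInUI = "true" // in LeadImpl this comes from configuration

    // Before: two branches that differ only in the boolean literal.
    if (enableTableCountInUI.equalsIgnoreCase("true")) prepare(true)
    else prepare(false)

    // After: evaluate the condition once and pass the result directly.
    prepare(enableTableCountInUI.equalsIgnoreCase("true"))
  }
}
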
@@ -1,4 +1,4 @@
 -- DROP TABLE IF ALREADY EXISTS --
 DROP TABLE IF EXISTS testL;
 
-create table testL (id long, data string, data2 decimal) using column options (partition_by 'id', key_columns 'id');
+create table testL (id long, data string, data2 decimal(38,10)) using column options (partition_by 'id', key_columns 'id');
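The one-line change above widens the column type: in Spark SQL (which SnappyData's DDL is assumed to follow here), a bare DECIMAL defaults to precision 10 and scale 0, so fractional digits written to data2 would be lost; decimal(38,10) keeps ten of them. A small demonstration of the defaults using Spark's public DecimalType:

import org.apache.spark.sql.types.DecimalType

object DecimalDefaults {
  def main(args: Array[String]): Unit = {
    // A bare DECIMAL in DDL maps to DecimalType(10, 0): ten digits, zero scale.
    println(DecimalType.USER_DEFAULT) // prints DecimalType(10,0)
    // decimal(38,10) reserves 10 fractional digits and 28 integral digits.
    println(DecimalType(38, 10))
  }
}
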
@@ -26,10 +26,12 @@ import scala.concurrent.{Await, Future}
 
 object ConcPutIntoTest {
 
-  def concPutInto(primaryLocatorHost: String, primaryLocatorPort: String, numThreads: Integer): Any = {
+  def concPutInto(primaryLocatorHost: String, primaryLocatorPort: String,
+      numThreads: Integer): Any = {
     val globalId = new AtomicInteger()
     val doPut = () => Future {
-      val conn = DriverManager.getConnection("jdbc:snappydata://" + primaryLocatorHost + ":" + primaryLocatorPort)
+      val conn = DriverManager.getConnection(
+        "jdbc:snappydata://" + primaryLocatorHost + ":" + primaryLocatorPort)
       val stmt = conn.createStatement()
       val myId = globalId.getAndIncrement()
       val blockSize = 100000L
@@ -60,7 +62,8 @@ object ConcPutIntoTest {
     queryTasks.foreach(Await.result(_, Duration.Inf))
   }
 
-  def conSelect(primaryLocatorHost: String, primaryLocatorPort: String, numThreads: Integer): Any = {
+  def conSelect(primaryLocatorHost: String, primaryLocatorPort: String,
+      numThreads: Integer): Any = {
     val globalId = new AtomicInteger()
     val doQuery = () => Future {
       val conn = DriverManager.getConnection("jdbc:snappydata://localhost:1527")
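Both wrapped methods follow the same harness pattern: spawn numThreads futures, each opening its own JDBC connection, then block until all finish. A condensed, self-contained sketch of that pattern (assuming a reachable SnappyData JDBC URL, the driver on the classpath, and the testL table from the SQL script above):

import java.sql.DriverManager
import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

object ConcurrentJdbcSketch {
  def run(url: String, numThreads: Int): Unit = {
    val globalId = new AtomicInteger()
    val task = () => Future {
      val conn = DriverManager.getConnection(url)
      try {
        // Give each task a disjoint id range so concurrent workers do not collide.
        val myId = globalId.getAndIncrement()
        val stmt = conn.createStatement()
        stmt.executeQuery(s"select count(*) from testL where id >= ${myId * 100000L}")
      } finally conn.close()
    }
    // Launch all tasks eagerly, then await each one.
    val tasks = Array.fill(numThreads)(task())
    tasks.foreach(Await.result(_, Duration.Inf))
  }
}
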
2 changes: 1 addition & 1 deletion spark
Submodule spark updated from 3d7318 to 2e3b64
