From a6ee18b9937f55e4b8eb555bb306601eb04cbf0a Mon Sep 17 00:00:00 2001
From: jiyong-lee-dev
Date: Thu, 18 Apr 2024 15:17:52 +0900
Subject: [PATCH] scala 2.13 dependency

set scala dependencies for 2.13

fix delta-core to delta-spark

fix for scala 2.13

[SPARK-45629][CORE][SQL][CONNECT][ML][STREAMING][BUILD][EXAMPLES] Fix `Implicit definition should have explicit type`

### What changes were proposed in this pull request?

This PR aims to fix `Implicit definition should have explicit type` in Scala 2.13. This PR includes:
1. Declaring explicit types for global implicit variables
2. Adding `scala.annotation.nowarn` where the warning must be suppressed

### Why are the changes needed?

- For implicit global variables without an explicit type declaration, we get a warning:
  warning: Implicit definition should have explicit type (inferred String) [quickfixable]
- No modifications are required for local variables.

Additionally, to handle cases involving reflection-related types like ClassTag in implicit variables, the `scala.annotation.nowarn` annotation is used to suppress the warning. Furthermore, warnings generated in Spark will be treated as errors:

[error] ... Implicit definition should have explicit type (inferred org.json4s.DefaultFormats.type) [quickfixable]
...
[error] implicit val formats = org.json4s.DefaultFormats

Jira link: SPARK-45629: https://issues.apache.org/jira/browse/SPARK-45629
Related issue link about `implicit`: https://github.com/scala/bug/issues/5265

### Does this PR introduce _any_ user-facing change?

no

### How was this patch tested?

Most of the testing is completed through CI, and the example module is locally compiled and tested in IDEA. Additionally, there are some writing changes that are verified through demo code.

### Was this patch authored or co-authored using generative AI tooling?

no

Closes #43526 from laglangyue/SPARK-45629.

Lead-authored-by: tangjiafu
Co-authored-by: tangjiafu
Signed-off-by: yangjie01

# Conflicts:
# connector/connect/common/src/main/scala/org/apache/spark/sql/connect/client/GrpcExceptionConverter.scala
# core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
# pom.xml
# project/SparkBuild.scala
# sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
# sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/OperatorStateMetadata.scala

Revert "[SPARK-45629][CORE][SQL][CONNECT][ML][STREAMING][BUILD][EXAMPLES] Fix `Implicit definition should have explicit type`"

This reverts commit bc67612a3ea0c26e1140eadcc5cb1d7a03a7c895.
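For reference, a minimal sketch of the pattern applied throughout this patch (illustrative only; the object names below are hypothetical and are not files touched by the diff):

```scala
import org.json4s.{DefaultFormats, Formats}

object BeforeExample {
  // Under Scala 2.13 with warnings promoted to errors, this fails with:
  // "Implicit definition should have explicit type (inferred org.json4s.DefaultFormats.type) [quickfixable]"
  implicit val formats = DefaultFormats
}

object AfterExample {
  // Declaring the implicit's type explicitly resolves the warning.
  implicit val formats: Formats = DefaultFormats
}
```

Local implicits inside method bodies keep their inferred types, as noted above; only top-level (global) implicit definitions need the explicit annotation.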
Fix Implicit definition should have explicit type fix install error add args for scala 2.13 fix args update pom java version update pom jvm target version fix implicit types rollback scala 2.13.8 rollback fmt blank space fix fix import fmt fmt not fail on violation --- R/pkg/tests/fulltests/test_client.R | 4 +-- assembly/pom.xml | 4 +-- common/kvstore/pom.xml | 4 +-- common/network-common/pom.xml | 4 +-- common/network-shuffle/pom.xml | 4 +-- common/network-yarn/pom.xml | 4 +-- common/sketch/pom.xml | 4 +-- common/tags/pom.xml | 4 +-- common/unsafe/pom.xml | 4 +-- common/utils/pom.xml | 4 +-- connector/avro/pom.xml | 4 +-- connector/connect/client/jvm/pom.xml | 4 +-- connector/connect/common/pom.xml | 4 +-- connector/connect/server/pom.xml | 4 +-- connector/docker-integration-tests/pom.xml | 4 +-- connector/kafka-0-10-assembly/pom.xml | 4 +-- connector/kafka-0-10-sql/pom.xml | 4 +-- .../apache/spark/sql/kafka010/JsonUtils.scala | 8 ++--- connector/kafka-0-10-token-provider/pom.xml | 4 +-- connector/kafka-0-10/pom.xml | 4 +-- connector/kinesis-asl-assembly/pom.xml | 4 +-- connector/kinesis-asl/pom.xml | 4 +-- connector/protobuf/pom.xml | 4 +-- connector/spark-ganglia-lgpl/pom.xml | 4 +-- core/pom.xml | 4 +-- .../spark/deploy/FaultToleranceTest.scala | 5 +-- .../deploy/StandaloneResourceUtils.scala | 4 +-- .../CoarseGrainedExecutorBackend.scala | 3 -- .../spark/resource/ResourceInformation.scala | 6 ++-- .../apache/spark/resource/ResourceUtils.scala | 4 +-- .../apache/spark/status/AppStatusSource.scala | 2 +- .../storage/BlockManagerMasterEndpoint.scala | 5 +-- .../storage/BlockManagerStorageEndpoint.scala | 5 +-- .../apache/spark/ContextCleanerSuite.scala | 2 +- .../org/apache/spark/SparkContextSuite.scala | 4 +-- .../spark/deploy/SparkSubmitUtilsSuite.scala | 6 ++-- .../deploy/history/HistoryServerSuite.scala | 3 +- .../spark/deploy/master/MasterSuite.scala | 8 ++--- .../spark/deploy/worker/WorkerSuite.scala | 4 +-- .../CoarseGrainedExecutorBackendSuite.scala | 4 +-- .../spark/resource/ResourceUtilsSuite.scala | 6 ++-- .../org/apache/spark/ui/UISeleniumSuite.scala | 2 +- .../org/apache/spark/util/KeyLockSuite.scala | 4 +-- dev/deps/spark-deps-hadoop-3-hive-2.3 | 2 +- examples/pom.xml | 4 +-- .../spark/examples/sql/SparkSQLExample.scala | 4 +-- graphx/pom.xml | 4 +-- hadoop-cloud/pom.xml | 4 +-- launcher/pom.xml | 4 +-- mllib-local/pom.xml | 4 +-- mllib/pom.xml | 4 +-- .../spark/ml/linalg/JsonMatrixConverter.scala | 4 +-- .../spark/ml/linalg/JsonVectorConverter.scala | 4 +-- .../org/apache/spark/ml/param/params.scala | 12 +++---- .../classification/ClassificationModel.scala | 4 +-- .../clustering/BisectingKMeansModel.scala | 8 ++--- .../clustering/GaussianMixtureModel.scala | 4 +-- .../spark/mllib/clustering/KMeansModel.scala | 4 +-- .../spark/mllib/clustering/LDAModel.scala | 6 ++-- .../clustering/PowerIterationClustering.scala | 2 +- .../spark/mllib/feature/ChiSqSelector.scala | 2 +- .../apache/spark/mllib/feature/Word2Vec.scala | 4 +-- .../org/apache/spark/mllib/fpm/FPGrowth.scala | 4 +-- .../apache/spark/mllib/fpm/PrefixSpan.scala | 4 +-- .../apache/spark/mllib/linalg/Vectors.scala | 4 +-- .../MatrixFactorizationModel.scala | 2 +- .../mllib/regression/IsotonicRegression.scala | 2 +- .../mllib/regression/RegressionModel.scala | 4 +-- .../mllib/tree/model/DecisionTreeModel.scala | 2 +- .../mllib/tree/model/treeEnsembleModels.scala | 2 +- .../spark/mllib/util/modelSaveLoad.scala | 2 +- pom.xml | 31 ++++++++++++------- python/docs/source/development/testing.rst | 2 +- repl/pom.xml | 4 +-- 
resource-managers/kubernetes/core/pom.xml | 4 +-- .../kubernetes/integration-tests/pom.xml | 4 +-- resource-managers/mesos/pom.xml | 4 +-- resource-managers/yarn/pom.xml | 4 +-- .../cluster/YarnSchedulerBackend.scala | 6 ++-- sql/api/pom.xml | 4 +-- sql/catalyst/pom.xml | 4 +-- .../expressions/ObjectExpressionsSuite.scala | 2 +- .../optimizer/ColumnPruningSuite.scala | 2 +- .../optimizer/EliminateMapObjectsSuite.scala | 4 +-- .../EliminateSerializationSuite.scala | 4 +-- .../ObjectSerializerPruningSuite.scala | 2 +- .../TypedFilterOptimizationSuite.scala | 2 +- .../logical/DistinctKeyVisitorSuite.scala | 2 +- sql/core/pom.xml | 4 +-- .../scala/org/apache/spark/sql/Dataset.scala | 7 +++-- .../spark/sql/KeyValueGroupedDataset.scala | 4 +-- .../datasources/DataSourceUtils.scala | 4 +-- .../ApplyInPandasWithStatePythonRunner.scala | 2 +- .../sql/execution/streaming/CommitLog.scala | 4 +-- .../streaming/CompactibleFileStreamLog.scala | 5 +-- .../streaming/FileStreamSinkLog.scala | 4 --- .../streaming/FileStreamSourceLog.scala | 5 --- .../streaming/FileStreamSourceOffset.scala | 4 +-- .../execution/streaming/GroupStateImpl.scala | 2 +- .../execution/streaming/HDFSMetadataLog.scala | 5 +-- .../sql/execution/streaming/OffsetSeq.scala | 4 +-- .../execution/streaming/StreamMetadata.scala | 4 +-- .../ContinuousTextSocketSource.scala | 4 +-- .../sources/ContinuousMemoryStream.scala | 6 ++-- .../sources/RatePerMicroBatchStream.scala | 4 +-- .../streaming/state/RocksDBFileManager.scala | 4 +-- .../org/apache/spark/sql/SQLQuerySuite.scala | 6 ++-- .../StreamingQueryListenerSuite.scala | 4 +-- .../sql/streaming/StreamingQuerySuite.scala | 4 +-- .../SqlResourceWithActualMetricsSuite.scala | 2 +- sql/hive-thriftserver/pom.xml | 4 +-- .../HiveThriftServer2Suites.scala | 4 +-- sql/hive/pom.xml | 4 +-- streaming/pom.xml | 4 +-- .../org/apache/spark/streaming/Time.scala | 2 +- .../receiver/ReceivedBlockHandler.scala | 6 ++-- tools/pom.xml | 4 +-- 117 files changed, 249 insertions(+), 247 deletions(-) diff --git a/R/pkg/tests/fulltests/test_client.R b/R/pkg/tests/fulltests/test_client.R index 9798627ffc551..b38067e534fc0 100644 --- a/R/pkg/tests/fulltests/test_client.R +++ b/R/pkg/tests/fulltests/test_client.R @@ -37,7 +37,7 @@ test_that("multiple packages don't produce a warning", { test_that("sparkJars sparkPackages as character vectors", { args <- generateSparkSubmitArgs("", "", c("one.jar", "two.jar", "three.jar"), "", - c("com.databricks:spark-avro_2.12:2.0.1")) + c("com.databricks:spark-avro_2.13:2.0.1")) expect_match(args, "--jars one.jar,two.jar,three.jar") - expect_match(args, "--packages com.databricks:spark-avro_2.12:2.0.1") + expect_match(args, "--packages com.databricks:spark-avro_2.13:2.0.1") }) diff --git a/assembly/pom.xml b/assembly/pom.xml index 47b54729bbd2d..2804c6383690e 100644 --- a/assembly/pom.xml +++ b/assembly/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../pom.xml - spark-assembly_2.12 + spark-assembly_2.13 Spark Project Assembly https://spark.apache.org/ pom diff --git a/common/kvstore/pom.xml b/common/kvstore/pom.xml index 66e6bb473bf2f..d356958a3f701 100644 --- a/common/kvstore/pom.xml +++ b/common/kvstore/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-kvstore_2.12 + spark-kvstore_2.13 jar Spark Project Local DB https://spark.apache.org/ diff --git a/common/network-common/pom.xml b/common/network-common/pom.xml index 98897b4424ae0..4fe3fdd186b4c 100644 --- 
a/common/network-common/pom.xml +++ b/common/network-common/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-network-common_2.12 + spark-network-common_2.13 jar Spark Project Networking https://spark.apache.org/ diff --git a/common/network-shuffle/pom.xml b/common/network-shuffle/pom.xml index 44531ea54cd58..ce04c865a0ac1 100644 --- a/common/network-shuffle/pom.xml +++ b/common/network-shuffle/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-network-shuffle_2.12 + spark-network-shuffle_2.13 jar Spark Project Shuffle Streaming Service https://spark.apache.org/ diff --git a/common/network-yarn/pom.xml b/common/network-yarn/pom.xml index 8fcf20328e8e2..67e8206ab8edb 100644 --- a/common/network-yarn/pom.xml +++ b/common/network-yarn/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-network-yarn_2.12 + spark-network-yarn_2.13 jar Spark Project YARN Shuffle Service https://spark.apache.org/ diff --git a/common/sketch/pom.xml b/common/sketch/pom.xml index 901214de77c9b..7d5cf341ff1ff 100644 --- a/common/sketch/pom.xml +++ b/common/sketch/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-sketch_2.12 + spark-sketch_2.13 jar Spark Project Sketch https://spark.apache.org/ diff --git a/common/tags/pom.xml b/common/tags/pom.xml index 6395454245ef1..77e9420cbbbb8 100644 --- a/common/tags/pom.xml +++ b/common/tags/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-tags_2.12 + spark-tags_2.13 jar Spark Project Tags https://spark.apache.org/ diff --git a/common/unsafe/pom.xml b/common/unsafe/pom.xml index bf116a6ff12e7..449b24818bbe0 100644 --- a/common/unsafe/pom.xml +++ b/common/unsafe/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-unsafe_2.12 + spark-unsafe_2.13 jar Spark Project Unsafe https://spark.apache.org/ diff --git a/common/utils/pom.xml b/common/utils/pom.xml index 1e30bfe67b90f..34ac26a9db1f5 100644 --- a/common/utils/pom.xml +++ b/common/utils/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-common-utils_2.12 + spark-common-utils_2.13 jar Spark Project Common Utils https://spark.apache.org/ diff --git a/connector/avro/pom.xml b/connector/avro/pom.xml index bb2f0dcb77c18..5e02ae64b0f65 100644 --- a/connector/avro/pom.xml +++ b/connector/avro/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-avro_2.12 + spark-avro_2.13 avro diff --git a/connector/connect/client/jvm/pom.xml b/connector/connect/client/jvm/pom.xml index 91f041db33f61..c7bf9607e6055 100644 --- a/connector/connect/client/jvm/pom.xml +++ b/connector/connect/client/jvm/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../../../pom.xml - spark-connect-client-jvm_2.12 + spark-connect-client-jvm_2.13 jar Spark Project Connect Client https://spark.apache.org/ diff --git a/connector/connect/common/pom.xml b/connector/connect/common/pom.xml index 0bbb54e88fc80..8b76e90a583b1 100644 --- a/connector/connect/common/pom.xml +++ b/connector/connect/common/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 
../../../pom.xml - spark-connect-common_2.12 + spark-connect-common_2.13 jar Spark Project Connect Common https://spark.apache.org/ diff --git a/connector/connect/server/pom.xml b/connector/connect/server/pom.xml index 1cdb52febb19f..d62fcb84c951b 100644 --- a/connector/connect/server/pom.xml +++ b/connector/connect/server/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../../pom.xml - spark-connect_2.12 + spark-connect_2.13 jar Spark Project Connect Server https://spark.apache.org/ diff --git a/connector/docker-integration-tests/pom.xml b/connector/docker-integration-tests/pom.xml index 898e1f88c7783..b4971790164ff 100644 --- a/connector/docker-integration-tests/pom.xml +++ b/connector/docker-integration-tests/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-docker-integration-tests_2.12 + spark-docker-integration-tests_2.13 jar Spark Project Docker Integration Tests https://spark.apache.org/ diff --git a/connector/kafka-0-10-assembly/pom.xml b/connector/kafka-0-10-assembly/pom.xml index 1fae402ec8ce1..9b89ae3db36de 100644 --- a/connector/kafka-0-10-assembly/pom.xml +++ b/connector/kafka-0-10-assembly/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-streaming-kafka-0-10-assembly_2.12 + spark-streaming-kafka-0-10-assembly_2.13 jar Spark Integration for Kafka 0.10 Assembly https://spark.apache.org/ diff --git a/connector/kafka-0-10-sql/pom.xml b/connector/kafka-0-10-sql/pom.xml index e67f01ec746fd..297fba81e0ae2 100644 --- a/connector/kafka-0-10-sql/pom.xml +++ b/connector/kafka-0-10-sql/pom.xml @@ -20,13 +20,13 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml org.apache.spark - spark-sql-kafka-0-10_2.12 + spark-sql-kafka-0-10_2.13 sql-kafka-0-10 diff --git a/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/JsonUtils.scala b/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/JsonUtils.scala index 6dd5af2389a81..4f1c76c7a7fe3 100644 --- a/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/JsonUtils.scala +++ b/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/JsonUtils.scala @@ -21,14 +21,14 @@ import scala.collection.mutable.HashMap import scala.util.control.NonFatal import org.apache.kafka.common.TopicPartition -import org.json4s.NoTypeHints +import org.json4s.{Formats, NoTypeHints} import org.json4s.jackson.Serialization /** * Utilities for converting Kafka related objects to and from json. 
*/ private object JsonUtils { - private implicit val formats = Serialization.formats(NoTypeHints) + private implicit val formats: Formats = Serialization.formats(NoTypeHints) /** * Read TopicPartitions from json string @@ -96,10 +96,8 @@ private object JsonUtils { */ def partitionOffsets(partitionOffsets: Map[TopicPartition, Long]): String = { val result = new HashMap[String, HashMap[Int, Long]]() - implicit val order = new Ordering[TopicPartition] { - override def compare(x: TopicPartition, y: TopicPartition): Int = { + implicit val order: Ordering[TopicPartition] = (x: TopicPartition, y: TopicPartition) => { Ordering.Tuple2[String, Int].compare((x.topic, x.partition), (y.topic, y.partition)) - } } val partitions = partitionOffsets.keySet.toSeq.sorted // sort for more determinism partitions.foreach { tp => diff --git a/connector/kafka-0-10-token-provider/pom.xml b/connector/kafka-0-10-token-provider/pom.xml index e713aa6722075..5ecd8e05b44ff 100644 --- a/connector/kafka-0-10-token-provider/pom.xml +++ b/connector/kafka-0-10-token-provider/pom.xml @@ -20,13 +20,13 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml org.apache.spark - spark-token-provider-kafka-0-10_2.12 + spark-token-provider-kafka-0-10_2.13 token-provider-kafka-0-10 diff --git a/connector/kafka-0-10/pom.xml b/connector/kafka-0-10/pom.xml index ed6ec5999c434..875ba7fe85e10 100644 --- a/connector/kafka-0-10/pom.xml +++ b/connector/kafka-0-10/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-streaming-kafka-0-10_2.12 + spark-streaming-kafka-0-10_2.13 streaming-kafka-0-10 diff --git a/connector/kinesis-asl-assembly/pom.xml b/connector/kinesis-asl-assembly/pom.xml index 9abee84ece576..f2da12def1293 100644 --- a/connector/kinesis-asl-assembly/pom.xml +++ b/connector/kinesis-asl-assembly/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-streaming-kinesis-asl-assembly_2.12 + spark-streaming-kinesis-asl-assembly_2.13 jar Spark Project Kinesis Assembly https://spark.apache.org/ diff --git a/connector/kinesis-asl/pom.xml b/connector/kinesis-asl/pom.xml index ee16104ab72e9..f5b953f12bdf0 100644 --- a/connector/kinesis-asl/pom.xml +++ b/connector/kinesis-asl/pom.xml @@ -19,13 +19,13 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-streaming-kinesis-asl_2.12 + spark-streaming-kinesis-asl_2.13 jar Spark Kinesis Integration diff --git a/connector/protobuf/pom.xml b/connector/protobuf/pom.xml index 177815cc60965..d0249211171bc 100644 --- a/connector/protobuf/pom.xml +++ b/connector/protobuf/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-protobuf_2.12 + spark-protobuf_2.13 protobuf diff --git a/connector/spark-ganglia-lgpl/pom.xml b/connector/spark-ganglia-lgpl/pom.xml index 2946329b983e6..d0b4639590162 100644 --- a/connector/spark-ganglia-lgpl/pom.xml +++ b/connector/spark-ganglia-lgpl/pom.xml @@ -19,13 +19,13 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../../pom.xml - spark-ganglia-lgpl_2.12 + spark-ganglia-lgpl_2.13 jar Spark Ganglia Integration diff --git a/core/pom.xml b/core/pom.xml index 54a7e10040d9d..ee6fe314b163b 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../pom.xml - spark-core_2.12 + spark-core_2.13 jar Spark Project Core 
https://spark.apache.org/ diff --git a/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala b/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala index 7209e2c373ab1..d6a50ff84f562 100644 --- a/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala +++ b/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala @@ -30,6 +30,7 @@ import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration._ import scala.sys.process._ +import org.json4s.Formats import org.json4s.jackson.JsonMethods import org.apache.spark.{SparkConf, SparkContext} @@ -340,7 +341,7 @@ private object FaultToleranceTest extends App with Logging { private class TestMasterInfo(val ip: String, val dockerId: DockerId, val logFile: File) extends Logging { - implicit val formats = org.json4s.DefaultFormats + implicit val formats: Formats = org.json4s.DefaultFormats var state: RecoveryState.Value = _ var liveWorkerIPs: List[String] = _ var numLiveApps = 0 @@ -383,7 +384,7 @@ private class TestMasterInfo(val ip: String, val dockerId: DockerId, val logFile private class TestWorkerInfo(val ip: String, val dockerId: DockerId, val logFile: File) extends Logging { - implicit val formats = org.json4s.DefaultFormats + implicit val formats: Formats = org.json4s.DefaultFormats logDebug("Created worker: " + this) diff --git a/core/src/main/scala/org/apache/spark/deploy/StandaloneResourceUtils.scala b/core/src/main/scala/org/apache/spark/deploy/StandaloneResourceUtils.scala index 641c5416cbb33..2e4e07b36cb64 100644 --- a/core/src/main/scala/org/apache/spark/deploy/StandaloneResourceUtils.scala +++ b/core/src/main/scala/org/apache/spark/deploy/StandaloneResourceUtils.scala @@ -23,7 +23,7 @@ import java.nio.file.Files import scala.collection.mutable import scala.util.control.NonFatal -import org.json4s.{DefaultFormats, Extraction} +import org.json4s.{DefaultFormats, Extraction, Formats} import org.json4s.jackson.JsonMethods.{compact, render} import org.apache.spark.SparkException @@ -114,7 +114,7 @@ private[spark] object StandaloneResourceUtils extends Logging { private def writeResourceAllocationJson[T]( allocations: Seq[T], jsonFile: File): Unit = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val allocationJson = Extraction.decompose(allocations) Files.write(jsonFile.toPath, compact(render(allocationJson)).getBytes()) } diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala index 537522326fc78..bd3927385564f 100644 --- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala +++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala @@ -27,7 +27,6 @@ import scala.util.{Failure, Success} import scala.util.control.NonFatal import io.netty.util.internal.PlatformDependent -import org.json4s.DefaultFormats import org.apache.spark._ import org.apache.spark.TaskState.TaskState @@ -60,8 +59,6 @@ private[spark] class CoarseGrainedExecutorBackend( import CoarseGrainedExecutorBackend._ - private implicit val formats = DefaultFormats - private[spark] val stopping = new AtomicBoolean(false) var executor: Executor = null @volatile var driver: Option[RpcEndpointRef] = None diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceInformation.scala b/core/src/main/scala/org/apache/spark/resource/ResourceInformation.scala index 
7f7bb36512d14..603a89968b26a 100644 --- a/core/src/main/scala/org/apache/spark/resource/ResourceInformation.scala +++ b/core/src/main/scala/org/apache/spark/resource/ResourceInformation.scala @@ -19,7 +19,7 @@ package org.apache.spark.resource import scala.util.control.NonFatal -import org.json4s.{DefaultFormats, Extraction, JValue} +import org.json4s.{DefaultFormats, Extraction, Formats, JValue} import org.json4s.jackson.JsonMethods._ import org.apache.spark.SparkException @@ -69,7 +69,7 @@ private[spark] object ResourceInformation { * Parses a JSON string into a [[ResourceInformation]] instance. */ def parseJson(json: String): ResourceInformation = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats try { parse(json).extract[ResourceInformationJson].toResourceInformation } catch { @@ -80,7 +80,7 @@ private[spark] object ResourceInformation { } def parseJson(json: JValue): ResourceInformation = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats try { json.extract[ResourceInformationJson].toResourceInformation } catch { diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala b/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala index d19f413598b58..095b015a28632 100644 --- a/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala +++ b/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala @@ -22,7 +22,7 @@ import java.util.Optional import scala.util.control.NonFatal -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, Formats} import org.json4s.jackson.JsonMethods._ import org.apache.spark.{SparkConf, SparkException} @@ -252,7 +252,7 @@ private[spark] object ResourceUtils extends Logging { def parseAllocatedFromJsonFile(resourcesFile: String): Seq[ResourceAllocation] = { withResourcesJson[ResourceAllocation](resourcesFile) { json => - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats parse(json).extract[Seq[ResourceAllocation]] } } diff --git a/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala b/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala index d19744db089ba..96dc5ac44b47a 100644 --- a/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala +++ b/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala @@ -31,7 +31,7 @@ private [spark] class JobDuration(val value: AtomicLong) extends Gauge[Long] { private[spark] class AppStatusSource extends Source { - override implicit val metricRegistry = new MetricRegistry() + override implicit val metricRegistry: MetricRegistry = new MetricRegistry() override val sourceName = "appStatus" diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala index 19de4544bea32..151972ded9574 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala @@ -23,7 +23,7 @@ import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ import scala.collection.mutable -import scala.concurrent.{ExecutionContext, Future, TimeoutException} +import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService, Future, TimeoutException} import scala.util.Random import scala.util.control.NonFatal @@ -94,7 +94,8 @@ class BlockManagerMasterEndpoint( private val askThreadPool = 
ThreadUtils.newDaemonCachedThreadPool("block-manager-ask-thread-pool", 100) - private implicit val askExecutionContext = ExecutionContext.fromExecutorService(askThreadPool) + private implicit val askExecutionContext: ExecutionContextExecutorService = + ExecutionContext.fromExecutorService(askThreadPool) private val topologyMapper = { val topologyMapperClassName = conf.get( diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala index 476be80e67df3..5cc08714d41c1 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala @@ -17,7 +17,7 @@ package org.apache.spark.storage -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService, Future} import org.apache.spark.{MapOutputTracker, SparkEnv} import org.apache.spark.internal.Logging @@ -38,7 +38,8 @@ class BlockManagerStorageEndpoint( private val asyncThreadPool = ThreadUtils.newDaemonCachedThreadPool("block-manager-storage-async-thread-pool", 100) - private implicit val asyncExecutionContext = ExecutionContext.fromExecutorService(asyncThreadPool) + private implicit val asyncExecutionContext: ExecutionContextExecutorService = + ExecutionContext.fromExecutorService(asyncThreadPool) // Operations that involve removing blocks may be slow and should be done asynchronously override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = { diff --git a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala index 5434e82c95b1b..ed67906a4f268 100644 --- a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala +++ b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala @@ -42,7 +42,7 @@ import org.apache.spark.storage._ abstract class ContextCleanerSuiteBase(val shuffleManager: Class[_] = classOf[SortShuffleManager]) extends SparkFunSuite with BeforeAndAfter with LocalSparkContext { - implicit val defaultTimeout = timeout(10.seconds) + implicit val defaultTimeout: PatienceConfiguration.Timeout = timeout(10.seconds) val conf = new SparkConf() .setMaster("local[2]") .setAppName("ContextCleanerSuite") diff --git a/core/src/test/scala/org/apache/spark/SparkContextSuite.scala b/core/src/test/scala/org/apache/spark/SparkContextSuite.scala index 4145975741bc4..f69e316df5fe8 100644 --- a/core/src/test/scala/org/apache/spark/SparkContextSuite.scala +++ b/core/src/test/scala/org/apache/spark/SparkContextSuite.scala @@ -32,7 +32,7 @@ import org.apache.hadoop.io.{BytesWritable, LongWritable, Text} import org.apache.hadoop.mapred.TextInputFormat import org.apache.hadoop.mapreduce.lib.input.{TextInputFormat => NewTextInputFormat} import org.apache.logging.log4j.{Level, LogManager} -import org.json4s.{DefaultFormats, Extraction} +import org.json4s.{DefaultFormats, Extraction, Formats} import org.scalatest.concurrent.Eventually import org.scalatest.matchers.must.Matchers._ @@ -923,7 +923,7 @@ class SparkContextSuite extends SparkFunSuite with LocalSparkContext with Eventu val scriptPath = createTempScriptWithExpectedOutput(dir, "gpuDiscoveryScript", """{"name": "gpu","addresses":["5", "6"]}""") - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val gpusAllocated = ResourceAllocation(DRIVER_GPU_ID, Seq("0", "1", "8")) val ja = 
Extraction.decompose(Seq(gpusAllocated)) diff --git a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala index db99a020bc9b2..aa82846fec3d0 100644 --- a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala @@ -95,8 +95,8 @@ class SparkSubmitUtilsSuite extends SparkFunSuite { test("add dependencies works correctly") { val md = SparkSubmitUtils.getModuleDescriptor - val artifacts = SparkSubmitUtils.extractMavenCoordinates("com.databricks:spark-csv_2.12:0.1," + - "com.databricks:spark-avro_2.12:0.1") + val artifacts = SparkSubmitUtils.extractMavenCoordinates("com.databricks:spark-csv_2.13:0.1," + + "com.databricks:spark-avro_2.13:0.1") SparkSubmitUtils.addDependenciesToIvy(md, artifacts, "default") assert(md.getDependencies.length === 2) @@ -200,7 +200,7 @@ class SparkSubmitUtilsSuite extends SparkFunSuite { transitive = true, isTest = true) assert(path.isEmpty, "should return empty path") - val main = MavenCoordinate("org.apache.spark", "spark-streaming-kafka-assembly_2.12", "1.2.0") + val main = MavenCoordinate("org.apache.spark", "spark-streaming-kafka-assembly_2.13", "1.2.0") IvyTestUtils.withRepository(main, None, None) { repo => val files = SparkSubmitUtils.resolveMavenCoordinates( coordinates + "," + main.toString, diff --git a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala index 6322661f4afd2..a83a0ace5c008 100644 --- a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala @@ -29,6 +29,7 @@ import scala.concurrent.duration._ import com.google.common.io.{ByteStreams, Files} import org.apache.commons.io.{FileUtils, IOUtils} import org.apache.hadoop.fs.{FileStatus, FileSystem, Path} +import org.json4s.Formats import org.json4s.JsonAST._ import org.json4s.jackson.JsonMethods import org.json4s.jackson.JsonMethods._ @@ -380,7 +381,7 @@ abstract class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with test("incomplete apps get refreshed") { implicit val webDriver: WebDriver = new HtmlUnitDriver - implicit val formats = org.json4s.DefaultFormats + implicit val formats: Formats = org.json4s.DefaultFormats // this test dir is explicitly deleted on successful runs; retained for diagnostics when // not diff --git a/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala b/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala index 37874de987662..f3c7138a13119 100644 --- a/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala @@ -327,7 +327,7 @@ class MasterSuite extends SparkFunSuite } test("SPARK-46888: master should reject worker kill request if decommision is disabled") { - implicit val formats = org.json4s.DefaultFormats + implicit val formats: Formats = org.json4s.DefaultFormats val conf = new SparkConf() .set(DECOMMISSION_ENABLED, false) .set(MASTER_UI_DECOMMISSION_ALLOW_MODE, "ALLOW") @@ -347,7 +347,7 @@ class MasterSuite extends SparkFunSuite } test("master/worker web ui available") { - implicit val formats = org.json4s.DefaultFormats + implicit val formats: Formats = org.json4s.DefaultFormats val conf = new SparkConf() val localCluster = LocalSparkCluster(2, 
2, 512, conf) localCluster.start() @@ -383,7 +383,7 @@ class MasterSuite extends SparkFunSuite } test("master/worker web ui available with reverseProxy") { - implicit val formats = org.json4s.DefaultFormats + implicit val formats: Formats = org.json4s.DefaultFormats val conf = new SparkConf() conf.set(UI_REVERSE_PROXY, true) val localCluster = LocalSparkCluster(2, 2, 512, conf) @@ -419,7 +419,7 @@ class MasterSuite extends SparkFunSuite } test("master/worker web ui available behind front-end reverseProxy") { - implicit val formats = org.json4s.DefaultFormats + implicit val formats: Formats = org.json4s.DefaultFormats val reverseProxyUrl = "http://proxyhost:8080/path/to/spark" val conf = new SparkConf() conf.set(UI_REVERSE_PROXY, true) diff --git a/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala index a07d4f76905a7..75cebc90acba5 100644 --- a/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala @@ -23,7 +23,7 @@ import java.util.function.Supplier import scala.concurrent.duration._ -import org.json4s.{DefaultFormats, Extraction} +import org.json4s.{DefaultFormats, Extraction, Formats} import org.mockito.{Mock, MockitoAnnotations} import org.mockito.Answers.RETURNS_SMART_NULLS import org.mockito.ArgumentMatchers.any @@ -60,7 +60,7 @@ class WorkerSuite extends SparkFunSuite with Matchers with BeforeAndAfter { } def conf(opts: (String, String)*): SparkConf = new SparkConf(loadDefaults = false).setAll(opts) - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats private var _worker: Worker = _ diff --git a/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala b/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala index 909d605442575..57d391b0cf063 100644 --- a/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala +++ b/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala @@ -26,7 +26,7 @@ import java.util.concurrent.atomic.AtomicInteger import scala.collection.concurrent.TrieMap import scala.concurrent.duration._ -import org.json4s.{DefaultFormats, Extraction} +import org.json4s.{DefaultFormats, Extraction, Formats} import org.json4s.JsonAST.{JArray, JObject} import org.json4s.JsonDSL._ import org.mockito.ArgumentMatchers.any @@ -50,7 +50,7 @@ import org.apache.spark.util.{SerializableBuffer, ThreadUtils, Utils} class CoarseGrainedExecutorBackendSuite extends SparkFunSuite with LocalSparkContext with MockitoSugar { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats test("parsing no resources") { val conf = new SparkConf diff --git a/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala b/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala index ffe5ff5787102..e2daf41a203c2 100644 --- a/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala +++ b/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala @@ -21,7 +21,7 @@ import java.io.File import java.nio.file.{Files => JavaFiles} import java.util.Optional -import org.json4s.{DefaultFormats, Extraction} +import org.json4s.{DefaultFormats, Extraction, Formats} import org.apache.spark.{LocalSparkContext, SparkConf, SparkException, SparkFunSuite} import org.apache.spark.TestUtils._ @@ -117,7 +117,7 @@ class 
ResourceUtilsSuite extends SparkFunSuite val conf = new SparkConf assume(!(Utils.isWindows)) withTempDir { dir => - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val fpgaAddrs = Seq("f1", "f2", "f3") val fpgaAllocation = ResourceAllocation(EXECUTOR_FPGA_ID, fpgaAddrs) val resourcesFile = createTempJsonFile( @@ -146,7 +146,7 @@ class ResourceUtilsSuite extends SparkFunSuite val rpId = 1 assume(!(Utils.isWindows)) withTempDir { dir => - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val fpgaAddrs = Seq("f1", "f2", "f3") val fpgaAllocation = ResourceAllocation(EXECUTOR_FPGA_ID, fpgaAddrs) val resourcesFile = createTempJsonFile( diff --git a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala index 79496bba6674b..32972e860275a 100644 --- a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala +++ b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala @@ -83,7 +83,7 @@ private[spark] class SparkUICssErrorHandler extends DefaultCssErrorHandler { class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers { implicit var webDriver: WebDriver = _ - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats override def beforeAll(): Unit = { diff --git a/core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala b/core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala index 6888e492a8d33..6902493dc3c5d 100644 --- a/core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala @@ -22,14 +22,14 @@ import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.duration._ -import org.scalatest.concurrent.{ThreadSignaler, TimeLimits} +import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits} import org.apache.spark.SparkFunSuite class KeyLockSuite extends SparkFunSuite with TimeLimits { // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x - private implicit val defaultSignaler = ThreadSignaler + private implicit val defaultSignaler: Signaler = ThreadSignaler private val foreverMs = 60 * 1000L diff --git a/dev/deps/spark-deps-hadoop-3-hive-2.3 b/dev/deps/spark-deps-hadoop-3-hive-2.3 index c76702cd0af01..8a13dc6676b5c 100644 --- a/dev/deps/spark-deps-hadoop-3-hive-2.3 +++ b/dev/deps/spark-deps-hadoop-3-hive-2.3 @@ -233,7 +233,7 @@ scala-compiler/2.12.18//scala-compiler-2.12.18.jar scala-library/2.12.18//scala-library-2.12.18.jar scala-parser-combinators_2.12/2.3.0//scala-parser-combinators_2.12-2.3.0.jar scala-reflect/2.12.18//scala-reflect-2.12.18.jar -scala-xml_2.12/2.1.0//scala-xml_2.12-2.1.0.jar +scala-xml_2.13/2.1.0//scala-xml_2.12-2.1.0.jar shims/0.9.45//shims-0.9.45.jar slf4j-api/2.0.7//slf4j-api-2.0.7.jar snakeyaml-engine/2.6//snakeyaml-engine-2.6.jar diff --git a/examples/pom.xml b/examples/pom.xml index e9b064f0b1133..292ae052036bc 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../pom.xml - spark-examples_2.12 + spark-examples_2.13 jar Spark Project Examples https://spark.apache.org/ diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala index b17b86c08314b..669205cb89057 100644 --- 
a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala +++ b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala @@ -17,7 +17,7 @@ package org.apache.spark.examples.sql // $example on:programmatic_schema$ -import org.apache.spark.sql.Row +import org.apache.spark.sql.{Row, Encoder} // $example off:programmatic_schema$ // $example on:init_session$ import org.apache.spark.sql.SparkSession @@ -220,7 +220,7 @@ object SparkSQLExample { // +------------+ // No pre-defined encoders for Dataset[Map[K,V]], define explicitly - implicit val mapEncoder = org.apache.spark.sql.Encoders.kryo[Map[String, Any]] + implicit val mapEncoder: Encoder[Map[String, Any]] = org.apache.spark.sql.Encoders.kryo[Map[String, Any]] // Primitive types and case classes can be also defined as // implicit val stringIntMapEncoder: Encoder[Map[String, Any]] = ExpressionEncoder() diff --git a/graphx/pom.xml b/graphx/pom.xml index b315bea1d199e..e96fe2be0b227 100644 --- a/graphx/pom.xml +++ b/graphx/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../pom.xml - spark-graphx_2.12 + spark-graphx_2.13 graphx diff --git a/hadoop-cloud/pom.xml b/hadoop-cloud/pom.xml index 47e45c94a69a7..fc7e83acd3fe9 100644 --- a/hadoop-cloud/pom.xml +++ b/hadoop-cloud/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../pom.xml - spark-hadoop-cloud_2.12 + spark-hadoop-cloud_2.13 jar Spark Project Hadoop Cloud Integration diff --git a/launcher/pom.xml b/launcher/pom.xml index 8994319be472f..8d2cbc7836800 100644 --- a/launcher/pom.xml +++ b/launcher/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../pom.xml - spark-launcher_2.12 + spark-launcher_2.13 jar Spark Project Launcher https://spark.apache.org/ diff --git a/mllib-local/pom.xml b/mllib-local/pom.xml index 3b99cbb4c55bd..ca6aca1c58526 100644 --- a/mllib-local/pom.xml +++ b/mllib-local/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../pom.xml - spark-mllib-local_2.12 + spark-mllib-local_2.13 mllib-local diff --git a/mllib/pom.xml b/mllib/pom.xml index 6dbc98c504985..28f727c95b2c8 100644 --- a/mllib/pom.xml +++ b/mllib/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 3.5.1 ../pom.xml - spark-mllib_2.12 + spark-mllib_2.13 mllib diff --git a/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala b/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala index 8f03a29eb991a..a8844358ead2d 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala @@ -16,7 +16,7 @@ */ package org.apache.spark.ml.linalg -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, Formats} import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render} @@ -29,7 +29,7 @@ private[ml] object JsonMatrixConverter { * Parses the JSON representation of a Matrix into a [[Matrix]]. 
*/ def fromJson(json: String): Matrix = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val jValue = parseJson(json) (jValue \ "type").extract[Int] match { case 0 => // sparse diff --git a/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala b/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala index 1b949d75eeaa0..12387233879ad 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala @@ -17,7 +17,7 @@ package org.apache.spark.ml.linalg -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, Formats} import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render} @@ -27,7 +27,7 @@ private[ml] object JsonVectorConverter { * Parses the JSON representation of a vector into a [[Vector]]. */ def fromJson(json: String): Vector = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val jValue = parseJson(json) (jValue \ "type").extract[Int] match { case 0 => // sparse diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala index b818be30583c0..1b5845f14f1b0 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala @@ -129,7 +129,7 @@ private[ml] object Param { case JObject(v) => val keys = v.map(_._1) if (keys.contains("class")) { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val className = (jValue \ "class").extract[String] className match { case JsonMatrixConverter.className => @@ -398,7 +398,7 @@ class IntParam(parent: String, name: String, doc: String, isValid: Int => Boolea } override def jsonDecode(json: String): Int = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats parse(json).extract[Int] } } @@ -484,7 +484,7 @@ class LongParam(parent: String, name: String, doc: String, isValid: Long => Bool } override def jsonDecode(json: String): Long = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats parse(json).extract[Long] } } @@ -505,7 +505,7 @@ class BooleanParam(parent: String, name: String, doc: String) // No need for isV } override def jsonDecode(json: String): Boolean = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats parse(json).extract[Boolean] } } @@ -528,7 +528,7 @@ class StringArrayParam(parent: Params, name: String, doc: String, isValid: Array } override def jsonDecode(json: String): Array[String] = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats parse(json).extract[Seq[String]].toArray } } @@ -617,7 +617,7 @@ class IntArrayParam(parent: Params, name: String, doc: String, isValid: Array[In } override def jsonDecode(json: String): Array[Int] = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats parse(json).extract[Seq[Int]].toArray } } diff --git a/mllib/src/main/scala/org/apache/spark/mllib/classification/ClassificationModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/classification/ClassificationModel.scala index 5161bc72659c6..ad7435ce5be76 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/classification/ClassificationModel.scala +++ 
b/mllib/src/main/scala/org/apache/spark/mllib/classification/ClassificationModel.scala @@ -17,7 +17,7 @@ package org.apache.spark.mllib.classification -import org.json4s.{DefaultFormats, JValue} +import org.json4s.{DefaultFormats, Formats, JValue} import org.apache.spark.annotation.Since import org.apache.spark.api.java.JavaRDD @@ -65,7 +65,7 @@ private[mllib] object ClassificationModel { * @return (numFeatures, numClasses) */ def getNumFeaturesClasses(metadata: JValue): (Int, Int) = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats ((metadata \ "numFeatures").extract[Int], (metadata \ "numClasses").extract[Int]) } } diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala index c3979118de403..2d1781a25d3f6 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala @@ -18,7 +18,7 @@ package org.apache.spark.mllib.clustering import org.json4s._ -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, Formats} import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods._ @@ -187,7 +187,7 @@ object BisectingKMeansModel extends Loader[BisectingKMeansModel] { } def load(sc: SparkContext, path: String): BisectingKMeansModel = { - implicit val formats: DefaultFormats = DefaultFormats + implicit val formats: Formats = DefaultFormats val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) assert(className == thisClassName) assert(formatVersion == thisFormatVersion) @@ -223,7 +223,7 @@ object BisectingKMeansModel extends Loader[BisectingKMeansModel] { } def load(sc: SparkContext, path: String): BisectingKMeansModel = { - implicit val formats: DefaultFormats = DefaultFormats + implicit val formats: Formats = DefaultFormats val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) assert(className == thisClassName) assert(formatVersion == thisFormatVersion) @@ -261,7 +261,7 @@ object BisectingKMeansModel extends Loader[BisectingKMeansModel] { } def load(sc: SparkContext, path: String): BisectingKMeansModel = { - implicit val formats: DefaultFormats = DefaultFormats + implicit val formats: Formats = DefaultFormats val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) assert(className == thisClassName) assert(formatVersion == thisFormatVersion) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.scala index 0c9c6ab826e62..eb5e776799d04 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.scala @@ -18,7 +18,7 @@ package org.apache.spark.mllib.clustering import breeze.linalg.{DenseVector => BreezeVector} -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, Formats} import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods._ @@ -175,7 +175,7 @@ object GaussianMixtureModel extends Loader[GaussianMixtureModel] { @Since("1.4.0") override def load(sc: SparkContext, path: String): GaussianMixtureModel = { val (loadedClassName, version, metadata) = Loader.loadMetadata(sc, path) - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val k = (metadata \ 
"k").extract[Int] val classNameV1_0 = SaveLoadV1_0.classNameV1_0 (loadedClassName, version) match { diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala index 64b352157caf7..5eafdc9add58d 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala @@ -179,7 +179,7 @@ object KMeansModel extends Loader[KMeansModel] { } def load(sc: SparkContext, path: String): KMeansModel = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val spark = SparkSession.builder().sparkContext(sc).getOrCreate() val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) assert(className == thisClassName) @@ -213,7 +213,7 @@ object KMeansModel extends Loader[KMeansModel] { } def load(sc: SparkContext, path: String): KMeansModel = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val spark = SparkSession.builder().sparkContext(sc).getOrCreate() val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) assert(className == thisClassName) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala index aa8b6a00a427f..e318f06900950 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala @@ -20,7 +20,7 @@ package org.apache.spark.mllib.clustering import breeze.linalg.{argmax, argtopk, normalize, sum, DenseMatrix => BDM, DenseVector => BDV} import breeze.numerics.{exp, lgamma} import org.apache.hadoop.fs.Path -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, Formats} import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods._ @@ -496,7 +496,7 @@ object LocalLDAModel extends Loader[LocalLDAModel] { @Since("1.5.0") override def load(sc: SparkContext, path: String): LocalLDAModel = { val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path) - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val expectedK = (metadata \ "k").extract[Int] val expectedVocabSize = (metadata \ "vocabSize").extract[Int] val docConcentration = @@ -923,7 +923,7 @@ object DistributedLDAModel extends Loader[DistributedLDAModel] { @Since("1.5.0") override def load(sc: SparkContext, path: String): DistributedLDAModel = { val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path) - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val expectedK = (metadata \ "k").extract[Int] val vocabSize = (metadata \ "vocabSize").extract[Int] val docConcentration = diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala index ba541bbcccd29..12c7ae5066c82 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala @@ -79,7 +79,7 @@ object PowerIterationClusteringModel extends Loader[PowerIterationClusteringMode @Since("1.4.0") def load(sc: SparkContext, path: String): PowerIterationClusteringModel = { - implicit val formats = DefaultFormats + implicit val 
formats: Formats = DefaultFormats val spark = SparkSession.builder().sparkContext(sc).getOrCreate() val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala index 3202f08e220b0..4aae9d8add43a 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala @@ -145,7 +145,7 @@ object ChiSqSelectorModel extends Loader[ChiSqSelectorModel] { } def load(sc: SparkContext, path: String): ChiSqSelectorModel = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val spark = SparkSession.builder().sparkContext(sc).getOrCreate() val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) assert(className == thisClassName) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala index 97f277d53ca9d..f286b729c03d0 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala @@ -23,7 +23,7 @@ import scala.collection.JavaConverters._ import scala.collection.mutable import com.google.common.collect.{Ordering => GuavaOrdering} -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, Formats} import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods._ @@ -704,7 +704,7 @@ object Word2VecModel extends Loader[Word2VecModel] { override def load(sc: SparkContext, path: String): Word2VecModel = { val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path) - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val expectedVectorSize = (metadata \ "vectorSize").extract[Int] val expectedNumWords = (metadata \ "numWords").extract[Int] val classNameV1_0 = SaveLoadV1_0.classNameV1_0 diff --git a/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala b/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala index ecdc28dea37fd..0938b709226bd 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala @@ -25,7 +25,7 @@ import scala.collection.mutable import scala.reflect.ClassTag import scala.reflect.runtime.universe._ -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, Formats} import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods.{compact, render} @@ -126,7 +126,7 @@ object FPGrowthModel extends Loader[FPGrowthModel[_]] { } def load(sc: SparkContext, path: String): FPGrowthModel[_] = { - implicit val formats = DefaultFormats + implicit val formats: Formats = DefaultFormats val spark = SparkSession.builder().sparkContext(sc).getOrCreate() val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala b/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala index 7c023bcfa72a4..703dd65bfab78 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala @@ -25,7 +25,7 @@ import scala.collection.mutable import scala.reflect.ClassTag import scala.reflect.runtime.universe._ -import org.json4s.DefaultFormats +import org.json4s.{DefaultFormats, 
 import org.json4s.JsonDSL._
 import org.json4s.jackson.JsonMethods.{compact, render}
@@ -670,7 +670,7 @@ object PrefixSpanModel extends Loader[PrefixSpanModel[_]] {
     }
     def load(sc: SparkContext, path: String): PrefixSpanModel[_] = {
-      implicit val formats = DefaultFormats
+      implicit val formats: Formats = DefaultFormats
       val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
       val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala
index a93f37799419e..fa58443cca90b 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala
@@ -25,7 +25,7 @@ import scala.collection.JavaConverters._
 import scala.language.implicitConversions
 import breeze.linalg.{DenseVector => BDV, SparseVector => BSV, Vector => BV}
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
 import org.json4s.JsonDSL._
 import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}
@@ -430,7 +430,7 @@ object Vectors {
   */
  @Since("1.6.0")
  def fromJson(json: String): Vector = {
-    implicit val formats = DefaultFormats
+    implicit val formats: Formats = DefaultFormats
    val jValue = parseJson(json)
    (jValue \ "type").extract[Int] match {
      case 0 => // sparse
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
index 3276513213f5d..581fe1f9eb647 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
@@ -392,7 +392,7 @@ object MatrixFactorizationModel extends Loader[MatrixFactorizationModel] {
     }
     def load(sc: SparkContext, path: String): MatrixFactorizationModel = {
-      implicit val formats = DefaultFormats
+      implicit val formats: Formats = DefaultFormats
       val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
       val (className, formatVersion, metadata) = loadMetadata(sc, path)
       assert(className == thisClassName)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala b/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
index 12a78ef4ec140..81d1b290404d5 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
@@ -209,7 +209,7 @@ object IsotonicRegressionModel extends Loader[IsotonicRegressionModel] {
   @Since("1.4.0")
   override def load(sc: SparkContext, path: String): IsotonicRegressionModel = {
-    implicit val formats = DefaultFormats
+    implicit val formats: Formats = DefaultFormats
     val (loadedClassName, version, metadata) = loadMetadata(sc, path)
     val isotonic = (metadata \ "isotonic").extract[Boolean]
     val classNameV1_0 = SaveLoadV1_0.thisClassName
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala
index a95a54225a085..0e2dbe43e45bb 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala
@@ -17,7 +17,7 @@ package org.apache.spark.mllib.regression
-import org.json4s.{DefaultFormats, JValue}
+import org.json4s.{DefaultFormats, Formats, JValue}
 import org.apache.spark.annotation.Since
 import org.apache.spark.api.java.JavaRDD
@@ -64,7 +64,7 @@ private[mllib] object RegressionModel {
   * @return numFeatures
   */
  def getNumFeatures(metadata: JValue): Int = {
-    implicit val formats = DefaultFormats
+    implicit val formats: Formats = DefaultFormats
    (metadata \ "numFeatures").extract[Int]
  }
 }
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
index cdc998000c2fc..7a864b9d41efe 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
@@ -312,7 +312,7 @@ object DecisionTreeModel extends Loader[DecisionTreeModel] with Logging {
   */
  @Since("1.3.0")
  override def load(sc: SparkContext, path: String): DecisionTreeModel = {
-    implicit val formats = DefaultFormats
+    implicit val formats: Formats = DefaultFormats
    val (loadedClassName, version, metadata) = Loader.loadMetadata(sc, path)
    val algo = (metadata \ "algo").extract[String]
    val numNodes = (metadata \ "numNodes").extract[Int]
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala
index 1f879a4d9dfbb..03821dc417750 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala
@@ -438,7 +438,7 @@ private[tree] object TreeEnsembleModel extends Logging {
     * Read metadata from the loaded JSON metadata.
     */
    def readMetadata(metadata: JValue): Metadata = {
-      implicit val formats = DefaultFormats
+      implicit val formats: Formats = DefaultFormats
      (metadata \ "metadata").extract[Metadata]
    }
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
index c13bc4099ce70..74e8ae75caf3e 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
@@ -117,7 +117,7 @@ private[mllib] object Loader {
   * @return (class name, version, metadata)
   */
  def loadMetadata(sc: SparkContext, path: String): (String, String, JValue) = {
-    implicit val formats = DefaultFormats
+    implicit val formats: Formats = DefaultFormats
    val metadata = parse(sc.textFile(metadataPath(path)).first())
    val clazz = (metadata \ "class").extract[String]
    val version = (metadata \ "version").extract[String]
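[Editorial note, not part of the patch: every mllib hunk above applies the same change. json4s `extract` calls need an implicit `org.json4s.Formats` in scope, and an implicit val without an explicit type is inferred as the singleton type `DefaultFormats.type`, which triggers scalac's "Implicit definition should have explicit type" warning; with warnings escalated to errors the build fails. A minimal standalone sketch of the pattern follows; the object name and metadata JSON are made up for illustration and are not from the patch.]

    import org.json4s.{DefaultFormats, Formats}
    import org.json4s.jackson.JsonMethods.parse

    object FormatsSketch {
      // hypothetical metadata string, shaped like the model metadata these loaders read
      private val metadataJson = """{"class":"KMeansModel","version":"1.0","k":3}"""

      def readK(): Int = {
        // explicit ascription to Formats; without ": Formats" the compiler infers
        // DefaultFormats.type and emits the implicit-type warning
        implicit val formats: Formats = DefaultFormats
        (parse(metadataJson) \ "k").extract[Int]
      }
    }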
diff --git a/pom.xml b/pom.xml
index e6305b3ff74e5..3e40bd3d78f04 100644
--- a/pom.xml
+++ b/pom.xml
@@ -25,7 +25,7 @@
 18
 org.apache.spark
- spark-parent_2.12
+ spark-parent_2.13
 3.5.1
 pom
 Spark Project Parent POM
@@ -112,7 +112,7 @@
 UTF-8
 UTF-8
- 1.8
+ 17
 ${java.version}
 ${java.version}
 3.8.8
@@ -172,8 +172,8 @@
 3.2.2
 4.4
- 2.12.18
- 2.12
+ 2.13.8
+ 2.13
 2.2.0
 4.8.0
@@ -220,7 +220,7 @@
 1.5.0
 1.70
 1.9.0
- 4.1.96.Final
+ 4.1.101.Final
 com.twitter
 chill_${scala.binary.version}
@@ -1108,7 +1116,7 @@
 org.scala-lang.modules
- scala-xml_2.12
+ scala-xml_2.13
@@ -2938,7 +2946,7 @@
 -deprecation
 -feature
 -explaintypes
- -target:jvm-1.8
+ -target:jvm-17
 -Xfatal-warnings
 -Ywarn-unused:imports
 -P:silencer:globalFilters=.*deprecated.*
@@ -3352,7 +3360,7 @@
 1.0.0
 false
- true
+ false
 false
 false
 ${basedir}/src/main/scala
@@ -3650,7 +3658,7 @@
 -deprecation
 -feature
 -explaintypes
- -target:jvm-1.8
+ -target:jvm-17
 -Wconf:cat=deprecation:wv,any:e
 -Wunused:imports