diff --git a/R/pkg/R/context.R b/R/pkg/R/context.R
index 634bdcb52363f..54b54ab52182c 100644
--- a/R/pkg/R/context.R
+++ b/R/pkg/R/context.R
@@ -303,7 +303,7 @@ setCheckpointDir <- function(sc, dirName) {
 #'
 #' A directory can be given if the recursive option is set to true.
 #' Currently directories are only supported for Hadoop-supported filesystems.
-#' Refer Hadoop-supported filesystems at \url{https://wiki.apache.org/hadoop/HCFS}.
+#' Refer Hadoop-supported filesystems at \url{https://cwiki.apache.org/confluence/display/HADOOP2/HCFS}.
 #'
 #' @rdname spark.addFile
 #' @param path The path of the file to be added
diff --git a/R/pkg/vignettes/sparkr-vignettes.Rmd b/R/pkg/vignettes/sparkr-vignettes.Rmd
index 2fc926c274db4..1293114ff3957 100644
--- a/R/pkg/vignettes/sparkr-vignettes.Rmd
+++ b/R/pkg/vignettes/sparkr-vignettes.Rmd
@@ -317,7 +317,7 @@ A common flow of grouping and aggregation is
 
 2. Feed the `GroupedData` object to `agg` or `summarize` functions, with some provided aggregation functions to compute a number within each group.
 
-A number of widely used functions are supported to aggregate data after grouping, including `avg`, `countDistinct`, `count`, `first`, `kurtosis`, `last`, `max`, `mean`, `min`, `sd`, `skewness`, `stddev_pop`, `stddev_samp`, `sumDistinct`, `sum`, `var_pop`, `var_samp`, `var`. See the [API doc for `mean`](http://spark.apache.org/docs/latest/api/R/mean.html) and other `agg_funcs` linked there.
+A number of widely used functions are supported to aggregate data after grouping, including `avg`, `countDistinct`, `count`, `first`, `kurtosis`, `last`, `max`, `mean`, `min`, `sd`, `skewness`, `stddev_pop`, `stddev_samp`, `sumDistinct`, `sum`, `var_pop`, `var_samp`, `var`. See the [API doc for `mean`](https://spark.apache.org/docs/2.1.3/api/R/mean.html) and other `agg_funcs` linked there.
 
 For example we can compute a histogram of the number of cylinders in the `mtcars` dataset as shown below.
 
diff --git a/build.gradle b/build.gradle
index 9aa2e310b2119..e9f580aedd05a 100644
--- a/build.gradle
+++ b/build.gradle
@@ -312,7 +312,7 @@ task cleanSparkJUnit { doLast {
 
 subprojects {
   apply plugin: 'scala'
-  apply plugin: 'maven'
+  apply plugin: 'maven-publish'
   apply plugin: 'scalaStyle'
 
   int maxWorkers = project.hasProperty('org.gradle.workers.max') ?
@@ -352,6 +352,10 @@ subprojects {
     archiveClassifier.set('sources')
     from sourceSets.main.allSource
   }
+  task packageScalaDocs(type: Jar, dependsOn: scaladoc) {
+    archiveClassifier.set('javadoc')
+    from scaladoc
+  }
 
   configurations {
     testOutput {
@@ -369,15 +373,6 @@ subprojects {
       testOutput packageTests
     }
   }
-  task packageScalaDocs(type: Jar, dependsOn: scaladoc) {
-    archiveClassifier.set('javadoc')
-    from scaladoc
-  }
-  if (rootProject.hasProperty('enablePublish')) {
-    artifacts {
-      archives packageScalaDocs, packageSources
-    }
-  }
 
   // fix scala+java mix to all use compileScala which use correct dependency order
   sourceSets.main.scala.srcDir 'src/main/java'
diff --git a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
index dfb47324395ac..0351bec59a780 100644
--- a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
@@ -14,6 +14,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+/*
+ * Changes for TIBCO Project SnappyData data platform.
+ *
+ * Portions Copyright (c) 2017-2022 TIBCO Software Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
 
 package org.apache.spark.deploy
 
@@ -427,9 +445,10 @@ class SparkSubmitSuite
     // Check if the SparkR package is installed
     assume(RUtils.isSparkRInstalled, "SparkR is not installed in this build.")
     val main = MavenCoordinate("my.great.lib", "mylib", "0.1")
-    val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
-    val rScriptDir = Seq(
-      sparkHome, "R", "pkg", "tests", "fulltests", "packageInAJarTest.R").mkString(File.separator)
+    val sparkProjectHome = sys.props.getOrElse("spark.project.home",
+      sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!")))
+    val rScriptDir = Seq(sparkProjectHome,
+      "R", "pkg", "tests", "fulltests", "packageInAJarTest.R").mkString(File.separator)
     assert(new File(rScriptDir).exists)
     IvyTestUtils.withRepository(main, None, None, withR = true) { repo =>
       val args = Seq(
@@ -446,11 +465,12 @@ class SparkSubmitSuite
 
   test("include an external JAR in SparkR") {
     assume(RUtils.isRInstalled, "R isn't installed on this machine.")
-    val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
+    val sparkProjectHome = sys.props.getOrElse("spark.project.home",
+      sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!")))
     // Check if the SparkR package is installed
     assume(RUtils.isSparkRInstalled, "SparkR is not installed in this build.")
-    val rScriptDir =
-      Seq(sparkHome, "R", "pkg", "tests", "fulltests", "jarTest.R").mkString(File.separator)
+    val rScriptDir = Seq(sparkProjectHome,
+      "R", "pkg", "tests", "fulltests", "jarTest.R").mkString(File.separator)
     assert(new File(rScriptDir).exists)
 
     // compile a small jar containing a class that will be called from R code.
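
Note on the build.gradle hunks above: the patch replaces the removed 'maven' plugin with 'maven-publish', keeps the packageSources/packageScalaDocs jar tasks, and deletes the old enablePublish/artifacts wiring, but the replacement publishing configuration is not shown in this patch. The sketch below is one possible way to attach those jars to a publication under 'maven-publish'; the publication name and the reuse of the enablePublish property are assumptions for illustration only, not taken from the patch.

    // Hypothetical maven-publish wiring (not part of this patch). Assumes it sits
    // inside the same subprojects block where packageSources and packageScalaDocs
    // are defined, so the task references resolve.
    if (rootProject.hasProperty('enablePublish')) {
      publishing {
        publications {
          maven(MavenPublication) {
            from components.java          // main jar plus POM dependency metadata
            artifact packageSources       // jar with classifier 'sources'
            artifact packageScalaDocs     // scaladoc jar published under classifier 'javadoc'
          }
        }
      }
    }

The classifiers come from the archiveClassifier set on each jar task, so listing the tasks as artifacts is enough to publish them alongside the main jar.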