diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index a24309e137eb8..faa495fe5dfc4 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -252,7 +252,7 @@ jobs:
     - name: Install Python packages (Python 3.9)
       if: (contains(matrix.modules, 'sql') && !contains(matrix.modules, 'sql-')) || contains(matrix.modules, 'connect')
       run: |
-        python3.9 -m pip install 'numpy>=1.20.0' pyarrow pandas scipy unittest-xml-reporting 'lxml==4.9.4' 'grpcio==1.59.3' 'grpcio-status==1.59.3' 'protobuf==4.25.1'
+        python3.9 -m pip install 'numpy>=1.20.0' pyarrow pandas scipy unittest-xml-reporting 'lxml==4.9.4' 'grpcio==1.62.0' 'grpcio-status==1.62.0' 'protobuf==4.25.1'
         python3.9 -m pip list
     # Run the tests.
     - name: Run tests
@@ -574,9 +574,8 @@ jobs:
         git -c user.name='Apache Spark Test Account' -c user.email='sparktestacc@gmail.com' merge --no-commit --progress --squash FETCH_HEAD
         git -c user.name='Apache Spark Test Account' -c user.email='sparktestacc@gmail.com' commit -m "Merged commit" --allow-empty
     - name: Install Buf
-      uses: bufbuild/buf-setup-action@v1.29.0
+      uses: bufbuild/buf-setup-action@v1
       with:
-        version: 1.29.0
         github_token: ${{ secrets.GITHUB_TOKEN }}
     - name: Protocol Buffers Linter
       uses: bufbuild/buf-lint-action@v1
@@ -703,7 +702,7 @@ jobs:
         python3.9 -m pip install 'sphinx==4.5.0' mkdocs 'pydata_sphinx_theme>=0.13' sphinx-copybutton nbsphinx numpydoc jinja2 markupsafe 'pyzmq<24.0.0' \
           ipython ipython_genutils sphinx_plotly_directive 'numpy>=1.20.0' pyarrow pandas 'plotly>=4.8' 'docutils<0.18.0' \
           'flake8==3.9.0' 'mypy==1.8.0' 'pytest==7.1.3' 'pytest-mypy-plugins==1.9.3' 'black==23.9.1' \
-          'pandas-stubs==1.2.0.53' 'grpcio==1.59.3' 'grpc-stubs==1.24.11' 'googleapis-common-protos-stubs==2.2.0' \
+          'pandas-stubs==1.2.0.53' 'grpcio==1.62.0' 'grpc-stubs==1.24.11' 'googleapis-common-protos-stubs==2.2.0' \
           'sphinxcontrib-applehelp==1.0.4' 'sphinxcontrib-devhelp==1.0.2' 'sphinxcontrib-htmlhelp==2.0.1' 'sphinxcontrib-qthelp==1.0.3' 'sphinxcontrib-serializinghtml==1.1.5'
         python3.9 -m pip list
     - name: Python linter
diff --git a/.github/workflows/maven_test.yml b/.github/workflows/maven_test.yml
index d63066a521f97..34fa9a8b77684 100644
--- a/.github/workflows/maven_test.yml
+++ b/.github/workflows/maven_test.yml
@@ -179,7 +179,7 @@ jobs:
     - name: Install Python packages (Python 3.11)
       if: (contains(matrix.modules, 'sql#core')) || contains(matrix.modules, 'connect')
       run: |
-        python3.11 -m pip install 'numpy>=1.20.0' pyarrow pandas scipy unittest-xml-reporting 'grpcio==1.59.3' 'grpcio-status==1.59.3' 'protobuf==4.25.1'
+        python3.11 -m pip install 'numpy>=1.20.0' pyarrow pandas scipy unittest-xml-reporting 'grpcio==1.62.0' 'grpcio-status==1.62.0' 'protobuf==4.25.1'
         python3.11 -m pip list
     # Run the tests.
     - name: Run tests
diff --git a/R/pkg/tests/fulltests/test_streaming.R b/R/pkg/tests/fulltests/test_streaming.R
index 67479726b57c1..88114f8bd82b8 100644
--- a/R/pkg/tests/fulltests/test_streaming.R
+++ b/R/pkg/tests/fulltests/test_streaming.R
@@ -257,8 +257,7 @@ test_that("Trigger", {
                "Value for trigger.processingTime must be a non-empty string.")

   expect_error(write.stream(df, "memory", queryName = "times", outputMode = "append",
-                            trigger.processingTime = "invalid"),
-               "Error parsing 'invalid' to interval, unrecognized number 'invalid'")
+                            trigger.processingTime = "invalid"))

   expect_error(write.stream(df, "memory", queryName = "times", outputMode = "append",
                             trigger.once = ""),
                "Value for trigger.once must be TRUE.")
diff --git a/common/unsafe/src/main/java/org/apache/spark/sql/catalyst/util/CollationFactory.java b/common/unsafe/src/main/java/org/apache/spark/sql/catalyst/util/CollationFactory.java
index ed8bfb886c6e7..2940900b974ad 100644
--- a/common/unsafe/src/main/java/org/apache/spark/sql/catalyst/util/CollationFactory.java
+++ b/common/unsafe/src/main/java/org/apache/spark/sql/catalyst/util/CollationFactory.java
@@ -117,7 +117,7 @@ public Collation(
     // No custom comparators will be used for this collation.
     // Instead, we rely on byte for byte comparison.
     collationTable[0] = new Collation(
-      "UCS_BASIC",
+      "UTF8_BINARY",
       null,
       UTF8String::binaryCompare,
       "1.0",
@@ -127,7 +127,7 @@ public Collation(
     // Case-insensitive UTF8 binary collation.
     // TODO: Do in place comparisons instead of creating new strings.
     collationTable[1] = new Collation(
-      "UCS_BASIC_LCASE",
+      "UTF8_BINARY_LCASE",
       null,
       (s1, s2) -> s1.toLowerCase().binaryCompare(s2.toLowerCase()),
       "1.0",
@@ -138,11 +138,13 @@ public Collation(
     collationTable[2] = new Collation(
       "UNICODE", Collator.getInstance(ULocale.ROOT), "153.120.0.0", true);
     collationTable[2].collator.setStrength(Collator.TERTIARY);
+    collationTable[2].collator.freeze();

     // UNICODE case-insensitive comparison (ROOT locale, in ICU + Secondary strength).
     collationTable[3] = new Collation(
       "UNICODE_CI", Collator.getInstance(ULocale.ROOT), "153.120.0.0", false);
     collationTable[3].collator.setStrength(Collator.SECONDARY);
+    collationTable[3].collator.freeze();

     for (int i = 0; i < collationTable.length; i++) {
       collationNameToIdMap.put(collationTable[i].collationName, i);
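For context on the two `freeze()` calls added to `CollationFactory` above: an ICU `Collator` is mutable and not safe for concurrent use, while a frozen collator is immutable and can be shared across threads without synchronization. A minimal standalone ICU4J sketch of that behavior (illustrative demo class, not part of this patch):

```java
import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

public class FrozenCollatorDemo {
  public static void main(String[] args) {
    // Configure the collator before freezing it; strength cannot change afterwards.
    Collator collator = Collator.getInstance(ULocale.ROOT);
    collator.setStrength(Collator.SECONDARY); // case-insensitive, like UNICODE_CI

    // freeze() makes the instance immutable and thus safe to share across threads.
    collator.freeze();

    // Comparisons still work on the frozen instance.
    System.out.println(collator.compare("aaa", "AAA")); // 0 at SECONDARY strength

    try {
      collator.setStrength(Collator.TERTIARY); // mutation is now rejected
    } catch (UnsupportedOperationException e) {
      System.out.println("frozen collators cannot be reconfigured");
    }
  }
}
```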
diff --git a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java
index ea523760ad8e0..6abc8385da5ab 100644
--- a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java
+++ b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java
@@ -379,10 +379,14 @@ public boolean matchAt(final UTF8String s, int pos) {
   }

   private boolean matchAt(final UTF8String s, int pos, int collationId) {
-    if (s.numBytes + pos > numBytes || pos < 0) {
+    if (s.numChars() + pos > this.numChars() || pos < 0) {
       return false;
     }
-    return this.substring(pos, pos + s.numBytes).semanticCompare(s, collationId) == 0;
+    if (s.numBytes == 0 || this.numBytes == 0) {
+      return s.numBytes == 0;
+    }
+    return CollationFactory.getStringSearch(this.substring(pos, pos + s.numChars()),
+      s, collationId).last() == 0;
   }

   public boolean startsWith(final UTF8String prefix) {
@@ -1456,7 +1460,7 @@ public int compareTo(@Nonnull final UTF8String other) {
   }

   /**
-   * Binary comparison of two UTF8String. Can only be used for default UCS_BASIC collation.
+   * Binary comparison of two UTF8String. Can only be used for default UTF8_BINARY collation.
    */
   public int binaryCompare(final UTF8String other) {
     return ByteArray.compareBinary(
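The rewritten `matchAt` above switches the bounds check from bytes to characters, special-cases empty strings, and delegates the collation-aware comparison to an ICU `StringSearch` obtained via `CollationFactory.getStringSearch`; `last() == 0` asserts that a match exists and starts at offset 0, and since the slice `substring(pos, pos + s.numChars())` has the same character length as the pattern, that amounts to the whole slice matching. A rough standalone sketch of the underlying `StringSearch` behavior in plain ICU4J (hypothetical inputs, not Spark's wrapper):

```java
import java.text.StringCharacterIterator;

import com.ibm.icu.text.Collator;
import com.ibm.icu.text.RuleBasedCollator;
import com.ibm.icu.text.StringSearch;
import com.ibm.icu.util.ULocale;

public class StringSearchDemo {
  public static void main(String[] args) {
    // Secondary strength ignores case differences, mirroring UNICODE_CI.
    RuleBasedCollator collator = (RuleBasedCollator) Collator.getInstance(ULocale.ROOT);
    collator.setStrength(Collator.SECONDARY);

    // Look for the pattern "ABC" inside the target "abc".
    StringSearch search =
        new StringSearch("ABC", new StringCharacterIterator("abc"), collator);

    // last() returns the start index of the last occurrence, or
    // SearchIterator.DONE (-1) when there is no match at all.
    System.out.println(search.last() == 0); // true under this collation
  }
}
```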
diff --git a/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/CollationFactorySuite.scala b/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/CollationFactorySuite.scala
index 5d760ba795df0..f9927b94fd42c 100644
--- a/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/CollationFactorySuite.scala
+++ b/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/CollationFactorySuite.scala
@@ -29,13 +29,13 @@ import org.apache.spark.unsafe.types.UTF8String.{fromString => toUTF8}
 class CollationFactorySuite extends AnyFunSuite with Matchers { // scalastyle:ignore funsuite
   test("collationId stability") {
-    val ucsBasic = fetchCollation(0)
-    assert(ucsBasic.collationName == "UCS_BASIC")
-    assert(ucsBasic.isBinaryCollation)
+    val utf8Binary = fetchCollation(0)
+    assert(utf8Binary.collationName == "UTF8_BINARY")
+    assert(utf8Binary.isBinaryCollation)

-    val ucsBasicLcase = fetchCollation(1)
-    assert(ucsBasicLcase.collationName == "UCS_BASIC_LCASE")
-    assert(!ucsBasicLcase.isBinaryCollation)
+    val utf8BinaryLcase = fetchCollation(1)
+    assert(utf8BinaryLcase.collationName == "UTF8_BINARY_LCASE")
+    assert(!utf8BinaryLcase.isBinaryCollation)

     val unicode = fetchCollation(2)
     assert(unicode.collationName == "UNICODE")
@@ -48,27 +48,27 @@ class CollationFactorySuite extends AnyFunSuite with Matchers { // scalastyle:ig
   test("fetch invalid collation name") {
     val error = intercept[SparkException] {
-      fetchCollation("UCS_BASIS")
+      fetchCollation("UTF8_BS")
     }

     assert(error.getErrorClass === "COLLATION_INVALID_NAME")
     assert(error.getMessageParameters.asScala ===
-      Map("proposal" -> "UCS_BASIC", "collationName" -> "UCS_BASIS"))
+      Map("proposal" -> "UTF8_BINARY", "collationName" -> "UTF8_BS"))
   }

   case class CollationTestCase[R](collationName: String, s1: String, s2: String, expectedResult: R)

   test("collation aware equality and hash") {
     val checks = Seq(
-      CollationTestCase("UCS_BASIC", "aaa", "aaa", true),
-      CollationTestCase("UCS_BASIC", "aaa", "AAA", false),
-      CollationTestCase("UCS_BASIC", "aaa", "bbb", false),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "aaa", true),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "AAA", true),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "AaA", true),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "AaA", true),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "aa", false),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "bbb", false),
+      CollationTestCase("UTF8_BINARY", "aaa", "aaa", true),
+      CollationTestCase("UTF8_BINARY", "aaa", "AAA", false),
+      CollationTestCase("UTF8_BINARY", "aaa", "bbb", false),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "aaa", true),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "AAA", true),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "AaA", true),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "AaA", true),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "aa", false),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "bbb", false),
       CollationTestCase("UNICODE", "aaa", "aaa", true),
       CollationTestCase("UNICODE", "aaa", "AAA", false),
       CollationTestCase("UNICODE", "aaa", "bbb", false),
@@ -89,16 +89,16 @@ class CollationFactorySuite extends AnyFunSuite with Matchers { // scalastyle:ig
   test("collation aware compare") {
     val checks = Seq(
-      CollationTestCase("UCS_BASIC", "aaa", "aaa", 0),
-      CollationTestCase("UCS_BASIC", "aaa", "AAA", 1),
-      CollationTestCase("UCS_BASIC", "aaa", "bbb", -1),
-      CollationTestCase("UCS_BASIC", "aaa", "BBB", 1),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "aaa", 0),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "AAA", 0),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "AaA", 0),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "AaA", 0),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "aa", 1),
-      CollationTestCase("UCS_BASIC_LCASE", "aaa", "bbb", -1),
+      CollationTestCase("UTF8_BINARY", "aaa", "aaa", 0),
+      CollationTestCase("UTF8_BINARY", "aaa", "AAA", 1),
+      CollationTestCase("UTF8_BINARY", "aaa", "bbb", -1),
+      CollationTestCase("UTF8_BINARY", "aaa", "BBB", 1),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "aaa", 0),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "AAA", 0),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "AaA", 0),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "AaA", 0),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "aa", 1),
+      CollationTestCase("UTF8_BINARY_LCASE", "aaa", "bbb", -1),
       CollationTestCase("UNICODE", "aaa", "aaa", 0),
       CollationTestCase("UNICODE", "aaa", "AAA", -1),
       CollationTestCase("UNICODE", "aaa", "bbb", -1),
diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index 67fa2c6a7c1eb..5d20642bd25a4 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -40,7 +40,7 @@
   "AMBIGUOUS_COLUMN_REFERENCE" : {
     "message" : [
       "Column is ambiguous. It's because you joined several DataFrame together, and some of these DataFrames are the same.",
-      "This column points to one of the DataFrame but Spark is unable to figure out which one.",
+      "This column points to one of the DataFrames but Spark is unable to figure out which one.",
      "Please alias the DataFrames with different names via `DataFrame.alias` before joining them,",
      "and specify the column using qualified name, e.g. `df.alias(\"a\").join(df.alias(\"b\"), col(\"a.id\") > col(\"b.id\"))`."
    ],
@@ -1277,6 +1277,12 @@
     ],
     "sqlState" : "58030"
   },
+  "FAILED_ROW_TO_JSON" : {
+    "message" : [
+      "Failed to convert the row value of the class to the target SQL type in the JSON format."
+    ],
+    "sqlState" : "2203G"
+  },
   "FIELDS_ALREADY_EXISTS" : {
     "message" : [
       "Cannot column, because already exists in ."
@@ -1297,7 +1303,7 @@
   },
   "FOREACH_BATCH_USER_FUNCTION_ERROR" : {
     "message" : [
-      "An error occurred in the user provided function in foreach batch sink."
+      "An error occurred in the user provided function in foreach batch sink. Reason: "
     ],
     "sqlState" : "39000"
   },
@@ -1540,6 +1546,13 @@
       "2) You can form a valid datetime pattern with the guide from '/sql-ref-datetime-pattern.html'."
     ]
   },
+  "DATETIME_WEEK_BASED_PATTERN" : {
+    "message" : [
+      "Spark >= 3.0:",
+      "All week-based patterns are unsupported since Spark 3.0, detected week-based character: .",
+      "Please use the SQL function EXTRACT instead."
+    ]
+  },
   "PARSE_DATETIME_BY_NEW_PARSER" : {
     "message" : [
       "Spark >= 3.0:",
@@ -1769,6 +1782,12 @@
     },
     "sqlState" : "22003"
   },
+  "INVALID_BUCKET_COLUMN_DATA_TYPE" : {
+    "message" : [
+      "Cannot use for bucket column. Collated data types are not supported for bucketing."
+    ],
+    "sqlState" : "42601"
+  },
   "INVALID_BUCKET_FILE" : {
     "message" : [
       "Invalid bucket file: ."
@@ -1834,6 +1853,24 @@
     },
     "sqlState" : "HY109"
   },
+  "INVALID_DATETIME_PATTERN" : {
+    "message" : [
+      "Unrecognized datetime pattern: ."
+    ],
+    "subClass" : {
+      "ILLEGAL_CHARACTER" : {
+        "message" : [
+          "Illegal pattern character found in datetime pattern: . Please provide legal character."
+        ]
+      },
+      "LENGTH" : {
+        "message" : [
+          "Too many letters in datetime pattern: . Please reduce pattern length."
+        ]
+      }
+    },
+    "sqlState" : "22007"
+  },
   "INVALID_DEFAULT_VALUE" : {
     "message" : [
       "Failed to execute command because the destination column or variable has a DEFAULT value ,"
     ],
@@ -1862,6 +1899,34 @@
     },
     "sqlState" : "42623"
   },
+  "INVALID_DELIMITER_VALUE" : {
+    "message" : [
+      "Invalid value for delimiter."
+    ],
+    "subClass" : {
+      "DELIMITER_LONGER_THAN_EXPECTED" : {
+        "message" : [
+          "Delimiter cannot be more than one character: ."
+        ]
+      },
+      "EMPTY_STRING" : {
+        "message" : [
+          "Delimiter cannot be empty string."
+        ]
+      },
+      "SINGLE_BACKSLASH" : {
+        "message" : [
+          "Single backslash is prohibited. It has special meaning as beginning of an escape sequence. To get the backslash character, pass a string with two backslashes as the delimiter."
+        ]
+      },
+      "UNSUPPORTED_SPECIAL_CHARACTER" : {
+        "message" : [
+          "Unsupported special character for delimiter: ."
+        ]
+      }
+    },
+    "sqlState" : "42602"
+  },
   "INVALID_DRIVER_MEMORY" : {
     "message" : [
       "System memory must be at least .",
@@ -2071,6 +2136,74 @@
     },
     "sqlState" : "42000"
   },
+  "INVALID_INTERVAL_FORMAT" : {
+    "message" : [
+      "Error parsing '' to interval. Please ensure that the value provided is in a valid format for defining an interval. You can reference the documentation for the correct format."
+    ],
+    "subClass" : {
+      "ARITHMETIC_EXCEPTION" : {
+        "message" : [
+          "Uncaught arithmetic exception while parsing ''."
+        ]
+      },
+      "INPUT_IS_EMPTY" : {
+        "message" : [
+          "Interval string cannot be empty."
+        ]
+      },
+      "INPUT_IS_NULL" : {
+        "message" : [
+          "Interval string cannot be null."
+        ]
+      },
+      "INVALID_FRACTION" : {
+        "message" : [
+          " cannot have fractional part."
+        ]
+      },
+      "INVALID_PRECISION" : {
+        "message" : [
+          "Interval can only support nanosecond precision, is out of range."
+        ]
+      },
+      "INVALID_PREFIX" : {
+        "message" : [
+          "Invalid interval prefix ."
+        ]
+      },
+      "INVALID_UNIT" : {
+        "message" : [
+          "Invalid unit ."
+        ]
+      },
+      "INVALID_VALUE" : {
+        "message" : [
+          "Invalid value ."
+        ]
+      },
+      "MISSING_NUMBER" : {
+        "message" : [
+          "Expect a number after but hit EOL."
+        ]
+      },
+      "MISSING_UNIT" : {
+        "message" : [
+          "Expect a unit name after but hit EOL."
+        ]
+      },
+      "UNKNOWN_PARSING_ERROR" : {
+        "message" : [
+          "Unknown error when parsing ."
+        ]
+      },
+      "UNRECOGNIZED_NUMBER" : {
+        "message" : [
+          "Unrecognized number ."
+        ]
+      }
+    },
+    "sqlState" : "22006"
+  },
   "INVALID_INVERSE_DISTRIBUTION_FUNCTION" : {
     "message" : [
       "Invalid inverse distribution function ."
@@ -2094,6 +2227,12 @@
     },
     "sqlState" : "42K0K"
   },
+  "INVALID_JSON_DATA_TYPE" : {
+    "message" : [
+      "Failed to convert the JSON string '' to a data type. Please enter a valid data type."
+    ],
+    "sqlState" : "2203G"
+  },
   "INVALID_JSON_ROOT_FIELD" : {
     "message" : [
       "Cannot convert JSON root field to target Spark type."
@@ -2818,6 +2957,12 @@
     ],
     "sqlState" : "07501"
   },
+  "NONEXISTENT_FIELD_NAME_IN_LIST" : {
+    "message" : [
+      "Field(s) do(es) not exist. Available fields: "
+    ],
+    "sqlState" : "HV091"
+  },
   "NON_FOLDABLE_ARGUMENT" : {
     "message" : [
       "The function requires the parameter to be a foldable expression of the type , but the actual argument is a non-foldable."
@@ -3021,6 +3166,12 @@
     ],
     "sqlState" : "2200E"
   },
+  "NULL_QUERY_STRING_EXECUTE_IMMEDIATE" : {
+    "message" : [
+      "Execute immediate requires a non-null variable as the query string, but the provided variable is null."
+    ],
+    "sqlState" : "22004"
+  },
   "NUMERIC_OUT_OF_SUPPORTED_RANGE" : {
     "message" : [
       "The value cannot be interpreted as a numeric since it has more than 38 digits."
@@ -3163,6 +3314,12 @@
     ],
     "sqlState" : "38000"
   },
+  "PYTHON_STREAMING_DATA_SOURCE_RUNTIME_ERROR" : {
+    "message" : [
+      "Failed when Python streaming data source perform : "
+    ],
+    "sqlState" : "38000"
+  },
   "RECURSIVE_PROTOBUF_SCHEMA" : {
     "message" : [
       "Found recursive reference in Protobuf schema, which can not be processed by Spark by default: . try setting the option `recursive.fields.max.depth` 0 to 10. Going beyond 10 levels of recursion is not allowed."
@@ -3331,9 +3488,9 @@
     ],
     "sqlState" : "0A000"
   },
-  "STATE_STORE_CANNOT_REMOVE_DEFAULT_COLUMN_FAMILY" : {
+  "STATE_STORE_CANNOT_USE_COLUMN_FAMILY_WITH_INVALID_NAME" : {
     "message" : [
-      "Failed to remove default column family with reserved name=."
+      "Failed to perform column family operation= with invalid name=. Column family name cannot be empty or include leading/trailing spaces or use the reserved keyword=default"
     ],
     "sqlState" : "42802"
   },
@@ -3356,6 +3513,12 @@
     ],
     "sqlState" : "XXKST"
   },
+  "STATE_STORE_UNSUPPORTED_OPERATION_ON_MISSING_COLUMN_FAMILY" : {
+    "message" : [
+      "State store operation= not supported on missing column family=."
+    ],
+    "sqlState" : "42802"
+  },
   "STATIC_PARTITION_COLUMN_IN_INSERT_COLUMN_LIST" : {
     "message" : [
       "Static partition column is also specified in the column list."
@@ -6145,17 +6308,17 @@
   },
   "_LEGACY_ERROR_TEMP_2109" : {
     "message" : [
-      "Cannot build HashedRelation with more than 1/3 billions unique keys."
+      "Cannot build HashedRelation with more than 1/3 billion unique keys."
     ]
   },
   "_LEGACY_ERROR_TEMP_2110" : {
     "message" : [
-      "Can not build a HashedRelation that is larger than 8G."
+      "Cannot build a HashedRelation that is larger than 8G."
     ]
   },
   "_LEGACY_ERROR_TEMP_2111" : {
     "message" : [
-      "failed to push a row into ."
+      "Failed to push a row into ."
     ]
   },
   "_LEGACY_ERROR_TEMP_2112" : {
     "message" : [
@@ -7774,16 +7937,6 @@
       "The numbers of zipped arrays and field names should be the same"
     ]
   },
-  "_LEGACY_ERROR_TEMP_3236" : {
-    "message" : [
-      "Unsupported special character for delimiter: "
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3237" : {
-    "message" : [
-      "Delimiter cannot be more than one character: "
-    ]
-  },
   "_LEGACY_ERROR_TEMP_3238" : {
     "message" : [
       "Failed to convert value (class of ) in type to XML."
     ]
   },
@@ -7829,71 +7982,11 @@
       "Failed to parse a value for data type ."
     ]
   },
-  "_LEGACY_ERROR_TEMP_3247" : {
-    "message" : [
-      "Delimiter cannot be empty string"
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3248" : {
-    "message" : [
-      "Single backslash is prohibited. It has special meaning as beginning of an escape sequence. To get the backslash character, pass a string with two backslashes as the delimiter."
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3249" : {
-    "message" : [
-      "Failed to convert value (class of }) with the type of to JSON."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_3250" : {
     "message" : [
       "Failed to convert the JSON string '' to a field."
     ]
   },
-  "_LEGACY_ERROR_TEMP_3251" : {
-    "message" : [
-      "Failed to convert the JSON string '' to a data type."
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3252" : {
-    "message" : [
-      " does not exist. Available: "
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3253" : {
-    "message" : [
-      " do(es) not exist. Available: "
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3254" : {
-    "message" : [
-      " does not exist. Available: "
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3255" : {
-    "message" : [
-      "Error parsing '' to interval, "
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3256" : {
-    "message" : [
-      "Unrecognized datetime pattern: "
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3257" : {
-    "message" : [
-      "All week-based patterns are unsupported since Spark 3.0, detected: , Please use the SQL function EXTRACT instead"
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3258" : {
-    "message" : [
-      "Illegal pattern character: "
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_3259" : {
-    "message" : [
-      "Too many pattern letters:
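One note on reading the new `error-classes.json` entries above (for example `INVALID_INTERVAL_FORMAT` and its subclasses): each entry stores a parameterized message template, and the raising site supplies named values for the template's angle-bracket parameters. A small self-contained sketch of that substitution idea, using a hypothetical template and parameter names rather than Spark's actual error framework:

```java
import java.util.Map;

public class ErrorTemplateDemo {
  // Substitute <name> placeholders in an error-class message template.
  static String render(String template, Map<String, String> params) {
    String result = template;
    for (Map.Entry<String, String> e : params.entrySet()) {
      result = result.replace("<" + e.getKey() + ">", e.getValue());
    }
    return result;
  }

  public static void main(String[] args) {
    // Hypothetical template in the style of INVALID_INTERVAL_FORMAT.UNRECOGNIZED_NUMBER.
    String template = "Error parsing '<input>' to interval. Unrecognized number <number>.";
    System.out.println(render(template, Map.of("input", "1 fortnight", "number", "fortnight")));
  }
}
```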