test: Run integration tests against v2 as well
nightscape committed Nov 15, 2023
1 parent 0a40a20 commit 5929991
Showing 1 changed file with 17 additions and 9 deletions.
src/test/scala/com/crealytics/spark/excel/IntegrationSuite.scala (26 changes: 17 additions & 9 deletions)
@@ -72,7 +72,7 @@ class IntegrationSuite
       .map { case ((f, sf), idx) => sf.name -> f(data.toIndexedSeq.map(_.get(idx))) }
   }
 
-  def runTests(maxRowsInMemory: Option[Int], maxByteArraySize: Option[Int] = None): Unit = {
+  def runTests(implementation: String, maxRowsInMemory: Option[Int], maxByteArraySize: Option[Int] = None): Unit = {
     def writeThenRead(
       df: DataFrame,
       schema: Option[StructType] = Some(exampleDataSchema),
@@ -83,15 +83,19 @@
     ): DataFrame = {
       val theFileName = fileName.getOrElse(File.createTempFile("spark_excel_test_", ".xlsx").getAbsolutePath)
 
-      val writer = df.write.excel(dataAddress = s"'$sheetName'!A1", header = header).mode(saveMode)
+      val writer = df.write
+        .format(implementation)
+        .option("dataAddress", s"'$sheetName'!A1")
+        .option("header", header)
+        .mode(saveMode)
       val configuredWriter =
         Map("dataAddress" -> dataAddress).foldLeft(writer) {
           case (wri, (key, Some(value))) => wri.option(key, value)
           case (wri, _) => wri
         }
       configuredWriter.save(theFileName)
 
-      val reader = spark.read.excel(dataAddress = s"'$sheetName'!A1", header = header)
+      val reader = spark.read.format(implementation).option("dataAddress", s"'$sheetName'!A1").option("header", header)
       val configuredReader = Map(
         "maxRowsInMemory" -> maxRowsInMemory,
         "maxByteArraySize" -> maxByteArraySize,
@@ -117,7 +121,9 @@
       assertDataFrameEquals(expected, inferred)
     }
 
-    describe(s"with maxRowsInMemory = $maxRowsInMemory; maxByteArraySize = $maxByteArraySize") {
+    describe(
+      s"with implementation = $implementation, maxRowsInMemory = $maxRowsInMemory; maxByteArraySize = $maxByteArraySize"
+    ) {
       it("parses known datatypes correctly") {
         forAll(rowsGen) { rows =>
           val expected = spark.createDataset(rows).toDF()
@@ -346,9 +352,11 @@
     differencesInNonOverwrittenData shouldBe empty
     ()
   }
-  runTests(maxRowsInMemory = None)
-  runTests(maxRowsInMemory = None, maxByteArraySize = Some(100000000))
-  runTests(maxRowsInMemory = Some(20))
-  runTests(maxRowsInMemory = Some(1))
-  runTests(maxRowsInMemory = Some(1), maxByteArraySize = Some(100000000))
+  Seq("excel", "com.crealytics.spark.excel").foreach { implementation =>
+    runTests(implementation, maxRowsInMemory = None)
+    runTests(implementation, maxRowsInMemory = None, maxByteArraySize = Some(100000000))
+    runTests(implementation, maxRowsInMemory = Some(20))
+    runTests(implementation, maxRowsInMemory = Some(1))
+    runTests(implementation, maxRowsInMemory = Some(1), maxByteArraySize = Some(100000000))
+  }
 }
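
Note: the commit parameterizes the suite over the two DataSource identifiers the connector is registered under: "excel" (which, going by the commit title, selects the V2 implementation) and "com.crealytics.spark.excel" (the original V1 source). A minimal sketch of reading the same workbook through both identifiers, assuming a SparkSession named `spark` is in scope and using a hypothetical file path:

    // Reading through the short name; per the commit title this exercises the v2 path.
    val dfV2 = spark.read
      .format("excel")
      .option("dataAddress", "'Sheet1'!A1")
      .option("header", true)
      .load("/tmp/example.xlsx")

    // Reading through the fully qualified V1 name; both should yield the same data.
    val dfV1 = spark.read
      .format("com.crealytics.spark.excel")
      .option("dataAddress", "'Sheet1'!A1")
      .option("header", true)
      .load("/tmp/example.xlsx")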

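The suite also keeps its pattern of folding a map of optional settings onto the writer, so only options that are defined get applied, regardless of which implementation is under test. A standalone sketch of that pattern, assuming a DataFrame `df` in scope (keys and values are illustrative):

    // Only entries whose value is Some(...) become options; None entries are skipped.
    val optionalSettings: Map[String, Option[String]] = Map(
      "dataAddress" -> Some("'Sheet1'!B3"),
      "maxRowsInMemory" -> None
    )
    val configuredWriter = optionalSettings.foldLeft(df.write.format("excel")) {
      case (writer, (key, Some(value))) => writer.option(key, value)
      case (writer, _) => writer
    }
    configuredWriter.save("/tmp/example.xlsx")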