@@ -25,8 +25,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.Text
 import org.apache.hadoop.io.compress.{CompressionCodecFactory, GzipCodec}
 
-import org.apache.spark.{SparkConf, SparkContext, SparkException, SparkFunSuite}
-import org.apache.spark.internal.config
+import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
 import org.apache.spark.io.ZStdCompressionCodec
 
 /**
@@ -112,43 +111,26 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite {
           createNativeFile(dir, filename, contents, compressionType)
         }
 
-        if (!sc.conf.get(config.FILE_DATA_SOURCE_ZSTANDARD_ENABLED) &&
-            (compressionType == CompressionType.ZSTD || compressionType == CompressionType.ZST)) {
-          val e = intercept[SparkException] {
-            sc.wholeTextFiles(dir.toString, 3).collect()
-          }
-          assert(e.getCause.isInstanceOf[RuntimeException])
-          assert(e.getCause.getMessage === "native zStandard library not available: " +
-            "this version of libhadoop was built without zstd support.")
-        } else {
-          val res = sc.wholeTextFiles(dir.toString, 3).collect()
-
-          assert(res.length === WholeTextFileRecordReaderSuite.fileNames.length,
-            "Number of files read out does not fit with the actual value.")
-
-          for ((filename, contents) <- res) {
-            val shortName = compressionType match {
-              case CompressionType.NONE => filename.split('/').last
-              case _ => filename.split('/').last.split('.').head
-            }
-            assert(WholeTextFileRecordReaderSuite.fileNames.contains(shortName),
-              s"Missing file name $filename.")
-            assert(contents === new Text(WholeTextFileRecordReaderSuite.files(shortName)).toString,
-              s"file $filename contents can not match.")
+        val res = sc.wholeTextFiles(dir.toString, 3).collect()
+
+        assert(res.length === WholeTextFileRecordReaderSuite.fileNames.length,
+          "Number of files read out does not fit with the actual value.")
+
+        for ((filename, contents) <- res) {
+          val shortName = compressionType match {
+            case CompressionType.NONE => filename.split('/').last
+            case _ => filename.split('/').last.split('.').head
           }
+          assert(WholeTextFileRecordReaderSuite.fileNames.contains(shortName),
+            s"Missing file name $filename.")
+          assert(contents === new Text(WholeTextFileRecordReaderSuite.files(shortName)).toString,
+            s"file $filename contents can not match.")
         }
       }
     }
   }
 }
 
-class WholeTextFileRecordReaderZStandardDisabledSuite extends WholeTextFileRecordReaderSuite {
-
-  override def getSparkConf(): SparkConf = {
-    super.getSparkConf().set(config.FILE_DATA_SOURCE_ZSTANDARD_ENABLED, false)
-  }
-}
-
 /**
  * Files to be tested are defined here.
  */