2 files changed: +1 -21 lines

main/scala/org/apache/spark/sql/execution
test/scala/org/apache/spark/sql/execution

@@ -64,8 +64,7 @@ trait ColumnarToRowTransition extends UnaryExecNode
  * [[MapPartitionsInRWithArrowExec]]. Eventually this should replace those implementations.
  */
 case class ColumnarToRowExec(child: SparkPlan) extends ColumnarToRowTransition with CodegenSupport {
-  // supportsColumnar requires to be only called on driver side, see also SPARK-37779.
-  assert(TaskContext.get != null || child.supportsColumnar)
+  assert(child.supportsColumnar)
 
   override def output: Seq[Attribute] = child.output
 
@@ -88,23 +88,4 @@ class SparkPlanSuite extends QueryTest with SharedSparkSession {
   test("SPARK-30780 empty LocalTableScan should use RDD without partitions") {
     assert(LocalTableScanExec(Nil, Nil).execute().getNumPartitions == 0)
   }
-
-  test("SPARK-37779: ColumnarToRowExec should be canonicalizable after being (de)serialized") {
-    withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") {
-      withTempPath { path =>
-        spark.range(1).write.parquet(path.getAbsolutePath)
-        val df = spark.read.parquet(path.getAbsolutePath)
-        val columnarToRowExec =
-          df.queryExecution.executedPlan.collectFirst { case p: ColumnarToRowExec => p }.get
-        try {
-          spark.range(1).foreach { _ =>
-            columnarToRowExec.canonicalized
-            ()
-          }
-        } catch {
-          case e: Throwable => fail("ColumnarToRowExec was not canonicalizable", e)
-        }
-      }
-    }
-  }
 }
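
For context, below is a minimal driver-side sketch of the pattern the removed test exercised. It is an illustration, not part of this change, and it assumes a local SparkSession, a hypothetical Parquet path /tmp/c2r-example, and the default v1 vectorized Parquet reader so that a ColumnarToRowExec node actually appears in the physical plan. Canonicalizing the node rebuilds it through its constructor, so the restored assert(child.supportsColumnar) runs again; the removed test went further and triggered the same canonicalization inside spark.range(1).foreach so that it happened on executors.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.ColumnarToRowExec

// Hypothetical local session and output path, purely for illustration.
val spark = SparkSession.builder().master("local[2]").appName("c2r-sketch").getOrCreate()
spark.range(1).write.mode("overwrite").parquet("/tmp/c2r-example")

val df = spark.read.parquet("/tmp/c2r-example")
// Pull the ColumnarToRowExec node out of the physical plan, as the removed test did.
// This assumes the scan is columnar (v1 Parquet with the vectorized reader enabled).
val c2r = df.queryExecution.executedPlan
  .collectFirst { case p: ColumnarToRowExec => p }
  .get
// canonicalized copies the node, re-running the constructor-time
// assert(child.supportsColumnar) -- here on the driver, where it passes.
val canonical = c2r.canonicalized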