public class ParquetTableScan extends SparkPlan implements scala.Product, scala.Serializable
Physical operator that reads the contents of the given ParquetRelation and returns them as a RDD[Row].
| Constructor and Description |
|---|
ParquetTableScan(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes,
org.apache.spark.sql.parquet.ParquetRelation relation,
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> columnPruningPred) |
| Modifier and Type | Method and Description |
|---|---|
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
attributes() |
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> |
columnPruningPred() |
RDD<org.apache.spark.sql.catalyst.expressions.Row> |
execute()
Runs this query, returning the result as an RDD. |
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
normalOutput() |
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
output() |
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
partOutput() |
ParquetTableScan |
pruneColumns(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> prunedAttributes) |
org.apache.spark.sql.parquet.ParquetRelation |
relation() |
codegenEnabled, executeCollect, makeCopy, outputPartitioning, requiredChildDistribution
expressions, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, schema, schemaString, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp
apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, generateTreeString, getNodeNumbered, id, map, mapChildren, nextId, nodeName, numberedTreeString, otherCopyArgs, sameInstance, simpleString, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren
productArity, productElement, productIterator, productPrefix
initialized, initializeIfNecessary, initializeLogging, initLock, isTraceEnabled, log_, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning
public ParquetTableScan(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, org.apache.spark.sql.parquet.ParquetRelation relation, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> columnPruningPred)
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes()
public org.apache.spark.sql.parquet.ParquetRelation relation()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> columnPruningPred()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> normalOutput()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> partOutput()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Overrides:
output in class org.apache.spark.sql.catalyst.plans.QueryPlan<SparkPlan>
public RDD<org.apache.spark.sql.catalyst.expressions.Row> execute()
Specified by:
execute in class SparkPlan
public ParquetTableScan pruneColumns(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> prunedAttributes)