org.apache.hadoop.hive.ql.io.rcfile.truncate
Class ColumnTruncateWork
java.lang.Object
org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc
org.apache.hadoop.hive.ql.plan.BaseWork
org.apache.hadoop.hive.ql.plan.MapWork
org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork
- All Implemented Interfaces:
- Serializable, Cloneable, OperatorDesc
public class ColumnTruncateWork
- extends MapWork
- implements Serializable
- See Also:
- Serialized Form
Methods inherited from class org.apache.hadoop.hive.ql.plan.MapWork
addIndexIntermediateFile, addMapWork, configureJobConf, deriveExplainAttributes, getAliases, getAliasToPartnInfo, getAliasToWork, getAllRootOperators, getBucketedColsByDirectory, getHadoopSupportsSplittable, getIndexIntermediateFile, getJoinTree, getMapLocalWork, getMaxSplitSize, getMinSplitSizePerNode, getMinSplitSizePerRack, getNameToSplitSample, getNumMapTasks, getOpParseCtxMap, getPartitionDescs, getPaths, getPathToAliases, getPathToPartitionInfo, getSamplingType, getSamplingTypeString, getScratchColumnMap, getScratchColumnVectorTypes, getSortedColsByDirectory, getTmpHDFSPath, getTruncatedPathToAliases, getVectorMode, getVectorModeOn, getWorks, initialize, isInputFormatSorted, isMapperCannotSpanPartns, isUseBucketizedHiveInputFormat, mergeAliasedInput, mergingInto, replaceRoots, resolveDynamicPartitionStoredAsSubDirsMerge, setAliasToPartnInfo, setAliasToWork, setHadoopSupportsSplittable, setInputformat, setInputFormatSorted, setJoinTree, setMapLocalWork, setMapperCannotSpanPartns, setMaxSplitSize, setMinSplitSize, setMinSplitSizePerNode, setMinSplitSizePerRack, setNameToSplitSample, setNumMapTasks, setOpParseCtxMap, setPathToAliases, setPathToPartitionInfo, setSamplingType, setScratchColumnMap, setScratchColumnVectorTypes, setTmpHDFSPath, setUseBucketizedHiveInputFormat, setVectorMode
ColumnTruncateWork
public ColumnTruncateWork()
ColumnTruncateWork
public ColumnTruncateWork(List<Integer> droppedColumns,
org.apache.hadoop.fs.Path inputDir,
org.apache.hadoop.fs.Path outputDir)
ColumnTruncateWork
public ColumnTruncateWork(List<Integer> droppedColumns,
org.apache.hadoop.fs.Path inputDir,
org.apache.hadoop.fs.Path outputDir,
boolean hasDynamicPartitions,
DynamicPartitionCtx dynPartCtx)
getInputDir
public org.apache.hadoop.fs.Path getInputDir()
setInputPaths
public void setInputPaths(org.apache.hadoop.fs.Path inputDir)
getOutputDir
public org.apache.hadoop.fs.Path getOutputDir()
setOutputDir
public void setOutputDir(org.apache.hadoop.fs.Path outputDir)
getMapperClass
public Class<? extends org.apache.hadoop.mapred.Mapper> getMapperClass()
getMinSplitSize
public Long getMinSplitSize()
- Overrides:
getMinSplitSize
in class MapWork
getInputformat
public String getInputformat()
- Overrides:
getInputformat
in class MapWork
isGatheringStats
public boolean isGatheringStats()
- Overrides:
isGatheringStats
in class BaseWork
hasDynamicPartitions
public boolean hasDynamicPartitions()
setHasDynamicPartitions
public void setHasDynamicPartitions(boolean hasDynamicPartitions)
getDynPartCtx
public DynamicPartitionCtx getDynPartCtx()
setDynPartCtx
public void setDynPartCtx(DynamicPartitionCtx dynPartCtx)
getListBucketingCtx
public ListBucketingCtx getListBucketingCtx()
- Returns:
- the listBucketingCtx
setListBucketingCtx
public void setListBucketingCtx(ListBucketingCtx listBucketingCtx)
- Parameters:
listBucketingCtx
- the listBucketingCtx to set
isListBucketingAlterTableConcatenate
public boolean isListBucketingAlterTableConcatenate()
- Returns:
- the isListBucketingAlterTableConcatenate
getDroppedColumns
public List<Integer> getDroppedColumns()
setDroppedColumns
public void setDroppedColumns(List<Integer> droppedColumns)
Copyright © 2014 The Apache Software Foundation. All rights reserved.