org.apache.hadoop.hbase.client.Put.add(byte[], byte[], byte[])
org.apache.hadoop.hbase.client.Put.add(byte[], byte[], long, byte[])
org.apache.hadoop.hbase.client.Put.add(byte[], ByteBuffer, long, ByteBuffer)
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.addPeer(String, String)
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.addPeer(String, String, String)
org.apache.hadoop.hbase.client.Result.addResults(ClientProtos.RegionLoadStats)
org.apache.hadoop.hbase.client.MetaScanner.allTableRegions(Configuration, Connection, TableName, boolean)
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.atomicIncrement(ByteBuffer, ByteBuffer, ByteBuffer, long)
org.apache.hadoop.hbase.client.HTable.batch(List<? extends Row>)
org.apache.hadoop.hbase.client.Table.batch(List<? extends Row>)
org.apache.hadoop.hbase.client.HTableWrapper.batch(List<? extends Row>)
org.apache.hadoop.hbase.client.HTable.batchCallback(List<? extends Row>, Batch.Callback<R>)
org.apache.hadoop.hbase.client.Table.batchCallback(List<? extends Row>, Batch.Callback<R>)
org.apache.hadoop.hbase.client.HTableWrapper.batchCallback(List<? extends Row>, Batch.Callback<R>)
org.apache.hadoop.hbase.client.HConnection.clearCaches(ServerName)
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.clearClusterId()
org.apache.hadoop.hbase.client.HConnection.clearRegionCache()
org.apache.hadoop.hbase.client.HConnection.clearRegionCache(byte[])
org.apache.hadoop.hbase.client.HConnection.clearRegionCache(TableName)
org.apache.hadoop.hbase.client.HBaseAdmin.compact(byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.compact(byte[], byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.compact(String)
org.apache.hadoop.hbase.client.HBaseAdmin.compact(String, String)
org.apache.hadoop.hbase.KeyValue.RawBytesComparator.compareFlatKey(byte[], int, int, byte[], int, int)
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(Job, HTable)
org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[], int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, String)
org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory.create(Configuration, PriorityFunction)
org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory.create(Configuration, PriorityFunction)
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration)
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, ExecutorService)
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, ExecutorService, User)
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, User)
org.apache.hadoop.hbase.KeyValue.createFirstOnRow(byte[])
org.apache.hadoop.hbase.KeyValue.createFirstOnRow(byte[], byte[], byte[])
org.apache.hadoop.hbase.KeyValue.createFirstOnRow(byte[], int, int, byte[], int, int, byte[], int, int)
org.apache.hadoop.hbase.RemoteExceptionHandler.decodeRemoteException(RemoteException)
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections()
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections(boolean)
org.apache.hadoop.hbase.client.HConnection.deleteCachedRegionLocation(HRegionLocation)
org.apache.hadoop.hbase.client.Delete.deleteColumn(byte[], byte[])
org.apache.hadoop.hbase.client.Delete.deleteColumn(byte[], byte[], long)
org.apache.hadoop.hbase.client.Delete.deleteColumns(byte[], byte[])
org.apache.hadoop.hbase.client.Delete.deleteColumns(byte[], byte[], long)
org.apache.hadoop.hbase.client.HConnectionManager.deleteConnection(Configuration)
org.apache.hadoop.hbase.client.Delete.deleteFamily(byte[])
org.apache.hadoop.hbase.client.Delete.deleteFamily(byte[], long)
org.apache.hadoop.hbase.client.Delete.deleteFamilyVersion(byte[], long)
org.apache.hadoop.hbase.client.HConnectionManager.deleteStaleConnection(HConnection)
org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(Cell)
org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValues(List<Cell>)
org.apache.hadoop.hbase.CellUtil.estimatedHeapSizeOfWithoutTags(Cell)
org.apache.hadoop.hbase.CellUtil.estimatedSizeOf(Cell)
org.apache.hadoop.hbase.client.HConnectionManager.execute(HConnectable<T>)
org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.execute(Server, RegionServerServices)
org.apache.hadoop.hbase.regionserver.SplitTransaction.execute(Server, RegionServerServices)
org.apache.hadoop.hbase.client.HTable.exists(List<Get>)
org.apache.hadoop.hbase.client.HTableInterface.exists(List<Get>)
org.apache.hadoop.hbase.rest.client.RemoteHTable.exists(List<Get>)
org.apache.hadoop.hbase.client.HTableWrapper.exists(List<Get>)
org.apache.hadoop.hbase.HBaseTestingUtility.expireSession(ZooKeeperWatcher, Server)
org.apache.hadoop.hbase.client.HBaseAdmin.flush(byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.flush(String)
org.apache.hadoop.hbase.client.HTableInterface.flushCommits()
org.apache.hadoop.hbase.PerformanceEvaluation.generateValue(Random)
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.get(ByteBuffer, ByteBuffer, ByteBuffer, Map<ByteBuffer, ByteBuffer>)
org.apache.hadoop.hbase.client.HConnection.getAdmin(ServerName)
org.apache.hadoop.hbase.client.HConnection.getAdmin(ServerName, boolean)
org.apache.hadoop.hbase.client.HTable.getAllRegionLocations()
org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.getAuths(byte[], boolean)
org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.getAuths(byte[], boolean)
org.apache.hadoop.hbase.security.visibility.ExpAsStringVisibilityLabelServiceImpl.getAuths(byte[], boolean)
org.apache.hadoop.hbase.KeyValue.getBuffer()
org.apache.hadoop.hbase.zookeeper.ZKUtil.getChildDataAndWatchForNewChildren(ZooKeeperWatcher, String)
org.apache.hadoop.hbase.client.HConnection.getClient(ServerName)
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.getClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.getClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterIdBuilder()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.getClusterIdOrBuilder()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.getClusterIdOrBuilder()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterIdOrBuilder()
org.apache.hadoop.hbase.client.Result.getColumn(byte[], byte[])
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], byte[])
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], int, int, byte[], int, int)
org.apache.hadoop.hbase.client.HBaseAdmin.getCompactionState(byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.getCompactionState(String)
org.apache.hadoop.hbase.client.HTable.getConnection()
org.apache.hadoop.hbase.client.HConnectionManager.getConnection(Configuration)
org.apache.hadoop.hbase.client.HConnection.getCurrentNrHRS()
org.apache.hadoop.hbase.HColumnDescriptor.getDataBlockEncodingOnDisk()
org.apache.hadoop.hbase.HRegionInfo.getDaughterRegions(Result)
org.apache.hadoop.hbase.wal.WAL.getEarliestMemstoreSeqNum(byte[])
org.apache.hadoop.hbase.client.HTable.getEndKeys()
org.apache.hadoop.hbase.Cell.getFamily()
org.apache.hadoop.hbase.KeyValue.getFamily()
org.apache.hadoop.hbase.TagRewriteCell.getFamily()
org.apache.hadoop.hbase.client.Mutation.getFamilyMap()
org.apache.hadoop.hbase.client.MetaScanner.getHRegionInfo(Result)
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfo(Result)
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfo(Result, byte[])
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfoAndServerName(Result)
org.apache.hadoop.hbase.mapred.TableInputFormatBase.getHTable()
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getHTable()
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(byte[])
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(TableName)
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptors(List<String>)
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptorsByTableName(List<TableName>)
org.apache.hadoop.hbase.client.ClusterConnection.getKeepAliveMasterService()
org.apache.hadoop.hbase.client.HConnection.getKeepAliveMasterService()
org.apache.hadoop.hbase.client.HConnection.getMaster()
org.apache.hadoop.hbase.HRegionInfo.getMergeRegions(Result)
org.apache.hadoop.hbase.Cell.getMvccVersion()
org.apache.hadoop.hbase.TagRewriteCell.getMvccVersion()
org.apache.hadoop.hbase.HTableDescriptor.getName()
org.apache.hadoop.hbase.filter.Filter.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.filter.FilterBase.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.filter.FilterList.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.filter.FilterWrapper.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.client.HConnection.getNonceGenerator()
org.apache.hadoop.hbase.HTableDescriptor.getOwnerString()
org.apache.hadoop.hbase.http.HttpServer.getPort()
org.apache.hadoop.hbase.http.InfoServer.getPort()
org.apache.hadoop.hbase.Cell.getQualifier()
org.apache.hadoop.hbase.KeyValue.getQualifier()
org.apache.hadoop.hbase.TagRewriteCell.getQualifier()
org.apache.hadoop.hbase.MetaTableAccessor.getRegion(Connection, byte[])
org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(byte[])
org.apache.hadoop.hbase.client.HTable.getRegionCachePrefetch(byte[])
org.apache.hadoop.hbase.client.HTable.getRegionCachePrefetch(Configuration, byte[])
org.apache.hadoop.hbase.client.HTable.getRegionCachePrefetch(Configuration, TableName)
org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(TableName)
org.apache.hadoop.hbase.client.HTable.getRegionCachePrefetch(TableName)
org.apache.hadoop.hbase.MetaTableAccessor.getRegionCount(Configuration, String)
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, HRegionInfo)
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, String)
org.apache.hadoop.hbase.client.HTable.getRegionLocation(byte[])
org.apache.hadoop.hbase.client.HTable.getRegionLocation(byte[], boolean)
org.apache.hadoop.hbase.client.HConnection.getRegionLocation(byte[], byte[], boolean)
org.apache.hadoop.hbase.client.HTable.getRegionLocation(String)
org.apache.hadoop.hbase.client.HConnection.getRegionLocation(TableName, byte[], boolean)
org.apache.hadoop.hbase.client.HTable.getRegionLocations()
org.apache.hadoop.hbase.client.HTable.getRegionsInRange(byte[], byte[])
org.apache.hadoop.hbase.client.HTable.getRegionsInRange(byte[], byte[], boolean)
org.apache.hadoop.hbase.Cell.getRow()
org.apache.hadoop.hbase.KeyValue.getRow()
org.apache.hadoop.hbase.TagRewriteCell.getRow()
org.apache.hadoop.hbase.TableName.getRowComparator()
org.apache.hadoop.hbase.client.HTable.getRowOrBefore(byte[], byte[])
org.apache.hadoop.hbase.client.HTableInterface.getRowOrBefore(byte[], byte[])
org.apache.hadoop.hbase.client.HTableWrapper.getRowOrBefore(byte[], byte[])
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.getRowOrBefore(ByteBuffer, ByteBuffer, ByteBuffer)
org.apache.hadoop.hbase.protobuf.ProtobufUtil.getRowOrBefore(ClientProtos.ClientService.BlockingInterface, byte[], byte[], byte[])
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean)
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean, boolean)
org.apache.hadoop.hbase.client.HTable.getScannerCaching()
org.apache.hadoop.hbase.HRegionInfo.getSeqNumDuringOpen(Result)
org.apache.hadoop.hbase.ClusterStatus.getServerInfo()
org.apache.hadoop.hbase.HRegionInfo.getServerName(Result)
org.apache.hadoop.hbase.KeyValue.KVComparator.getShortMidpointKey(byte[], byte[])
org.apache.hadoop.hbase.io.ImmutableBytesWritable.getSize()
org.apache.hadoop.hbase.client.HTable.getStartEndKeys()
org.apache.hadoop.hbase.client.HTable.getStartKeys()
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, HRegionInfo, byte[])
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, String, byte[])
org.apache.hadoop.hbase.HTableDescriptor.getTableDir(Path, byte[])
org.apache.hadoop.hbase.client.ClientScanner.getTableName()
org.apache.hadoop.hbase.client.HTableInterface.getTableName()
org.apache.hadoop.hbase.HRegionInfo.getTableName()
org.apache.hadoop.hbase.HRegionInfo.getTableName(byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames()
org.apache.hadoop.hbase.client.HConnection.getTableNames()
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames(Pattern)
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames(String)
org.apache.hadoop.hbase.KeyValue.getType()
org.apache.hadoop.hbase.security.access.AccessControlClient.getUserPermissions(Configuration, String)
org.apache.hadoop.hbase.Cell.getValue()
org.apache.hadoop.hbase.KeyValue.getValue()
org.apache.hadoop.hbase.TagRewriteCell.getValue()
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map<ByteBuffer, ByteBuffer>)
org.apache.hadoop.hbase.HRegionInfo.getVersion()
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map<ByteBuffer, ByteBuffer>)
org.apache.hadoop.hbase.client.BufferedMutatorImpl.getWriteBuffer()
org.apache.hadoop.hbase.client.HTable.getWriteBuffer()
org.apache.hadoop.hbase.client.HTableInterface.getWriteBufferSize()
org.apache.hadoop.hbase.client.Table.getWriteBufferSize()
org.apache.hadoop.hbase.client.Mutation.getWriteToWAL()
org.apache.hadoop.hbase.security.access.AccessControlClient.grant(Configuration, String, Permission.Action...)
org.apache.hadoop.hbase.security.access.AccessControlClient.grant(Configuration, String, String, Permission.Action...)
org.apache.hadoop.hbase.security.access.AccessControlClient.grant(Configuration, TableName, String, byte[], byte[], Permission.Action...)
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.hasClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.hasClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.hasClusterId()
org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.havingSystemAuth(byte[])
org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.havingSystemAuth(byte[])
org.apache.hadoop.hbase.security.visibility.ExpAsStringVisibilityLabelServiceImpl.havingSystemAuth(byte[])
org.apache.hadoop.hbase.KeyValue.heapSizeWithoutTags()
org.apache.hadoop.hbase.client.HTable.incrementColumnValue(byte[], byte[], byte[], long, boolean)
org.apache.hadoop.hbase.client.HTableInterface.incrementColumnValue(byte[], byte[], byte[], long, boolean)
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initCredentialsForCluster(Job, String)
org.apache.hadoop.hbase.regionserver.HRegion.initialize()
org.apache.hadoop.hbase.security.access.AccessControlClient.isAccessControllerRunning(Configuration)
org.apache.hadoop.hbase.client.HTableInterface.isAutoFlush()
org.apache.hadoop.hbase.client.ClusterConnection.isDeadServer(ServerName)
org.apache.hadoop.hbase.client.HConnection.isDeadServer(ServerName)
org.apache.hadoop.hbase.KeyValue.isDelete()
org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus)
org.apache.hadoop.hbase.client.ClusterConnection.isMasterRunning()
org.apache.hadoop.hbase.client.HBaseAdmin.isMasterRunning()
org.apache.hadoop.hbase.client.HConnection.isMasterRunning()
org.apache.hadoop.hbase.security.Superusers.isSuperUser(String)
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[])
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[], byte[][])
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(TableName, byte[][])
org.apache.hadoop.hbase.client.HConnection.isTableDisabled(byte[])
org.apache.hadoop.hbase.client.HConnection.isTableEnabled(byte[])
org.apache.hadoop.hbase.client.HTable.isTableEnabled(byte[])
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, byte[])
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, String)
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, TableName)
org.apache.hadoop.hbase.client.HTable.isTableEnabled(String)
org.apache.hadoop.hbase.client.HTable.isTableEnabled(TableName)
org.apache.hadoop.hbase.client.Result.list()
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listPeers()
org.apache.hadoop.hbase.client.HConnection.listTableNames()
org.apache.hadoop.hbase.client.HConnection.listTables()
org.apache.hadoop.hbase.client.HConnection.locateRegion(byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegion(byte[], byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegion(TableName, byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[], boolean, boolean)
org.apache.hadoop.hbase.client.HConnection.locateRegions(TableName)
org.apache.hadoop.hbase.client.HConnection.locateRegions(TableName, boolean, boolean)
org.apache.hadoop.hbase.client.HBaseAdmin.majorCompact(byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.majorCompact(byte[], byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.majorCompact(String)
org.apache.hadoop.hbase.client.HBaseAdmin.majorCompact(String, String)
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.mergeClusterId(HBaseProtos.UUID)
org.apache.hadoop.hbase.security.token.TokenUtil.obtainAndCacheToken(Configuration, UserGroupInformation)
org.apache.hadoop.hbase.security.User.obtainAuthTokenForJob(Configuration, Job)
org.apache.hadoop.hbase.security.User.obtainAuthTokenForJob(JobConf)
org.apache.hadoop.hbase.security.token.TokenUtil.obtainToken(Configuration)
org.apache.hadoop.hbase.security.token.TokenUtil.obtainTokenForJob(Configuration, UserGroupInformation, Job)
org.apache.hadoop.hbase.security.token.TokenUtil.obtainTokenForJob(JobConf, UserGroupInformation)
org.apache.hadoop.hbase.KeyValue.oswrite(KeyValue, OutputStream)
org.apache.hadoop.hbase.zookeeper.ZKConfig.parseZooCfg(Configuration, InputStream)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompact(ObserverContext<RegionCoprocessorEnvironment>, Store, StoreFile)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment>, Store, <any>)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postFlush(ObserverContext<RegionCoprocessorEnvironment>)
org.apache.hadoop.hbase.master.MasterCoprocessorHost.postGetTableDescriptors(List<HTableDescriptor>)
org.apache.hadoop.hbase.coprocessor.MasterObserver.postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment>, List<HTableDescriptor>)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postIncrementColumnValue(ObserverContext<RegionCoprocessorEnvironment>, byte[], byte[], byte[], long, boolean, long)
org.apache.hadoop.hbase.regionserver.RegionServerServices.postOpenDeployTasks(Region)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postSplit(ObserverContext<RegionCoprocessorEnvironment>, Region, Region)
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postWALRestore(HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postWALRestore(ObserverContext<RegionCoprocessorEnvironment>, HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.WALObserver.postWALWrite(ObserverContext<WALCoprocessorEnvironment>, HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompact(ObserverContext<RegionCoprocessorEnvironment>, Store, InternalScanner, ScanType)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment>, Store, List<? extends KeyValueScanner>, ScanType, long, InternalScanner)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactSelection(ObserverContext<RegionCoprocessorEnvironment>, Store, List<StoreFile>)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preFlush(ObserverContext<RegionCoprocessorEnvironment>)
org.apache.hadoop.hbase.master.MasterCoprocessorHost.preGetTableDescriptors(List<TableName>, List<HTableDescriptor>)
org.apache.hadoop.hbase.coprocessor.MasterObserver.preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment>, List<TableName>, List<HTableDescriptor>)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preIncrementColumnValue(ObserverContext<RegionCoprocessorEnvironment>, byte[], byte[], byte[], long, boolean)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preSplit(ObserverContext<RegionCoprocessorEnvironment>)
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preWALRestore(HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preWALRestore(ObserverContext<RegionCoprocessorEnvironment>, HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.WALObserver.preWALWrite(ObserverContext<WALCoprocessorEnvironment>, HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.client.HConnection.processBatch(List<? extends Row>, byte[], ExecutorService, Object[])
org.apache.hadoop.hbase.client.HConnection.processBatch(List<? extends Row>, TableName, ExecutorService, Object[])
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List<? extends Row>, byte[], ExecutorService, Object[], Batch.Callback<R>)
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List<? extends Row>, TableName, ExecutorService, Object[], Batch.Callback<R>)
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], List<Put>)
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put)
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put, int)
org.apache.hadoop.hbase.client.HTablePool.putTable(HTableInterface)
org.apache.hadoop.hbase.client.Result.raw()
org.apache.hadoop.hbase.HColumnDescriptor.readFields(DataInput)
org.apache.hadoop.hbase.HRegionInfo.readFields(DataInput)
org.apache.hadoop.hbase.HTableDescriptor.readFields(DataInput)
org.apache.hadoop.hbase.io.Reference.readFields(DataInput)
org.apache.hadoop.hbase.util.Bytes.readVLong(byte[], int)
org.apache.hadoop.hbase.client.HConnection.relocateRegion(byte[], byte[])
org.apache.hadoop.hbase.client.HConnection.relocateRegion(TableName, byte[])
org.apache.hadoop.hbase.regionserver.RegionServerServices.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode, HRegionInfo...)
org.apache.hadoop.hbase.regionserver.RegionServerServices.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode, long, HRegionInfo...)
org.apache.hadoop.hbase.io.hfile.HFileScanner.reseekTo(byte[])
org.apache.hadoop.hbase.io.hfile.HFileScanner.reseekTo(byte[], int, int)
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.reverseDNS(InetAddress)
org.apache.hadoop.hbase.security.access.AccessControlClient.revoke(Configuration, String, Permission.Action...)
org.apache.hadoop.hbase.security.access.AccessControlClient.revoke(Configuration, String, String, Permission.Action...)
org.apache.hadoop.hbase.security.access.AccessControlClient.revoke(Configuration, TableName, String, byte[], byte[], Permission.Action...)
org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.rollback(Server, RegionServerServices)
org.apache.hadoop.hbase.regionserver.SplitTransaction.rollback(Server, RegionServerServices)
org.apache.hadoop.hbase.client.HBaseAdmin.rollHLogWriter(String)
org.apache.hadoop.hbase.io.hfile.HFileScanner.seekBefore(byte[])
org.apache.hadoop.hbase.io.hfile.HFileScanner.seekBefore(byte[], int, int)
org.apache.hadoop.hbase.io.hfile.HFileScanner.seekTo(byte[])
org.apache.hadoop.hbase.io.hfile.HFileScanner.seekTo(byte[], int, int)
org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker.seekToKeyInBlock(byte[], int, int, boolean)
org.apache.hadoop.hbase.client.HTable.setAutoFlush(boolean)
org.apache.hadoop.hbase.client.HTableInterface.setAutoFlush(boolean)
org.apache.hadoop.hbase.client.HTableInterface.setAutoFlush(boolean, boolean)
org.apache.hadoop.hbase.client.HTableInterface.setAutoFlushTo(boolean)
org.apache.hadoop.hbase.http.HttpServer.Builder.setBindAddress(String)
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.setClusterId(HBaseProtos.UUID.Builder)
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.setClusterId(HBaseProtos.UUID)
org.apache.hadoop.hbase.HColumnDescriptor.setEncodeOnDisk(boolean)
org.apache.hadoop.hbase.client.Append.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.client.Delete.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.client.Increment.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.client.Mutation.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.client.Put.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.mapred.TableInputFormatBase.setHTable(HTable)
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.setHTable(HTable)
org.apache.hadoop.hbase.mapreduce.TableRecordReader.setHTable(Table)
org.apache.hadoop.hbase.HColumnDescriptor.setKeepDeletedCells(boolean)
org.apache.hadoop.hbase.HTableDescriptor.setName(byte[])
org.apache.hadoop.hbase.http.HttpServer.Builder.setName(String)
org.apache.hadoop.hbase.HTableDescriptor.setName(TableName)
org.apache.hadoop.hbase.HTableDescriptor.setOwner(User)
org.apache.hadoop.hbase.HTableDescriptor.setOwnerString(String)
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.setPeerTableCFs(String, String)
org.apache.hadoop.hbase.http.HttpServer.Builder.setPort(int)
org.apache.hadoop.hbase.client.HConnection.setRegionCachePrefetch(byte[], boolean)
org.apache.hadoop.hbase.client.HTable.setRegionCachePrefetch(byte[], boolean)
org.apache.hadoop.hbase.client.HTable.setRegionCachePrefetch(Configuration, byte[], boolean)
org.apache.hadoop.hbase.client.HTable.setRegionCachePrefetch(Configuration, TableName, boolean)
org.apache.hadoop.hbase.client.HConnection.setRegionCachePrefetch(TableName, boolean)
org.apache.hadoop.hbase.client.HTable.setRegionCachePrefetch(TableName, boolean)
org.apache.hadoop.hbase.client.HTable.setScannerCaching(int)
org.apache.hadoop.hbase.client.HConnectionManager.setServerSideHConnectionRetries(Configuration, String, Log)
org.apache.hadoop.hbase.client.BufferedMutatorImpl.setWriteBufferSize(long)
org.apache.hadoop.hbase.client.HTableInterface.setWriteBufferSize(long)
org.apache.hadoop.hbase.client.Table.setWriteBufferSize(long)
org.apache.hadoop.hbase.client.Append.setWriteToWAL(boolean)
org.apache.hadoop.hbase.client.Delete.setWriteToWAL(boolean)
org.apache.hadoop.hbase.client.Increment.setWriteToWAL(boolean)
org.apache.hadoop.hbase.client.Mutation.setWriteToWAL(boolean)
org.apache.hadoop.hbase.client.Put.setWriteToWAL(boolean)
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheBloomsOnWrite()
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheDataInL1()
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheDataOnWrite()
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheIndexesOnWrite()
org.apache.hadoop.hbase.HColumnDescriptor.shouldCompressTags()
org.apache.hadoop.hbase.HColumnDescriptor.shouldEvictBlocksOnClose()
org.apache.hadoop.hbase.HColumnDescriptor.shouldPrefetchBlocksOnOpen()
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.skipStoreFileRangeCheck()
org.apache.hadoop.hbase.client.HBaseAdmin.split(byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.split(byte[], byte[])
org.apache.hadoop.hbase.client.HBaseAdmin.split(String)
org.apache.hadoop.hbase.client.HBaseAdmin.split(String, String)
org.apache.hadoop.hbase.regionserver.RegionMergeTransactionImpl.stepsAfterPONR(Server, RegionServerServices, HRegion)
org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.stepsAfterPONR(Server, RegionServerServices, PairOfSameType<Region>)
org.apache.hadoop.hbase.client.TestHCM.testConnectionUniqueness()
org.apache.hadoop.hbase.client.TestFromClientSide.testSharedZooKeeper()
org.apache.hadoop.hbase.filter.Filter.transform(KeyValue)
org.apache.hadoop.hbase.filter.FilterBase.transform(KeyValue)
org.apache.hadoop.hbase.filter.FilterList.transform(KeyValue)
org.apache.hadoop.hbase.filter.FilterWrapper.transform(KeyValue)
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, byte[], byte[], Collection<LoadIncrementalHFiles.LoadQueueItem>)
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(byte[], byte[], Object, HRegionLocation)
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(TableName, byte[], byte[], Object, ServerName)
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(TableName, byte[], Object, HRegionLocation)
org.apache.hadoop.hbase.zookeeper.ZKUtil.updateExistingNodeData(ZooKeeperWatcher, String, byte[], int)
org.apache.hadoop.hbase.HColumnDescriptor.write(DataOutput)
org.apache.hadoop.hbase.HRegionInfo.write(DataOutput)
org.apache.hadoop.hbase.HTableDescriptor.write(DataOutput)
org.apache.hadoop.hbase.regionserver.wal.HLogKey.write(DataOutput)
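For readers migrating client code away from entries in the list above, the following minimal sketch (not part of the original listing) shows the commonly documented HBase 1.0-era replacements for a handful of the deprecated client calls: HConnectionManager.createConnection, Put.add, Delete.deleteColumns, Result.raw()/list(), and Cell.getValue()/getQualifier(). The table name "t1", row key, and column family/qualifier are illustrative placeholders only.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeprecatedClientMigration {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // ConnectionFactory.createConnection replaces the deprecated
    // HConnectionManager.createConnection(Configuration) listed above.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         // Connection.getTable replaces direct use of HTable/HTableInterface.
         Table table = connection.getTable(TableName.valueOf("t1"))) {

      byte[] row = Bytes.toBytes("row1");          // illustrative row key
      byte[] family = Bytes.toBytes("f");          // illustrative column family
      byte[] qualifier = Bytes.toBytes("q");       // illustrative qualifier

      // Put.addColumn replaces the deprecated Put.add(byte[], byte[], byte[]).
      Put put = new Put(row);
      put.addColumn(family, qualifier, Bytes.toBytes("value"));
      table.put(put);

      // Result.rawCells() plus the CellUtil accessors replace the deprecated
      // Result.raw()/Result.list() and Cell.getQualifier()/Cell.getValue().
      Result result = table.get(new Get(row));
      for (Cell cell : result.rawCells()) {
        System.out.println(Bytes.toString(CellUtil.cloneQualifier(cell))
            + " = " + Bytes.toString(CellUtil.cloneValue(cell)));
      }

      // Delete.addColumns replaces the deprecated Delete.deleteColumns(byte[], byte[]).
      Delete delete = new Delete(row);
      delete.addColumns(family, qualifier);
      table.delete(delete);
    }
  }
}
```

For the client-side write-buffer methods in the list (HTableInterface.flushCommits, setAutoFlush, getWriteBuffer, and related calls), the usual replacement is the BufferedMutator obtained from Connection.getBufferedMutator, which batches and flushes mutations on the caller's behalf.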