@InterfaceAudience.Private public class ClientScanner extends AbstractClientScanner
Modifier and Type | Field and Description
---|---
protected LinkedList<Result> | cache
protected int | caching
protected org.apache.hadoop.hbase.client.ScannerCallableWithReplicas | callable
protected RpcRetryingCaller<Result[]> | caller
protected boolean | closed
protected Configuration | conf
protected HRegionInfo | currentRegion
protected long | lastNext
protected Result | lastResult
protected long | maxScannerResultSize
protected LinkedList<Result> | partialResults: A list of partial results that have been returned from the server.
protected byte[] | partialResultsRow: The row for which we are accumulating partial Results.
protected ExecutorService | pool
protected int | primaryOperationTimeout
protected RpcControllerFactory | rpcControllerFactory
protected Scan | scan
protected boolean | scanMetricsPublished
protected int | scannerTimeout

Fields inherited from class org.apache.hadoop.hbase.client.AbstractClientScanner: scanMetrics
Constructor and Description
---|
ClientScanner(Configuration conf, Scan scan, TableName tableName, ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout): Create a new ClientScanner for the specified table. Note that the passed Scan's start row may be changed.
Modifier and Type | Method and Description
---|---
protected boolean | checkScanStopRow(byte[] endKey)
void | close(): Closes the scanner and releases any resources it has allocated.
protected static byte[] | createClosestRowBefore(byte[] row): Create the closest row before the specified row.
int | getCacheSize()
protected int | getCaching()
protected Configuration | getConf()
protected ClusterConnection | getConnection()
protected long | getMaxResultSize()
protected ExecutorService | getPool()
protected int | getPrimaryOperationTimeout()
protected List<Result> | getResultsToAddToCache(Result[] resultsFromServer, boolean heartbeatMessage): This method ensures all of our bookkeeping regarding partial results is kept up to date.
protected int | getRetries()
protected Scan | getScan()
protected org.apache.hadoop.hbase.client.ScannerCallableWithReplicas | getScannerCallable(byte[] localStartKey, int nbRows)
protected int | getScannerTimeout()
protected TableName | getTable()
protected byte[] | getTableName(): Deprecated. As of release 0.96 (HBASE-9508). This will be removed in HBase 2.0.0. Use getTable() instead.
protected long | getTimestamp()
protected void | initializeScannerInConstruction()
protected void | loadCache(): Contact the servers to load more Results in the cache.
Result | next(): Grab the next row's worth of values.
protected boolean | nextScanner(int nbRows, boolean done)
boolean | renewLease(): Allow the client to renew the scanner's lease on the server.
protected void | writeScanMetrics(): Publish the scan metrics.
Methods inherited from class org.apache.hadoop.hbase.client.AbstractClientScanner: getScanMetrics, initScanMetrics, iterator, next
Methods inherited from class java.lang.Object: clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
Methods inherited from interface java.lang.Iterable: forEach, spliterator
protected Scan scan
protected boolean closed
protected HRegionInfo currentRegion
protected org.apache.hadoop.hbase.client.ScannerCallableWithReplicas callable
protected final LinkedList<Result> cache
protected final LinkedList<Result> partialResults
protected byte[] partialResultsRow
The row for which we are accumulating partial Results. Changes to partialResults and partialResultsRow should be kept in sync via addToPartialResults(Result) and clearPartialResults().
protected final int caching
protected long lastNext
protected Result lastResult
protected final long maxScannerResultSize
protected final int scannerTimeout
protected boolean scanMetricsPublished
protected RpcRetryingCaller<Result[]> caller
protected RpcControllerFactory rpcControllerFactory
protected Configuration conf
protected final int primaryOperationTimeout
protected final ExecutorService pool
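The caching, maxScannerResultSize and scannerTimeout fields above are populated from the Scan and the Configuration passed to the constructor. A minimal sketch of the client-side settings that typically feed them; the hbase.client.scanner.timeout.period key and the default values are assumptions to verify against your HBase release:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Scan;

public class ScannerTuningSketch {
  public static Scan tunedScan(Configuration conf) {
    // Scanner lease/timeout period in milliseconds; ClientScanner reads a value
    // like this into its scannerTimeout field (key name assumed, check your release).
    conf.setInt("hbase.client.scanner.timeout.period", 60000);

    Scan scan = new Scan();
    scan.setCaching(100);                    // rows fetched per RPC; backs the caching field
    scan.setMaxResultSize(2L * 1024 * 1024); // byte cap per RPC; backs maxScannerResultSize
    return scan;
  }
}
```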
public ClientScanner(Configuration conf, Scan scan, TableName tableName, ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) throws IOException
Create a new ClientScanner for the specified table. Note that the passed Scan's start row may be changed.
Parameters:
conf - The Configuration to use.
scan - Scan to use in this scanner
tableName - The table that we wish to scan
connection - Connection identifying the cluster
Throws:
IOException
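Because ClientScanner is annotated @InterfaceAudience.Private, application code normally does not call this constructor directly; a scanner is created internally when a scan is started through the public client API. A hedged sketch of that public path, with my_table as a hypothetical table name:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanViaPublicApi {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table"))) {
      Scan scan = new Scan();
      // table.getScanner(...) builds the scanner (a ClientScanner in current
      // releases) from the connection's conf, RPC factories and thread pool.
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(result);
        }
      }
    }
  }
}
```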
protected void initializeScannerInConstruction() throws IOException
Throws:
IOException
protected ClusterConnection getConnection()
@Deprecated protected byte[] getTableName()
Deprecated. As of release 0.96 (HBASE-9508). This will be removed in HBase 2.0.0. Use getTable() instead.

protected TableName getTable()
protected int getRetries()
protected int getScannerTimeout()
protected Configuration getConf()
protected Scan getScan()
protected ExecutorService getPool()
protected int getPrimaryOperationTimeout()
protected int getCaching()
protected long getTimestamp()
protected long getMaxResultSize()
protected boolean checkScanStopRow(byte[] endKey)
protected boolean nextScanner(int nbRows, boolean done) throws IOException
Throws:
IOException
@InterfaceAudience.Private protected org.apache.hadoop.hbase.client.ScannerCallableWithReplicas getScannerCallable(byte[] localStartKey, int nbRows)
protected void writeScanMetrics()
Publish the scan metrics. See Scan.setScanMetricsEnabled(boolean). This invocation clears the scan metrics; metrics are aggregated in the Scan instance.
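A short sketch of enabling and reading scan metrics from the client side. ScanMetricsSketch and dumpMetrics are illustrative names, and the Scan.getScanMetrics() call and countOfRPCcalls counter are assumptions to check against your client version:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

public class ScanMetricsSketch {
  public static void dumpMetrics(Table table) throws IOException {
    Scan scan = new Scan();
    scan.setScanMetricsEnabled(true);          // ask the client to collect metrics
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // consume results; metrics are aggregated as the scan progresses
      }
    }
    // writeScanMetrics() publishes aggregated metrics back onto the Scan;
    // retrieval below assumes Scan.getScanMetrics() exists in your release.
    ScanMetrics metrics = scan.getScanMetrics();
    if (metrics != null) {
      System.out.println("RPC calls: " + metrics.countOfRPCcalls.get());
    }
  }
}
```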
public Result next() throws IOException
Grab the next row's worth of values.
Specified by: next in interface ResultScanner
Throws:
IOException - e

public int getCacheSize()
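A minimal sketch of the next()-until-null contract described above, written against the ResultScanner interface that ClientScanner implements; countRows is an illustrative helper:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;

public class NextLoopSketch {
  // Drain a scanner with next(): it returns one row per call and null once
  // the scan is exhausted, at which point the scanner should be closed.
  public static long countRows(ResultScanner scanner) throws IOException {
    long rows = 0;
    try {
      for (Result result = scanner.next(); result != null; result = scanner.next()) {
        rows++;
      }
    } finally {
      scanner.close();
    }
    return rows;
  }
}
```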
protected void loadCache() throws IOException
Contact the servers to load more Results in the cache.
Throws:
IOException
protected List<Result> getResultsToAddToCache(Result[] resultsFromServer, boolean heartbeatMessage) throws IOException
This method ensures all of our bookkeeping regarding partial results is kept up to date.
Parameters:
resultsFromServer - The array of Results returned from the server
heartbeatMessage - Flag indicating whether or not the response received from the server represented a complete response, or a heartbeat message that was sent to keep the client-server connection alive
Throws:
IOException
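From the application side, this partial-result bookkeeping only becomes visible when partial results are explicitly allowed on the Scan; otherwise the scanner stitches the pieces together before returning a row. A hedged sketch, assuming Scan.setAllowPartialResults(boolean) and Result.isPartial() are available in your release; PartialResultSketch is an illustrative name:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PartialResultSketch {
  // When partial results are allowed, wide rows may arrive in pieces instead of
  // being accumulated in the scanner's partialResults list before delivery.
  public static void scanWide(Table table) throws IOException {
    Scan scan = new Scan();
    scan.setAllowPartialResults(true);   // opt in to receiving partial Results
    scan.setMaxResultSize(1024 * 1024);  // size cap that can split a row across responses
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        // Result.isPartial() marks a piece of a row that has been split up.
        System.out.println(Bytes.toString(result.getRow()) + " partial=" + result.isPartial());
      }
    }
  }
}
```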
public void close()
Closes the scanner and releases any resources it has allocated.
Specified by: close in interface ResultScanner
protected static byte[] createClosestRowBefore(byte[] row)
Create the closest row before the specified row.
Parameters:
row -

public boolean renewLease()
Description copied from class: AbstractClientScanner
Allow the client to renew the scanner's lease on the server.
Specified by: renewLease in class AbstractClientScanner
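A hedged sketch of renewing the lease when per-row processing may outlast the scanner timeout, written against AbstractClientScanner where renewLease() is declared; slowProcess stands in for hypothetical long-running work:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.AbstractClientScanner;
import org.apache.hadoop.hbase.client.Result;

public class RenewLeaseSketch {
  // Renew the scanner's lease between rows so the server does not expire the
  // open scanner while the client is busy processing the previous row.
  public static void process(AbstractClientScanner scanner)
      throws IOException, InterruptedException {
    try {
      Result result;
      while ((result = scanner.next()) != null) {
        if (!scanner.renewLease()) {
          System.err.println("lease renewal failed; the scan may time out");
        }
        slowProcess(result); // hypothetical long-running work on the row
      }
    } finally {
      scanner.close();
    }
  }

  private static void slowProcess(Result result) throws InterruptedException {
    Thread.sleep(1000); // stand-in for slow downstream processing
  }
}
```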