@InterfaceAudience.Private public abstract class Compactor extends Object
Modifier and Type | Class and Description |
---|---|
static interface |
Compactor.CellSink
TODO: Replace this with
CellOutputStream when StoreFile.Writer uses cells. |
protected static class |
Compactor.FileDetails
The sole reason this class exists is that Java has no ref/out/pointer parameters.
|
Modifier and Type | Field and Description |
---|---|
protected Compression.Algorithm |
compactionCompression |
protected org.apache.hadoop.conf.Configuration |
conf |
protected CompactionProgress |
progress |
protected Store |
store |
Modifier and Type | Method and Description |
---|---|
protected void |
abortWriter(StoreFile.Writer writer) |
abstract List<org.apache.hadoop.fs.Path> |
compact(CompactionRequest request)
Do a minor/major compaction on an explicit set of storefiles from a Store.
|
List<org.apache.hadoop.fs.Path> |
compactForTesting(Collection<StoreFile> filesToCompact,
boolean isMajor)
Compact a list of files for testing.
|
protected List<StoreFileScanner> |
createFileScanners(Collection<StoreFile> filesToCompact) |
protected InternalScanner |
createScanner(Store store,
List<StoreFileScanner> scanners,
ScanType scanType,
long smallestReadPoint,
long earliestPutTs) |
protected Compactor.FileDetails |
getFileDetails(Collection<StoreFile> filesToCompact,
boolean calculatePutTs) |
CompactionProgress |
getProgress() |
protected boolean |
performCompaction(InternalScanner scanner,
Compactor.CellSink writer,
long smallestReadPoint) |
protected InternalScanner |
postCreateCoprocScanner(CompactionRequest request,
ScanType scanType,
InternalScanner scanner) |
protected InternalScanner |
preCreateCoprocScanner(CompactionRequest request,
ScanType scanType,
long earliestPutTs,
List<StoreFileScanner> scanners) |
protected long |
setSmallestReadPoint() |
protected CompactionProgress progress
protected org.apache.hadoop.conf.Configuration conf
protected Store store
protected Compression.Algorithm compactionCompression
public abstract List<org.apache.hadoop.fs.Path> compact(CompactionRequest request) throws IOException
request - the requested compaction
Throws: IOException
public List<org.apache.hadoop.fs.Path> compactForTesting(Collection<StoreFile> filesToCompact, boolean isMajor) throws IOException
Creates a CompactionRequest to pass to compact(CompactionRequest).
filesToCompact - the files to compact. These are used as the compaction selection for the generated CompactionRequest.
isMajor - true to major compact (prune all deletes, max versions, etc.)
Throws: IOException
public CompactionProgress getProgress()
protected Compactor.FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean calculatePutTs) throws IOException
IOException
protected List<StoreFileScanner> createFileScanners(Collection<StoreFile> filesToCompact) throws IOException
IOException
protected long setSmallestReadPoint()
protected InternalScanner preCreateCoprocScanner(CompactionRequest request, ScanType scanType, long earliestPutTs, List<StoreFileScanner> scanners) throws IOException
IOException
protected InternalScanner postCreateCoprocScanner(CompactionRequest request, ScanType scanType, InternalScanner scanner) throws IOException
IOException
protected boolean performCompaction(InternalScanner scanner, Compactor.CellSink writer, long smallestReadPoint) throws IOException
IOException
protected void abortWriter(StoreFile.Writer writer) throws IOException
IOException
protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException
scanners - Store file scanners.
scanType - Scan type.
smallestReadPoint - Smallest MVCC read point.
earliestPutTs - Earliest put across all files.
Throws: IOException
Copyright © 2013 The Apache Software Foundation. All rights reserved.