package com.hadoop.mapreduce;

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

/**
 * Input format used to index LZO-compressed files. Each record pairs the
 * {@link Path} of the file being indexed with a {@link LongWritable} block
 * offset within it, as produced by {@link LzoSplitRecordReader}.
 */
public class LzoSplitInputFormat extends FileInputFormat<Path, LongWritable> {

    @Override
    public RecordReader<Path, LongWritable> createRecordReader(InputSplit inputSplit,
            TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        // Delegate all record reading to the companion LzoSplitRecordReader.
        return new LzoSplitRecordReader();
    }

    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        // Force the files to be unsplittable, because indexing requires seeing
        // all the compressed blocks in succession.
        return false;
    }
}
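
For context, a minimal driver sketch showing how this input format could be wired into a map-only indexing job. This is illustrative, not the project's actual indexer: the driver class name and the argument handling are assumptions, `LzoSplitRecordReader` is assumed to exist elsewhere in the package, and the identity `Mapper` plus zero reducers simply pass each (path, offset) record straight to the output format.

```java
package com.hadoop.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Hypothetical driver: runs LzoSplitInputFormat in a map-only job.
public class LzoIndexDriver {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "lzo-index");
        job.setJarByClass(LzoIndexDriver.class);

        // The base Mapper is an identity mapper: each (Path, LongWritable)
        // record from LzoSplitInputFormat is written through unchanged.
        job.setMapperClass(Mapper.class);
        // Map-only: offsets are already emitted in file order, so no
        // shuffle or reduce phase is needed.
        job.setNumReduceTasks(0);

        job.setInputFormatClass(LzoSplitInputFormat.class);
        job.setOutputKeyClass(Path.class);
        job.setOutputValueClass(LongWritable.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
```

With zero reducers the mapper's output goes directly to the output format without intermediate serialization, which is why a non-Writable key type such as `Path` is workable here.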