
Hadoop Default Sorting

Input data:

1       3
1       2
1       1
3       3
3       2
2       2
2       1
3       1

After sorting, only the first column (the map output key) is ordered; the second column is not. To sort the second column as well, see Hadoop secondary sorting (二次排序). A short sketch after the sorted output shows what drives the key ordering.

1 3
1 2
1 1
2 2
2 1
3 3
3 2
3 1
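
The keys come back in ascending numeric order because the shuffle sorts map output keys by the key class's natural ordering: LongWritable implements WritableComparable, and its compareTo compares the wrapped long values. A minimal sketch of that ordering (the class name KeyOrderDemo is only for illustration and is not part of the job):

import org.apache.hadoop.io.LongWritable;

public class KeyOrderDemo {
    public static void main(String[] args) {
        LongWritable a = new LongWritable(1);
        LongWritable b = new LongWritable(3);
        // Prints a negative number: 1 orders before 3, matching the sorted output above.
        System.out.println(a.compareTo(b));
    }
}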

The code is as follows:

package com.hadoop.test.defaultsort;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class DefaultSortMapper extends Mapper<LongWritable, Text, LongWritable, LongWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Each input line is expected to hold two tab-separated numbers.
        String line = value.toString();
        String[] arr = line.split("\t");
        if (arr.length == 2) {
            // Emit the first column as the key and the second as the value;
            // the framework sorts map output by key during the shuffle.
            context.write(new LongWritable(Long.parseLong(arr[0])),
                    new LongWritable(Long.parseLong(arr[1])));
        }
    }
}

package com.hadoop.test.defaultsort;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Reducer;

public class Sortreducer extends Reducer<LongWritable, LongWritable, LongWritable, LongWritable> {

    @Override
    protected void reduce(LongWritable key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        // Keys arrive in sorted (ascending) order; the values for a key are not sorted.
        // Write each (key, value) pair back out unchanged.
        for (LongWritable value : values) {
            context.write(key, value);
        }
    }
}



package com.hadoop.test.defaultsort;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Default key sorting: map output keys are sorted in ascending order.
 * @author 小明
 */
public class JobMain {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        Job job = new Job(configuration, "default-sort");
        job.setJarByClass(JobMain.class);

        job.setMapperClass(DefaultSortMapper.class);
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(LongWritable.class);

        job.setReducerClass(Sortreducer.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(LongWritable.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));

        // Delete the output directory if it already exists, so the job can be rerun.
        Path outputDir = new Path(args[1]);
        FileSystem fs = FileSystem.get(configuration);
        if (fs.exists(outputDir)) {
            fs.delete(outputDir, true);
        }
        FileOutputFormat.setOutputPath(job, outputDir);

        job.waitForCompletion(true);
    }
}
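
A minimal way to run the job, assuming the three classes are packaged into a jar named default-sort.jar (the jar name and the HDFS paths below are placeholders, not from the original post; args[0] is the input path and args[1] is the output path):

hadoop jar default-sort.jar com.hadoop.test.defaultsort.JobMain /input/sort /output/sort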


