[TOC]
# Prepare the data
~~~
Order_0000001,pd001,222.8
Order_0000001,pd005,25.8
Order_0000002,pd005,325.8
Order_0000002,pd003,522.8
Order_0000002,pd004,122.4
Order_0000003,pd001,222.8
Order_0000003,pd001,322.8
~~~
![](https://box.kancloud.cn/3cb59971cd2977453d2fb5ea5f490ba2_1874x1106.png)
Each record contains an order id, a product id, and a transaction amount.
The goal is to extract the top-1 (and, more generally, the top-N) records by amount for each order.
This requires a grouping step:
1. Use a bean holding the order id and the transaction amount as the map output key, so that all records read in the map phase are partitioned by order id and sorted by amount before reaching the reducers.
2. On the reduce side, use a GroupingComparator to gather the key-value pairs with the same order id into one group; the first key of each group is then the maximum. For the sample data above, for example, the group for Order_0000002 arrives sorted as 522.8, 325.8, 122.4, so the first key, 522.8, is that order's maximum.
# Top-1 code
**OrderBean**
~~~
package com.top;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class OrderBean implements WritableComparable<OrderBean> {
private Text itemid;
private DoubleWritable amount;
public OrderBean() {
}
public OrderBean(Text itemid, DoubleWritable amount) {
set(itemid, amount);
}
public void set(Text itemid, DoubleWritable amount) {
this.itemid = itemid;
this.amount = amount;
}
public Text getItemid() {
return itemid;
}
public DoubleWritable getAmount() {
return amount;
}
@Override
public int compareTo(OrderBean o) {
//compare by order id first
int cmp = this.itemid.compareTo(o.getItemid());
//if the order ids are the same, compare by amount
if (cmp == 0) {
//the minus sign makes the amount sort in descending order
cmp = -this.amount.compareTo(o.getAmount());
}
return cmp;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeUTF(itemid.toString());
out.writeDouble(amount.get());
}
@Override
public void readFields(DataInput in) throws IOException {
String readUTF = in.readUTF();
double readDouble = in.readDouble();
this.itemid = new Text(readUTF);
this.amount = new DoubleWritable(readDouble);
}
@Override
public String toString() {
return "OrderBean{" +
"itemid=" + itemid +
", amount=" + amount +
'}';
}
}
~~~
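To see the ordering that compareTo() produces, the following standalone sketch (a hypothetical OrderBeanSortDemo class, not part of the job) sorts a few beans from the sample data locally; it only assumes the OrderBean above and hadoop-common on the classpath.
~~~
package com.top;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class OrderBeanSortDemo {
    public static void main(String[] args) {
        List<OrderBean> beans = new ArrayList<>();
        beans.add(new OrderBean(new Text("Order_0000001"), new DoubleWritable(25.8)));
        beans.add(new OrderBean(new Text("Order_0000002"), new DoubleWritable(325.8)));
        beans.add(new OrderBean(new Text("Order_0000002"), new DoubleWritable(522.8)));
        beans.add(new OrderBean(new Text("Order_0000001"), new DoubleWritable(222.8)));
        // Collections.sort uses OrderBean.compareTo: order id ascending, amount descending
        Collections.sort(beans);
        for (OrderBean bean : beans) {
            System.out.println(bean);
        }
        // expected order: Order_0000001 222.8, 25.8, then Order_0000002 522.8, 325.8
    }
}
~~~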
**ItemIdPartitioner**
~~~
package com.top;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;
public class ItemIdPartitioner extends Partitioner<OrderBean, NullWritable> {
@Override
public int getPartition(OrderBean key, NullWritable nullWritable, int numPartitions) {
//same trick as the default HashPartitioner source: mask the sign bit so the index is non-negative, and records with the same order id always go to the same partition
return (key.getItemid().hashCode() & Integer.MAX_VALUE) % numPartitions;
}
}
~~~
**ItemidGroupingComparator**
~~~
package com.top;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
public class ItemidGroupingComparator extends WritableComparator {
protected ItemidGroupingComparator() {
//must call super() here, passing the class whose instances will be compared
super(OrderBean.class, true);
}
//the framework passes in two of the key objects defined above, i.e. two OrderBeans here
@Override
public int compare(WritableComparable a, WritableComparable b) {
//cast them back to OrderBean
OrderBean abean = (OrderBean) a;
OrderBean bbean = (OrderBean) b;
//if the two beans have the same order id, they belong to the same group
return abean.getItemid().compareTo(bbean.getItemid());
}
}
~~~
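As a quick sanity check, this hypothetical GroupingDemo class (not part of the job; it must sit in the same com.top package so the protected constructor is visible) shows that two beans of the same order compare as different for sorting but as equal for grouping:
~~~
package com.top;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
public class GroupingDemo {
    public static void main(String[] args) {
        OrderBean a = new OrderBean(new Text("Order_0000002"), new DoubleWritable(522.8));
        OrderBean b = new OrderBean(new Text("Order_0000002"), new DoubleWritable(325.8));
        // sort comparator (compareTo): negative, so the larger amount sorts first
        System.out.println("sort:     " + a.compareTo(b));
        // grouping comparator: 0, so both keys land in the same reduce() call
        System.out.println("grouping: " + new ItemidGroupingComparator().compare(a, b));
    }
}
~~~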
**TopOne**
~~~
package com.top;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;
import java.io.IOException;
public class TopOne {
public static class TopOneMapper extends Mapper<LongWritable, Text, OrderBean, NullWritable> {
OrderBean bean = new OrderBean();
// Text itemid = new Text();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString();
String[] fields = StringUtils.split(line, ',');
bean.set(new Text(fields[0]), new DoubleWritable(Double.parseDouble(fields[2])));
context.write(bean, NullWritable.get());
}
}
public static class TopOneReducer extends Reducer<OrderBean, NullWritable, OrderBean, NullWritable> {
@Override
protected void reduce(OrderBean key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
context.write(key, NullWritable.get());
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
job.setJarByClass(TopOne.class);
job.setMapperClass(TopOneMapper.class);
job.setReducerClass(TopOneReducer.class);
job.setOutputKeyClass(OrderBean.class);
job.setOutputValueClass(NullWritable.class);
FileInputFormat.setInputPaths(job, new Path("/Users/jdxia/Desktop/website/hdfs/index/input"));
//delete the output directory if it already exists
Path out = new Path("/Users/jdxia/Desktop/website/hdfs/index/output/");
FileSystem fileSystem = FileSystem.get(conf);
if (fileSystem.exists(out)) {
fileSystem.delete(out, true);
}
//tell the framework where to write the results
FileOutputFormat.setOutputPath(job, out);
//register the GroupingComparator
job.setGroupingComparatorClass(ItemidGroupingComparator.class);
job.setPartitionerClass(ItemIdPartitioner.class);
job.setNumReduceTasks(1);
job.waitForCompletion(true);
}
}
~~~
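With the sample input above, the single reducer emits one line per order; formatted by OrderBean.toString(), the output should look roughly like this:
~~~
OrderBean{itemid=Order_0000001, amount=222.8}
OrderBean{itemid=Order_0000002, amount=522.8}
OrderBean{itemid=Order_0000003, amount=322.8}
~~~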
# Top-N code
Add the following to OrderBean:
~~~
@Override
public boolean equals(Object o) {
//two beans are considered equal when they belong to the same order
if (!(o instanceof OrderBean)) {
return false;
}
OrderBean bean = (OrderBean) o;
return bean.getItemid().equals(this.itemid);
}
~~~
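Since equals() now depends only on itemid, it is usually worth overriding hashCode() consistently (the standard Java equals/hashCode contract); a minimal sketch:
~~~
@Override
public int hashCode() {
//keep consistent with equals(): both depend only on itemid
return itemid.hashCode();
}
~~~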
Modify the driver class as follows:
~~~
package com.top;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;
import java.io.IOException;
public class TopN {
static class TopNMapper extends Mapper<LongWritable, Text, OrderBean, OrderBean> {
OrderBean v = new OrderBean();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString();
String[] fields = StringUtils.split(line, ',');
v.set(new Text(fields[0]), new DoubleWritable(Double.parseDouble(fields[2])));
context.write(v, v);
}
}
static class TopNReducer extends Reducer<OrderBean, OrderBean, NullWritable, OrderBean> {
int topn = 1;
int count = 0;
@Override
protected void setup(Context context) throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
topn = Integer.parseInt(conf.get("topn"));
}
@Override
protected void reduce(OrderBean key, Iterable<OrderBean> values, Context context) throws IOException, InterruptedException {
count = 0;
for (OrderBean bean : values) {
if ((count++) == topn) {
return;
}
context.write(NullWritable.get(), bean);
}
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
// to read the value from a config file instead, do it like this:
// conf.addResource("userconfig.xml");
// System.out.println(conf.get("top.n"));
// here we simply hard-code top 2
conf.set("topn", "2");
Job job = Job.getInstance(conf);
job.setJarByClass(TopN.class);
job.setMapperClass(TopNMapper.class);
job.setReducerClass(TopNReducer.class);
//map output is <OrderBean, OrderBean>, final output is <NullWritable, OrderBean>
job.setMapOutputKeyClass(OrderBean.class);
job.setMapOutputValueClass(OrderBean.class);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(OrderBean.class);
FileInputFormat.setInputPaths(job, new Path("/Users/jdxia/Desktop/website/hdfs/index/input"));
//delete the output directory if it already exists
Path out = new Path("/Users/jdxia/Desktop/website/hdfs/index/output/");
FileSystem fileSystem = FileSystem.get(conf);
if (fileSystem.exists(out)) {
fileSystem.delete(out, true);
}
//tell the framework where to write the results
FileOutputFormat.setOutputPath(job, out);
//register the GroupingComparator
job.setGroupingComparatorClass(ItemidGroupingComparator.class);
job.setPartitionerClass(ItemIdPartitioner.class);
job.setNumReduceTasks(1);
job.waitForCompletion(true);
}
}
~~~
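With topn set to 2 and the same sample input, the output (one OrderBean per line; the NullWritable key is not printed by TextOutputFormat) should look roughly like this:
~~~
OrderBean{itemid=Order_0000001, amount=222.8}
OrderBean{itemid=Order_0000001, amount=25.8}
OrderBean{itemid=Order_0000002, amount=522.8}
OrderBean{itemid=Order_0000002, amount=325.8}
OrderBean{itemid=Order_0000003, amount=322.8}
OrderBean{itemid=Order_0000003, amount=222.8}
~~~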