package com.iteblog.data.hadoop;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;

import java.io.IOException;
import java.util.List;
import java.util.StringJoiner;

/**
 * For every row returned by the scan, emit the row key as the output key and
 * an HBase-shell-style dump of the row's cells as the output value.
 */
public class HBaseMapper extends TableMapper<Text, Text> {
    @Override
    protected void map(ImmutableBytesWritable key,
                       Result value,
                       Context context) throws IOException, InterruptedException {
        String rowKey = Bytes.toString(value.getRow());
        List<Cell> cells = value.listCells();
        StringJoiner stringJoiner = new StringJoiner("\n");
        for (Cell cell : cells) {
            // Cells share a backing byte array, so always honor offset/length.
            String family = Bytes.toString(cell.getFamilyArray(),
                    cell.getFamilyOffset(), cell.getFamilyLength());
            String qualifier = Bytes.toString(cell.getQualifierArray(),
                    cell.getQualifierOffset(), cell.getQualifierLength());
            String v = Bytes.toString(cell.getValueArray(),
                    cell.getValueOffset(), cell.getValueLength());
            stringJoiner.add("column=" + family + ":" + qualifier
                    + ", timestamp=" + cell.getTimestamp() + ", value=" + v);
        }
        context.write(new Text(rowKey), new Text(stringJoiner.toString()));
    }
}
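The driver below hands scanning off to a custom com.iteblog.data.spark.SaltRangeTableInputFormat, whose source is not listed in this post. To make the flow easier to follow, here is a minimal, hypothetical sketch of what such an input format could look like: it extends TableInputFormat and expands the logical row range (SCAN_ROW_START .. SCAN_ROW_STOP) into one split per salt prefix, so every salt bucket is scanned in parallel. The A-Z prefix alphabet and the empty location hint are assumptions for illustration, not the original implementation:

package com.iteblog.data.spark;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableSplit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch, not the original implementation.
public class SaltRangeTableInputFormat extends TableInputFormat {
    @Override
    public List<InputSplit> getSplits(JobContext context) throws IOException {
        String start = context.getConfiguration().get(SCAN_ROW_START); // e.g. "1000"
        String stop = context.getConfiguration().get(SCAN_ROW_STOP);   // e.g. "1001"
        TableName table = TableName.valueOf(
                context.getConfiguration().get(INPUT_TABLE));
        List<InputSplit> splits = new ArrayList<>();
        // Assumed salt alphabet: one bucket per letter A..Z.
        for (char prefix = 'A'; prefix <= 'Z'; prefix++) {
            byte[] startRow = Bytes.toBytes(prefix + "-" + start);
            byte[] stopRow = Bytes.toBytes(prefix + "-" + stop);
            // Empty location hint for simplicity; the inherited record reader
            // narrows the configured Scan to [startRow, stopRow) per split.
            splits.add(new TableSplit(table, startRow, stopRow, ""));
        }
        return splits;
    }
}

With a start key of "1000" and a stop key of "1001", each split covers exactly the "<prefix>-1000-*" keys of one bucket, which matches the A-/B-/C- prefixed rows in the job output further down.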
package com.iteblog.data.hadoop;

import com.iteblog.data.spark.SaltRangeTableInputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;

public class Hadoop {
    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        // The ZooKeeper quorum is a host address, not an http(s) URL.
        conf.set("hbase.zookeeper.quorum", "www.iteblog.com:2181");
        // Logical key range; SaltRangeTableInputFormat expands it to one
        // scan per salt prefix.
        conf.set(TableInputFormat.SCAN_ROW_START, "1000");
        conf.set(TableInputFormat.SCAN_ROW_STOP, "1001");

        Job job = Job.getInstance(conf);
        job.setJobName("iteblog_HBase");
        job.setJarByClass(Hadoop.class);
        // A single reducer (the default identity Reducer) merges all mapper
        // output into one result file.
        job.setNumReduceTasks(1);
        TableMapReduceUtil.initTableMapperJob("iteblog",
                new Scan(),
                HBaseMapper.class,
                Text.class,
                Text.class,
                job, true, SaltRangeTableInputFormat.class);
        FileOutputFormat.setOutputPath(job, new Path("hdfs://www.iteblog.com:8020/result/"));
        LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
hadoop jar ~/hbase-1.0-SNAPSHOT.jar com.iteblog.data.hadoop.Hadoop

[root@master hadoop-2.7.7]# hadoop fs -ls /result
Found 2 items
-rw-r--r-- 1 iteblog supergroup 0 2019-02-02 22:54 /result/_SUCCESS
-rw-r--r-- 1 iteblog supergroup 14442 2019-02-02 22:54 /result/part-r-00000
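The result file itself can be dumped with a standard HDFS command (this invocation is assumed here, not part of the original transcript):

[root@master hadoop-2.7.7]# hadoop fs -cat /result/part-r-00000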
A-1000-1550572395399 column=f:age, timestamp=1549091990253, value=54
column=f:uuid, timestamp=1549091990253, value=e9b10a9f-1218-43fd-bd01
A-1000-1550572413799 column=f:age, timestamp=1549092008575, value=4
column=f:uuid, timestamp=1549092008575, value=181aa91e-5f1d-454c-959c
A-1000-1550572414761 column=f:age, timestamp=1549092009531, value=33
column=f:uuid, timestamp=1549092009531, value=19aad8d3-621a-473c-8f9f
B-1000-1550572388491 column=f:age, timestamp=1549091983276, value=1
column=f:uuid, timestamp=1549091983276, value=cf720efe-2ad2-48d6-81b8
B-1000-1550572392922 column=f:age, timestamp=1549091987701, value=7
column=f:uuid, timestamp=1549091987701, value=8a047118-e130-48cb-adfe
B-1000-1550572424681 column=f:age, timestamp=1549092019451, value=57
column=f:uuid, timestamp=1549092019451, value=4217ab00-7cb9-4a81-bf29
C-1000-1550572390493 column=f:age, timestamp=1549091985284, value=89
column=f:uuid, timestamp=1549091985284, value=414d7df1-1925-4aaa-8298